| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
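Each row below pairs repository metadata with a source file split into prefix, middle, and suffix spans, the layout used for fill-in-the-middle (FIM) training. A minimal sketch of reading one record back into a full file, assuming the dump was produced with the Hugging Face `datasets` library; `"your/fim-code-dataset"` is a hypothetical dataset ID standing in for the actual source of this dump:

```python
from datasets import load_dataset

# Hypothetical dataset ID; substitute the real source of this dump.
ds = load_dataset("your/fim-code-dataset", split="train")
row = ds[0]

# The original file is simply prefix + middle + suffix.
full_source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(full_source[:200])
```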
| trungdtbk/faucet | faucet/valve_table.py | Python | apache-2.0 | 11,432 | 0.001924 |
"""Abstraction of an OF table."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import hashlib
import struct
from faucet import valve_of
from faucet.faucet_pipeline import ValveTableConfig
class ValveTable: # pylint: disable=too-many-arguments,too-many-instance-attributes
"""Wrapper for an OpenFlow table."""
def __init__(self, name, table_config,
flow_cookie, notify_flow_removed=False, next_tables=None):
self.name = name
self.table_config = table_config
self.table_id = self.table_config.table_id
self.set_fields = self.table_config.set_fields
self.exact_match = self.table_config.exact_match
self.match_types = None
self.metadata_match = self.table_config.metadata_match
self.metadata_write = self.table_config.metadata_write
if next_tables:
self.next_tables = next_tables
else:
self.next_tables = []
if self.table_config.match_types:
self.match_types = {}
for field, mask in self.table_config.match_types:
self.match_types[field] = mask
self.flow_cookie = flow_cookie
self.notify_flow_removed = notify_flow_removed
def goto(self, next_table):
"""Add goto next table instruction."""
assert next_table.name in self.table_config.next_tables, (
'%s not configured as next table in %s' % (
next_table.name, self.name))
return valve_of.goto_table(next_table)
def goto_this(self):
"""Add goto instruction to this table."""
return valve_of.goto_table(self)
def goto_miss(self, next_table):
"""Add miss goto table instruction."""
assert next_table.name == self.table_config.miss_goto, (
'%s not configured as miss table in %s' % (
next_table.name, self.name))
return valve_of.goto_table(next_table)
@staticmethod
def set_field(**kwds):
"""Return set field action."""
# raise exception if unknown set field.
valve_of.match_from_dict(kwds)
return valve_of.set_field(**kwds)
def set_external_forwarding_requested(self):
"""Set field for external forwarding requested."""
return self.set_field(**{valve_of.EXTERNAL_FORWARDING_FIELD: valve_of.PCP_EXT_PORT_FLAG})
def set_no_external_forwarding_requested(self):
"""Set field for no external forwarding reques
|
ted."""
return self.set_field(**{valve_of.EXTERNAL_FORWARDING_FIELD: valve_of.PCP_NONEXT_PORT_FLAG})
def set_vlan_vid(self, vlan_vid):
"""Set VLAN VID with VID_PRESENT flag set.
Args:
vlan_vid (int): VLAN VID
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionSetField: set VID with VID_PRESENT.
"""
return self.set_field(vlan_vid=valve_of.vid_present(vlan_vid))
# TODO: verify actions
@staticmethod
@functools.lru_cache(maxsize=1024)
def match(in_port=None, vlan=None, # pylint: disable=too-many-arguments,too-many-locals
eth_type=None, eth_src=None, eth_dst=None, eth_dst_mask=None,
icmpv6_type=None, nw_proto=None, nw_dst=None, metadata=None,
metadata_mask=None, vlan_pcp=None, udp_src=None, udp_dst=None):
"""Compose an OpenFlow match rule."""
match_dict = valve_of.build_match_dict(
in_port, vlan, eth_type, eth_src,
eth_dst, eth_dst_mask, icmpv6_type,
nw_proto, nw_dst, metadata, metadata_mask,
vlan_pcp, udp_src, udp_dst)
return valve_of.match(match_dict)
def _verify_flowmod(self, flowmod):
match_fields = flowmod.match.items()
if valve_of.is_flowdel(flowmod):
if self.table_id != valve_of.ofp.OFPTT_ALL:
for match_type, match_field in match_fields:
assert match_type in self.match_types, (
'%s match in table %s' % (match_type, self.name))
else:
# TODO: ACL builder should not use ALL table.
if self.table_id == valve_of.ofp.OFPTT_ALL:
return
assert not (flowmod.priority == 0 and match_fields), (
'default flow cannot have matches on table %s: %s' % (self.name, flowmod))
for match_type, match_field in match_fields:
assert match_type in self.match_types, (
'%s match in table %s' % (match_type, self.name))
config_mask = self.match_types[match_type]
flow_mask = isinstance(match_field, tuple)
assert config_mask or (not config_mask and not flow_mask), (
'%s configured mask %s but flow mask %s in table %s (%s)' % (
match_type, config_mask, flow_mask, self.name, flowmod))
if self.exact_match and match_fields:
assert len(self.match_types) == len(match_fields), (
'exact match table %s matches %s do not match flow matches %s (%s)' % (
self.name, self.match_types, match_fields, flowmod))
def _trim_actions(self, actions):
new_actions = []
pending_actions = []
for action in actions:
if action.type in (valve_of.ofp.OFPAT_GROUP, valve_of.ofp.OFPAT_OUTPUT):
new_actions.extend(pending_actions)
new_actions.append(action)
pending_actions = []
else:
pending_actions.append(action)
set_fields = {action.key for action in new_actions if valve_of.is_set_field(action)}
if self.table_id != valve_of.ofp.OFPTT_ALL and set_fields:
assert set_fields.issubset(self.set_fields), (
'unexpected set fields %s configured %s in %s' % (set_fields, self.set_fields, self.name))
return new_actions
def _trim_inst(self, inst):
"""Discard actions on packets that are not output and not goto another table."""
inst_types = {instruction.type for instruction in inst}
if valve_of.ofp.OFPIT_GOTO_TABLE in inst_types:
return inst
new_inst = []
for instruction in inst:
if instruction.type == valve_of.ofp.OFPIT_APPLY_ACTIONS:
instruction.actions = self._trim_actions(instruction.actions)
new_inst.append(instruction)
return new_inst
def flowmod(self, match=None, priority=None, # pylint: disable=too-many-arguments
inst=None, command=valve_of.ofp.OFPFC_ADD, out_port=0,
out_group=0, hard_timeout=0, idle_timeout=0, cookie=None):
"""Helper function to construct a flow mod message with cookie."""
if priority is None:
priority = 0 # self.dp.lowest_priority
if not match:
match = self.match()
if inst is None:
inst = []
if cookie is None:
cookie = self.flow_cookie
flags = 0
if self.notify_flow_removed:
flags = valve_of.ofp.OFPFF_SEND_FLOW_REM
if inst:
inst = self._trim_inst(inst)
flowmod = valve_of.flowmod(
cookie,
command,
self.table_id,
priority,
out_port,
out_group,
match,
inst,
hard_timeout,
idle_timeout,
flags)
self._verify_flowmod(flowmod)
return flowmod
def flo
| nirdizati/nirdizati-runtime | PredictiveMethods/RemainingTime/batch/PredictiveMonitor.py | Python | lgpl-3.0 | 3,958 | 0.002527 |
"""
Copyright (c) 2016-2017 The Nirdizati Project.
This file is part of "Nirdizati".
"Nirdizati" is free software; you can redistribute it and
|
/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
"Ni
|
rdizati" is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this program.
If not, see <http://www.gnu.org/licenses/lgpl.html>.
"""
from PredictiveModel import PredictiveModel
import numpy as np
import os.path
class PredictiveMonitor():
def __init__(self, event_nr_col, case_id_col, encoder_kwargs, cls_kwargs, cls_method="rf"):
self.event_nr_col = event_nr_col
self.case_id_col = case_id_col
self.cls_method = cls_method
self.encoder_kwargs = encoder_kwargs
self.cls_kwargs = cls_kwargs
self.models = {}
self.predictions = {}
def train(self, dt_train, max_events=None):
max_events = np.max(dt_train[self.event_nr_col]) if max_events is None else max_events
self.max_events = max_events
for nr_events in xrange(1, max_events + 1):
pred_model = PredictiveModel(nr_events=nr_events, case_id_col=self.case_id_col,
cls_method=self.cls_method,
encoder_kwargs=self.encoder_kwargs, cls_kwargs=self.cls_kwargs)
pred_model.fit(dt_train)
self.models[nr_events] = pred_model
def test(self, dt_test, output_filename=None, outfile_mode='w'):
results = self._test_single_conf(dt_test)
self.predictions = results
if output_filename is not None:
if not os.path.isfile(output_filename):
outfile_mode = 'w'
with open(output_filename, outfile_mode) as fout:
if outfile_mode == 'w':
fout.write("case_id,ground_truth,predictions_true,nr_prefixes\n")
for item in self.predictions:
fout.write("%s,%.4f,%s,%s\n" % (
item['case_name'], item['ground_truth'], item['prediction'], item['nr_events']))
def _test_single_conf(self, dt_test):
results = []
case_names_unprocessed = set(dt_test[self.case_id_col].unique())
max_events = min(np.max(dt_test[self.event_nr_col]), np.max(self.models.keys()))
nr_events = 1
# make predictions until there is at least one event ahead
while len(case_names_unprocessed) > 0 and nr_events < max_events:
# prepare test set
dt_test = dt_test[dt_test[self.case_id_col].isin(case_names_unprocessed)]
if len(dt_test[dt_test[self.event_nr_col] >= nr_events]) == 0: # all cases are shorter than nr_events
break
elif nr_events not in self.models:
nr_events += 1
continue
# select relevant model
pred_model = self.models[nr_events]
# predict
predictions_proba = pred_model.predict_proba(dt_test)
predictions_proba[predictions_proba < 0] = 0 # if predicted remaining time is negative, then make it zero
# predictions_proba = np.round(predictions_proba,decimals=4)
for line in range(len(predictions_proba)):
results.append({"case_name": pred_model.test_case_names[line],
"prediction": predictions_proba[line],
"ground_truth": pred_model.test_y[line],
"nr_events": nr_events})
nr_events += 1
return results
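The monitor above trains one model per prefix length and replays a test log prefix by prefix. A hypothetical usage sketch (Python 2, matching the sample's `xrange`); the column names, file names, and classifier kwargs are assumptions, not values from the original project:

```python
import pandas as pd

# Hypothetical event logs: one row per event, with case-ID and event-number columns.
dt_train = pd.read_csv("train_log.csv")
dt_test = pd.read_csv("test_log.csv")

monitor = PredictiveMonitor(
    event_nr_col="event_nr", case_id_col="case_id",
    encoder_kwargs={}, cls_kwargs={"n_estimators": 100}, cls_method="rf")
monitor.train(dt_train, max_events=10)   # one model per prefix length 1..10
monitor.test(dt_test, output_filename="predictions.csv")
```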
| Nickil21/Indeed-ML-codesprint-2017 | source/data_preprocessing.py | Python | mit | 776 | 0.006443 |
import re
from functools import reduce
from nltk import word_tokenize
from nltk.corpus import stopwords
stop = set(stopwords.words("english"))
repls = ("(", ""), (")", ""), ("'s", "")
def tokenize(text):
lev1 = re.sub("[!#*%:,.;&-]", "", text) # Remove specific chars
lev2 = re.sub(r'[^\x00-\x7f]', r' ', lev1) # Remove non-ASCII chars
lev3 = reduce(lambda a, kv: a.replace(*kv), repls, lev2) # Replace using functional approach
tokens = map(lambda word: word.lower(), word_tokenize(lev3)) # Lowercase strings
words = [word for word in tokens if word not in stop] # Select words not present in stopwords set
return words
def join_strings(x):
return " ".join(sorted(x)).strip()
| RCheungIT/phoenix | bin/queryserver.py | Python | apache-2.0 | 7,659 | 0.003395 |
#!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
#
# Script to handle launching the query server process.
#
# usage: queryserver.py [start|stop|makeWinServiceDesc] [-Dhadoop=configs]
#
import datetime
import getpass
import os
import os.path
import signal
import subprocess
import sys
import tempfile
try:
import daemon
daemon_supported = True
except ImportError:
# daemon script not supported on some platforms (windows?)
daemon_supported = False
import phoenix_utils
phoenix_utils.setPath()
command = None
args = sys.argv
if len(args) > 1:
if args[1] == 'start':
command = 'start'
elif args[1] == 'stop':
command = 'stop'
elif args[1] == 'makeWinServiceDesc':
command = 'makeWinServiceDesc'
if command:
# Pull off queryserver.py and the command
args = args[2:]
else:
# Just pull off queryserver.py
args = args[1:]
if os.name == 'nt':
args = subprocess.list2cmdline(args)
else:
import pipes # pipes module isn't available on Windows
args = " ".join([pipes.quote(v) for v in args])
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = phoenix_utils.hbase_conf_dir
hadoop_config_path = phoenix_utils.hadoop_conf
# TODO: add windows support
phoenix_file_basename = '%s-queryserver' % getpass.getuser()
phoenix_log_file = '%s.log' % phoenix_file_basename
phoenix_out_file = '%s.out' % phoenix_file_basename
phoenix_pid_file = '%s.pid' % phoenix_file_basename
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
sys.exit(-1)
hbase_env = {}
if os.path.isfile(hbase_env_path):
p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
for x in p.stdout:
(k, _, v) = x.partition('=')
hbase_env[k.strip()] = v.strip()
java_home = hbase_env.get('JAVA_HOME') or os.getenv('JAVA_HOME')
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
tmp_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
opts = os.getenv('PHOENIX_QUERYSERVER_OPTS') or hbase_env.get('PHOENIX_QUERYSERVER_OPTS') or ''
pid_dir = os.getenv('PHOENIX_QUERYSERVER_PID_DIR') or hbase_env.get('HBASE_PID_DIR') or tmp_dir
log_dir = os.getenv('PHOENIX_QUERYSERVER_LOG_DIR') or hbase_env.get('HBASE_LOG_DIR') or tmp_dir
pid_file_path = os.path.join(pid_dir, phoenix_pid_file)
log_file_path = os.path.join(log_dir, phoenix_log_file)
out_file_path = os.path.join(log_dir, phoenix_out_file)
# " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
# " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
# The command is run through subprocess so environment variables are automatically inherited
java_cmd = '%(java)s -cp ' + hbase_config_path + os.pathsep + hadoop_config_path + os.pathsep + \
phoenix_utils.phoenix_client_jar + os.pathsep + phoenix_utils.phoenix_queryserver_jar + \
" -Dproc_phoenixserver" + \
" -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" -Dpsql.root.logger=%(root_logger)s" + \
" -Dpsql.log.dir=%(log_dir)s" + \
" -Dpsql.log.file=%(log_file)s" + \
" " + opts + \
" org.apache.phoenix.queryserver.server.Main " + args
if command == 'makeWinServiceDesc':
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA,console', 'log_dir': log_dir, 'log_file': phoenix_log_file}
slices = cmd.split(' ')
print "<service>"
print " <id>queryserver</id>"
print " <name>Phoenix Query Server</name>"
print " <description>This service runs the Phoenix Query Server.</description>"
print " <executable>%s</executable>" % slices[0]
print " <arguments>%s</arguments>" % ' '.join(slices[1:])
print "</service>"
sys.exit()
if command == 'start':
if not daemon_supported:
print >> sys.stderr, "daemon mode not supported on this platform"
sys.exit(-1)
# run in the background
d = os.path.dirname(out_file_path)
if not os.path.exists(d):
os.makedirs(d)
with open(out_file_path, 'a+') as out:
context = daemon.DaemonContext(
pidfile = daemon.PidFile(pid_file_path, 'Query Server already running, PID file found: %s' % pid_file_path),
stdout = out,
stderr = out,
)
print 'starting Query Server, logging to %s' % log_file_path
with context:
# this block is the main() for the forked daemon process
child = None
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': log_dir, 'log_file': phoenix_log_file}
# notify the child when we're killed
def handler(signum, frame):
if child:
child.send_signal(signum)
sys.exit(0)
signal.signal(signal.SIGTERM, handler)
print '%s launching %s' % (datetime.datetime.now(), cmd)
child = subprocess.Popen(cmd.split())
sys.exit(child.wait())
elif command == 'stop':
if not daemon_supported:
print >> sys.stderr, "daemon mode not supported on this platform"
sys.exit(-1)
if not os.path.exists(pid_file_path):
print >> sys.stderr, "no Query Server to stop because PID file not found, %s" % pid_file_path
sys.exit(0)
if not os.path.isfile(pid_file_path):
print >> sys.stderr, "PID path exists but is not a file! %s" % pid_file_path
sys.exit(1)
pid = None
with open(pid_file_path, 'r') as p:
pid = int(p.read())
if not pid:
sys.exit("cannot read PID file, %s" % pid_file_path)
print "stopping Query Server pid %s" % pid
with open(out_file_path, 'a+') as out:
print >> out, "%s terminating Query Server" % datetime.datetime.now()
os.kill(pid, signal.SIGTERM)
else:
# run in the foreground using defaults from log4j.properties
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
# Because shell=True is not set, we don't have to alter the environment
child = subprocess.Popen(cmd.split())
sys.exit(child.wait())
| ayypot/dillys-place | test.py | Python | bsd-2-clause | 3,208 | 0.063279 |
#!/usr/bin/python3
from lib.Element import *
from lib.Page import *
from lib.Site import *
import random
import shutil
import pprint
random.seed()
def makeChoice():
print( '\nSelect a lib to check: ' )
print( ' (1) Element.py' )
print( ' (2) Page.py' )
print( ' (3) Site.py' )
print( ' (e) Exit test.py' )
choice = input()
return choice
def makeDummyEle():
test_element = Element( 'text')
test_element.content = str( random.getrandbits( 32))
test_element.location = str( random.getrandbits( 8))
return test_element
def makeDummyPage( site):
name = random.getrandbits( 16)
print( 'Page name will be ' + str( name))
test_page = Page( site + str( name))
return test_page
def checkElement_py():
print( 'Testing instantiation...')
try:
test_element = Element( 'text')
except ElementError as why:
print( why.reason)
try:
test_element = Element( 'image')
except ElementError as why:
print( why.reason)
try:
test_element = Element( str( random.getrandbits( 32)))
print( 'Instantiation of unspecified Element type succeeded...FAILURE.')
except:
pass
print( 'Instantiation OK.')
print( 'Testing render()...')
try:
pprint.pprint( makeDummyEle().render())
except ElementError as why:
print( why.reason)
print( 'render() OK.')
print( 'Testing load()...')
try:
test_element.load( makeDummyEle().render())
except ElementError as why:
print( why.reason)
print( 'load() OK.')
def checkPage_py():
print( 'Testing instantiation...')
try:
test_page = makeDummyPage( '')
except PageError as why:
print( why.reason)
print( 'Instantiation OK.')
print( 'Testing add()...')
try:
test_element = makeDummyEle()
print( 'Adding element id ' + test_element.id)
test_page.add( test_element)
except PageError as why:
print( why.reason)
print( 'add() OK.')
print( 'Testing retrieve()...')
try:
print( test_page.retrieve( test_element.id))
except PageError as why:
print( why.reason)
print( 'retrieve() OK.')
print( 'Testing render()...')
try:
pprint.pprint( test_page.render())
except PageError as why:
print( why.reason)
print( 'render() OK.')
print( 'Testing remove()...')
try:
test_page.remove( test_element.id)
except PageError as why:
print( why.reason)
print( 'remove() OK.')
shutil.rmtree( test_page.name)
def checkSite_py():
print( 'Testing instantiation...')
try:
test_site = Site( "test_site")
except SiteError as why:
print( why.reason)
print( 'Instantiation OK.')
print( 'Testing add()...')
test_page = makeDummyPage( test_site.name + '/')
test_page.add( makeDummyEle())
test_site.add( test_page)
print( 'add() OK.')
print( 'Testing render()...')
test_site.render()
# for page in test_site.render():
# pprint.pprint( test_site.render()[page].render())
print( 'render() OK.')
print( 'Testing remove()...')
try:
test_site.remove( test_page.name)
except SiteError as why:
print( why.reason)
print( 'remove() OK.')
checking = True
while checking == True:
choice = makeChoice()
if choice == '1':
checkElement_py()
elif choice == '2':
checkPage_py()
elif choice == '3':
checkSite_py()
elif choice == 'e':
checking = False
else:
print( 'ERROR: Given option invalid' )
| ntucker/django-user-accounts | account/forms.py | Python | mit | 7,903 | 0.002657 |
from __future__ import unicode_literals
import re
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth
from django.contrib.auth import get_user_model
from account.conf import settings
from account.hooks import hookset
from account.models import EmailAddress
from account.utils import get_user_lookup_kwargs
alnum_re = re.compile(r"^[\w\-\.\+]+$")
class SignupForm(forms.Form):
username = forms.CharField(
label=_("Username"),
max_length=30,
widget=forms.TextInput(),
required=True
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
password_confirm = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
email = forms.EmailField(
label=_("Email"),
widget=forms.TextInput(), required=True)
code = forms.CharField(
max_length=64,
required=False,
widget=forms.HiddenInput()
)
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(_("Usernames can only contain letters, numbers and the following special characters ./+/-/_"))
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": self.cleaned_data["username"]
})
qs = User.objects.filter(**lookup_kwargs)
if not qs.exists():
return self.cleaned_data["username"]
raise forms.ValidationError(_("This username is already taken. Please choose another."))
def clean_email(self):
value = self.cleaned_data["email"]
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
def clean(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
class LoginForm(forms.Form):
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
remember = forms.BooleanField(
label=_("Remember Me"),
required=False
)
user = None
def clean(self):
if self._errors:
return
user = auth.authenticate(**self.user_credentials())
if user:
if user.is_active:
self.user = user
else:
raise forms.ValidationError(_("This account is inactive."))
else:
raise forms.ValidationError(self.authentication_fail_message)
return self.cleaned_data
def user_credentials(self):
return hookset.get_user_credentials(self, self.identifier_field)
class LoginUsernameForm(LoginForm):
username = forms.CharField(label=_("Username"), max_length=30)
authentication_fail_message = _("The username and/or password you specified are not correct.")
identifier_field = "username"
def __init__(self, *args, **kwargs):
super(LoginUsernameForm, self).__init__(*args, **kwargs)
field_order = ["username", "password", "remember"]
if not OrderedDict or hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class LoginEmailForm(LoginForm):
email = forms.EmailField(label=_("Email"))
authentication_fail_message = _("The email address and/or password you specified are not correct.")
identifier_field = "email"
def __init__(self, *args, **kwargs):
super(LoginEmailForm, self).__init__(*args, **kwargs)
field_order = ["email", "password", "remember"]
if not OrderedDict or hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class LoginHybridForm(LoginForm):
username = forms.CharField(label=_("Username or Email"))
authentication_fail_message = _("The username or email address and/or password you specified are not correct.")
identifier_field = "username"
def __init__(self, *args, **kwargs):
super(LoginHybridForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = ["username", "password", "remember"]
class ChangePasswordForm(forms.Form):
password_current = forms.CharField(
label=_("Current Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean_password_current(self):
if not self.user.check_password(self.cleaned_data.get("password_current")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["password_current"]
def clean_password_new_confirm(self):
if "password_new" in self.cleaned_data and "password_new_confirm" in self.cleaned_data:
if self.cleaned_data["password_new"] != self.cleaned_data["password_new_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password_new_confirm"]
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
def clean_email(self):
value = self.cleaned_data["email"]
if not EmailAddress.objects.filter(email__iexact=value).exists():
raise forms.ValidationError(_("Email address can not be found."))
return value
class PasswordResetTokenForm(forms.Form):
password = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def clean_password_confirm(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must ty
|
pe the same password each time."))
return self.cleaned_data["password_confirm"]
class SettingsForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
timezone = forms.ChoiceField(
label=_("Timezone"),
choices=[("", "---------")] + settings.ACCOUNT_TIMEZONES,
required=False
)
if settings.USE_I18N:
language = forms.ChoiceField(
label=_("Language"),
choices=settings.ACCOUNT_LANGUAGES,
required=False
)
def clean_email(self):
value = self.cleaned_data["email"]
if self.initial.get("email") == value:
return value
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
| pamfilos/invenio | modules/websubmit/lib/functions/Create_Modify_Interface.py | Python | gpl-2.0 | 15,906 | 0.006035 |
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
import cgi
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.websubmit_functions.Retrieve_Data import Get_Field
from invenio.errorlib import register_exception
from invenio.htmlutils import escape_javascript_string
from invenio.messages import gettext_set_language, wash_language
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
Delete the text file after having read-in its value.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
to redefine a submission page for the modifications, but rely on
the elements already defined for the initial submission i.e. SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
workflow, after the functions that retrieves report number and
record id (Get_Report_Number, Get_Recid). Functions at step 2 are
the one executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
These field names are separated by"\n" or "+".
* "prefix" - some content displayed before the main
modification interface. Can contain HTML (i.e. needs to be
pre-escaped). The prefix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "suffix" - some content displayed after the main modification
interface. Can contain HTML (i.e. needs to be
pre-escaped). The suffix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "button_label" - the label for the "END" button.
* "button_prefix" - some content displayed before the button to
submit the form. Can contain HTML (i.e. needs to be
pre-escaped). The prefix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "dates_conversion" - by default, values interpreted as dates
are converted to their 'DD/MM/YYYY' format, whenever
possible. Set another value for a different behaviour
(eg. 'none' for no conversion)
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
The file referenced by 'fieldnameMBI' is usually generated from a
multiple select form field): users can then select one or several
fields to modify.
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
the Response element itself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
retrieve value either from the record, or from the submission
directory.
"""
ln = wash_language(form['ln'])
_ = gettext_set_language(ln)
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
prefix = ''
suffix = ''
end_button_label = 'END'
end_button_prefix = ''
date_conversion_setting = ''
if parameters.has_key('prefix'):
prefix = parameters['prefix']
if parameters.has_key('suffix'):
suffix = parameters['suffix']
if parameters.has_key('button_label') and parameters['button_label']:
end_button_label = parameters['button_label']
if parameters.has_key('button_prefix'):
end_button_prefix = parameters['button_prefix']
if parameters.has_key('dates_conversion'):
date_conversion_setting = parameters['dates_conversion']
# Path of file containing fields to modify
the_globals = {
'doctype'
| vivekanand1101/fedmsg | fedmsg/tests/common.py | Python | lgpl-2.1 | 1,482 | 0 |
import os
import socket
import fedmsg.config
from nose.tools.nontrivial import make_decorator
try:
import unittest2 as unittest
except ImportError:
import unittest
def load_config(name='fedmsg-test-config.py'):
here = os.path.sep.join(__file__.split(os.path.sep)[:-1])
test_config = os.path.sep.join([here, name])
config = fedmsg.config.load_config(
[],
"awesome",
filenames=[
test_config,
],
invalidate_cache=True
)
# Enable all of our test consumers so they can do their thing.
config['test_consumer_enabled'] = True
# TODO -- this appears everywhere and should be encapsulated in a func
# Massage the fedmsg config into the moksha config.
config['zmq_subscribe_endpoints'] = ','.join(
','.join(bunch) for bunch in config['endpoints'].values()
)
hub_name = "twisted.%s" % socket.gethostname().split('.', 1)[0]
config['zmq_publish_endpoints'] = ','.join(
config['endpoints'][hub_name]
)
return config
def requires_network(function):
""" Decorator to skip tests if FEDMSG_NETWORK is not in os.environ """
@make_decorator(function)
def decorated_function(*args, **kwargs):
""" Decorated function, actually does the work. """
if not os.environ.get('FEDMSG_NETWORK'):
raise unittest.SkipTest("Skip test since we don't have network")
return function(*args, **kwargs)
return decorated_function
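A hypothetical example of applying the decorator above: the test body runs only when `FEDMSG_NETWORK` is set in the environment, and is skipped otherwise:

```python
class TestNetworked(unittest.TestCase):
    @requires_network
    def test_hits_the_wire(self):
        # Only reached when FEDMSG_NETWORK is set; skipped otherwise.
        config = load_config()
        self.assertIn('zmq_publish_endpoints', config)
```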
| DailyActie/Surrogate-Model | 01-codes/deap-master/deap/tools/emo.py | Python | mit | 22,227 | 0.001575 |
from __future__ import division
import bisect
import math
import random
from collections import defaultdict
from itertools import chain
from operator import attrgetter, itemgetter
######################################
# Non-Dominated Sorting (NSGA-II) #
######################################
def selNSGA2(individuals, k, nd='standard'):
"""Apply NSGA-II selection operator on the *individuals*. Usually, the
size of *individuals* will be larger than *k* because any individual
present in *individuals* will appear in the returned list at most once.
Having the size of *individuals* equals to *k* will have no effect other
than sorting the population according to their front rank. The
list returned contains references to the input *individuals*. For more
details on the NSGA-II operator see [Deb2002]_.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param nd: Specify the non-dominated algorithm to use: 'standard' or 'log'.
:returns: A list of selected individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if nd == 'standard':
pareto_fronts = sortNondominated(individuals, k)
elif nd == 'log':
pareto_fronts = sortLogNondominated(individuals, k)
else:
raise Exception('selNSGA2: The choice of non-dominated sorting '
'method "{0}" is invalid.'.format(nd))
for front in pareto_fronts:
assignCrowdingDist(front)
chosen = list(chain(*pareto_fronts[:-1]))
k = k - len(chosen)
if k > 0:
sorted_front = sorted(pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=True)
chosen.extend(sorted_front[:k])
return chosen
def sortNondominated(individuals, k, first_front_only=False):
"""Sort the first *k* *individuals* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
individuals.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists), the first list includes
nondominated individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in individuals:
map_fit_ind[ind.fitness].append(ind)
fits = map_fit_ind.keys()
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1:]:
if fit_i.dominates(fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif fit_j.dominates(fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
# Rank the next front until all individuals are sorted or
# the given number of individual are sorted.
if not first_front_only:
N = min(len(individuals), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
def assignCrowdingDist(individuals):
"""Assign a crowding distance to each individual's fitness. The
crowding distance can be retrieve via the :attr:`crowding_dist`
attribute of each individual's fitness.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness.values)
for i in xrange(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += (next[0][i] - prev[0][i]) / norm
for i, dist in enumerate(distances):
individuals[i].fitness.crowding_dist = dist
def selTournamentDCD(individuals, k):
"""Tournament selection based on dominance (D) between two individuals, if
the two individuals do not interdominate the selection is made
based on crowding distance (CD). The *individuals* sequence length has to
be a multiple of 4. Starting from the beginning of the selected
individuals, two consecutive individuals will be different (assuming all
individuals in the input list are unique). Each individual from the input
list won't be selected more than twice.
This selection requires the individuals to have a :attr:`crowding_dist`
attribute, which can be set by the :func:`assignCrowdingDist` function.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
"""
def tourn(ind1, ind2):
if ind1.fitness.dominates(ind2.fitness):
return ind1
elif ind2.fitness.dominates(ind1.fitness):
return ind2
if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:
return ind2
elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:
return ind1
if random.random() <= 0.5:
return ind1
return ind2
individuals_1 = random.sample(individuals, len(individuals))
individuals_2 = random.sample(individuals, len(individuals))
chosen = []
for i in xrange(0, k, 4):
chosen.append(tourn(individuals_1[i], individuals_1[i + 1]))
chosen.append(tourn(individuals_1[i + 2], individuals_1[i + 3]))
chosen.append(tourn(individuals_2[i], individuals_2[i + 1]))
chosen.append(tourn(individuals_2[i + 2], individuals_2[i + 3]))
return chosen
#######################################
# Generalized Reduced runtime ND sort #
#######################################
def identity(obj):
"""Returns directly the argument *obj*.
"""
return obj
def isDominated(wvalues1, wvalues2):
"""Returns whether or not *wvalues1* dominates *wvalues2*.
:param wvalues1: The weighted fitness values that would be dominated.
:param wvalues2: The weighted fitness values of the dominant.
:returns: :obj:`True` if wvalues2 dominates wvalues1, :obj:`False`
otherwise.
"""
not_equal = False
for self_wvalue, other_wvalue in zip(wvalues1, wvalues2):
if self_wvalue > other_wvalue:
return False
elif self_wvalue < other_wvalue:
not_equal = True
return not_equal
def median(seq, key=identity):
"""Returns the median of *seq* - the numeric value separating the higher
half of a sample from the lower half. If there is an even number of
elements in *seq*, it returns the mean of the two mid
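The sample is truncated above, but the selection entry point is already complete. A minimal usage sketch for `selNSGA2` with DEAP's `creator` machinery (Python 2, matching the module's `xrange`); the weights and objective values are illustrative assumptions:

```python
import random
from deap import base, creator

# Two objectives, both minimized.
creator.create("FitnessMin2", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin2)

pop = []
for _ in xrange(8):
    ind = creator.Individual([random.random(), random.random()])
    ind.fitness.values = (ind[0], ind[1])  # evaluate: genome doubles as objectives
    pop.append(ind)

# Non-dominated sort plus crowding-distance tie-breaking on the last front.
survivors = selNSGA2(pop, k=4)
print [ind.fitness.values for ind in survivors]
```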
| jcdavis/yasp-stats | yasp_util.py | Python | apache-2.0 | 437 | 0.032037 |
import ConfigParser
import json
def get_player_id():
config = ConfigParser.ConfigParser()
config.read('yasp.cfg')
return config.get('yasp', 'player_id')
def get_hero_id():
config = ConfigParser.ConfigParser()
config.read('yasp.cfg')
return config.get('yasp', 'hero_id')
def get_hero_data():
file = open("heroes.json")
data = json.load(file)
file.close()
return dict([hero['id'], hero] for hero in data['heroes'])
| EmreAtes/spack | var/spack/repos/builtin/packages/py-pyani/package.py | Python | lgpl-2.1 | 2,432 | 0.000411 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPyani(PythonPackage):
"""pyani is a Python3 module that provides support for calculating
average nucleotide identity (ANI) and related measures for whole genome
comparisons, and rendering relevant graphical summary output. Where
available, it takes advantage of multicore systems, and can integrate
with SGE/OGE-type job schedulers for the sequence comparisons."""
homepage = "http://widdowquinn.github.io/pyani"
url = "https://pypi.io/packages/source/p/pyani/pyani-0.2.7.tar.gz"
version('0.2.7', '239ba630d375a81c35b7c60fb9bec6fa')
version('0.2.6', 'd5524b9a3c62c36063ed474ea95785c9')
depends_on('python@3.5:')
depends_on('py-setuptools', type='build')
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-seaborn', type=('build', 'run'))
# Required for ANI analysis
depends_on('py-biopython', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
# Required for ANIb analysis
depends_on('blast-plus~python', type='run')
# Required for ANIm analysis
depends_on('mummer', type='run')
| Arabidopsis-Information-Portal/PMR_API | services/experiments_api/experiments_service.py | Python | gpl-2.0 | 4,048 | 0.002717 |
# PMR WebServices
# Copyright (C) 2016 Manhoi Hur, Belyaeva, Irina
# This file is part of PMR WebServices API.
#
# PMR API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# PMR API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PMR API. If not, see <http://www.gnu.org/licenses/>.
"""
Experiments Service API. Provides search and serialization services
"""
import logging
import request_handler as rh
import exception
import jsonpickle
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# This function retrieves experiment record by experiment ID
def get_experiment_by_id(url, args):
""" Retrieve experiment record by experiment ID
from passed service url and parameters
validate parameters
perform experiment lookup
:type url: string
:param url: The service url
:type args: dict
:param args: The dictionary(map) of parameters submitted via query string
:rtype: json like string
:return: Returns Experiment as json-like string
"""
# retrieve experiment ID from request parameters
lookup_id = args['experimentID']
try:
int_lookup_id = int(lookup_id)
except ValueError:
raise Exception("Non integer experiment ID was submitted!")
log.debug("Experiment Lookup Id:" + str(lookup_id))
# get list of Experiment objects
response = get_experiment_as_objects(url, args)
if not response:
raise Exception ("Error ocurred. Cannot load experiments to search for experiment ID.")
# search for experiment by ID
lookup_object = find(lambda item: item.expId == int_lookup_id, response)
log.debug(lookup_object)
# raise not found exception if no such experiment
if not lookup_object:
raise exception.NotFound("No experiment found for experiment ID: " + str(lookup_id))
# transform to json like string
lookup_object_as_json_string = jsonpickle.encode(lookup_object, unpicklable=False)
log.debug("JSON deserialization:")
log.debug(lookup_object_as_json_string)
return lookup_object_as_json_string
# get all experiments as list of Experiment objects
def get_experiment_as_objects(url, args):
""" Retrieves all experiments as Experiment objects
return list of Experiment objects
:type url: string
:param url: request url
:type args: string
:param args: request parameters
:rtype: list
:return: Returns list of Experiment objects if success raises exception otherwise
"""
response = rh.loadExperiments(url, args, 'list')
if not response:
raise Exception ("Error ocurred. Cannot load list of experiments.")
return response
# This function get all experiments in json format
def get_experiments_as_json(url, args):
""" Retrieves all experiments in json format
return experiments in json format
:type url: string
:param url: request url
:type args: string
:param args: request parameters
:rtype: list
:return: Returns list of Experiment objects in json format if success raises exception otherwise
"""
response = rh.build_payload(url, args, 'list')
return response
# This function performs an exact search by identifier
def find(f, seq):
""" Retrieves object by identifier
return experiment object
:type f: function
:param f: predicate applied to each item
:type seq: list
:param seq: sequence of items to search
:rtype: Experiment
:return: Returns the matching object if found, None otherwise
"""
for item in seq:
if f(item):
return item
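A small sketch of the `find()` helper with a hypothetical stand-in class; the real code passes `Experiment` objects deserialized by `request_handler`:

```python
class FakeExperiment(object):  # hypothetical stand-in for Experiment
    def __init__(self, exp_id):
        self.expId = exp_id

experiments = [FakeExperiment(1), FakeExperiment(7), FakeExperiment(42)]
match = find(lambda item: item.expId == 7, experiments)
print(match.expId if match else None)  # 7
```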
| williambout/Aira | wind/air.py | Python | mit | 2,815 | 0.019538 |
#!/usr/bin/env python
import os
import re
import xively
import subprocess
import time
import datetime
import requests
import sys
temperature = 0
humidity = 0
# extract feed_id and api_key from environment variables
FEED_ID = "YOUR_FEED_ID" #CHANGE IT
API_KEY = "YOUR_KEY" #CHANG
|
E IT
DEBUG = False
# initialize api client
api = xively.XivelyAPIClient(API_KEY)
# Run the DHT program to get the humidity and temperature readings!
def read_dht():
while(True):
output = subprocess.check_output(["./Adafruit_DHT", "2302", "4"]);
if DEBUG:
print output
matches = re.search("Temp =\s+([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
temperature = float(matches.group(1))
# search for humidity printout
matches = re.search("Hum =\s+
|
([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
humidity = float(matches.group(1))
if DEBUG:
print "Temperature: %.1f C" % temperature
print "Humidity: %.1f %%" % humidity
return {'temperature':temperature,'humidity':humidity}
#time.sleep(10)
def get_datastream(feed):
try:
temp_datastream = feed.datastreams.get("Temperature")
if DEBUG:
print "Found existing temperature datastream"
except:
if DEBUG:
print "Creating new temperature datastream"
temp_datastream = feed.datastreams.create("Temperature", tags="temperature")
try:
humidity_datastream = feed.datastreams.get("Humidity")
if DEBUG:
print "Found existing humidity datastream"
except:
if DEBUG:
print "Creating new humidity datastream"
humidity_datastream = feed.datastreams.create("Humidity", tags="humidity")
return {'tempds':temp_datastream, 'humidityds':humidity_datastream}
def run():
print "Starting Xively DHT script"
feed = api.feeds.get(FEED_ID)
datastreams = get_datastream(feed)
datastreams['tempds'].max_value = None
datastreams['tempds'].min_value = None
datastreams['humidityds'].max_value = None
datastreams['humidityds'].min_value = None
while True:
dhtdata = read_dht()
if DEBUG:
print "Updating Xively feed with temperature: %.1f C" % dhtdata['temperature']
print "Updating Xively feed with humidity: %.1f percent" % dhtdata['humidity']
datastreams['tempds'].current_value = dhtdata['temperature']
datastreams['tempds'].at = datetime.datetime.utcnow()
datastreams['humidityds'].current_value = dhtdata['humidity']
datastreams['humidityds'].at = datetime.datetime.utcnow()
#print datetime.datetime.now()
#print datetime.datetime.utcnow()
try:
datastreams['tempds'].update()
datastreams['humidityds'].update()
except requests.HTTPError as e:
print "HTTPError({0}): {1}".format(e.errno, e.strerror)
time.sleep(120)
run()
| dagorim/mytools | ssngen.py | Python | bsd-2-clause | 524 | 0 |
#!/usr/bin/python
'''
Generate pseudo-random SSNs and save to a text file.
'''
import random
try:
myfile = open(str(raw_input("Output filename: ")), "w")
except ValueError:
print "Invalid input. Please enter a valid filename."
try:
for i in range(int(raw_input("How many random numbers?: "))):
line = str(random.randint(100000000, 999999999))
myfile.write(line)
myfile.write('\n')
except ValueError:
print "User entered invalid input. Please only enter integers."
myfile.close()
| llou/gpg_ansible | plugins/action_plugins/gpg_key.py | Python | gpl-3.0 | 45,135 | 0.004786 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import types
import StringIO
from functools import wraps
import tempfile
import shutil
from pipes import quote
from ansible.runner.return_data import ReturnData
from ansible import utils
from ansible.utils import template
DEFAULT_KEYRING = ""
DEFAULT_GPG_BIN = "/usr/bin/gpg"
DEFAULT_SECRET = False
DEFAULT_USE_AGENT = False
DEFAULT_PATH = ''
DEFAULT_LOCATION = DEFAULT_PATH
DEFAULT_KEYRING_BASE_PATH = "/tmp"
VALID_STATES = ['present', 'absent', 'copied', 'signed']
DEFAULT_STATE = 'present'
clean_regex = r"\s"
id_regex = re.compile(r"^[A-F0-9]{8,16}$")
fingerprint_regex = re.compile(r"^[A-F0-9]{40}$")
class GPG:
passphrase_file_number = 3
extra_file_number = 5
bufsize = -1
chunk_size = 1024
@classmethod
def build_from_keyring(cls, path):
return cls(public_keyrings=[path], primary_keyring=path)
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
secret_keyrings=[], public_keyrings=[], primary_keyring=None,
no_default_keyring=False, trust_model=None,
use_agent=False):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
>>> gpg = GPG(gnupghome="/tmp/pygpgtest")
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
self.verbose = verbose
self.use_agent = use_agent
self.secret_keyrings = secret_keyrings
self.public_keyrings = public_keyrings
self.primary_keyring = primary_keyring
self.no_default_keyring = no_default_keyring
self.trust_model = trust_model
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0700)
# if not os.path.isfile(self.gnupghome + "/secring.gpg"):
# self.gen_key()
def _get_cmd(self, args, passphrase=None):
cmd = [self.gpgbinary, '--status-fd 2 --no-tty']
if self.gnupghome:
cmd.append('--homedir "%s" ' % self.gnupghome)
if self.secret_keyrings:
cmd.extend(['--secret-keyring "%s"' % x for x in self.secret_keyrings])
if self.public_keyrings:
cmd.extend(['--keyring "%s"' % x for x in self.public_keyrings])
if self.primary_keyring:
cmd.append('--primary-keyring "%s"' % self.primary_keyring)
if self.no_default_keyring:
cmd.append('--no-default-keyring')
if self.trust_model:
cmd.append('--trust-model "%s"' % self.trust_model)
if passphrase:
cmd.append('--passphrase-fd 3')
cmd.extend(args)
cmd = ' '.join(cmd)
if self.verbose:
print cmd
return cmd
def _open_subprocess(self, args, passphrase=None):
cmd = self._get_cmd(args, passphrase=passphrase)
p = PopenHi(cmd, [(self.passphrase_file_number, 'r')],
bufsize=self.bufsize)
child_stdout, child_stdin, child_stderr, child_pass = \
p.fromchild, p.tochild, p.childerr, \
p.childhis[self.passphrase_file_number]
if passphrase:
child_pass.write(passphrase + "\n")
child_pass.close()
return child_stdout, child_stdin, child_stderr
def _handle_gigo(self, args, result, file=None, passphrase=None,
extra=None):
descriptors = []
if passphrase:
descriptors.append((self.passphrase_file_number, 'r'))
if extra:
descriptors.append((self.extra_file_number, 'r'))
cmd = self._get_cmd(args, passphrase)
result.cmd = cmd
p = PopenHi(cmd, descriptors, bufsize=self.bufsize)
child_stdout, child_stdin, child_stderr = p.fromchild, p.tochild, p.childerr
if passphrase:
child_pass = p.childhis[self.passphrase_file_number]
child_pass.write(passphrase + "\n")
child_pass.close()
if extra:
child_extra = p.childhis[self.extra_file_number]
if file is None:
child_stdin.close()
def get_readies():
            # Note: importing select at module top level raised "module is not
            # callable" in this environment, so it is imported locally here.
from select import select
files = [child_stdout, child_stderr]
selected = select([ x for x in files if not x.closed ],[],[], 0)
return selected[0]
input_closed = file is None
output_closed = False
status_closed = False
extra_closed = False if extra else True
while True:
if input_closed and output_closed and status_closed and extra_closed:
break
readables = get_readies()
if not input_closed:
data = file.read(self.chunk_size)
if data == "":
input_closed = True
child_stdin.close()
else:
child_stdin.write(data)
if extra:
data = extra.read(self.chunk_size)
if data == "":
extra_closed = True
child_extra.close()
else:
                    child_extra.write(data)
if child_stderr in readables:
line = child_stderr.readline()
if line == "":
status_closed = True
child_stderr.close()
else:
if self.verbose:
print line
result.write_status(line)
if child_stdout in readables:
data = child_stdout.read(self.chunk_size)
if data == "":
output_closed = True
else:
result.write_data(data)
result.set_code(p.poll())
return result
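    # Editor's note: _handle_gigo() above multiplexes the child's stdin,
    # stdout and stderr with select() so neither side blocks on a full pipe
    # buffer. A hedged standalone sketch of the same pattern (`cmd` and
    # `handle` are illustrative names, not part of this class):
    #
    #     import os, select, subprocess
    #     p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
    #                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    #     open_streams = [p.stdout, p.stderr]
    #     while open_streams:
    #         readable, _, _ = select.select(open_streams, [], [])
    #         for stream in readable:
    #             chunk = os.read(stream.fileno(), 1024)
    #             if not chunk:
    #                 open_streams.remove(stream)  # EOF on this stream
    #             else:
    #                 handle(chunk)                # hypothetical consumer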
def temporary_homedir(method):
"""A decorator to temporary assign a homedir if not provided to avoid
using the user homedir in some functions that access to it."""
@wraps(method)
def f(self, *args, **kwargs):
if not self.gnupghome is None:
return method(self, *args, **kwargs)
self.gnupghome = tempfile.mkdtemp()
try:
result = method(self, *args, **kwargs)
finally:
shutil.rmtree(self.gnupghome)
self.gnupghome = None
return result
return f
@temporary_homedir
def list_keys(self,secret=False):
""" list the keys currently in the keyring"""
        which = 'sigs'
        if secret:
            which = 'secret-keys'
args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which)
args = [args]
child_stdout, child_stdin, child_stderr = \
self._open_subprocess(args)
child_stdin.close()
# there might be some status thingumy here I should handle... (amk)
# ...nope, unless you care about expired sigs or keys (stevegt)
# Get the response information
result = ListKeys()
self._handle_gigo(args, result)
result.process()
return result
def get_default_key(self):
secret_keys = self.list_keys(secret=True)
if secret_keys:
return secret_keys[0]
def get_default_key_id(self):
return self.get_default_key()['keyid']
def get_default_key_fingerprint(self):
return self.get_default_key()['fingerprint']
required_equal_fields = ("Name-Real", "Name-Comment", "Name-Email")
def find_key(self, **kwargs):
kwordize = lambda x: x.replace("-", "_")
for key in self.list_keys():
if not "uids" in key:
continue
for uid in key["uids"]:
if not uid:
continue
for f_name in self.requi
|
yoziru-desu/locomo-pebble
|
mock-server/app.py
|
Python
|
mit
| 1,150
| 0
|
"""Main Tornado app."""
from tornado import gen
from tornado.options import parse_command_line
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application
import logging
from tornado.options import options
from settings import settings
from urls import url_patterns
class TornadoApp(Application):
"""Tornado Applition + rethinkDB + settings."""
def __init__(self):
"""Init function."""
Application.__init__(self, url_patterns, **settings)
@gen.coroutine
def main():
"""Main function."""
logging.info('Parsing command line')
parse_command_line()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
logging.info('Running in debug mode')
else:
logging.getLogger().setLevel(logging.INFO)
    # Single db connection for everything (thanks, Ben and Jesse)
logging.info('starting server on port ' + str(options.port))
http_server = HTTPServer(TornadoApp())
http_server.listen(options.port, options.host)
if __name__ == "__main__":
IOLoop.current().run_sync(main)
IOLoop.current().start()
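# Editor's note: options.port / options.host and the `debug` flag used above
# must be declared before parse_command_line() runs -- typically in the
# settings module imported at the top, which is not shown here. A hedged
# sketch of what that declaration might look like (defaults are assumptions):
#
#     from tornado.options import define
#     define("port", default=8888, type=int, help="port to listen on")
#     define("host", default="127.0.0.1", help="address to bind to")
#     define("debug", default=False, type=bool, help="run in debug mode")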
|
eldarion/django-chunked-uploads
|
docs/conf.py
|
Python
|
bsd-3-clause
| 645
| 0.00155
|
import os
import sys
extensions = []
templates_path = []
source_suffix = '.rst'
master_doc = 'index'
project = u'django-chunked-uploads'
copyright_holder = 'Eldarion'
copyright = u'2012, %s' % copyright_holder
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
htmlhelp_basename = '%sdoc' % project
latex_documents = [
('index', '%s.tex' % project, u'%s Documentation' % project, copyright_holder, 'manual'),
]
man_pages = [
('index', project, u'%s Documentation' % project, [copyright_holder], 1)
]
sys.path.insert(0, os.pardir)
m = __import__("chunked_uploads")
version = m.__version__
release = version
|
timrchavez/capomastro
|
projects/templatetags/projects_tags.py
|
Python
|
mit
| 543
| 0
|
from django.template.base import Library
from django.core.urlresolvers import reverse
from projects.models import ProjectBuild
register = Library()
@register.simple_tag()
def build_url(build_id):
"""
Fetches the ProjectBuild for a given build_id, if any.
"""
try:
build = ProjectBuild.objects.get(build_id=build_id)
return reverse(
"project_projectbuild_detail",
kwargs={"project_pk": build.project.pk, "build_pk": build.pk})
except ProjectBuild.DoesNotExist:
return ""
|
ildoc/ildoc.it
|
publishconf.py
|
Python
|
mit
| 488
| 0.004098
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'https://ildoc.dev'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
DISQUS_SITENAME = "ildoc
|
"
#GOOGLE_ANALYTICS = ""
|
eterevsky/animations
|
pyrene/pbrt.py
|
Python
|
mit
| 4,687
| 0.007041
|
import logging
import numpy as np
import os
import subprocess
import tempfile
from . import scene
from . import rman
ZERO = scene.npvector((0, 0, 0))
_film = rman.Identifier(
'Film', positional=['string'],
named={
'xresolution': 'integer',
'yresolution': 'integer',
'cropwindow': 'float[4]',
'filename': 'string'
})
_look_at = rman.Identifier('LookAt', positional=['point', 'point', 'vector'])
_camera = rman.Identifier(
'Camera', positional=['string'],
named={
'shutteropen': 'float',
'shutterclose': 'float',
'lensradius': 'float',
'focaldistance': 'float',
'fov': 'float',
'autofocus': 'bool'
})
_sampler = rman.Identifier(
'Sampler', positional=['string'],
named={
'pixelsamples': 'integer',
})
_area_light_source = rman.Identifier(
'AreaLightSource', positional=['string'],
named={'L': 'rgb'})
_translate = rman.Identifier('Translate', positional=['vector'])
_rotate = rman.Identifier('Rotate', positional=['float', 'vector'])
_shape = rman.Identifier(
'Shape', positional=['string'],
named={
'radius': 'float',
'indices': 'integer[]',
'P': 'point[]'
})
class PbrtRenderer(object):
def __init__(self, executable=None, output_file=None, scene_file=None,
width=384, height=256, samples_per_pixel=None, slaves=None,
exrpptm=None, exrnormalize=None, exrtopng=None):
self.executable = executable
self.output_file = output_file
self.scene_file = scene_file
self.width = width
self.height = height
self.samples_per_pixel = samples_per_pixel
self.scene_file_ext = 'pbrt'
self.exrpptm = exrpptm
self.exrnormalize = exrnormalize
self.exrtopng = exrtopng
@property
def output_file(self):
return self._output_file
@output_file.setter
def output_file(self, value):
logging.info('output_file = %s', value)
if value is None:
self._output_file = None
self._exr_file = None
return
self._output_file = value
base, ext = os.path.splitext(value)
logging.info('base = %s, ext = %s', base, ext)
assert ext == '.png'
self._exr_file = base + '.exr'
def render(self, scene, generate_only=False):
scene_file = self.scene_file or tempfile.mkstemp()[1]
logging.info('Created scene file %s', scene_file)
self._write_scene_file(scene, scene_file)
if not generate_only:
self._run_renderer(scene_file)
if not self.scene_file:
logging.info('Deleting %s', scene_file)
os.remove(scene_file)
def _write_object(self, writer, obj):
writer.begin_block('Attribute')
if obj.light is not None:
            color = obj.light.color * obj.light.power
            writer.write(_area_light_source('diffuse', L=color))
if isinstance(obj, scene.Sphere):
if not np.array_equal(obj.center, ZERO):
writer.write(_translate(obj.center))
writer.write(_shape("sphere", radius=obj.radius))
else:
assert False, "Unsupported object type"
writer.end_block('Attribute')
def _write_scene_file(self, scene, scene_file):
with rman.FileWriter(scene_file) as writer:
writer.write(_look_at(scene.camera.loc, scene.camera.to, scene.camera.up))
writer.write(_film(
'image',
xresolution=self.width, yresolution=self.height,
filename=self._exr_file))
writer.write(_camera('perspective', fov=scene.camera.fov))
if self.samples_per_pixel:
writer.write(_sampler('lowdiscrepancy', pixelsamples=self.samples_per_pixel))
writer.begin_block('World')
for obj in scene.objects:
self._write_object(writer, obj)
writer.end_block('World')
def _run_renderer(self, scene_file):
if self.executable is None:
logging.error(
'Trying to call pbrt, but path to the executable is not specified.')
assert self.executable is not None
args = [self.executable, scene_file]
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrpptm, '-c', '1.0', self._exr_file, self._exr_file + '.pp']
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrnormalize, self._exr_file + '.pp', self._exr_file + '.n']
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrtopng, self._exr_file + '.n', self.output_file]
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
def batch_render(self, scene_files):
logging.info('Rendering %d files', len(scene_files))
for f in scene_files:
self._run_renderer(f)
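# Editor's note: a hedged usage sketch. All paths are illustrative;
# exrpptm/exrnormalize/exrtopng are external OpenEXR post-processing tools
# invoked by _run_renderer() above, and `my_scene` would come from the
# accompanying scene module.
#
#     renderer = PbrtRenderer(executable='/usr/local/bin/pbrt',
#                             output_file='frame0001.png',
#                             width=640, height=480, samples_per_pixel=64,
#                             exrpptm='exrpptm', exrnormalize='exrnormalize',
#                             exrtopng='exrtopng')
#     renderer.render(my_scene)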
|
intake/filesystem_spec
|
fsspec/implementations/arrow.py
|
Python
|
bsd-3-clause
| 6,762
| 0.000739
|
import errno
import io
import os
import secrets
import shutil
from contextlib import suppress
from functools import wraps
from fsspec.spec import AbstractFileSystem
from fsspec.utils import infer_storage_options, mirror_from, stringify_path
def wrap_exceptions(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as exception:
if not exception.args:
raise
message, *args = exception.args
if isinstance(message, str) and "does not exist" in message:
raise FileNotFoundError(errno.ENOENT, message) from exception
else:
raise
return wrapper
class ArrowFSWrapper(AbstractFileSystem):
"""FSSpec-compatible wrapper of pyarrow.fs.FileSystem.
Parameters
----------
fs : pyarrow.fs.FileSystem
"""
root_marker = "/"
def __init__(self, fs, **kwargs):
self.fs = fs
super().__init__(**kwargs)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if "://" in path:
_, _, path = path.partition("://")
return path
def ls(self, path, detail=False, **kwargs):
from pyarrow.fs import FileSelector
entries = [
self._make_entry(entry)
for entry in self.fs.get_file_info(FileSelector(path))
]
if detail:
return entries
else:
return [entry["name"] for entry in entries]
def info(self, path, **kwargs):
path = self._strip_protocol(path)
[info] = self.fs.get_file_info([path])
return self._make_entry(info)
def exists(self, path):
path = self._strip_protocol(path)
try:
self.info(path)
except FileNotFoundError:
return False
else:
return True
def _make_entry(self, info):
from pyarrow.fs import FileType
if info.type is FileType.Directory:
kind = "directory"
elif info.type is FileType.File:
kind = "file"
elif info.type is FileType.NotFound:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
else:
kind = "other"
return {
"name": info.path,
"size": info.size,
"type": kind,
"mtime": info.mtime,
}
@wrap_exceptions
def cp_file(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1).rstrip("/")
path2 = self._strip_protocol(path2).rstrip("/")
with self._open(path1, "rb") as lstream:
tmp_fname = "/".join([self._parent(path2), f".tmp.{secrets.token_hex(16)}"])
try:
with self.open(tmp_fname, "wb") as rstream:
shutil.copyfileobj(lstream, rstream)
self.fs.move(tmp_fname, path2)
except BaseException: # noqa
with suppress(FileNotFoundError):
self.fs.delete_file(tmp_fname)
raise
@wrap_exceptions
def mv(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1).rstrip("/")
path2 = self._strip_protocol(path2).rstrip("/")
self.fs.move(path1, path2)
mv_file = mv
@wrap_exceptions
def rm_file(self, path):
path = self._strip_protocol(path)
self.fs.delete_file(path)
@wrap_exceptions
def rm(self, path, recursive=False, maxdepth=None):
        path = self._strip_protocol(path).rstrip("/")
if self.isdir(path):
if recursive:
self.fs.delete_dir(path)
else:
                raise ValueError("Can't delete directories without recursive=True")
else:
self.fs.delete_file(path)
@wrap_exceptions
    def _open(self, path, mode="rb", block_size=None, **kwargs):
if mode == "rb":
stream = self.fs.open_input_stream(path)
elif mode == "wb":
stream = self.fs.open_output_stream(path)
else:
raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")
return ArrowFile(self, stream, path, mode, block_size, **kwargs)
@wrap_exceptions
def mkdir(self, path, create_parents=True, **kwargs):
path = self._strip_protocol(path)
if create_parents:
self.makedirs(path, exist_ok=True)
else:
self.fs.create_dir(path, recursive=False)
@wrap_exceptions
def makedirs(self, path, exist_ok=False):
path = self._strip_protocol(path)
self.fs.create_dir(path, recursive=True)
@wrap_exceptions
def rmdir(self, path):
path = self._strip_protocol(path)
self.fs.delete_dir(path)
@mirror_from(
"stream", ["read", "seek", "tell", "write", "readable", "writable", "close"]
)
class ArrowFile(io.IOBase):
def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
self.path = path
self.mode = mode
self.fs = fs
self.stream = stream
self.blocksize = self.block_size = block_size
self.kwargs = kwargs
def __enter__(self):
return self
def __exit__(self, *args):
return self.close()
class HadoopFileSystem(ArrowFSWrapper):
"""A wrapper on top of the pyarrow.fs.HadoopFileSystem
    to connect its interface with fsspec"""
protocol = "hdfs"
def __init__(
self,
host="default",
port=0,
user=None,
kerb_ticket=None,
extra_conf=None,
**kwargs,
):
"""
Parameters
----------
host: str
Hostname, IP or "default" to try to read from Hadoop config
port: int
Port to connect on, or default from Hadoop config if 0
user: str or None
If given, connect as this username
kerb_ticket: str or None
If given, use this ticket for authentication
extra_conf: None or dict
Passed on to HadoopFileSystem
"""
from pyarrow.fs import HadoopFileSystem
fs = HadoopFileSystem(
host=host,
port=port,
user=user,
kerb_ticket=kerb_ticket,
extra_conf=extra_conf,
)
super().__init__(fs=fs, **kwargs)
@staticmethod
def _get_kwargs_from_urls(path):
ops = infer_storage_options(path)
out = {}
if ops.get("host", None):
out["host"] = ops["host"]
if ops.get("username", None):
out["user"] = ops["username"]
if ops.get("port", None):
out["port"] = ops["port"]
return out
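# Editor's note: a minimal usage sketch. ArrowFSWrapper accepts any
# pyarrow.fs.FileSystem; LocalFileSystem is used here purely for illustration.
#
#     from pyarrow.fs import LocalFileSystem
#     fs = ArrowFSWrapper(LocalFileSystem())
#     fs.mkdir("/tmp/demo")
#     with fs.open("/tmp/demo/hello.txt", "wb") as f:
#         f.write(b"hello")
#     print(fs.ls("/tmp/demo", detail=False))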
|
sdiehl/rpygtk
|
rpygtk/runtests.py
|
Python
|
gpl-3.0
| 818
| 0.02445
|
import unittest
import os
from ui import main
print os.getcwd()
class TestMain(unittest.TestCase):
def setUp(self):
        self.m = main.MainWindow()
def test_mainWindow(self):
assert(self.m)
def test_dataframe(self):
import numpy
#Random 25x4 Numpy Matrix
        self.m.render_dataframe(numpy.random.rand(25, 4), name='devel', rownames=xrange(0, 25))
assert(self.m.active_robject)
assert(self.m.active_robject.columns)
assert(self.m.active_robject.column_data)
def test_imports(self):
datasets = ['iris','Nile','morley','freeny','sleep','mtcars']
for a in datasets:
main.rsession.r('%s=%s' % (a,a))
self.m.sync_with_r()
assert(a in self.m.robjects)
unittest.main()
|
unixfreak0037/mwzoo
|
mwzoo.py
|
Python
|
mit
| 1,715
| 0.004082
|
#!/usr/bin/env python
# vim: ts=4:sw=4:et
#
# malware zoo
#
import mwzoo
import argparse
import os
import sys
import logging
import logging.config
parser = argparse.ArgumentParser(description='MalwareZoo')
parser.add_argument(
'--mwzoo-home', action='store', dest='mwzoo_home', default=None, required=False,
help='Path to the base installation directory of the malware zoo. This overrides MWZOO_HOME environment variable, if set.')
parser.add_argument(
'-c', '--config-path', action='store', dest='config_path', default='etc/mwzoo.ini', required=False,
help='Path to configuration file for the malware zoo.')
parser.add_argument(
'--logging-config-path', action='store', dest='logging_config_path', default='etc/logging.ini', required=False,
help='Path to logging configuration file for the malware zoo.')
parser.add_argument(
'-m', '--maximum-process-count', action='store', dest='maximum_process_count', type=int, default=0, required=False,
help='Maximum number of processes to spawn to process samples. Set to 0 for serial processing.')
args = parser.parse_args()
if args.mwzoo_home is not None:
os.environ['MWZOO_HOME'] = args.mwzoo_home
# if we don't specify a directory then we default to cwd
if 'MWZOO_HOME' not in os.environ:
os.environ['MWZOO_HOME'] = '.'
try:
os.chdir(os.environ['MWZOO_HOME'])
except Exception, e:
    sys.stderr.write("unable to change working directory to {0}: {1}\n".format(
        os.environ['MWZOO_HOME'], str(e)))
sys.exit(1)
logging.config.fileConfig(args.logging_config_path)
mwzoo.load_global_config(args.config_path)
zoo = mwzoo.MalwareZoo(args.maximum_process_count)
zoo.start()
logging.info("starting malware zoo http server")
mwzoo.HTTPServer(zoo).start()
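# Editor's note: example invocations (paths and counts are illustrative):
#
#     ./mwzoo.py --mwzoo-home /opt/mwzoo -c etc/mwzoo.ini -m 4
#     ./mwzoo.py -m 0    # serial processing in the current directory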
|
prologic/mio
|
tests/test_objects.py
|
Python
|
mit
| 4,906
| 0
|
from pytest import raises
from mio import runtime
from mio.errors import AttributeError, TypeError
def test_clone(mio):
mio.eval("World = Object clone()")
assert mio.eval("World")
assert mio.eval("World parent") == runtime.find("Object")
def test_type(mio):
assert mio.eval("Object type") == "Object"
assert mio.eval("1 type") == "Number"
def test_setParent(mio):
assert mio.eval("World = Object clone()")
assert mio.eval("World parent") == runtime.find("Object")
with raises(TypeError):
mio.eval("World setParent(World)", reraise=True)
assert mio.eval("Foo = Object clone()")
assert mio.eval("World setParent(Foo)")
assert mio.eval("World parent") == mio.eval("Foo")
def test_do(mio):
mio.eval("do(x = 1)")
assert mio.eval("x") == 1
def test_eq(mio):
assert mio.eval("1 ==(1)")
def test_forward(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo x") == 1
assert mio.eval("Foo Object") == runtime.find("Object")
def test_get(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo get(\"x\")") == 1
with raises(AttributeError):
mio.eval("Foo z", reraise=True)
def test_get_no_forward(mio):
mio.eval("Foo = Object clone()")
mio.eval("Foo del(\"forward\")")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo get(\"x\")") == 1
with raises(AttributeError):
mio.eval("Foo z", reraise=True)
def test_has(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo has(\"x\")")
def test_has2(mio):
mio.eval("Foo = Object clone()")
assert not mio.eval("Foo has(\"x\")")
def test_hash(mio):
assert mio.eval("Object __hash__()") == hash(runtime.find("Object"))
def test_hash_invalid(mio):
assert mio.frommio(mio.eval("Dict __hash__()")) is None
def test_id(mio):
assert mio.eval("(Object id) == (Object id)")
def test_keys(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo y = 2")
keys = list(mio.eval("Foo keys"))
assert "x" in keys
assert "y" in keys
def test_method(mio):
mio.eval("foo = method(1)")
assert mio.eval("foo()") == 1
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1") == 1
mio.eval("Foo foo = method(self x)")
assert mio.eval("Foo foo()") == 1
def test_neq(mio):
assert mio.eval("1 !=(0)")
def test_set(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo x = 1")
assert mio.eval("Foo get(\"x\")") == 1
assert mio.eval("Foo set(\"x\", 2)") == 2
assert mio.eval("Foo x") == 2
def test_del(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo count = 1")
assert mio.eval("Foo del(\"count\")").value is None
with raises(AttributeError):
mio.eval("Foo count", reraise=True)
def test_print(mio, capfd):
assert mio.eval("print(\"Hello World!\")").value is None
out, err = capfd.readouterr()
assert out == "Hello World!\n"
def test_print_sep(mio, capfd):
assert mio.eval("print(1, 2, 3, sep=\", \")").value is None
out, err = capfd.readouterr()
assert out == "1, 2, 3\n"
def test_print_end(mio, capfd):
assert mio.eval("print(1, 2, 3, end=\"\")").value is None
out, err = capfd.readouterr()
assert out == "1 2 3"
def test_repr(mio):
assert mio.eval("1 __repr__()") == "1"
    assert mio.eval("\"foo\"__repr__()") == "u\"foo\""
def test_str(mio):
assert mio.eval("1 __str__()") == "1"
assert mio.eval("\"foo\" __str__()") == "foo"
def test_bool(mio):
assert mio.eval("bool(1)")
assert not mio.eval("bool(0)")
assert mio.eval("bool(\"foo\")")
assert not mio.eval("bool(\"\")")
def test_parent(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo parent is Object")
def test_parent2(mio):
assert mio.eval("Object parent is Object")
def test_value(mio):
assert mio.eval("Object value is None")
def test_setValue(mio):
mio.eval("Foo = Object clone()")
assert mio.eval("Foo value is None")
mio.eval("Foo setValue(1)")
assert mio.eval("Foo value == 1")
def test_primitive(mio):
mio.eval("Foo = Object :clone()")
assert not mio.eval("Foo is Object")
def test_primitive2(mio):
with raises(AttributeError):
mio.eval("Object :asdf", reraise=True)
def test_state(mio):
assert repr(mio.eval("state")) == "NormalState()"
def test_callable(mio):
mio.eval("""
Foo = Object clone() do (
__call__ = method("foo")
)
""")
assert mio.eval("Foo()") == "foo"
def test_callable2(mio):
mio.eval("""
Foo = Object clone() do (
foo = method("foo")
)
""")
with raises(TypeError):
assert mio.eval("Foo()", reraise=True)
|
bbengfort/memorandi
|
reading/__init__.py
|
Python
|
apache-2.0
| 539
| 0.003711
|
# reading
# Manages current reading from sources like Instapaper and Goodreads
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Jan 01 09:01:03 2021 -0500
#
# Copyright (C) 2020 Bengfort.com
# For license information, see LICENSE
#
# ID: __init__.py [] benjamin@bengfort.com $
"""
Manages current reading from sources like Instapaper and Goodreads
"""
##########################################################################
## Imports
##########################################################################
|
ascott1/college-costs
|
paying_for_college/config/urls.py
|
Python
|
cc0-1.0
| 801
| 0.001248
|
from django.conf.urls import url, include
from django.conf import settings
from paying_for_college.views import LandingView
from django.contrib import admin
# from django.conf.urls.static import static
urlpatterns = [
    # url(r'^admin/', include(admin.site.urls)),
url(r'^$',
LandingView.as_view(), name='pfc-landing'),
url(r'^compare-financial-aid-and-college-cost/',
include('paying_for_college.disclosures.urls', namespace='disclosures')),
    url(r'^repay-student-debt/',
include('paying_for_college.debt.urls', namespace='debt')),
url(r'^guides/',
include('paying_for_college.guides.urls', namespace='guides')),
]
# if settings.DEBUG:
# urlpatterns += static(settings.STATIC_URL,
# document_root=settings.STATIC_ROOT)
|
tony-joseph/livre
|
items/views.py
|
Python
|
bsd-3-clause
| 22,123
| 0.001718
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Category, Language, BookDetail, BookCopy, Periodical, PeriodicalIssue
from .forms import CategoryForm, LanguageForm, BookDetailForm, BookCopyForm, PeriodicalForm, PeriodicalIssueForm
@login_required
def add_category(request):
"""View to add a new category."""
if not request.user.is_staff:
raise Http404
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
category = Category.objects.create(
title=form.cleaned_data['title'],
slug=form.cleaned_data['slug'],
description=form.cleaned_data['description'],
created_by=request.user,
updated_by=request.user,
)
messages.add_message(request, messages.SUCCESS, 'New category added.')
return redirect(category.get_absolute_url())
else:
form = CategoryForm()
context = {
'form': form,
}
return render(request, 'items/add-category.html', context)
def list_categories(request):
"""View to list all categories."""
return render(request, 'items/list-categories.html', {'categories': Category.objects.all()})
def view_category(request, slug):
"""View to display category details."""
category = get_object_or_404(Category, slug=slug)
return render(request, 'items/view-category.html', {'category': category})
@login_required
def edit_category(request, slug):
"""View to edit a category."""
if not request.user.is_staff:
raise Http404
category = get_object_or_404(Category, slug=slug)
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
category.title = form.cleaned_data['title']
category.slug = form.cleaned_data['slug']
category.description = form.cleaned_data['description']
category.updated_by = request.user
category.save()
messages.add_message(request, messages.SUCCESS, 'Details updated.')
return redirect(category.get_absolute_url())
else:
form = CategoryForm()
context = {
'form': form,
'category': category,
}
return render(request, 'items/edit-category.html', context)
@login_required
def delete_category(request, slug):
"""View to delete a category."""
if not request.user.is_staff:
raise Http404
category = get_object_or_404(Category, slug=slug)
if BookDetail.objects.filter(category=category).exists():
        messages.add_message(request, messages.ERROR, 'Unable to delete. Books exist in this category.')
return redirect(reverse('items:list_categories'))
category.delete()
    return redirect(reverse('items:list_categories'))
def category_books(request, slug):
"""View to display paginated list of books in the category."""
category = get_object_or_404(Category, slug=slug)
book_details = BookDetail.objects.filter(category=category)
page = request.GET.get('page')
paginator = Paginator(book_details, 50)
try:
book_details = paginator.page(page)
except PageNotAnInteger:
book_details = paginator.page(1)
except EmptyPage:
book_details = paginator.page(paginator.num_pages)
context = {
'book_details': book_details,
'category': category,
}
return render(request, 'items/category-books.html', context)
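# Editor's note: the try/except pagination pattern above reappears verbatim in
# the views below; a small helper like this (hypothetical, not part of the
# original module) would remove the duplication:
def _paginate(queryset, page, per_page=50):
    """Return the requested page, clamping non-integer or out-of-range values."""
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)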
@login_required
def add_language(request):
"""View to add a new language."""
if not request.user.is_staff:
raise Http404
if request.method == 'POST':
form = LanguageForm(request.POST)
if form.is_valid():
language = Language.objects.create(
name=form.cleaned_data['name'],
short_code=form.cleaned_data['short_code'],
description=form.cleaned_data['description'],
created_by=request.user,
                updated_by=request.user,
)
messages.add_message(request, messages.SUCCESS, 'New language added.')
return redirect(language.get_absolute_url())
else:
form = LanguageForm()
context = {
'form': form,
}
return render(request, 'items/add-language.html', context)
def list_languages(request):
"""View to list all language
|
s."""
return render(request, 'items/list-languages.html', {'languages': Language.objects.all()})
def view_language(request, slug):
"""View to display language details."""
language = get_object_or_404(Language, short_code=slug)
return render(request, 'items/view-language.html', {'language': language})
@login_required
def edit_language(request, slug):
"""View to edit a language."""
if not request.user.is_staff:
raise Http404
language = get_object_or_404(Language, short_code=slug)
if request.method == 'POST':
form = LanguageForm(request.POST)
if form.is_valid():
language.name = form.cleaned_data['name']
language.short_code = form.cleaned_data['short_code']
language.description = form.cleaned_data['description']
language.updated_by = request.user
language.save()
messages.add_message(request, messages.SUCCESS, 'Details updated.')
return redirect(language.get_absolute_url())
else:
form = LanguageForm()
context = {
'form': form,
'language': language,
}
return render(request, 'items/edit-language.html', context)
@login_required
def delete_language(request, slug):
"""View to delete a language."""
if not request.user.is_staff:
raise Http404
language = get_object_or_404(Language, short_code=slug)
if BookDetail.objects.filter(language=language).exists():
        messages.add_message(request, messages.ERROR, 'Unable to delete. Books exist in this language.')
return redirect(language.get_absolute_url())
language.delete()
return redirect(reverse('items:list_languages'))
def language_books(request, slug):
"""View to display paginated list of books in the language."""
language = get_object_or_404(Language, short_code=slug)
book_details = BookDetail.objects.filter(language=language)
page = request.GET.get('page')
paginator = Paginator(book_details, 50)
try:
book_details = paginator.page(page)
except PageNotAnInteger:
book_details = paginator.page(1)
except EmptyPage:
book_details = paginator.page(paginator.num_pages)
context = {
'book_details': book_details,
'language': language,
}
return render(request, 'items/language-books.html', context)
@login_required
def add_book_detail(request):
"""View to add a new detail."""
if not request.user.is_staff:
raise Http404
if request.method == 'POST':
form = BookDetailForm(request.POST)
if form.is_valid():
language = get_object_or_404(Language, short_code=form.cleaned_data['language'])
category = get_object_or_404(Category, slug=form.cleaned_data['category'])
book_detail = BookDetail.objects.create(
title=form.cleaned_data['title'],
author=form.cleaned_data['author'],
description=form.cleaned_data['description'],
category=category,
language=language,
publisher=form.cleaned_data['publisher'],
published_on=form.cleaned_data['published_on'],
pages=form.cleaned_data['pages'],
isbn=form.cleaned_data['isbn'],
tags=form.cleaned_data['tags'],
created_by=request.user,
updated_by=request.user,
)
BookCopy.objects.crea
|
gpospelov/BornAgain
|
Examples/fit52_Advanced/multiple_datasets.py
|
Python
|
gpl-3.0
| 6,791
| 0
|
"""
Fitting example: simultaneous fit of two datasets
"""
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import bornagain as ba
from bornagain import deg, angstrom, nm
def get_sample(params):
"""
Returns a sample with uncorrelated cylinders and pyramids.
"""
radius_a = params["radius_a"]
radius_b = params["radius_b"]
height = params["height"]
m_vacuum = ba.HomogeneousMaterial("Vacuum", 0.0, 0.0)
m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
formfactor = ba.FormFactorHemiEllipsoid(radius_a, radius_b, height)
particle = ba.Particle(m_particle, formfactor)
layout = ba.ParticleLayout()
layout.addParticle(particle)
vacuum_layer = ba.Layer(m_vacuum)
vacuum_layer.addLayout(layout)
substrate_layer = ba.Layer(m_substrate)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(vacuum_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation(params):
"""
Returns a GISAXS simulation with beam and detector defined.
"""
incident_angle = params["incident_angle"]
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(50, -1.5*deg, 1.5*deg, 50, 0.0*deg,
2.0*deg)
simulation.setBeamParameters(1.0*angstrom, incident_angle, 0.0*deg)
simulation.beam().setIntensity(1e+08)
simulation.setSample(get_sample(params))
return simulation
def simulation1(params):
params["incident_angle"] = 0.1*deg
return get_simulation(params)
def simulation2(params):
params["incident_angle"] = 0.4*deg
return get_simulation(params)
def create_real_data(incident_alpha):
"""
Generating "real" data by adding noise to the simulated data.
"""
params = {
'radius_a': 5.0*nm,
'radius_b': 6.0*nm,
'height': 8.0*nm,
"incident_angle": incident_alpha
}
simulation = get_simulation(params)
simulation.runSimulation()
# retrieving simulated data in the form of numpy array
real_data = simulation.result().array()
# spoiling simulated data with the noise to produce "real" data
noise_factor = 0.1
noisy = np.random.normal(real_data, noise_factor*np.sqrt(real_data))
noisy[noisy < 0.1] = 0.1
return noisy
class PlotObserver():
"""
Draws fit progress every nth iteration. Real data, simulated data
and chi2 map will be shown for both datasets.
"""
def __init__(self):
self.fig = plt.figure(figsize=(12.8, 10.24))
self.fig.canvas.draw()
def __call__(self, fit_objective):
self.update(fit_objective)
@staticmethod
def plot_dataset(fit_objective, canvas):
for i_dataset in range(0, fit_objective.fitObjectCount()):
real_data = fit_objective.experimentalData(i_dataset)
simul_data = fit_objective.simulationResult(i_dataset)
chi2_map = fit_objective.relativeDifference(i_dataset)
zmax = real_data.histogram2d().getMaximum()
plt.subplot(canvas[i_dataset*3])
ba.plot_colormap(real_data,
title="\"Real\" data - #" + str(i_dataset + 1),
zmin=1.0,
zmax=zmax,
zlabel="")
plt.subplot(canvas[1 + i_dataset*3])
ba.plot_colormap(simul_data,
title="Simulated data - #" + str(i_dataset + 1),
zmin=1.0,
zmax=zmax,
zlabel="")
plt.subplot(canvas[2 + i_dataset*3])
ba.plot_colormap(chi2_map,
title="Chi2 map - #" + str(i_dataset + 1),
zmin=0.001,
zmax=10.0,
zlabel="")
@staticmethod
def display_fit_parameters(fit_objective):
"""
Displays fit parameters, chi and iteration number.
"""
plt.title('Parameters')
plt.axis('off')
iteration_info = fit_objective.iterationInfo()
plt.text(
0.01, 0.85,
"Iterations " + '{:d}'.format(iteration_info.iterationCount()))
plt.text(0.01, 0.75,
"Chi2 " + '{:8.4f}'.format(iteration_info.chi2()))
for index, params in enumerate(iteration_info.parameters()):
plt.text(0.01, 0.55 - index*0.1,
'{:30.30s}: {:6.3f}'.format(params.name(), params.value))
@staticmethod
def plot_fit_parameters(fit_objective):
"""
Displays fit parameters, chi and iteration number.
"""
plt.axis('off')
iteration_info = fit_objective.iterationInfo()
plt.text(
0.01, 0.95,
"Iterations " + '{:d}'.format(iteration_info.iterationCount()))
plt.text(0.01, 0.70,
"Chi2 " + '{:8.4f}'.format(iteration_info.chi2()))
for index, params in enumerate(iteration_info.parameters()):
plt.text(0.01, 0.30 - index*0.3,
'{:30.30s}: {:6.3f}'.format(params.name(), params.value))
def update(self, fit_objective):
self.fig.clf()
        # divide the figure into 3x3 subplots, with the first two rows
        # occupying most of the space
canvas = matplotlib.gridspec.GridSpec(3,
3,
width_ratios=[1, 1, 1],
height_ratios=[4, 4, 1])
canvas.update(left=0.05, right=0.95, hspace=0.5, wspace=0.2)
self.plot_dataset(fit_objective, canvas)
plt.subplot(canvas[6:])
self.plot_fit_parameters(fit_objective)
plt.draw()
plt.pause(0.01)
def run_fitting():
"""
main function to run fitting
"""
data1 = create_real_data(0.1*deg)
    data2 = create_real_data(0.4*deg)
fit_objective = ba.FitObjective()
fit_objective.addSimulationAndData(simulation1, data1, 1.0)
fit_objective.addSimulationAndData(simulation2, data2, 1.0)
fit_objective.initPrint(10)
# creating custom observer which will draw fit progress
plotter = PlotObserver()
fit_objective.initPlot(10, plotter.update)
    params = ba.Parameters()
params.add("radius_a", 4.*nm, min=2.0, max=10.0)
params.add("radius_b", 6.*nm, vary=False)
params.add("height", 4.*nm, min=2.0, max=10.0)
minimizer = ba.Minimizer()
result = minimizer.minimize(fit_objective.evaluate, params)
fit_objective.finalize(result)
if __name__ == '__main__':
run_fitting()
plt.show()
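# Editor's note: the third argument to addSimulationAndData() above is the
# dataset weight in the combined chi2. A hedged example of down-weighting the
# second dataset (value illustrative):
#
#     fit_objective.addSimulationAndData(simulation2, data2, 0.5)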
|
chatelak/RMG-Py
|
rmgpy/molecule/parserTest.py
|
Python
|
mit
| 2,890
| 0.007612
|
import unittest
from rmgpy.molecule.molecule import Molecule
from .parser import *
class ParserTest(unittest.TestCase):
def test_fromAugmentedInChI(self):
aug_inchi = 'InChI=1S/CH4/h1H4'
        mol = fromAugmentedInChI(Molecule(), aug_inchi)
self.assertTrue(not mol.InChI == '')
aug_inchi = 'InChI=1/CH4/h1H4'
mol = fromAugmentedInChI(Molecule(), aug_inchi)
self.assertTrue(not mol.InChI == '')
def test_toRDKitMol(self):
"""
Test that toRDKitMol returns correct indices and atom mappings.
"""
bondOrderDict = {'SINGLE':'S','DOUBLE':'D','TRIPLE':'T','AROMATIC':'B'}
mol = fromSMILES(Molecule(), 'C1CCC=C1C=O')
rdkitmol, rdAtomIndices = mol.toRDKitMol(removeHs=False, returnMapping=True, sanitize=True)
for atom in mol.atoms:
# Check that all atoms are found in mapping
self.assertTrue(atom in rdAtomIndices)
# Check that all bonds are in rdkitmol with correct mapping and order
for connectedAtom, bond in atom.bonds.iteritems():
bondType = str(rdkitmol.GetBondBetweenAtoms(rdAtomIndices[atom],rdAtomIndices[connectedAtom]).GetBondType())
rdkitBondOrder = bondOrderDict[bondType]
self.assertEqual(bond.order, rdkitBondOrder)
# Test for removeHs = True
rdkitmol2, rdAtomIndices2 = mol.toRDKitMol(removeHs=True, returnMapping=True, sanitize=True)
for atom in mol.atoms:
# Check that all non-hydrogen atoms are found in mapping
if atom.symbol != 'H':
self.assertTrue(atom in rdAtomIndices)
# Check that all bonds connected to non-hydrogen have the correct mapping and order
for connectedAtom, bond in atom.bonds.iteritems():
if connectedAtom.symbol != 'H':
bondType = str(rdkitmol.GetBondBetweenAtoms(rdAtomIndices[atom],rdAtomIndices[connectedAtom]).GetBondType())
rdkitBondOrder = bondOrderDict[bondType]
self.assertEqual(bond.order, rdkitBondOrder)
class ResetLonePairsTest(unittest.TestCase):
def test_Methane(self):
smi = 'C'
mol = Molecule().fromSMILES(smi)
p_indices = []
reset_lone_pairs(mol, p_indices)
for at in mol.atoms:
self.assertEquals(at.lonePairs, 0)
def test_SingletMethylene(self):
adjlist = """
multiplicity 1
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
mol = Molecule().fromAdjacencyList(adjlist)
p_indices = [1]
reset_lone_pairs(mol, p_indices)
for at in mol.atoms:
if at.symbol == 'C':
self.assertEquals(at.lonePairs, 1)
else:
self.assertEquals(at.lonePairs, 0)
|
amyliu345/zulip
|
zerver/management/commands/realm_alias.py
|
Python
|
apache-2.0
| 2,022
| 0.000989
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.models import Realm, RealmAlias, get_realm, can_add_alias
from zerver.lib.actions import do_get_realm_aliases
import sys
class Command(BaseCommand):
help = """Manage aliases for the specified realm"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
        parser.add_argument('-r', '--realm',
dest='string_id',
type=str,
required=True,
help='The subdomain or string_id of the realm.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('alias', metavar='<alias>', type=str, nargs='?',
help="alias to add or remove")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = get_realm(options["string_id"])
if options["op"] == "show":
print("Aliases for %s:" % (realm.domain,))
for alias in do_get_realm_aliases(realm):
print(alias["domain"])
sys.exit(0)
domain = options['alias'].lower()
if options["op"] == "add":
if not can_add_alias(domain):
print("A Realm already exists for this domain, cannot add it as an alias for another realm!")
sys.exit(1)
RealmAlias.objects.create(realm=realm, domain=domain)
sys.exit(0)
elif options["op"] == "remove":
RealmAlias.objects.get(realm=realm, domain=domain).delete()
sys.exit(0)
else:
self.print_help("./manage.py", "realm_alias")
sys.exit(1)
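# Editor's note: example invocations (realm and domain values illustrative):
#
#     ./manage.py realm_alias -r zulip --op show
#     ./manage.py realm_alias -r zulip --op add example.com
#     ./manage.py realm_alias -r zulip --op remove example.com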
|
Krakn/learning
|
src/python/hackerrank/algorithms/compare_the_triplets/compare_the_triplets.py
|
Python
|
isc
| 597
| 0.005025
|
#!/bin/python3
import sys
def solve(a0, a1, a2, b0, b1, b2):
score = [0, 0]
alist = [a0, a1, a2]
blist = [b0, b1, b2]
clist = zip(alist, blist)
for pair in clist:
if pair[0] > pair[1]:
score[0] += 1
elif pair[0] < pair[1]:
score[1] += 1
else:
continue
    return score
a0, a1, a2 = input().strip().split(' ')
a0, a1, a2 = [int(a0), int(a1), int(a2)]
b0, b1, b2 = input().strip().split(' ')
b0, b1, b2 = [int(b0), int(b1), int(b2)]
result = solve(a0, a1, a2, b0, b1, b2)
print (" ".join(map(str, result)))
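# Editor's note: an equivalent, more compact formulation of the zip-based
# comparison above, offered as an illustrative sketch only:
def solve_compact(a, b):
    """Return [alice, bob] scores for equal-length score triplets a and b."""
    alice = sum(x > y for x, y in zip(a, b))
    bob = sum(x < y for x, y in zip(a, b))
    return [alice, bob]
# e.g. solve_compact([5, 6, 7], [3, 6, 10]) == [1, 1]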
|
taigaio/taiga-back
|
taiga/base/utils/diff.py
|
Python
|
agpl-3.0
| 1,614
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def make_diff(first: dict, second: dict, not_found_value=None,
excluded_keys: tuple = ()) -> dict:
"""
Compute a diff between two dicts.
"""
diff = {}
# Check all keys in first dict
for key in first:
if key not in second:
            diff[key] = (first[key], not_found_value)
elif first[key] != second[key]:
diff[key] = (first[key], second[key])
    # Check all keys in second dict to find missing
for key in second:
if key not in first:
diff[key] = (not_found_value, second[key])
# Remove A -> A changes that usually happens with None -> None
for key, value in list(diff.items()):
frst, scnd = value
if frst == scnd:
del diff[key]
    # Remove excluded keys
for key in excluded_keys:
if key in diff:
del diff[key]
return diff
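# Editor's note: a quick illustration of the contract above (values arbitrary):
#
#     >>> make_diff({"a": 1, "b": 2}, {"b": 3, "c": 4})
#     {'a': (1, None), 'b': (2, 3), 'c': (None, 4)}
#
#     >>> make_diff({"a": 1}, {"a": 1, "b": 2}, excluded_keys=("b",))
#     {}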
|
yylangchen/Sample_CPP_Cocos2dx
|
tools/cocos2d-console/bin/cocos_project.py
|
Python
|
mit
| 18,116
| 0.005078
|
import os
import re
import json
import cocos
class Project(object):
CPP = 'cpp'
LUA = 'lua'
JS = 'js'
CONFIG = '.cocos-project.json'
KEY_PROJ_TYPE = 'project_type'
KEY_HAS_NATIVE = 'has_native'
KEY_CUSTOM_STEP_SCRIPT = "custom_step_script"
CUSTOM_STEP_PRE_BUILD = "pre-build"
CUSTOM_STEP_POST_BUILD = "post-build"
CUSTOM_STEP_PRE_NDK_BUILD = "pre-ndk-build"
CUSTOM_STEP_POST_NDK_BUILD = "post-ndk-build"
CUSTOM_STEP_PRE_COPY_ASSETS = "pre-copy-assets"
CUSTOM_STEP_POST_COPY_ASSETS = "post-copy-assets"
CUSTOM_STEP_PRE_ANT_BUILD = "pre-ant-build"
CUSTOM_STEP_POST_ANT_BUILD = "post-ant-build"
@staticmethod
def list_for_display():
return [x.lower() for x in Project.language_list()]
@staticmethod
def language_list():
return (Project.CPP, Project.LUA, Project.JS)
def __init__(self, project_dir):
# parse the config file
self.info = self._parse_project_json(project_dir)
def _parse_project_json(self, src_dir):
proj_path = self._find_project_dir(src_dir)
# config file is not found
        if proj_path is None:
raise cocos.CCPluginError("Can't find config file %s in path %s" % (Project.CONFIG, src_dir))
project_json = os.path.join(proj_path, Project.CONFIG)
        f = None
        try:
            f = open(project_json)
            project_info = json.load(f)
            f.close()
        except Exception:
            if f is not None:
                f.close()
            raise cocos.CCPluginError("Configuration file %s is broken!" % project_json)
if project_info is None:
raise cocos.CCPluginError("Parse configuration in file \"%s\" failed." % Project.CONFIG)
if not project_info.has_key(Project.KEY_PROJ_TYPE):
raise cocos.CCPluginError("Can't get value of \"%s\" in file \"%s\"." % (Project.KEY_PROJ_TYPE, Project.CONFIG))
lang = project_info[Project.KEY_PROJ_TYPE]
lang = lang.lower()
        # The config is invalid
        if not (lang in Project.language_list()):
raise cocos.CCPluginError("The value of \"%s\" must be one of (%s)" % (Project.KEY_PROJ_TYPE, ', '.join(Project.list_for_display())))
# record the dir & language of the project
        self._project_dir = proj_path
self._project_lang = lang
# if is script project, record whether it has native or not
self._has_native = False
if (self._is_script_project() and project_info.has_key(Project.KEY_HAS_NATIVE)):
self._has_native = project_info[Project.KEY_HAS_NATIVE]
# if has custom step script, record it
self._custom_step = None
if (project_info.has_key(Project.KEY_CUSTOM_STEP_SCRIPT)):
script_path = project_info[Project.KEY_CUSTOM_STEP_SCRIPT]
if not os.path.isabs(script_path):
script_path = os.path.join(self._project_dir, script_path)
if os.path.isfile(script_path):
import sys
script_dir, script_name = os.path.split(script_path)
sys.path.append(script_dir)
self._custom_step = __import__(os.path.splitext(script_name)[0])
cocos.Logging.info("Find custom step script: %s" % script_path)
else:
cocos.Logging.warning("Can't find custom step script %s" % script_path)
self._custom_step = None
return project_info
def invoke_custom_step_script(self, event, tp, args):
try:
if self._custom_step is not None:
self._custom_step.handle_event(event, tp, args)
except Exception as e:
cocos.Logging.warning("Custom step invoke failed: %s" % e)
def _find_project_dir(self, start_path):
path = start_path
while True:
if cocos.os_is_win32():
# windows root path, eg. c:\
if re.match(".+:\\\\$", path):
break
else:
# unix like use '/' as root path
if path == '/' :
break
cfg_path = os.path.join(path, Project.CONFIG)
if (os.path.exists(cfg_path) and os.path.isfile(cfg_path)):
return path
path = os.path.dirname(path)
return None
def get_proj_config(self, key):
project_json = os.path.join(self._project_dir, Project.CONFIG)
f = open(project_json)
project_info = json.load(f)
f.close()
ret = None
if project_info.has_key(key):
ret = project_info[key]
return ret
def write_proj_config(self, key, value):
project_json = os.path.join(self._project_dir, Project.CONFIG)
if os.path.isfile(project_json):
f = open(project_json)
project_info = json.load(f)
f.close()
if project_info is None:
project_info = {}
project_info[key] = value
outfile = open(project_json, "w")
json.dump(project_info, outfile, sort_keys = True, indent = 4)
outfile.close()
def get_project_dir(self):
return self._project_dir
def get_language(self):
return self._project_lang
def has_android_libs(self):
if self._is_script_project():
proj_android_path = os.path.join(self.get_project_dir(), "frameworks", "runtime-src", "proj.android", "libs")
else:
proj_android_path = os.path.join(self.get_project_dir(), "proj.android", "libs")
return os.path.isdir(proj_android_path)
def _is_native_support(self):
return self._has_native
def _is_script_project(self):
return self._is_lua_project() or self._is_js_project()
def _is_cpp_project(self):
return self._project_lang == Project.CPP
def _is_lua_project(self):
return self._project_lang == Project.LUA
def _is_js_project(self):
return self._project_lang == Project.JS
class Platforms(object):
ANDROID = 'android'
IOS = 'ios'
MAC = 'mac'
WEB = 'web'
WIN32 = 'win32'
LINUX = 'linux'
CFG_CLASS_MAP = {
ANDROID : "cocos_project.AndroidConfig",
IOS : "cocos_project.iOSConfig",
MAC : "cocos_project.MacConfig",
WEB : "cocos_project.WebConfig",
WIN32 : "cocos_project.Win32Config",
LINUX : "cocos_project.LinuxConfig"
}
@staticmethod
def list_for_display():
return [x.lower() for x in Platforms.list()]
@staticmethod
def list():
return (Platforms.ANDROID, Platforms.IOS, Platforms.MAC, Platforms.WEB, Platforms.WIN32, Platforms.LINUX)
def __init__(self, project, current):
self._project = project
proj_info = self._project.info
self._gen_available_platforms(proj_info)
self._current = None
if current is not None:
current_lower = current.lower()
if current_lower in self._available_platforms.keys():
self._current = current_lower
else:
raise cocos.CCPluginError("Current available platforms : %s. '%s' is not available." % (self._available_platforms.keys(), current))
def _filter_platforms(self, platforms):
ret = []
for p in platforms:
if cocos.os_is_linux():
if p == Platforms.WEB or p == Platforms.LINUX or p == Platforms.ANDROID:
ret.append(p)
if cocos.os_is_mac():
if p == Platforms.WEB or p == Platforms.IOS or p == Platforms.MAC or p == Platforms.ANDROID:
ret.append(p)
if cocos.os_is_win32():
if p == Platforms.WEB or p == Platforms.WIN32 or p == Platforms.ANDROID:
ret.append(p)
return ret
def _gen_available_platforms(self, proj_info):
# generate the platform list for different projects
if self._project._is_lua_project():
if self._project._is_native_support():
platform_list = [ Platforms.ANDROID, Platforms.WIN32, Platforms.
|
patrick91/pycon
|
backend/api/schedule/types.py
|
Python
|
mit
| 1,130
| 0
|
from typing import TYPE_CHECKING, List, Optional
import strawberry
from api.languages.types import Language
from api.submissions.types import Submission
from api.users.types import User
from strawberry.types.datetime import DateTime
if TYPE_CHECKING: # pragma: no cover
from api.conferences.types import Conference, AudienceLevel # noqa
@strawberry.type
class Room:
id: strawberry.ID
name: str
conference: "Conference"
type: str
@strawberry.type
class ScheduleItem:
id: strawberry.ID
conference: "Conference"
title: str
start: DateTime
end: DateTime
submission: Optional[Submission]
slug: str
    description: str
type: str
duration: Optional[int]
highlight_color: Optional[str]
speakers: List[User]
language: Language
audience_level: Optional["AudienceLevel"]
@strawberry.field
def rooms(self, info) -> List[Room]:
return self.rooms.all()
@strawberry.field
def image(self, info) -> Optional[str]:
if not self.image:
return None
return info.context["request"].build_absolute_uri(self.image.url)
|
vsco/grpc
|
tools/run_tests/run_tests.py
|
Python
|
bsd-3-clause
| 56,812
| 0.010068
|
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollsig', 'poll', 'poll-cv'],
# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool
'mac': ['poll'],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
            self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
            cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
pastgift/seed-website-py | app/site_users/__init__.py | Python | mit | 139 | 0.007194
# -*- coding: utf-8 -*-
from flask import Blueprint
site_users_blueprint = Blueprint('site_users', __name__)
from . import views, hooks
Alex-Ian-Hamilton/sunpy | sunpy/tests/setup_command.py | Python | bsd-2-clause | 4,158 | 0.000481
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 7 19:36:08 2014
@author: Stuart Mumford
This file is designed to be imported and run only via setup.py, hence its
dependency on astropy_helpers which will be available in that context.
"""
from __future__ import absolute_import, division, print_function
import os
from astropy_helpers.commands.test import AstropyTest
from astropy_helpers.compat import _fix_user_options
class SunPyTest(AstropyTest):
description = 'Run the tests for this package'
user_options = [
# Package to test
        ('package=', 'P',
"The name of a specific package to test, e.g. 'io' or 'utils'. "
"If nothing is specified, all default tests are run."),
# Print all the things
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
        # plugins to enable
('plugins=', 'p',
'Plugins to enable when running pytest.'),
# Run online tests?
('online', 'R',
         'Also run tests that require an internet connection.'),
# Run only online tests?
('online-only', None,
         'Only run tests that require an internet connection.'),
# Run tests that check figure generation
('figure', None,
'Run tests that compare figures against stored hashes.'),
# Calculate test coverage
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('cov-report=', None,
'Specify the type of coverage report to generate. (Default terminal)'),
# Run tests in parallel
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
# Pass additional cli args to pytest
('args=', 'a',
'Additional arguments to be passed to pytest.')
]
user_options = _fix_user_options(user_options)
package_name = ''
def initialize_options(self):
self.package = ''
#self.test_path = None
self.verbose_results = False
self.plugins = None
self.args = None
self.online = False
self.online_only = False
self.figure = False
self.coverage = False
self.cov_report = 'term' if self.coverage else None
self.docs_path = os.path.abspath('doc')
self.parallel = 0
self.temp_root = None
def _validate_required_deps(self):
"""
This method checks that any required modules are installed before
running the tests.
"""
try:
import sunpy
except ImportError:
raise ImportError(
"The 'test' command requires the sunpy package to be "
"installed and importable.")
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
online = self.online
offline = not self.online_only
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.self_test('
'modulename={1.package!r}, '
'args={1.args!r}, '
'verbose={1.verbose_results!r}, '
'parallel={1.parallel!r}, '
'online={online!r}, '
'offline={offline!r}, '
'figure={figure!r}, '
'coverage={1.coverage!r}, '
'cov_report={1.cov_report!r})); '
'{cmd_post}'
'sys.exit(result)')
x = cmd.format('pass',
self,
online=online,
offline=offline,
figure=self.figure,
cmd_pre=cmd_pre,
cmd_post=cmd_post)
return x
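# For reference (hedged; assumes package_name has been set to 'sunpy' by a
# subclass and coverage is off), the generated command looks roughly like:
#   pass; import sunpy, sys; result = (sunpy.self_test(modulename='',
#   args=None, verbose=False, parallel=0, online=False, offline=True,
#   figure=False, coverage=False, cov_report=None)); sys.exit(result)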
OaklandPeters/task_logger | task_logger/logger.py | Python | mit | 27,065 | 0.010309
"""
@TODO: ProcessLogger().attempts - update/interact based on __enter__/__exit__
@TODO: Review ProcessLoggerABC, and make ProcessLogger inherit from it
@TODO: ProcessingAttempt: make uncaught exception in switch_on_exit() cause parent
to close all other attempts.
@TODO: Write ProcessLoggerABC in task_logger/. Then switch .validate()
from checking if log is JSONProgressLog() to ProcessLoggerABC().
ProcessLoggerABC() needs to be MutableMapping + ability to close attempts.
"""
import os
import collections
import json
import traceback
import datetime
#----
from local_packages import rich_core
from local_packages import rich_collections
from local_packages import rich_property
#---- Local Modules
import enum #local version of enum
import pa_exceptions
LOGGER_SUPPRESSES_ERRORS = True
class JSONProgressLog(rich_collections.BasicMutableMapping):
"""
~(1) MutableMapping
+ (2) ability to read/write to file
+ (3) recursive getters/setters/deleters
+ (4) recursive setters should automatically create nested structure
As in rich_misc.defaultlist
ex. log = JSONProgressLog()
log['import-expanded']['virtual-chemistry-01'][3] = AttemptRecord(...)
--> will automatically create the structure for
self.data['import-expanded'] = {}
self.data['import-expanded']['virtual-chemistry-01'] = []
expand_list(self.data['import-expanded']['virtual-chemistry-01'], 3)
self.data['import-expanded']['virtual-chemistry-01'][3] = AttemptRecord(...)
"""
def __init__(self, logpath=None):
        if logpath is None:
self.logpath = 'default-log.json'
else:
self.logpath = logpath
self.data = None #Default until opened
self.opened = False
def open(self):
try:
self.data = self.read()
except ValueError as exc:
#File found, but 'No JSON object could be decoded'
#Create and initialize the log file
self.write({})
self.data = self.read()
except IOError as exc:
#Usually: 'No such file or directory'
#Create and initialize the log file
self.write({})
self.data = self.read()
self.opened = True
return self
def close(self):
#This should check that all attempts are closed
self.write(self.data)
#self.data = None
self.opened = False
return self
def read(self):
"""Return dict of data in self.logpath:
{
(*args):{attempt-record},
...
}
"""
with open(self.logpath, mode='r') as fi:
raw = json.load(fi)
_unserialized = self.unserialize(raw)
#Unpack nested structures
return _convert_to_string(_unserialized)
def write(self, data=None):
"""Write
"""
        if data is None:
data = self.data
with open(self.logpath, mode='w') as fi:
json.dump(self.serialize(data), fi, indent=1)
return self
#-------- Context Manager
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
#----------- Serialization so it can be stored as JSON array
def serialize(self, data=None):
"""Translate data dictionary into JSON object of array of key-value pairs."""
        if data is None:
data = self.data
rich_core.AssertKlass(data, collections.Mapping, name='data')
return {
'log': [[key, value] for key, value in data.items()]
}
def unserialize(self, raw):
"""Translate JSON array of key-value pairs into a dict."""
rich_core.AssertKlass(raw, collections.Mapping, name='raw')
rich_core.AssertKeys(raw, ['log'], name='raw')
rich_core.AssertKlass(raw['log'], collections.Sequence, name="raw['log']")
return dict(
#(rich_core.ensure_tuple(key), value)
(seq2tuple(key), value)
for key, value in raw['log']
)
#---- Overriding BasicMutableMapping
@rich_collections.BasicMutableMapping.data.validator
def data(self, data):
rich_core.AssertKlass(data, (collections.MutableMapping, type(None)), name='data')
return data
def __getitem__(self, key):
try:
return self.data[key]
except TypeError as exc:
raise KeyError(
"'{0}' not found, likely because log file is not open.".format(key))
def __setitem__(self, key, value):
try:
self.data[key] = value
except TypeError as exc:
raise KeyError(
"'{0}' not found likely because log file is not open.".format(key))
def __delitem__(self, key):
        try:
            del self.data[key]
        except TypeError as exc:
            raise KeyError(
                "'{0}' not found, likely because log file is not open.".format(key))
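# Usage sketch (hedged; the path and key are illustrative):
#   with JSONProgressLog('my-run.json') as log:
#       log[('import-expanded', 'batch-01')] = {'state': 'completed'}
# open() creates and initializes the file if needed; close() flushes
# self.data back to disk.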
# States used to track state of ProcessingAttempt
States = enum.Enum([
('new','untried'),
('completed','complete','done','finished'),
('errored','error','exception','stopped'),
('attempting','attempted','in progress','in_progress','running')
])
class ProcessingAttempt(rich_collections.BasicMutableMapping):
"""
Records and tracks a single attempt to apply processing to a single set
of input arguments. Translates to a single record in the log.
This class exists as a subject-class to ProcessLogger.
@TODO: Consider whether initial data should be included. I suspect not.
@TODO: Consider difficult issue: in switch_on_exit(): NonSuppressedError()
- should it trigger an event in the parent log(), to close out of all
other open attempts before raising that error?
@TODO: Make the exception catching functionality ALSO catch on KeyboardInterrupt
"""
def __init__(self, log, arguments, data=None):
"""Memoize on arguments.
log should be a log, treatable as a MutableMapping
data is initial values for the processing attempt's record
self.data is copied back to self.log[self.arguments], upon __exit__
"""
self.open = False
self.started = datetime.datetime.now()
(self.log,
self.arguments,
self.data) = self.validate(log, arguments, data)
def validate(self, log, arguments, data):
#rich_core.AssertKlass(log, task_logger.ProcessLoggerABC, name='log')
rich_core.AssertKlass(log, collections.MutableMapping, name='log')
# Revised: ensure that arguments are something Hashable
# But do not insist they are a tuple, allows for arguments being a string
rich_core.AssertKlass(arguments, collections.Hashable, name='arguments')
        if data is None:
#data = rich_core.defaults(self.data, self.new_record())
data = {}
else:
rich_core.AssertKlass(data, collections.MutableMapping, name='data')
return log, arguments, data
def __enter__(self):
"""Entering 'with' context manager.
Some initialization of keys handled here; however - initialization the Mapping
(self ~ self.data) occurs in in the data property getter.
"""
self.open = True
#defs = rich_core.defaults(self.data, self.new_record())
#self.update(defs)
if 'state' not in self:
self['state'] = States['new'].name
self.switch_on_enter(self['state'])
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""
"""
self.open = False
self.elapsed()
self.switch_on_exit(exc_type, exc_value, exc_traceback)
return True # Suppress exception
#-------
def elapsed(self):
"
sylvestre/rna | rna/models.py | Python | mpl-2.0 | 6,479 | 0.000309
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
from django.conf import settings
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
class TimeStampedModel(models.Model):
"""
Replacement for django_extensions.db.models.TimeStampedModel
that updates the modified timestamp by default, but allows
that behavior to be overridden by passing a modified=False
parameter to the save method
"""
created = CreationDateTimeField()
modified = models.DateTimeField(editable=False, blank=True, db_index=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
if kwargs.pop('modified', True):
self.modified = datetime.now()
super(TimeStampedModel, self).save(*args, **kwargs)
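# Usage sketch (hedged; 'release' is an arbitrary saved instance):
#   release.save(modified=False)   # keep the original 'modified' timestamp
#   release.save()                 # default: bump 'modified' to now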
class Release(TimeStampedModel):
CHANNELS = ('Nightly', 'Aurora', 'Beta', 'Release', 'ESR')
PRODUCTS = ('Firefox', 'Firefox for Android',
'Firefox Extended Support Release', 'Firefox OS',
'Thunderbird')
product = models.CharField(max_length=255,
choices=[(p, p) for p in PRODUCTS])
channel = models.CharField(max_length=255,
choices=[(c, c) for c in CHANNELS])
version = models.CharField(max_length=255)
release_date = models.DateTimeField()
text = models.TextField(blank=True)
is_public = models.BooleanField(default=False)
bug_list = models.TextField(blank=True)
bug_search_url = models.CharField(max_length=2000, blank=True)
system_requirements = models.TextField(blank=True)
def major_version(self):
return self.version.split('.', 1)[0]
def get_bug_search_url(self):
if self.bug_search_url:
return self.bug_search_url
if self.product == 'Thunderbird':
return (
'https://bugzilla.mozilla.org/buglist.cgi?'
'classification=Client%20Software&query_format=advanced&'
'bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&'
'target_milestone=Thunderbird%20{version}.0&product=Thunderbird'
'&resolution=FIXED'
).format(version=self.major_version())
return (
'https://bugzilla.mozilla.org/buglist.cgi?'
'j_top=OR&f1=target_milestone&o3=equals&v3=Firefox%20{version}&'
'o1=equals&resolution=FIXED&o2=anyexact&query_format=advanced&'
'f3=target_milestone&f2=cf_status_firefox{version}&'
'bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&'
'v1=mozilla{version}&v2=fixed%2Cverified&limit=0'
).format(version=self.major_version())
def equivalent_release_for_product(self, product):
"""
Returns the release for a specified product with the same
channel and major version with the highest minor version,
or None if no such releases exist
"""
releases = self._default_manager.filter(
version__startswith=self.major_version() + '.',
channel=self.channel, product=product).order_by('-version')
if not getattr(settings, 'DEV', False):
releases = releases.filter(is_public=True)
if releases:
return sorted(
sorted(releases, reverse=True,
key=lambda r: len(r.version.split('.'))),
reverse=True, key=lambda r: r.version.split('.')[1])[0]
def equivalent_android_release(self):
if self.product == 'Firefox':
return self.equivalent_release_for_product('Firefox for Android')
def equivalent_desktop_release(self):
if self.product == 'Firefox for Android':
return self.equivalent_release_for_product('Firefox')
def notes(self, public_only=False):
"""
Retrieve a list of Note instances that should be shown for this
|
release, grouped as either new features or known issues, and sorted
first by sort_num highest to lowest, which is applied to both groups,
and then for new features we also sort by tag in the order specified
by Note.TAGS, with untagged notes coming first, then finally moving
any note with the fixed tag that starts with the release version to
the top, for what we call "dot fixes".
"""
tag_index = dict((tag, i) for i, tag in enumerate(Note.TAGS))
notes = self.note_set.order_by('-sort_num')
if public_only:
notes = notes.filter(is_public=True)
known_issues = [n for n in notes if n.is_known_issue_for(self)]
new_features = sorted(
sorted(
(n for n in notes if not n.is_known_issue_for(self)),
key=lambda note: tag_index.get(note.tag, 0)),
key=lambda n: n.tag == 'Fixed' and n.note.startswith(self.version),
reverse=True)
return new_features, known_issues
def __unicode__(self):
return '{product} {version} {channel}'.format(
product=self.product, version=self.version, channel=self.channel)
class Meta:
# TODO: see if this has a significant performance impact
ordering = ('product', '-version', 'channel')
unique_together = (('product', 'version'),)
class Note(TimeStampedModel):
TAGS = ('New', 'Changed', 'HTML5', 'Feature', 'Language', 'Developer',
'Fixed')
bug = models.IntegerField(null=True, blank=True)
note = models.TextField(blank=True)
releases = models.ManyToManyField(Release, blank=True)
is_known_issue = models.BooleanField(default=False)
fixed_in_release = models.ForeignKey(Release, null=True, blank=True,
related_name='fixed_note_set')
tag = models.CharField(max_length=255, blank=True,
choices=[(t, t) for t in TAGS])
sort_num = models.IntegerField(default=0)
is_public = models.BooleanField(default=True)
image = models.ImageField(upload_to=lambda instance, filename: '/'.join(['screenshot', str(instance.pk), filename]))
def is_known_issue_for(self, release):
return self.is_known_issue and self.fixed_in_release != release
def __unicode__(self):
return self.note
nbir/gambit-scripts | scripts/LAnhoodAnalysis/src/disp_combined.py | Python | apache-2.0 | 3,378 | 0.043517
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <nbora@usc.edu>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
import os
import sys
import numpy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import settings as my
sys.path.insert(0, os.path.abspath('..'))
#
# DISPLACEMENT FROM HOME - COMBINED PLOTS
#
DATA = {'all_days': {'hbk': (2.51862656908 , -0.784949777304),
'south-la': (0.443342017512 , -0.584871677246),
'west-la': (1.84785616418 , -0.752511521025),
'south-bay': (1.90968037696 , -0.755235428729)},
'weekdays': {'hbk': (5.29351309589 , -0.875495033521),
'south-la': (0.446816556448 , -0.58491267971),
'west-la': (2.39057082739 , -0.78488223198),
'south-bay': (2.89477474957 , -0.807413173376)},
'weekends': {'hbk': (0.65568123293 , -0.638785600777),
'south-la': (0.54851373091 , -0.620647897449),
'west-la': (1.25991614386 , -0.712834712369),
'south-bay': (1.0458891347 , -0.688917748776)}}
REGIONS = ['hbk', 'south-la', 'west-la', 'south-bay']
'''COLORS = {'hbk': '#377EB8',
'south-la' : '#FA71AF',
'west-la' : '#4DAF4A',
'south-bay' : '#A65628',
'pomona' : '#3B3B3B',
'bernardino' : '#984EA3',
'riverside' : '#FF7F00'}'''
COLORS = {'hbk' : 'b',
'south-la' : 'm',
'west-la' : 'g',
'south-bay' : '#A65628'}
LINESTYLE = {'all_days': '-',
'weekdays' : '--',
'weekends' : '-.'}
RANGE = (100, 15000)
def out_disp_combined():
'''Plot Power Law fits for regions on single plot'''
a = 'all_days'
wd = 'weekdays'
we = 'weekends'
h = 'hbk'
s = 'south-la'
w = 'west-la'
b = 'south-bay'
x = numpy.arange(RANGE[0], RANGE[1], 100)
_plot(x, [(a,h), (a,s), (a,w), (a,b)], 'all_days_all_regions')
_plot(x, [(wd,h), (wd,s), (wd,w), (wd,b)], 'weekdays_all_regions')
_plot(x, [(we,h), (we,s), (we,w), (we,b)], 'weekends_all_regions')
_plot(x, [(a,h), (wd,h), (we,h)], 'hbk')
_plot(x, [(a,s), (wd,s), (we,s)], 'south-la')
_plot(x, [(a,w), (wd,w), (we,w)], 'west-la')
_plot(x, [(a,b), (wd,b), (we,b)], 'south-bay')
_plot(x, [(a,h), (wd,h), (we,h), (a,s), (wd,s), (we,s), (a,w), (wd,w), (we,w), (a,b), (wd,b), (we,b)], 'all')
def _plot(x, Y, file_name):
title = file_name.replace('_', ' ').upper()
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.96, top=0.92, bottom=0.08)
#ax.set_autoscaley_on(False)
#ax.set_ylim([0,0.1])
ax.set_xlim(0, RANGE[1])
powerlaw = lambda x, amp, index: amp * (x**index)
for y in Y:
day, region = y
amp, index = DATA[day][region]
label = '{region} ({day})'.format(day=day, region=region).upper()
ax.plot(x, powerlaw(x, amp, index), label=label, linewidth=1, color=COLORS[region], alpha=0.95, linestyle=LINESTYLE[day])
formatter = FuncFormatter(lambda v, pos: str(round(v*100, 2))+'%')
plt.gca().yaxis.set_major_formatter(formatter)
formatter = FuncFormatter(lambda v, pos: '' if v/1000 == 0 else str(int(v/1000))+'km')
plt.gca().xaxis.set_major_formatter(formatter)
ax.set_title(title, fontsize=11)
ax.legend(fontsize=10)
if not os.path.exists('data/' + my.DATA_FOLDER + 'disp_stat/'):
os.makedirs('data/' + my.DATA_FOLDER + 'disp_stat/')
plt.savefig('data/' + my.DATA_FOLDER + 'disp_stat/' + file_name + '.png')
print 'Stored chart: %s' % file_name
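# Worked check (hedged): for 'hbk' on all_days, amp=2.5186 and index=-0.7849,
# so at x = 1000 m the fit gives 2.5186 * 1000**-0.7849 ~= 0.0111, which the
# y-axis percent formatter renders as about 1.11%.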
mirageglobe/upp-tracker | tracer/ftraceadvance.py | Python | apache-2.0 | 6,390 | 0.006416
from ftracer_script import *
## note the capture details file is
## warnings is needed to suppress errors from mousehook tracker
## Function: This is the main script file which houses all the scripts
import sys
import pythoncom, pyHook
import win32con, win32com.client
import win32gui, win32api
import codecs
import wmi
import chardet
import time, pickle
import warnings
import sqlite3
## ----------------------------------
## Global Var
## ----------------------------------
ftraceadvance_lastaction = 'start' # this stores the last action of the user
ftraceadvance_sqlitedb = 'tracemouse.sqlite' # structure is id int and (mousetimeepoch, mousetimeboot, mousepos, winfile, winhandle, winname, winforename)
## ----------------------------------
## Mouse / Keyboard Tracing Functions
## ----------------------------------
def winEnumHandler( hwnd, ctx ):
if win32gui.IsWindowVisible( hwnd ):
print hex(hwnd), win32gui.GetWindowText( hwnd )
def strutf8encode(sstring):
rtnString = ""
if sstring != None:
## codecs.lookup(sstring)
## ustring = unicode(sstring,'utf_8')
## print ustring
rtn_encoding = chardet.detect(sstring)
if rtn_encoding['encoding'] != None:
rtnString = sstring.decode(rtn_encoding['encoding'],'replace')
return rtnString.encode('utf_8')
def OnKeyboardEvent(event):
## Function: Allows escape key to be pressed to exit any script is running
if event.Key == "Escape":
exit()
return True
def OnMouseEvent(event):
## this function uses mouse to trace the user input of applications
global ftraceadvance_lastaction
global ftraceadvance_sqlitedb
# called when mouse events are received. prints out mouse events
if event.MessageName != "mouse move":
print 'MessageName:', event.MessageName
print 'Message:', event.Message
print 'Time:', event.Time
        print 'WindowHandler:', hex(event.Window)
print 'WindowName:', strutf8encode(event.WindowName)
print 'Position:', event.Position
print 'Wheel:', event.Wheel #not used in wheel detection
print 'Injected:', event.Injected #rarely used
print time.time()
if event.WindowName == None:
window_name = 'None'
else:
window_name = event.WindowName
ftemp_wfore = strutf8encode(win32gui.GetWindowText(win32gui.GetForegroundWindow())) # This special method captures window name
ftemp_wname = AppDetector(strutf8encode(event.WindowName))
ftemp_whand = str(event.Window) #window handler
ftemp_mpos = str(event.Position)
ftemp_mact = str(event.MessageName)
ftemp_mnum = int(event.Message)
ftemp_epoc = time.time() #epoch time of mouse
ftemp_rtime = event.Time #running counter of mouse
ftemp_wfile = str('')
conn = sqlite3.connect(ftraceadvance_sqlitedb)
conn.text_factory = str
curs = conn.cursor()
if ftraceadvance_lastaction != window_name:
print ftraceadvance_lastaction
curs.execute('insert into ftrace(mousetimeepoch, mousetimeboot, mousepos, mouseact, mousenum, winfile, winhandle, winname, winforename) values(?, ?, ?, ?, ?, ?, ?, ?, ?)',(ftemp_epoc,ftemp_rtime,ftemp_mpos,ftemp_mact,ftemp_mnum,ftemp_wfile,ftemp_whand,ftemp_wname,ftemp_wfore))
ftraceadvance_lastaction = strutf8encode(event.WindowName)
print ftraceadvance_lastaction
conn.commit()
curs.close()
    return True # returning True is required; otherwise pyHook reports an error
def AppDetector(data_window=''):
    ## Maps the detected window/file name to a friendly application name
values = {
'': 'Unknown',
'Unknown': 'Unknown',
'C:\Python27\python.exe': 'Python',
'C:\Python26\python.exe': 'Python',
'FolderView': 'Windows Explorer - Folderview',
'Downloads': 'Windows Explorer - downloads',
'OpenOffice.org Writer': 'OpenOffice Writer'
}
return values.get(data_window, 'Unknown')
## ----------------------------------
## SQLite Writing Functions
## ----------------------------------
def sqlite_table(file_write='tracemouse.sqlite'):
# function creates sqlite 3 db and connects to a new file
    conn = sqlite3.connect(file_write)
curs = conn.cursor()
curs.execute('''create table if not exists ftrace (id integer primary key, mousetimeepoch float, mousetimeboot float, mousepos text, mouseact text, mousenum integer, winfile text, winhandle text, winname text, winforename text)''')
curs.execute('''create table if not exists fswitch (id integer primary key, objsource text, objtarget text, rstrength integer)''')
conn.commit()
curs.close()
return True
def sqlite_query(mquery, file_write='tracemouse.sqlite'):
# function inserts into a sqlite table
    conn = sqlite3.connect(file_write)
curs = conn.cursor()
curs.execute(mquery)
conn.commit()
curs.close()
return True
def sqlite_cleardb():
    conn = sqlite3.connect('tracemouse.sqlite')
curs = conn.cursor()
conn.commit()
curs.close()
return True
## ----------------------------------
## Other handy Functions
## ----------------------------------
def rem_duplicates(seq, idfun=None):
# order preserving
# remove duplicates from a list
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def fxn():
warnings.warn("depreciated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
if __name__ == "__main__":
# Captures mouse events and writes into log file (common_script.py). Use win32 py to hook on mouse events
# This trace module will run in a continual loop until application is stopped
sqlite_table('tracemouse.sqlite')
bm = pyHook.HookManager()
bm.MouseAll = OnMouseEvent
bm.HookMouse()
bk = pyHook.HookManager()
bk.KeyDown = OnKeyboardEvent
bk.HookKeyboard()
pythoncom.PumpMessages()
#shell = win32com.client.Dispatch("WScript.Shell")
#shell.AppActivate('Command Prompt') # this sets window to focus
#x1 = win32com.client.DispatchEx("PDFcreator.Application")
akosyakov/intellij-community | python/testData/formatter/trailingBlankLinesWithBackslashesAtFunctionEndNoNewLine.py | Python | apache-2.0 | 25 | 0.08
def foo():
pass
\
\
\
phrocker/accumulo | test/system/auto/sleep.py | Python | apache-2.0 | 856 | 0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this fi
|
le except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
scale = 1.0
def sleep(secs):
import time
time.sleep(secs*scale)
sussexstudent/falmer | falmer/frontend/views.py | Python | mit | 1,727 | 0.004053
from django.http import Http404, HttpResponse
from django.template.context_processors import csrf
from rest_framework.authentication import TokenAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.views import APIView
from .models import FrontendDeployment
dev = """
<!doctype html>
<html lang="en">
<head>
<title>Loading | Falmer</title>
</head>
<body class="FalmerSite">
<script type="text/javascript">window.CSRF = "{csrf_token}";</script>
<div class="FalmerAppRoot"></div>
<script type="text/javascript" src="http://localhost:8080/vendor.js"></script>
<script type="text/javascript" src="http://localhost:8080/devFonts.js"></script>
<script type="text/javascript" src="http://localhost:8080/main.js"></script>
<script type="text/javascript" src="http://localhost:8080/productionFonts.js"></script>
</body>
</html>
"""
def application_serve(request):
if request.is_ajax() is False:
try:
deployment = FrontendDeployment.objects.filter(enabled=True).latest('created_at')
except FrontendDeployment.DoesNotExist:
return HttpResponse(dev.format(csrf_token=csrf(request)['csrf_token']))
return HttpResponse(deployment.content.format(csrf_token=csrf(request)['csrf_token']))
raise Http404()
class FrontendAPI(APIView):
authentication_classes = [TokenAuthentication, ]
permission_classes = [DjangoModelPermissions, ]
queryset = FrontendDeployment.objects.none()
    def post(self, request):
FrontendDeployment.objects.create(
            content=request.data['contents'],
)
return HttpResponse(status=200)
d-kiss/candyland | trees/binary_tree.py | Python | gpl-3.0 | 3,771 | 0.00053
from immutable import Immutable
class BinaryTree(Immutable):
"""Immutable Binary Tree.
Left child always smaller than the parent.
Right child always bigger than the parent.
Each element exists only once in a binary tree.
Attributes:
left (BinaryTree): left node.
right (BinaryTree): right node.
value (object): node's value.
"""
def __init__(self, left, right, value):
super(BinaryTree, self).__init__()
self.value = value
self.right = right
self.left = left
def add(self, element):
if self.value == element:
return self # Element already exists
        return self.add_subtree(BinaryTree(None, None, element))
def add_subtree(self, subtree):
if subtree is None:
return self
if self.value == subtree.value:
# Need to merge subtree's children with this tree's children
new_left = (self.left.add_subtree(subtree.left)
if self.left is not None else subtree.left)
new_right = (self.right.add_subtree(subtree.right)
if self.right is not None else subtree.right)
return BinaryTree(new_left, new_right, self.value)
elif subtree.value > self.value: # Element goes on right
if self.right is not None:
return BinaryTree(self.left, self.right.add_subtree(subtree),
self.value)
else:
return BinaryTree(self.left, subtree, self.value)
else: # Element goes on the left
if self.left is not None:
return BinaryTree(self.left.add_subtree(subtree), self.right,
self.value)
else:
return BinaryTree(subtree, self.right, self.value)
def remove(self, element):
if self.value == element:
if self.left is None and self.right is None:
return None
if self.left is None:
return self.right
if self.right is None:
return self.left
return self.left.add_subtree(self.right)
if self.value < element and self.right is not None:
return BinaryTree(self.left, self.right.remove(element), self.value)
elif self.value > element and self.left is not None:
return BinaryTree(self.left.remove(element), self.right, self.value)
return self # Element Not Found
def __contains__(self, item):
if self.value == item:
return True
elif item > self.value:
return item in self.right if self.right is not None else False
else:
return item in self.left if self.left is not None else False
def __str__(self):
return ",".join(str(value) for value in filter(lambda x: x is not None,
[self.value,
self.left,
self.right]))
def __repr__(self):
return "%s(%s, %s, %s)" % (self.__class__.__name__,
repr(self.left),
repr(self.right),
repr(self.value))
def __iter__(self):
if self.left:
for left_num in self.left:
yield left_num
yield self.value
if self.right:
for right_num in self.right:
yield right_num
@classmethod
def make(cls, arr):
tree = cls(None, None, arr[0])
for var in arr[1:]:
tree = tree.add(var)
return tree
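# Usage sketch (hedged): the tree is persistent, so add/remove return new trees.
#   t = BinaryTree.make([5, 2, 8, 1])
#   t2 = t.add(3)              # t itself is unchanged
#   list(t2)                   # -> [1, 2, 3, 5, 8] (in-order iteration)
#   8 in t2.remove(8)          # -> False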
zhenxuan00/mmdgm | conv-mmdgm/optimization/optimizer_separated.py | Python | mit | 6,121 | 0.01405
'''
Different optimizer for minimization
'''
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_momentum_optimizer_max(learning_rate=0.01, momentum=0.9, weight_decay=0.0):
print 'momentum', learning_rate.get_value(), momentum, weight_decay
def get_optimizer(w, g, l, d):
# Store the parameters in dict or in list
#updates = OrderedDict()
updates = []
for i in xrange(len(w)):
gi = g[i]
if weight_decay > 0:
gi -= weight_decay * d[i] * w[i]
mom = shared32(w[i].get_value() * 0.)
# Update moments
mom_new = momentum * mom + learning_rate * l[i] * (1 - momentum) * gi
# Do update
w_new = w[i] + mom_new
updates = updates + [(w[i], w_new),(mom, mom_new)]
return updates
return get_optimizer
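# The update rule above, in symbols (hedged reading of the code; m_0 = 0):
#   m_t = momentum * m_{t-1} + lr * l_i * (1 - momentum) * g_t
#   w_t = w_{t-1} + m_t    # the *_min variant below subtracts instead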
def get_momentum_optimizer_min(learning_rate=0.01, momentum=0.9, weight_decay=0.0):
print 'momentum', learning_rate.get_value(), momentum, weight_decay
def get_optimizer(w, g, l, d):
# Store the parameters in dict or in list
#updates = OrderedDict()
updates = []
for i in xrange(len(w)):
gi = g[i]
if weight_decay > 0:
gi += weight_decay * d[i] * w[i]
mom = shared32(w[i].get_value() * 0.)
# Update moments
            mom_new = momentum * mom + learning_rate * l[i] * (1 - momentum) * gi
# Do update
w_new = w[i] - mom_new
updates = updates + [(w[i], w_new),(mom, mom_new)]
return updates
return get_optimizer
def get_adam_optimizer_max(learning_rate=0.001, decay1=0.1, decay2=0.001, weight_decay=0.0, epsilon=1e-8):
    '''
    Implementation of AdaM.
    All of the parameters are the defaults from the ICLR paper.
    Not the exact procedure: there is no lambda as in the paper, even after changing decay = 1 - beta.
    Used for maximization (the gradient step is added).
    '''
print 'AdaM', learning_rate.get_value(), decay1, decay2, weight_decay, epsilon
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_optimizer(w, g, l, d):
# Store the parameters in dict or in dist
#updates = OrderedDict()
updates = []
it = shared32(0.)
###
#updates[it] = it + 1.
it_new = it + 1.
updates+=[(it, it_new)]
fix1 = 1.-(1.-decay1)**(it+1.) # To make estimates unbiased
fix2 = 1.-(1.-decay2)**(it+1.) # To make estimates unbiased
lr_t = learning_rate * T.sqrt(fix2) / fix1
###
#print xrange(len(w))
#for i in w:
for i in xrange(len(w)):
gi = g[i]
if weight_decay > 0:
gi -= weight_decay * d[i] * w[i] #T.tanh(w[i])
# mean_squared_grad := E[g^2]_{t-1}
mom1 = shared32(w[i].get_value() * 0.)
mom2 = shared32(w[i].get_value() * 0.)
# Update moments
mom1_new = mom1 + decay1 * (gi - mom1)
mom2_new = mom2 + decay2 * (T.sqr(gi) - mom2)
# Compute the effective gradient and effective learning rate
effgrad = mom1_new / (T.sqrt(mom2_new) + epsilon)
effstep_new = lr_t * l[i] * effgrad
# Do update
w_new = w[i] + effstep_new
# Apply update
#updates[w[i]] = w_new
#updates[mom1] = mom1_new
#updates[mom2] = mom2_new
###
updates = updates + [(w[i], w_new),(mom1, mom1_new),(mom2, mom2_new)]
return updates
return get_optimizer
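# The per-parameter update above, in standard AdaM notation (hedged;
# beta1 = 1 - decay1, beta2 = 1 - decay2):
#   m_t  = beta1 * m_{t-1} + (1 - beta1) * g_t
#   v_t  = beta2 * v_{t-1} + (1 - beta2) * g_t**2
#   lr_t = lr * sqrt(1 - beta2**t) / (1 - beta1**t)
#   w_t  = w_{t-1} + lr_t * l_i * m_t / (sqrt(v_t) + epsilon)  # '-' in the min variant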
def get_adam_optimizer_min(learning_rate=0.001, decay1=0.1, decay2=0.001, weight_decay=0.0, epsilon=1e-8):
    '''
    Implementation of AdaM.
    All of the parameters are the defaults from the ICLR paper.
    Not the exact procedure: there is no lambda as in the paper, even after changing decay = 1 - beta.
    Used for minimization.
    '''
print 'AdaM', learning_rate.get_value(), decay1, decay2, weight_decay, epsilon
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_optimizer(w, g, l, d):
# Store the parameters in dict or in dist
#updates = OrderedDict()
updates = []
it = shared32(0.)
###
#updates[it] = it + 1.
it_new = it + 1.
updates+=[(it, it_new)]
fix1 = 1.-(1.-decay1)**(it+1.) # To make estimates unbiased
fix2 = 1.-(1.-decay2)**(it+1.) # To make estimates unbiased
lr_t = learning_rate * T.sqrt(fix2) / fix1
###
#print xrange(len(w))
#for i in w:
for i in xrange(len(w)):
gi = g[i]
if weight_decay > 0:
gi += weight_decay * d[i] * w[i] #T.tanh(w[i])
# mean_squared_grad := E[g^2]_{t-1}
mom1 = shared32(w[i].get_value() * 0.)
mom2 = shared32(w[i].get_value() * 0.)
# Update moments
mom1_new = mom1 + decay1 * (gi - mom1)
mom2_new = mom2 + decay2 * (T.sqr(gi) - mom2)
# Compute the effective gradient and effective learning rate
effgrad = mom1_new / (T.sqrt(mom2_new) + epsilon)
effstep_new = lr_t * l[i] * effgrad
# Do update
w_new = w[i] - effstep_new
# Apply update
#updates[w[i]] = w_new
#updates[mom1] = mom1_new
#updates[mom2] = mom2_new
###
updates = updates + [(w[i], w_new),(mom1, mom1_new),(mom2, mom2_new)]
return updates
return get_optimizer
AWegnerGitHub/IRVING | irving/dashboard/management/commands/exportirvingdata.py | Python | gpl-2.0 | 2,221 | 0.029716
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.core import serializers
from django.db.models.loading import get_model
from dashboard import models
import re, os, sys
class Command(BaseCommand):
args = 'system_model [system_id]'
help = 'Exports data and prepares for import in another system'
def handle(self, *args, **options):
if len(args) > 1:
self.dump_system_data(args[0], system_id = args[1])
else:
if args[0] == 'Permissions':
                self.dump_permissions()
else:
self.dump_system_data(args[0])
def dump_system_data(self, model = None, system_id = None):
if model:
modelN = get_model(model.split(".")[0],model.split(".")[1])
if modelN == models.QueryData:
if not system_id:
data = serializers.serialize("json", modelN.objects.all())
data = self.cleanExport(data)
system_id = None
else:
data = serializers.serialize("json", modelN.objects.filter(system=system_id))
data = self.cleanExport(data)
else:
data = serializers.serialize("json", modelN.objects.all())
data = self.cleanExport(data)
system_id = None
self.writeExport(model, system_id, data)
def dump_permissions(self):
sys.stdout = open(os.path.join(os.getcwd(), "dashboard/IMPORT_FILES/system_permissions.json"), "w")
call_command('dumpdata', 'auth.group', 'contenttypes.contenttype', 'auth.permission', format="json", indent=4)
sys.stdout.close()
def cleanExport(self, dString):
dString = re.sub('"pk": [0-9]{1,5}', '"pk": null', dString)
dString = re.sub('"system": [0-9]{1,5}', '"system": null', dString)
dString = re.sub('""', 'null', dString)
return dString
def writeExport(self, modelName, filterSystem, dString):
if filterSystem:
systemName = models.System.objects.filter(id=filterSystem)[0].database_name
else:
systemName = 'all'
if os.path.isdir(os.path.join(os.getcwd(), "dashboard/IMPORT_FILES/")):
out = open(os.path.join(os.getcwd(), "dashboard/IMPORT_FILES/", modelName + "_system_" + systemName + ".json"), "w")
out.write(dString)
out.close()
else:
raise IOError(os.path.join(os.getcwd(), "dashboard/IMPORT_FILES/ does not exist"))
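# Usage sketch (hedged; the model label and system id are illustrative):
#   python manage.py exportirvingdata dashboard.QueryData 3
#   python manage.py exportirvingdata Permissions
# Output lands in dashboard/IMPORT_FILES/ as <model>_system_<system>.json.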
bendemott/solr-zkutil | solrzkutil/util.py | Python | mit | 13,103 | 0.0087
from __future__ import unicode_literals
from __future__ import print_function
import socket
import time
import six
import math
import threading
from random import choice
import logging
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.protocol.states import EventType
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.exceptions import OperationTimeoutError
log = logging.getLogger(__name__)
MODE_LEADER = 'leader'
CONNECTION_CACHE_ENABLED = True
CONNECTION_CACHE = {}
def kazoo_client_cache_enable(enable):
"""
You may disable or enable the connection cache using this function.
The connection cache reuses a connection object when the same connection parameters
are encountered that have been used previously. Because of the design of parts of this program
    functionality needs to be independent and uncoupled, which means it needs to establish its own
connections.
Connections to Zookeeper are the most time consuming part of most interactions so caching
    connections enables much faster running of tests, health checks, etc.
"""
global CONNECTION_CACHE_ENABLED
CONNECTION_CACHE_ENABLED = enable
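# Usage sketch (hedged): tests that must exercise fresh connections can
# turn the cache off up front:
#   kazoo_client_cache_enable(False)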
def kazoo_client_cache_serialize_args(kwargs):
'''
Returns a hashable object from keyword arguments dictionary.
This hashable object can be used as the key in another dictionary.
:param kwargs: a dictionary of connection parameters passed to KazooClient
Supported connection parameters::
hosts - Comma-separated list of hosts to connect to (e.g. 127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
timeout - The longest to wait for a Zookeeper connection.
client_id - A Zookeeper client id, used when re-establishing a prior session connection.
handler - An instance of a class implementing the IHandler interface for callback handling.
default_acl - A default ACL used on node creation.
auth_data - A list of authentication credentials to use for the connection.
Should be a list of (scheme, credential) tuples as add_auth() takes.
read_only - Allow connections to read only servers.
randomize_hosts - By default randomize host selection.
connection_retry - A kazoo.retry.KazooRetry object to use for retrying the connection to
Zookeeper. Also can be a dict of options which will be used for creating one.
command_retry - A kazoo.retry.KazooRetry object to use for the KazooClient.retry() method.
Also can be a dict of options which will be used for creating one.
logger - A custom logger to use instead of the module global log instance.
'''
return frozenset(kwargs.items())
def kazoo_client_cache_get(kwargs):
if CONNECTION_CACHE_ENABLED:
return CONNECTION_CACHE.get(kazoo_client_cache_serialize_args(kwargs))
def kazoo_client_cache_put(kwargs, client):
global CONNECTION_CACHE
CONNECTION_CACHE[kazoo_client_cache_serialize_args(kwargs)] = client
def kazoo_clients_connect(clients, timeout=5, continue_on_error=False):
"""
Connect the provided Zookeeper client asynchronously.
This is the fastest way to connect multiple clients while respecting a timeout.
:param clients: a sequence of KazooClient objects or subclasses of.
:param timeout: connection timeout in seconds
:param continue_on_error: don't raise exception if SOME of the hosts were able to connect
"""
asyncs = []
for client in clients:
# returns immediately
asyncs.append(client.start_async())
tstart = time.time()
while True:
elapsed = time.time() - tstart
remaining = math.ceil(max(0, timeout - elapsed))
        connecting = [task for idx, task in enumerate(asyncs) if not clients[idx].connected]
connected_ct = len(clients) - len(connecting)
if not connecting:
# successful - all hosts connected
return connected_ct
if not remaining:
# stop connection attempt for any client that timed out.
for client in clients:
if client.connected:
continue
else:
client.stop()
if len(connecting) < len(clients):
# if some of the clients connected, return the ones that are connected
msg = 'Connection Timeout - %d of %d clients timed out after %d seconds' % (
len(connecting),
len(clients),
timeout
)
if continue_on_error:
log.warn(msg)
return connected_ct
else:
                raise OperationTimeoutError(msg)
raise OperationTimeoutError('All hosts timed out after %d secs' % timeout)
# Wait the remaining amount of time to connect
# note that this will wait UP TO remaining, but will only wait as long as it takes
# to connect.
connecting[0].wait(remaining)
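# Usage sketch (hedged; host strings are illustrative):
#   clients = [KazooClient(hosts=h) for h in ('zk1:2181', 'zk2:2181')]
#   connected = kazoo_clients_connect(clients, timeout=5, continue_on_error=True)
# Returns the number of clients that connected within the timeout.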
def kazoo_clients_from_client(kazoo_client):
"""
Construct a series of KazooClient connection objects from a single KazooClient instance
A client will be constructed per host within the KazooClient, so if the KazooClient was
constructed with 3 hosts in its connection string, 3 KazooClient instanctes will be returned
The class constructed will be the same type as is passed in kazoo_client, this functionality
is so that this method will work with mocked connection objects or customized subclasses of
KazooClient.
"""
# TODO support all connection arguments
connection_strings = zk_conns_from_client(kazoo_client)
cls = kazoo_client.__class__
clients = []
for conn_str in connection_strings:
args = {'hosts': conn_str}
client = kazoo_client_cache_get(args)
if not client:
client = cls(**args)
kazoo_client_cache_put(args, client)
clients.append(client)
return clients
def get_leader(zk_hosts):
# TODO refactor me to accept KazooClient object.
for host in zk_hosts:
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
properties_str = zk.command(cmd=b'srvr')
properties = properties_str.split('\n')
for line in properties:
if not line.strip().lower().startswith('mode:'):
continue
key, val = line.split(':')
if val.strip().lower() == MODE_LEADER:
return host
zk.stop()
raise RuntimeError('no leader available, from connections given')
def get_server_by_id(zk_hosts, server_id):
# TODO refactor me to accept KazooClient object.
if not isinstance(server_id, int):
raise ValueError('server_id must be int, got: %s' % type(server_id))
for host in zk_hosts:
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
properties_str = zk.command(cmd=b'conf')
properties = properties_str.split('\n')
for line in properties:
if not line.strip().lower().startswith('serverid='):
continue
key, val = line.split('=')
val = int(val)
if val == server_id:
return host
continue
zk.stop()
raise ValueError("no host available with that server id [%d], from connections given" % server_id)
def zk_conn_from_client(kazoo_client):
RK70825/TogePy | pkmn_test.py | Python | gpl-2.0 | 2,964 | 0.008772
"""
togePy.pkmn_test
This module tests functionality for togePy
"""
import pokeStructs
import cPickle as pickle
import random
import numpy as np
# Load Data
with open('pokedex', 'rb') as f:
pokedex = pickle.load(f)
with open('abilities', 'rb') as f:
abilities = pickle.load(f)
with open('items', 'rb') as f:
items = pickle.load(f)
with open('moves', 'rb') as f:
moves = pickle.load(f)
# Create Test Data Suite
def random_Move(only_dmg = True):
if only_dmg:
is_dmg = False
while is_dmg == False:
m = moves[random.choice(moves.keys())]
is_dmg = (m.Damage in ('physical', 'special'))
return m
else:
        return moves[random.choice(moves.keys())]
def random_Moveset(only_dmg = True):
ms = pokeStructs.Moveset()
ms.set_All([random_Move(only_dmg) for _ in xrange(4)])
return ms
def random_Poke():
def random_EVs():
EV = np.random.randint(256, size=6).astype(float)
        while EV.sum() != 510 or any(EV > 255):
EV_old = EV
if EV.sum() != 510:
EV = np.round(510./EV.sum()*EV)
EV[EV > 255] = 255
if all(EV_old == EV):
EV = np.random.randint(256, size=6).astype(float)
return pokeStructs.createEVs(EV.astype(int).tolist())
def random_IVs():
return pokeStructs.createIVs(np.random.randint(32, size=6).tolist())
def random_Nature():
return random.choice(['Hardy', 'Lonely', 'Brave', 'Adamant', 'Naughty', 'Bold', 'Docile', 'Relaxed', 'Impish', 'Lax', 'Timid', 'Hasty', 'Serious', 'Jolly', 'Naive', 'Modest', 'Mild', 'Quiet', 'Bashful', 'Rash', 'Calm', 'Gentle', 'Sassy', 'Careful', 'Quirky'])
def random_Happiness():
return random.randint(0,255)
def random_Ability():
return random.choice(p_dex.Abilities)
def random_Moves():
return random_Moveset()
p_dex = pokedex[random.choice(pokedex.keys())]
p = pokeStructs.Pokemon(p_dex, np.random.randint(1, 101), random_EVs(), random_IVs(), random_Nature(), random_Happiness(), random_Ability(), random_Moves(), random.choice(items.keys()))
p.CurHP = int(np.round(p.CurHP * np.random.random()))
p.Status = random.choice([None, 'FZN', 'PAR', 'SLP', 'PSN', 'BRN'])
return p
def random_Team():
t = pokeStructs.Team('Random Team')
t.set_All([random_Poke() for _ in xrange(6)])
return t
def restore_Team(t):
for p in t.Members.values():
p.CurHP = p.Stats['HP']
p.Status = None
def nature_list():
l_natures = ['Hardy', 'Lonely', 'Brave', 'Adamant', 'Naughty', 'Bold', 'Docile', 'Relaxed', 'Impish', 'Lax', 'Timid', 'Hasty', 'Serious', 'Jolly', 'Naive', 'Modest', 'Mild', 'Quiet', 'Bashful', 'Rash', 'Calm', 'Gentle', 'Sassy', 'Careful', 'Quirky']
return l_natures
if __name__ == '__main__':
print 'Ready'
codeMarble/codeMarble_Web | codeMarble_Web/resource/otherResource.py | Python | gpl-3.0 | 214 | 0.004673
# -*- coding: utf-8 -*-
TRIPLE_DES_KEY = '1234567812345678'
LIMIT_TITLE_VIEW_LENGTH = 25
RANK_LIST = 5
BLOCK = 11
LIST = 25
VIEW_SERVER_NOTICE = 2
VIEW_NOTICE = 3
TEXTAREA_ROW = 400
MAX_ROW = 700
REPLY_ROW = 50
pytorch/fairseq | examples/wav2vec/unsupervised/scripts/filter_tsv.py | Python | mit | 955 | 0
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", required=True, type=str)
parser.add_argument("--no-skip", action="store_true")
parser.add_argument("--keep", action="store_true")
params = parser.parse_args()
def get_fname(line):
p = os.path.basename(line.split("\t")[0])
p = os.path.splitext(p)[0]
return p
# filenames to exclude
seen = set()
with open(params.tsv) as f:
if not params.no_skip:
        root = next(f).rstrip()  # skip the root-directory header line
for line in f:
seen.add(get_fname(line))
for i, line in enumerate(sys.stdin):
exists = get_fname(line) in seen
    keep = (exists and params.keep) or (not exists and not params.keep)
if i == 0 or keep:
print(line, end="")
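# Usage sketch (hedged; file names are illustrative):
#   python filter_tsv.py --tsv train.tsv < all.tsv > filtered.tsv
# By default this keeps stdin lines whose basenames do NOT appear in the
# tsv; --keep inverts that, and --no-skip means the tsv has no root-dir
# header line. The first stdin line (the header) is always passed through.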
mabl/PyPylon | pypylon/__init__.py | Python | bsd-3-clause | 142 | 0.007042
from pypylon.cython.factory import Factory
from pypylon.cython.version import PylonVersion
factory = Factory()
pylon_version = PylonVersion()
leveille/blog.v1 | wurdig/lib/helpers.py | Python | mit | 1,130 | 0.004425
"""Helper functions
Consists of functions typically used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
from routes import url_for
from webhelpers.html import literal
from webhelpers.html.secure_form import secure_form
from webhelpers.html.tags import *
from webhelpers.html.tools import auto_link, mail_to
from webhelpers.text import truncate, chop_at, plural
from webob.exc import strip_tags
from wurdig.lib import auth
from wurdig.lib.comment import *
from wurdig.lib.cookie import *
from wurdig.lib.conf_helper import *
from wurdig.lib.widgets import *
from wurdig.lib.html import *
from wurdig.lib.mdown import *
from wurdig.lib.tag import cloud, post_tags
from wurdig.lib.tidy_helper import *
from wurdig.lib.utils_helper import *
def load_stylesheet_assets(csslist='FCSSLIST'):
import pylons
import os
path = os.path.join(pylons.config['pylons.paths']['static_files'], 'css', '%s')
f = open(path % csslist,'r')
stylesheets = f.read()
f.close()
return ['/css/%s.css?%s' % (f, mtime('/css/%s.css' % f)) for f in stylesheets.split()]
liweitianux/atoolbox | astro/query_ned.py | Python | mit | 3,683 | 0.004344
#!/usr/bin/env python3
#
# Copyright (c) 2016-2018 Weitian LI <weitian@aaronly.me>
# MIT License
#
# TODO:
# * allow to query by coordinates & radius range
# * filter queried results according to the type/other...
# * if not queried by name, then try query by coordinates
#
"""
Query NED with the provided name or coordinate.
NASA/IPAC Extragalactic Database: http://ned.ipac.caltech.edu/
References
----------
* astroquery: NedClass
https://astroquery.readthedocs.org/en/latest/api/astroquery.ned.NedClass.html
"""
import sys
import argparse
import csv
from collections import OrderedDict
from astroquery.ned import Ned
from astroquery.exceptions import RemoteServiceError
# Ned configurations
Ned.TIMEOUT = 20
def query_name(name, verbose=False, print_header=False):
"""
Query NED by source name.
"""
try:
q = Ned.query_object(name)
objname = q["Object Name"][0]
objtype = q["Type"][0].decode("utf-8")
ra = q["RA(deg)"][0]
dec = q["DEC(deg)"][0]
velocity = q["Velocity"][0]
z = q["Redshift"][0]
z_flag = q["Redshift Flag"][0].decode("utf-8")
refs = q["References"][0]
notes = q["Notes"][0]
except RemoteServiceError:
objname = None
objtype = None
ra = None
dec = None
velocity = None
z = None
z_flag = None
refs = None
notes = None
if verbose:
print("*** %s: not found ***" % name, file=sys.stderr)
#
results = OrderedDict([
("Name", name),
("NED_Name", objname),
("Type", objtype),
("RA", ra),
("DEC", dec),
("Velocity", velocity),
("z", z),
("z_Flag", z_flag),
("References", refs),
("Notes", notes),
])
if verbose:
if print_header:
print(",".join(results.keys()))
print(",".join([str(v) for v in results.values()]))
return results
def main():
parser = argparse.ArgumentParser(
description="Query NED database by source name")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true",
help="show verbose information")
parser.add_argument("-b", "--brief", dest="brief",
action="store_true",
help="be brief and do not print header")
parser.add_argument("-i", "--input", dest="input", required=True,
help="source names to be queried (sep by comma); " +
"or a file contains the names (one per line)")
parser.add_argument("-o", "--output", dest="output",
default=sys.stdout,
help="output CSV file with queried data")
args = parser.parse_args()
try:
names = list(map(str.strip, open(args.input).readlines()))
except FileNotFoundError:
names = list(map(str.strip, args.input.split(",")))
results_list = []
print_header = True
for name in names:
qr = query_name(name, verbose=args.verbose,
print_header=print_header)
print_header = False
results_list.append(qr)
try:
of = open(args.output, "w")
except TypeError:
of = args.output
writer = csv.writer(of)
if not args.brief:
writer.writerow(results_list[0].keys())
for res in results_list:
writer.writerow(res.values())
if of is not sys.stdout:
of.close()
if __name__ == "__main__":
main()
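# Usage sketch (hedged; source names are illustrative):
#   ./query_ned.py -v -i "NGC 1275,3C 66B" -o results.csv
#   ./query_ned.py -i names.txt        # one source name per line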
deepmind/pysc2 | pysc2/lib/renderer_human.py | Python | apache-2.0 | 73,287 | 0.008405
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A viewer for starcraft observations/replays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import ctypes
import functools
import itertools
from absl import logging
import math
import os
import platform
import re
import subprocess
import threading
import time
import enum
from future.builtins import range # pylint: disable=redefined-builtin
import numpy as np
import pygame
import queue
from pysc2.lib import buffs
from pysc2.lib import colors
from pysc2.lib import features
from pysc2.lib import memoize
from pysc2.lib import point
from pysc2.lib import remote_controller
from pysc2.lib import stopwatch
from pysc2.lib import transform
from pysc2.lib import video_writer
from s2clientprotocol import error_pb2 as sc_err
from s2clientprotocol import raw_pb2 as sc_raw
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui
# Disable attribute-error because of the multiple stages of initialization for
# RendererHuman.
# pytype: disable=attribute-error
sw = stopwatch.sw
render_lock = threading.Lock() # Serialize all window/render operations.
def with_lock(lock):
"""Make sure the lock is held while in this function."""
def decorator(func):
@functools.wraps(func)
def _with_lock(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return _with_lock
return decorator
def clamp(n, smallest, largest):
return max(smallest, min(n, largest))
class MouseButtons(enum.IntEnum):
# https://www.pygame.org/docs/ref/mouse.html
LEFT = 1
MIDDLE = 2
RIGHT = 3
WHEEL_UP = 4
WHEEL_DOWN = 5
class SurfType(enum.IntEnum):
"""Used to tell what a mouse click refers to."""
CHROME = 1 # ie help, feature layer titles, etc
SCREEN = 2
MINIMAP = 4
FEATURE = 8
RGB = 16
class ActionCmd(enum.Enum):
STEP = 1
RESTART = 2
QUIT = 3
class _Ability(collections.namedtuple("_Ability", [
"ability_id", "name", "footprint_radius", "requires_point", "hotkey"])):
"""Hold the specifics of available abilities."""
def __new__(cls, ability, static_data):
specific_data = static_data[ability.ability_id]
if specific_data.remaps_to_ability_id:
general_data = static_data[specific_data.remaps_to_ability_id]
else:
general_data = specific_data
return super(_Ability, cls).__new__(
cls,
ability_id=general_data.ability_id,
name=(general_data.friendly_name or general_data.button_name or
general_data.link_name),
footprint_radius=general_data.footprint_radius,
requires_point=ability.requires_point,
hotkey=specific_data.hotkey)
class _Surface(object):
"""A surface to display on screen."""
def __init__(self, surf, surf_type, surf_rect, world_to_surf, world_to_obs,
draw):
"""A surface to display on screen.
Args:
surf: The actual pygame.Surface (or subsurface).
surf_type: A SurfType, used to tell how to treat clicks in that area.
surf_rect: Rect of the surface relative to the window.
world_to_surf: Convert a world point to a pixel on the surface.
world_to_obs: Convert a world point to a pixel in the observation.
draw: A function that draws onto the surface.
"""
self.surf = surf
self.surf_type = surf_type
self.surf_rect = surf_rect
self.world_to_surf = world_to_surf
self.world_to_obs = world_to_obs
self.draw = draw
def draw_line(self, color, start_loc, end_loc, thickness=1):
"""Draw a line using world coordinates and thickness."""
pygame.draw.line(self.surf, color,
self.world_to_surf.fwd_pt(start_loc).round(),
self.world_to_surf.fwd_pt(end_loc).round(),
max(1, thickness))
def draw_arc(self, color, world_loc, world_radius, start_angle, stop_angle,
thickness=1):
"""Draw an arc using world coordinates, radius, start and stop angles."""
center = self.world_to_surf.fwd_pt(world_loc).round()
radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))
rect = pygame.Rect(center - radius, (radius * 2, radius * 2))
pygame.draw.arc(self.surf, color, rect, start_angle, stop_angle,
thickness if thickness < radius else 0)
def draw_circle(self, color, world_loc, world_radius, thickness=0):
"""Draw a circle using world coordinates and radius."""
if world_radius > 0:
center = self.world_to_surf.fwd_pt(world_loc).round()
radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))
pygame.draw.circle(self.surf, color, center, radius,
thickness if thickness < radius else 0)
def draw_rect(self, color, world_rect, thickness=0):
"""Draw a rectangle using world coordinates."""
tl = self.world_to_surf.fwd_pt(world_rect.tl).round()
br = self.world_to_surf.fwd_pt(world_rect.br).round()
rect = pygame.Rect(tl, br - tl)
pygame.draw.rect(self.surf, color, rect, thickness)
def blit_np_array(self, array):
"""Fill this surface using the contents of a numpy array."""
with sw("make_surface"):
raw_surface = pygame.surfarray.make_surface(array.transpose([1, 0, 2]))
with sw("draw"):
pygame.transform.scale(raw_surface, self.surf.get_size(), self.surf)
def write_screen(self, font, color, screen_pos, text, align="left",
valign="top"):
"""Write to the screen in font.size relative coordinates."""
pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()
text_surf = font.render(str(text), True, color)
rect = text_surf.get_rect()
if pos.x >= 0:
setattr(rect, align, pos.x)
else:
setattr(rect, align, self.surf.get_width() + pos.x)
if pos.y >= 0:
setattr(rect, valign, pos.y)
else:
setattr(rect, valign, self.surf.get_height() + pos.y)
self.surf.blit(text_surf, rect)
def write_world(self, font, color, world_loc, text):
text_surf = font.render(text, True, color)
rect = text_surf.get_rect()
rect.center = self.world_to_surf.fwd_pt(world_loc)
self.surf.blit(text_surf, rect)
class MousePos(collections.namedtuple("MousePos", ["world_pos", "surf"])):
"""Holds the mouse position in world coordinates and the surf it came from."""
__slots__ = ()
@property
def surf_pos(self):
return self.surf.world_to_surf.fwd_pt(self.world_pos)
@property
def obs_pos(self):
return self.surf.world_to_obs.fwd_pt(self.world_pos)
def action_spatial(self, action):
"""Given an Action, return the right spatial action."""
if self.surf.surf_type & SurfType.FEATURE:
return action.action_feature_layer
elif self.surf.surf_type & SurfType.RGB:
return action.action_render
else:
assert self.surf.surf_type & (SurfType.RGB | SurfType.FEATURE)
class PastAction(collections.namedtuple("PastAction", [
"ability", "color", "pos", "time", "deadline"])):
"""Holds a past action for drawing over time."""
@memoize.memoize
def _get_desktop_size():
"""Get the desktop size."""
if platform.system() == "Linux":
try:
xrandr_query = subprocess.check_output(["xrandr", "--query"])
sizes = re.findall(r"\bconnected primary (\d+)x(\d+)", str(xrandr_query))
if sizes[0]:
return point.Point(int(sizes[0][0]), int(sizes[0][1]))
except: # pylint: disable=bare-except
logging.error("Failed to get the resolution from xrandr.
|
pyblish/pyblish-maya
|
pyblish_maya/version.py
|
Python
|
lgpl-3.0
| 230
| 0
|
VERSION_MAJOR = 2
VERSION_MINOR = 1
VERSION_PATCH = 10
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info
__version__ = version
__all__ = ['version', 'version_info', '__version__']
|
FelixCao/ProjectEuler
|
Problem1.py
|
Python
|
mit
| 310
| 0.016129
|
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
result = 0
for i in range(1,1000):
if i % 3 == 0 or i % 5 == 0:
        result += i
print(str(result))
print(result)
|
vroncevic/py_util
|
ats_utilities/singleton/base.py
|
Python
|
gpl-3.0
| 2,465
| 0
|
# -*- coding: UTF-8 -*-
'''
Module
__init__.py
Copyright
Copyright (C) 2017 Vladimir Roncevic <elektron.ronca@gmail.com>
ats_utilities is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ats_utilities is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
Info
Defined class Singleton with attribute(s) and method(s).
Created API for auto-register singleton object.
'''
__author__ = 'Vladimir Roncevic'
__copyright__ = 'Copyright 2017, https://vroncevic.github.io/ats_utilities'
__credits__ = ['Vladimir Roncevic']
__license__ = 'https://github.com/vroncevic/ats_utilities/blob/dev/LICENSE'
__version__ = '1.8.8'
__maintainer__ = 'Vladimir Roncevic'
__email__ = 'elektron.ronca@gmail.com'
__status__ = 'Updated'
class Singleton:
'''
Defined class Singleton with attribute(s) and method(s).
Created API for auto-register singleton object.
It defines:
:attributes:
                | __INSTANCE - class-level reference to the singleton instance.
:methods:
| __new__ - set class instance.
| __str__ - dunder method for Singleton.
'''
__INSTANCE = None
def __new__(class_, *args, **kwargs):
'''
Set class instance.
:param *args: iteration object.
:type *args: <iter>
:param **kwargs: iteration object.
:type **kwargs: <dict>
:return: python object instance.
:rtype: <Python Object>
:exceptions: None
'''
if not isinstance(class_.__INSTANCE, class_):
class_.__INSTANCE = object.__new__(class_, *args, **kwargs)
return class_.__INSTANCE
def __str__(self):
'''
Dunder method for Singleton.
:return: object in a human-readable format.
:rtype: <str>
:exceptions: None
'''
        return '{0} ()'.format(type(self).__name__)
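# A minimal usage sketch for Singleton (illustrative, not part of the original
# module): every instantiation of a subclass yields one shared object.
#
#     class Config(Singleton):
#         pass
#
#     assert Config() is Config()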
|
stweil/letsencrypt
|
windows-installer/windows_installer/construct.py
|
Python
|
apache-2.0
| 5,560
| 0.003417
|
#!/usr/bin/env python3
import ctypes
import os
import shutil
import struct
import subprocess
import sys
import time
PYTHON_VERSION = (3, 8, 9)
PYTHON_BITNESS = 32
NSIS_VERSION = '3.06.1'
def main():
if os.name != 'nt':
raise RuntimeError('This script must be run under Windows.')
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
        # Administrator privileges are required to properly install NSIS through Chocolatey
        raise RuntimeError('This script must be run with administrator privileges.')
if sys.version_info[:2] != PYTHON_VERSION[:2]:
raise RuntimeError('This script must be run with Python {0}'
.format('.'.join(str(item) for item in PYTHON_VERSION[0:2])))
if struct.calcsize('P') * 8 != PYTHON_BITNESS:
raise RuntimeError('This script must be run with a {0} bit version of Python.'
.format(PYTHON_BITNESS))
build_path, repo_path, venv_path, venv_python = _prepare_environment()
_copy_assets(build_path, repo_path)
installer_cfg_path = _generate_pynsist_config(repo_path, build_path)
_prepare_build_tools(venv_path, venv_python, repo_path)
_compile_wheels(repo_path, build_path, venv_python)
_build_installer(installer_cfg_path)
print('Done')
def _build_installer(installer_cfg_path):
print('Build the installer')
subprocess.check_call([sys.executable, '-m', 'nsist', installer_cfg_path])
def _compile_wheels(repo_path, build_path, venv_python):
print('Compile wheels')
wheels_path = os.path.join(build_path, 'wheels')
os.makedirs(wheels_path)
certbot_packages = ['acme', 'certbot']
# Uncomment following line to include all DNS plugins in the installer
# certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')])
wheels_project = [os.path.join(repo_path, package) for package in certbot_packages]
constraints_file_path = os.path.join(repo_path, 'tools', 'requirements.txt')
env = os.environ.copy()
env['PIP_CONSTRAINT'] = constraints_file_path
command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path]
command.extend(wheels_project)
subprocess.check_call(command, env=env)
def _prepare_build_tools(venv_path, venv_python, repo_path):
print('Prepare build tools')
subprocess.check_call([sys.executable, '-m', 'venv', venv_path])
subprocess.check_call([venv_python, os.path.join(repo_path, 'tools', 'pipstrap.py')])
subprocess.check_call(['choco', 'upgrade', '--allow-downgrade', '-y', 'nsis', '--version', NSIS_VERSION])
def _copy_assets(build_path, repo_path):
print('Copy assets')
if os.path.exists(build_path):
os.rename(build_path, '{0}.{1}.bak'.format(build_path, int(time.time())))
os.makedirs(build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'certbot.ico'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'run.bat'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'template.nsi'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'renew-up.ps1'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'renew-down.ps1'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'assets', 'preamble.py'), build_path)
def _generate_pynsist_config(repo_path, build_path):
print('Generate pynsist configuration')
installer_cfg_path = os.path.join(build_path, 'installer.cfg')
certbot_pkg_path = os.path.join(repo_path, 'certbot')
certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'],
universal_newlines=True, cwd=certbot_pkg_path).strip()
# If we change the installer name from `certbot-beta-installer-win32.exe`, it should
# also be changed in tools/create_github_release.py
with open(installer_cfg_path, 'w') as file_h:
file_h.write('''\
[Application]
name=Certbot
version={certbot_version}
icon=certbot.ico
publisher=Electronic Frontier Foundation
target=$INSTDIR\\run.bat
[Build]
directory=nsis
nsi_template=template.nsi
installer_name=certbot-beta-installer-{installer_suffix}.exe
[Python]
version={python_version}
bitness={python_bitness}
[Include]
local_wheels=wheels\\*.whl
files=run.bat
renew-up.ps1
renew-down.ps1
[Command certbot]
entry_point=certbot.main:main
extra_preamble=preamble.py
'''.format(certbot_version=certbot_version,
installer_suffix='win_amd64' if PYTHON_BITNESS == 64 else 'win32',
python_bitness=PYTHON_BITNESS,
python_version='.'.join(str(item) for item in PYTHON_VERSION)))
return installer_cfg_path
def _prepare_environment():
print('Prepare environment')
try:
subprocess.check_output(['choco', '--version'])
except subprocess.CalledProcessError:
raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs '
'to be installed to run this script.')
script_path = os.path.realpath(__file__)
repo_path = os.path.dirname(os.path.dirname(os.path.dirname(script_path)))
build_path = os.path.join(repo_path, 'windows-installer', 'build')
venv_path = os.path.join(build_path, 'venv-config')
venv_python = os.path.join(venv_path, 'Scripts', 'python.exe')
return build_path, repo_path, venv_path, venv_python
if __name__ == '__main__':
main()
|
titilambert/home-assistant
|
homeassistant/components/bond/fan.py
|
Python
|
apache-2.0
| 4,093
| 0.001222
|
"""Support for Bond fans."""
import math
from typing import Any, Callable, List, Optional
from bond_api import Action, DeviceType, Direction
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_DIRECTION,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .entity import BondEntity
from .utils import BondDevice, BondHub
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Bond fan devices."""
hub: BondHub = hass.data[DOMAIN][entry.entry_id]
fans = [
BondFan(hub, device) for device in hub.devices if DeviceType.is_fan(device.type)
]
async_add_entities(fans, True)
class BondFan(BondEntity, FanEntity):
"""Representation of a Bond fan."""
def __init__(self, hub: BondHub, device: BondDevice):
"""Create HA entity representing Bond fan."""
super().__init__(hub, device)
self._power: Optional[bool] = None
self._speed: Optional[int] = None
self._direction: Optional[int] = None
def _apply_state(self, state: dict):
self._power = state.get("power")
self._speed = state.get("speed")
self._direction = state.get("direction")
@property
def supported_features(self) -> int:
"""Flag supported features."""
features = 0
if self._device.supports_speed():
features |= SUPPORT_SET_SPEED
if self._device.supports_direction():
features |= SUPPORT_DIRECTION
return features
@property
def speed(self) -> Optional[str]:
"""Return the current speed."""
if self._power == 0:
return SPEED_OFF
if not self._power or not self._speed:
return None
# map 1..max_speed Bond speed to 1..3 HA speed
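        # Worked example (illustrative): with max_speed=6 and self._speed=3,
        # ceil(3 * 3 / 6) = 2, so speed_list[2] == SPEED_MEDIUM is returned.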
max_speed = max(self._device.props.get("max_speed", 3), self._speed)
ha_speed = math.ceil(self._speed * (len(self.speed_list) - 1) / max_speed)
return self.speed_list[ha_speed]
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def current_direction(self) -> Optional[str]:
"""Return fan rotation direction."""
direction = None
if self._direction == Direction.FORWARD:
direction = DIRECTION_FORWARD
elif self._direction == Direction.REVERSE:
direction = DIRECTION_REVERSE
return direction
async def async_set_speed(self, speed: str) -> None:
"""Set the desired speed for the fan."""
max_speed = self._device.props.get("max_speed", 3)
if speed == SPEED_LOW:
bond_speed = 1
elif speed == SPEED_HIGH:
bond_speed = max_speed
else:
bond_speed = math.ceil(max_speed / 2)
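        # Worked example (illustrative): with max_speed=6, SPEED_LOW maps to 1,
        # SPEED_HIGH to 6, and anything else to ceil(6 / 2) = 3.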
await self._hub.bond.action(
self._device.device_id, Action.set_speed(bond_speed)
)
async def async_turn_on(self, speed: Optional[str] = None, **kwargs) -> None:
"""Turn on the fan."""
if speed is not None:
await self.async_set_speed(speed)
else:
await self._hub.bond.action(self._device.device_id, Action.turn_on())
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the fan off."""
await self._hub.bond.action(self._device.device_id, Action.turn_off())
async def async_set_direction(self, direction: str):
"""Set fan rotation direction."""
bond_direction = (
Direction.REVERSE if direction == DIRECTION_REVERSE else Direction.FORWARD
)
await self._hub.bond.action(
self._device.device_id, Action.set_direction(bond_direction)
)
|
sameetb-cuelogic/edx-platform-test
|
cms/djangoapps/contentstore/views/videos.py
|
Python
|
agpl-3.0
| 12,545
| 0.001594
|
"""
Views related to the video upload feature
"""
from boto import s3
import csv
from uuid import uuid4
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.http import require_GET, require_http_methods
import rfc6266
from edxval.api import create_video, get_videos_for_ids, SortDirection, VideoSortField
from opaque_keys.edx.keys import CourseKey
from contentstore.models import VideoUploadConfig
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from util.json_request import expect_json, JsonResponse
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import modulestore
from .course import get_course_and_check_access
__all__ = ["videos_handler", "video_encodings_download"]
# String constant used in asset keys to identify video assets.
VIDEO_ASSET_TYPE = "video"
# Default expiration, in seconds, of one-time URLs used for uploading videos.
KEY_EXPIRATION_IN_SECONDS = 86400
class StatusDisplayStrings(object):
"""
A class to map status strings as stored in VAL to display strings for the
video upload page
"""
# Translators: This is the status of an active video upload
_UPLOADING = ugettext_noop("Uploading")
# Translators: This is the status for a video that the servers are currently processing
_IN_PROGRESS = ugettext_noop("In Progress")
# Translators: This is the status for a video that the servers have successfully processed
_COMPLETE = ugettext_noop("Ready")
# Translators: This is the status for a video that the servers have failed to process
_FAILED = ugettext_noop("Failed")
# Translators: This is the status for a video for which an invalid
# processing token was provided in the course settings
_INVALID_TOKEN = ugettext_noop("Invalid Token")
# Translators: This is the status for a video that is in an unknown state
_UNKNOWN = ugettext_noop("Unknown")
_STATUS_MAP = {
"upload": _UPLOADING,
"ingest": _IN_PROGRESS,
"transcode_queue": _IN_PROGRESS,
"transcode_active": _IN_PROGRESS,
"file_delivered": _COMPLETE,
"file_complete": _COMPLETE,
"file_corrupt": _FAILED,
"pipeline_error": _FAILED,
"invalid_token": _INVALID_TOKEN
}
@staticmethod
def get(val_status):
"""Map a VAL status string to a localized display string"""
return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))
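# Illustrative usage of StatusDisplayStrings.get (not part of the original module):
#   StatusDisplayStrings.get("transcode_active")  # -> localized "In Progress"
#   StatusDisplayStrings.get("bogus-status")      # -> localized "Unknown"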
@expect_json
@login_required
@require_http_methods(("GET", "POST"))
def videos_handler(request, course_key_string):
"""
The restful handler for video uploads.
GET
html: return an HTML page to display previous video uploads and allow
new ones
json: return json representing the videos that have been uploaded and
their statuses
POST
json: create a new video upload; the actual files should not be provided
to this endpoint but rather PUT to the respective upload_url values
contained in the response
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
if request.method == "GET":
if "application/json" in request.META.get("HTTP_ACCEPT", ""):
return videos_index_json(course)
else:
return videos_index_html(course)
else:
return videos_post(course, request)
@login_required
@require_GET
def video_encodings_download(request, course_key_string):
"""
Returns a CSV report containing the encoded video URLs for video uploads
in the following format:
Video ID,Name,Status,Profile1 URL,Profile2 URL
aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
def get_profile_header(profile):
"""Returns the column header string for the given profile's URLs"""
# Translators: This is the header for a CSV file column
# containing URLs for video encodings for the named profile
# (e.g. desktop, mobile high quality, mobile low quality)
return _("{profile_name} URL").format(profile_name=profile)
profile_whitelist = VideoUploadConfig.get_profile_whitelist()
videos = list(_get_videos(course))
name_col = _("Name")
duration_col = _("Duration")
added_col = _("Date Added")
video_id_col = _("Video ID")
status_col = _("Status")
profile_cols = [get_profile_header(profile) for profile in profile_whitelist]
def make_csv_dict(video):
"""
Makes a dictionary suitable for writing CSV output. This involves
extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
because the CSV module doesn't play well with unicode objects.
"""
# Translators: This is listed as the duration for a video that has not
# yet reached the point in its processing by the servers where its
# duration is determined.
duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        ret = dict(
[
(name_col, video["client_video_id"]),
(duration_col, duration_val),
(added_col, video["created"].isoformat()),
(video_id_col, video["edx_video_id"]),
(status_col, video["status"]),
] +
[
(get_profile_header(encoded_video["profile"]), encoded_video["url"])
for encoded_video in video["encoded_videos"]
if encoded_video["profile"] in profile_whitelist
]
)
return {
key.encode("utf-8"): value.encode("utf-8")
for key, value in ret.items()
}
response = HttpResponse(content_type="text/csv")
# Translators: This is the suggested filename when downloading the URL
# listing for videos uploaded through Studio
filename = _("{course}_video_urls").format(course=course.id.course)
# See https://tools.ietf.org/html/rfc6266#appendix-D
response["Content-Disposition"] = rfc6266.build_header(
filename + ".csv",
filename_compat="video_urls.csv"
)
writer = csv.DictWriter(
response,
[
col_name.encode("utf-8")
for col_name
in [name_col, duration_col, added_col, video_id_col, status_col] + profile_cols
],
dialect=csv.excel
)
writer.writeheader()
for video in videos:
writer.writerow(make_csv_dict(video))
return response
def _get_and_validate_course(course_key_string, user):
"""
Given a course key, return the course if it exists, the given user has
access to it, and it is properly configured for video uploads
"""
course_key = CourseKey.from_string(course_key_string)
# For now, assume all studio users that have access to the course can upload videos.
# In the future, we plan to add a new org-level role for video uploaders.
course = get_course_and_check_access(course_key, user)
if (
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) and
course and
course.video_pipeline_configured
):
return course
else:
return None
def _get_videos(course):
"""
Retrieves the list of videos from VAL corresponding to the videos listed in
the asset metadata store.
"""
edx_videos_ids = [
v.asset_id.path
for v in modulestore().get_all_asset_metadata(course.id, VIDEO_ASSET_TYPE)
]
videos = list(get_videos_for_ids(edx_videos_ids, VideoSortField.created, SortDirection.desc))
#
|
blm08/omxWebRemote
|
manage.py
|
Python
|
apache-2.0
| 255
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "omxwebremote.settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
kleisauke/pyvips
|
examples/try16.py
|
Python
|
mit
| 302
| 0
|
#!/usr/bin/python3
import logging
import sys
import pyvips
logging.basicConfig(level=logging.DEBUG)
# pyvips.cache_set_trace(True)
a = pyvips.Image.new_from_file(sys.argv[1])
x = a.erode([[128, 255, 128],
[255, 255, 255],
[128, 255, 128]])
x.write_to_file(sys.argv[2])
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/xaxis/_rangebreaks.py
|
Python
|
mit
| 3,243
| 0.000617
|
import _plotly_utils.basevalidators
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="rangebreaks", parent_name="layout.xaxis", **kwargs):
        super(RangebreaksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
data_docs=kwargs.pop(
"data_docs",
"""
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The
default is one day in milliseconds.
enabled
Determines whether this axis rangebreak is
enabled or disabled. Please note that
`rangebreaks` only work for "date" axis type.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pattern
Determines a pattern on the time line that
generates breaks. If *day of week* - days of
the week in English e.g. 'Sunday' or `sun`
(matching is case-insensitive and considers
only the first three characters), as well as
Sunday-based integers between 0 and 6. If
"hour" - hour (24-hour clock) as decimal
                numbers between 0 and 24.
Examples: - { pattern: 'day of week', bounds:
[6, 1] } or simply { bounds: ['sat', 'mon'] }
breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8]
} breaks from 5pm to 8am (i.e. skips non-work
hours).
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use
`dvalue` to set the size of the values along
the axis.
""",
),
**kwargs
)
|
pez2001/sVimPy
|
test_scripts/test5.py
|
Python
|
gpl-2.0
| 172
| 0.052326
|
x = 1
x = x + 1
x = x + x + 2
print("x:",x,"\n")
x = "Hallo Welt"
print("x:",x,"\n")
y = "Gute Nacht"
print("y:",y,"\n")
v = "Und bis morgen"
print("y:" + y + " " + v + "\n")
|
fretboardfreak/potty_oh
|
experiments/signal_generator.py
|
Python
|
apache-2.0
| 2,261
| 0
|
#!/usr/bin/env python3
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic Signal Generator program."""
from potty_oh import common
from potty_oh.signal_generator import Generator
def whitenoise(args, generator):
"""Generate some whitenoise."""
generator.whitenoise()
def sin_constant(args, generator):
"""Generate a constant frequency sinusoid."""
generator.sin_constant(args.frequency)
def sin_linear(args, generator):
"""Generate a sinusoid with linearly changing frequency."""
generator.sin_linear(args.frequency / 2, args.frequency * 2)
def main():
ui_map = {'noise': whitenoise, 'constant': sin_constant,
'linear': sin_linear}
parser = common.get_cmd_line_parser(description=__doc__)
parser.add_argument(
'-t', '--type', help='Type of signal to generate',
choices=ui_map.keys())
common.ParserArguments.filename(parser)
common.ParserArguments.length(parser)
common.ParserArguments.plot(parser)
common.ParserArguments.frequency(parser)
common.ParserArguments.set_defaults(parser, type='constant')
args = parser.parse_args()
common.defaults.framerate = 8000
sg = Generator(length=args.length, framerate=common.defaults.framerate,
verbose=args.debug)
ui_map[args.type](args, sg)
waveform = sg.waveform
if args.plot:
import potty_oh.plot as plot
plot.plot_waveform(waveform.frames, waveform.channels, 0, 4000)
else:
from potty_oh.wav_file import wav_file_context
with wav_file_context(args.filename) as fout:
fout.write_frames(waveform.frames)
return 0
if __name__ == "__main__":
common.call_main(main)
|
dsuch/sec-wall
|
code/tests/test_constants.py
|
Python
|
gpl-3.0
| 672
| 0.002976
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at gefira.pl>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import re
from uuid import uuid4
# nose
from nose.tools import assert_true, eq_
def test_constants():
""" Makes sure the number of constants defined is as expected and there
are no duplicates amongst them.
"""
_locals = {}
_globals = {}
    exec 'from secwall.constants import *' in _globals, _locals
expected = 19
eq_(len(_locals), expected)
eq_(len(set(_locals.values())), expected)
|
ryankaiser/django-super-favicon
|
favicon/__init__.py
|
Python
|
bsd-3-clause
| 334
| 0
|
"""
Django app for:
- Generate favicon in multiple format
- Put in a storage backend
- Include HTML tags for use favicon
"""
VERSION = (0, 6, 0)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Anthony Monthe (ZuluPro)'
__email__ = 'anthony.monthe@gmail.com'
__url__ = 'https://github.com/ZuluPro/django-super-favicon'
|
cancerregulome/gidget
|
commands/feature_matrix_construction/main/addGnabFeatures.py
|
Python
|
mit
| 38,773
| 0.004384
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# these are system modules
import numpy
import sys
# these are my local ones
from env import gidgetConfigVars
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
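# Illustrative example (not from the original file):
#   cleanUpName("Foo (Bar)/Baz - Homo sapiens (human)") returns "FOO_BAZ" --
#   the species suffix and parenthesised text are dropped, separators become "_".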
def cleanUpName(aName):
bName = ''
aName = aName.upper()
## ii = aName.find(" - Homo sapiens (human)")
ii = aName.find(" - HOMO SAPIENS (HUMAN)")
if (ii >= 0):
aName = aName[:ii]
aName = aName.strip()
ii = aName.find("(")
while (ii >= 0):
jj = aName.find(")", ii)
aName = aName[:ii] + aName[jj + 1:]
ii = aName.find("(")
aName = aName.strip()
ii = aName.find("<")
while (ii >= 0):
jj = aName.find(">", ii)
aName = aName[:ii] + aName[jj + 1:]
ii = aName.find("<")
aName = aName.strip()
for ii in range(len(aName)):
if (aName[ii] == ','):
continue
elif (aName[ii] == '('):
bName += '_'
elif (aName[ii] == ')'):
bName += '_'
elif (aName[ii] == '-'):
bName += '_'
elif (aName[ii] == '/'):
bName += '_'
elif (aName[ii] == ';'):
bName += '_'
elif (aName[ii] == '&'):
continue
elif (aName[ii] == '#'):
continue
elif (aName[ii] == ' '):
bName += '_'
else:
bName += aName[ii].upper()
ii = bName.find("__")
while (ii >= 0):
print " ", ii, bName
bName = bName[:ii] + bName[ii + 1:]
print " ", bName
ii = bName.find("__")
return (bName)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def readPathways():
fh = file(
gidgetConfigVars['TCGAFMP_BIOINFORMATICS_REFERENCES'] + "/nci_pid/only_NCI_Nature_ver4.tab", 'r')
pwDict = {}
for aLine in fh:
aLine = aLine.strip()
aLine = aLine.upper()
tokenList = aLine.split('\t')
if (len(tokenList) != 3):
continue
if (tokenList[0] == "pathway"):
continue
longPathwayName = tokenList[0]
shortPathwayName = tokenList[1]
geneTokens = tokenList[2].strip()
geneList = geneTokens.split(',')
geneList.sort()
if (len(geneList) > 0):
while (geneList[0] == ''):
                geneList = geneList[1:]
if (len(geneList) == 0):
continue
if (len(geneList) == 0):
continue
pathwayName = cleanUpName(shortPathwayName)
if (pathwayName not in pwDict.keys()):
# print " adding pathway %s (%d) " % ( pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
else:
            if (len(pwDict[pathwayName]) < len(geneList)):
# print " substituting shorter list of genes for %s (%d) " % (
# pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
# else:
# print " NOT substituing list for %s " % pathwayName
fh.close()
print " "
print " have pathway dictionary with %d pathways " % len(pwDict)
print " --> now looking for duplicate pathways ... "
pwList = pwDict.keys()
pwList.sort()
delList = []
pairDict = {}
for ii in range(len(pwList) - 1):
iiName = pwList[ii]
iiLen = len(pwDict[iiName])
for jj in range(ii + 1, len(pwList)):
jjName = pwList[jj]
jjLen = len(pwDict[jjName])
if (jjLen != iiLen):
continue
if (pwDict[iiName] == pwDict[jjName]):
print "\n\n SAME !!! "
print iiName, iiLen
print pwDict[iiName]
print jjName, jjLen
print pwDict[jjName]
iiSplit = iiName.split('__')
jjSplit = jjName.split('__')
if (iiSplit[1] <= jjSplit[1]):
pairNames = (iiSplit[1], jjSplit[1])
else:
pairNames = (jjSplit[1], iiSplit[1])
if (pairNames in pairDict.keys()):
pairDict[pairNames] += 1
else:
pairDict[pairNames] = 1
if (iiSplit[1] == jjSplit[1]):
if (len(iiName) <= len(jjName)):
delList += [jjName]
else:
delList += [iiName]
else:
if (iiSplit[1] == "NCI-NATURE"):
delList += [jjName]
elif (jjSplit[1] == "NCI-NATURE"):
delList += [iiName]
elif (iiSplit[1] == "PID"):
delList += [jjName]
elif (jjSplit[1] == "PID"):
delList += [iiName]
elif (iiSplit[1] == "KEGG"):
delList += [jjName]
elif (jjSplit[1] == "KEGG"):
delList += [iiName]
elif (iiSplit[1] == "PWCOMMONS"):
delList += [jjName]
elif (jjSplit[1] == "PWCOMMONS"):
delList += [iiName]
elif (iiSplit[1] == "REACTOME"):
delList += [jjName]
elif (jjSplit[1] == "REACTOME"):
delList += [iiName]
elif (iiSplit[1] == "WIKIPATHWAYS"):
delList += [jjName]
elif (jjSplit[1] == "WIKIPATHWAYS"):
delList += [iiName]
elif (iiSplit[1] == "WIKIPW"):
delList += [jjName]
elif (jjSplit[1] == "WIKIPW"):
delList += [iiName]
elif (iiSplit[1] == "SMPDB"):
delList += [jjName]
elif (jjSplit[1] == "SMPDB"):
delList += [iiName]
elif (iiSplit[1] == "HUMANCYC"):
delList += [jjName]
elif (jjSplit[1] == "HUMANCYC"):
delList += [iiName]
else:
sys.exit(-1)
for aName in delList:
try:
del pwDict[aName]
except:
doNothing = 1
print " "
print " returning pathway dictionary with %d pathways " % len(pwDict)
print " "
for aKey in pairDict.keys():
print aKey, pairDict[aKey]
print " "
print " "
return (pwDict)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def setFeatBits(rowLabels, featPrefix, doesContainList, notContainList):
numSet = 0
numRow = len(rowLabels)
bitVec = numpy.zeros(numRow, dtype=numpy.bool)
for iR in range(numRow):
if (featPrefix != ""):
if (not rowLabels[iR].startswith(featPrefix)): continue
if (len(doesContainList) > 0):
skipFlag = 1
for aStr in doesContainList:
if (rowLabels[iR].find(aStr) >= 0): skipFlag = 0
if (len(notContainList) > 0):
skipFlag = 0
for aStr in notContainList:
if (rowLabels[iR].find(aStr) >= 0): skipFlag = 1
if (skipFlag): continue
## set bit if we get here ...
bitVec[iR] = 1
numSet += 1
print featPrefix, doesContainList, notContainList, numRow, numSet
if (numSet == 0):
print " numSet=0 ... this is probably a problem ... "
# sys.exit(-1)
return (bitVec)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# B:GNAB:ADAM7:chr8:24298509:24384483:+:y_n_somatic y_n y_del
# --> B:GNAB:ADAM7:chr8:24298509:24384483:+:y_del_somatic
def makeNewFeatureName(curFeatName, oldStringList, newStringList):
    for jj in range(len(oldStringList)):
|
spahan/unixdmoain
|
lib/test/Classes.py
|
Python
|
bsd-3-clause
| 1,575
| 0.013333
|
import unittest
import UniDomain.Classes as Classes
#---- unittest Test Classes below here
class TestConfig(unittest.TestCase):
"""Test Config Class"""
def test_Config(self):
"""Check if required config defaults are set"""
self.config = Classes.Config()
self.assertTrue('plugin_authen' in self.config.config, 'no authen plugin in default config')
self.assertTrue('plugin_author' in self.config.config, 'no author plugin in default config')
self.assertTrue('cachedir' in self.config.config, 'no cache directory in default config')
        self.assertTrue('policydir' in self.config.config, 'no policy directory in default config')
self.assertTrue('dnszone' in self.config.config, 'no dnszone in default config')
        self.assertTrue('groupfile' in self.config.config, 'no groupfile in default config') if False else None
self.assertTrue('groupfile' in self.config.config, 'no groupfile in default config')
def test_readconf(self):
"""check if readconf behaves like we want"""
self.config = Classes.Config(file = 'testconf.xml', passwdfile = 'xyz')
self.assertEqual(len(self.config.ldapservers), 1, 'reading value from file does not work.')
        self.assertEqual(type(self.config.debug), type(True), 'debug value is not bool!')
        self.assertEqual(self.config.passwdfile, 'xyz', "passing config vars as args doesn't work")
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestConfig)
unittest.TextTestRunner(verbosity=2).run(suite)
|
cloakedcode/CouchPotatoServer
|
couchpotato/core/providers/torrent/thepiratebay/main.py
|
Python
|
gpl-3.0
| 5,523
| 0.010139
|
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.environment import Env
import re
import time
import traceback
log = CPLog(__name__)
class ThePirateBay(TorrentMagnetProvider):
urls = {
'detail': '%s/torrent/%s',
'search': '%s/search/%s/%s/7/%d'
}
cat_ids = [
([207], ['720p', '1080p']),
([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([202], ['dvdr'])
]
cat_backup_id = 200
disable_provider = False
http_time_between_calls = 0
proxy_list = [
'https://thepiratebay.sx',
'https://tpb.ipredator.sx',
'https://depiraatbaai.be',
'https://piratereverse.info',
'https://tpb.pirateparty.org.uk',
'https://argumentomteemigreren.nl',
'https://livepirate.com',
'https://www.getpirate.com',
'https://tpb.partipirate.org',
'https://tpb.piraten.lu',
'https://kuiken.co',
]
def __init__(self):
self.domain = self.conf('domain')
super(ThePirateBay, self).__init__()
def _searchOnTitle(self, title, movie, quality, results):
page = 0
total_pages = 1
while page < total_pages:
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, self.getCatId(quality['identifier'])[0])
page += 1
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'searchResult'})
if not results_table:
return
try:
total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
except:
pass
entries = results_table.find_all('tr')
for result in entries[2:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
try:
size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
except:
continue
if link and download:
def extra_score(item):
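                                    # Sums badge weights: Trusted=10, VIP=20,
                                    # Helpers=30, Moderator=50.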
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]
return confirmed + trusted + vip + moderated
results.append({
                                'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'name': link.string,
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain()
def getDomain(self, url = ''):
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
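                # 1209600 seconds = 14 days; a proxy checked within the last
                # two weeks is skipped.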
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def getMoreInfo(self, item):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class':'nfo'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
|
eHealthAfrica/LMIS
|
LMIS/locations/urls.py
|
Python
|
gpl-2.0
| 151
| 0.019868
|
#!/usr/bin/env python
# encoding=utf-8
# locations.urls
from django.conf.urls import patterns, url
urlpatterns = patterns('',
# ex: /afp/
)
|
iulian787/spack
|
var/spack/repos/builtin/packages/ocl-icd/package.py
|
Python
|
lgpl-2.1
| 2,788
| 0.005022
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OclIcd(AutotoolsPackage):
"""This package aims at creating an Open Source alternative to vendor specific
OpenCL ICD loaders."""
homepage = "https://github.com/OCL-dev/ocl-icd"
url = "https://github.com/OCL-dev/ocl-icd/archive/v2.2.12.tar.gz"
version('2.2.13', sha256='f85d59f3e8327f15637b91e4ae8df0829e94daeff68c647b2927b8376b1f8d92')
version('2.2.12', sha256='17500e5788304eef5b52dbe784cec197bdae64e05eecf38317840d2d05484272')
version('2.2.11', sha256='c1865ef7701b8201ebc6930ed3ac757c7e5cb30f3aa4c1e742a6bc022f4f2292')
version('2.2.10', sha256='d0459fa1421e8d86aaf0a4df092185ea63bc4e1a7682d3af261ae5d3fae063c7')
version('2.2.9', sha256='88da749bc2bd75149f0bb6e72eb4a9d74401a54f4508bc730f13cc03c57a17ed')
version('2.2.8', sha256='8a8a405c7d659b905757a358dc467f4aa3d7e4dff1d1624779065764d962a246')
version('2.2.7', sha256='b8e68435904e1a95661c385f24d6924ed28f416985c6db5a3c7448698ad5fea2')
version('2.2.6', sha256='4567cae92f58c1d6ecfc771c456fa95f206d8a5c7c5d6c9010ec688a9fd83750')
version('2.2.5', sha256='50bf51f4544f83e69a5a2f564732a2adca63fbe9511430aba12f8d6f3a53ae59')
version('2.2.4', sha256='92853137ffff393cc74f829357fdd80ac46a82b46c970e80195db86164cca316')
version('2.2.3', sha256='46b8355d90f8cc240555e4e077f223c47b950abeadf3e1af52d6e68d2efc2ff3')
variant("headers", default=False, description="Install also OpenCL headers to use this as OpenCL provider")
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
    depends_on('m4', type='build')
depends_on('ruby', type='build')
depends_on('asciidoc-py3', type='build')
    depends_on('xmlto', type='build')
depends_on('opencl-headers@2.2:', when='+headers')
provides('opencl@:2.2', when='@2.2.12:+headers')
provides('opencl@:2.1', when='@2.2.8:2.2.11+headers')
provides('opencl@:2.0', when='@2.2.3:2.2.7+headers')
def flag_handler(self, name, flags):
if name == 'cflags' and self.spec.satisfies('@:2.2.12'):
# https://github.com/OCL-dev/ocl-icd/issues/8
            # this is fixed in versions greater than 2.2.12
flags.append('-O2')
            # gcc-10 changed the default from -fcommon to -fno-common
# This is fixed in versions greater than 2.2.12:
# https://github.com/OCL-dev/ocl-icd/commit/4667bddd365bcc1dc66c483835971f0083b44b1d
if self.spec.satisfies('%gcc@10:'):
flags.append('-fcommon')
return (flags, None, None)
|
matrix-org/synapse
|
scripts-dev/dump_macaroon.py
|
Python
|
apache-2.0
| 532
| 0
|
#!/usr/bin/env python
import sys
import pymacaroons
if len(sys.argv) == 1:
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
sys.exit(1)
macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print(macaroon.inspect())
print("")
verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
verifier.verify(macaroon, key)
print("Signature is correct")
except Exception as e:
print(str(e))
|
ryfeus/lambda-packs
|
pytorch/source/torch/nn/modules/module.py
|
Python
|
mit
| 41,372
| 0.001015
|
from collections import OrderedDict
import functools
import itertools
import torch
from ..backends.thnn import backend as thnn_backend
from ..parameter import Parameter
import torch.utils.hooks as hooks
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
class Module(object):
r"""Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in
a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their
parameters converted too when you call :meth:`to`, etc.
"""
dump_patches = False
r"""This allows better BC support for :meth:`load_state_dict`. In
:meth:`state_dict`, the version number will be saved as in the attribute
`_metadata` of the returned state dict, and thus pickled. `_metadata` is a
dictionary with keys that follow the naming convention of state dict. See
``_load_from_state_dict`` on how to use this information in loading.
If new parameters/buffers are added/removed from a module, this number shall
be bumped, and the module's `_load_from_state_dict` method can compare the
version number and do appropriate changes if the state dict is from before
the change."""
_version = 1
def __init__(self):
self._backend = thnn_backend
self._parameters = OrderedDict()
self._buffers = OrderedDict()
self._backward_hooks = OrderedDict()
self._forward_hooks = OrderedDict()
self._forward_pre_hooks = OrderedDict()
self._state_dict_hooks = OrderedDict()
self._load_state_dict_pre_hooks = OrderedDict()
self._modules = OrderedDict()
self.training = True
def forward(self, *input):
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
"""
raise NotImplementedError
def register_buffer(self, name, tensor):
r"""Adds a persistent buffer to the module.
This is typically used to register a buffer that should not to be
considered a model parameter. For example, BatchNorm's ``running_mean``
is not a parameter, but is part of the persistent state.
Buffers can be accessed as attributes using given names.
Args:
name (string): name of the buffer. The buffer can be accessed
from this module using the given name
tensor (Tensor): buffer to be registered.
Example::
>>> self.register_buffer('running_mean', torch.zeros(num_features))
"""
if not isinstance(name, torch._six.string_classes):
raise TypeError("buffer name should be a string. "
"Got {}".format(torch.typename(name)))
elif '.' in name:
raise KeyError("buffer name can't contain \".\"")
elif name == '':
raise KeyError("buffer name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._buffers:
raise KeyError("attribute '{}' already exists".format(name))
elif tensor is not None and not isinstance(tensor, torch.Tensor):
raise TypeError("cannot assign '{}' object to buffer '{}' "
"(torch Tensor or None required)"
.format(torch.typename(tensor), name))
else:
self._buffers[name] = tensor
def register_parameter(self, name, param):
r"""Adds a parameter to the module.
The parameter can be accessed as an attribute using given name.
Args:
name (string): name of the parameter. The parameter can be accessed
from this module using the given name
            param (Parameter): parameter to be added to the module.
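        Example::

            >>> # illustrative: register a zero-initialised bias parameter
            >>> self.register_parameter('bias', torch.nn.Parameter(torch.zeros(10)))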
"""
if '_parameters' not in self.__dict__:
raise AttributeError(
"cannot assign parameter before Module.__init__() call")
elif not isinstance(name, torch._six.string_classes):
raise TypeError("parameter name should be a string. "
"Got {}".format(torch.typename(name)))
elif '.' in name:
raise KeyError("parameter name can't contain \".\"")
elif name == '':
raise KeyError("parameter name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._parameters:
raise KeyError("attribute '{}' already exists".format(name))
if param is None:
self._parameters[name] = None
elif not isinstance(param, Parameter):
raise TypeError("cannot assign '{}' object to parameter '{}' "
"(torch.nn.Parameter or None required)"
.format(torch.typename(param), name))
elif param.grad_fn:
raise ValueError(
"Cannot assign non-leaf Tensor to parameter '{0}'. Model "
"parameters must be created explicitly. To express '{0}' "
"as a function of another Tensor, compute the value in "
"the forward() method.".format(name))
else:
self._parameters[name] = param
def add_module(self, name, module):
r"""Adds a child module to the current module.
The module can be accessed as an attribute using the given name.
Args:
name (string): name of the child module. The child module can be
accessed from this module using the given name
            module (Module): child module to be added to the module.
"""
if not isinstance(module, Module) and module is not None:
raise TypeError("{} is not a Module subclass".format(
torch.typename(module)))
elif not isinstance(name, torch._six.string_classes):
            raise TypeError("module name should be a string. Got {}".format(
torch.typename(name)))
elif hasattr(self, name) and name not in self._modules:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("module name can't contain \".\"")
elif name == '':
raise KeyError("module name can't be empty string \"\"")
self._modules[name] = module
def _apply(self, fn):
        for module in self.children():
module._apply(fn)
for param in self._parameters.values():
if param is not None:
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for key, buf in self._buffers.items():
if buf is not None:
self._buffers[key] = fn(buf)
return self
def apply(self, fn):
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters
|
biswajitsahu/kuma
|
vendor/packages/translate/storage/placeables/interfaces.py
|
Python
|
mpl-2.0
| 1,302
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
This file contains abstract (semantic) interfaces for placeable
implementations.
"""
from translate.storage.placeables.strelem import StringElem
class BasePlaceable(StringElem):
"""Base class for all placeables."""
parse = None
class InvisiblePlaceable(BasePlaceable):
pass
class MaskingPlaceable(BasePlaceable):
pass
class ReplacementPlaceable(BasePlaceable):
pass
class SubflowPlaceable(BasePlaceable):
pass
class Delimiter(object):
pass
class PairedDelimiter(object):
pass
|
nrego/westpa
|
lib/examples/stringmethodexamples/examples/Mueller/get_strings.py
|
Python
|
gpl-3.0
| 1,298
| 0.009245
|
import numpy as np
import matplotlib.pyplot as plt  # needed for the plt.plot() calls below
import westpa
import cPickle as pickle
def dist(pt1, pt2):
return np.sum((pt1-pt2)**2)
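# Mueller-Brown-style potential used below:
#   V(x, y) = sum_j AA[j] * exp(aa[j]*(x - XX[j])**2
#                               + bb[j]*(x - XX[j])*(y - YY[j])
#                               + cc[j]*(y - YY[j])**2)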
def mueller(x, y):
aa = [-1, -1, -6.5, 0.7]
bb = [0, 0, 11, 0.6]
cc = [-10, -10, -6.5, 0.7]
AA = [-200, -100, -170, 15]
XX = [1, 0, -0.5, -1]
YY = [0, 0.5, 1.5, 1]
V1 = 0
for j in range(4):
V1 += AA[j] * np.exp(aa[j] * (x - XX[j])**2 + \
bb[j] * (x - XX[j]) * (y - YY[j]) + cc[j] * (y - YY[j])**2)
return V1
def calculate_length(x):
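    # Cumulative Euclidean arc length along the string of images in x;
    # the first entry is pinned to 0.0.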
dd = x - np.roll(x, 1, axis=0)
dd[0,:] = 0.0
return np.cumsum(np.sqrt((dd*dd).sum(axis=1)))
# Get the (free) energy as a function of string images
xx, yy = np.mgrid[-1.5:1.2:0.01, -0.2:2.0:0.01]
assert xx.shape == yy.shape
nx = xx.shape[0]
ny = xx.shape[1]
energy = mueller(xx, yy)
energy -= energy.min()
dm = westpa.rc.get_data_manager()
dm.open_backing()
hashes = dm.we_h5file['bin_topologies']['index']['hash']
mapper = dm.get_bin_mapper(hashes[0])
strings = np.zeros((len(hashes), mapper.centers.shape[0], mapper.centers.shape[1]))
for i, hashval in enumerate(hashes):
mapper = dm.get_bin_mapper(hashval)
plt.plot(mapper.centers[:,0], mapper.centers[:,1], '-o', label='{}'.format(i))
strings[i] = mapper.centers
pickle.dump(strings, open('strings.pkl', 'w'))
|
t3dev/odoo
|
addons/hr_org_chart/models/hr_employee.py
|
Python
|
gpl-3.0
| 1,787
| 0.003917
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Employee(models.Model):
_name = "hr.employee"
_inherit = "hr.employee"
child_all_count = fields.Integer(
        'Indirect Subordinates Count',
compute='_compute_subordinates', store=False)
subordinate_ids = fields.One2many('hr.employee', string='Subordinates', compute='_compute_subordinates', help="Direct and indirect subordinates", groups='base.group_user')
def _get_subordinates(self, parents=None):
"""
Helper function to compute subordinates_ids.
Get all subordinates (direct and indirect) of an employee.
An employee can be a manager of his own manager (recursive hierarchy; e.g. the CEO is manager of everyone but is also
member of the RD department, managed by the CTO itself managed by the CEO).
        In that case, the manager is not counted as a subordinate if it's in the 'parents' set.
"""
if not parents:
parents = self.env['hr.employee']
indirect_subordinates = self.env['hr.employee']
parents |= self
direct_subordinates = self.child_ids - parents
for child in direct_subordinates:
child_subordinate = child._get_subordinates(parents=parents)
child.subordinate_ids = child_subordinate
indirect_subordinates |= child_subordinate
return indirect_subordinates | direct_subordinates
@api.depends('child_ids', 'child_ids.child_all_count')
def _compute_subordinates(self):
for employee in self:
employee.subordinate_ids = employee._get_subordinates()
employee.child_all_count = len(employee.subordinate_ids)
|
felliott/waterbutler
|
tests/providers/s3/fixtures.py
|
Python
|
apache-2.0
| 5,588
| 0.000537
|
import os
from collections import OrderedDict
import pytest
from waterbutler.providers.s3.metadata import (S3Revision,
S3FileMetadata,
S3FolderMetadata,
S3FolderKeyMetadata,
S3FileMetadataHeaders,
)
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
}
@pytest.fixture
def credentials():
return {
'access_key': 'Dont dead',
'secret_key': 'open inside',
}
@pytest.fixture
def settings():
return {
'bucket': 'that kerning',
'encrypt_uploads': False
}
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def folder_metadata():
with open(os.path.join(os.path.dirname(__file__), 'fixtures/folder_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def folder_single_item_metadata():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/folder_single_item_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def folder_item_metadata():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/folder_item_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def folder_and_contents():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/folder_and_contents.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def version_metadata():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/version_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def single_version_metadata():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/single_version_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def folder_empty_metadata():
with open(os.path.join(os.path.dirname(__file__),
'fixtures/folder_empty_metadata.xml'), 'r') as fp:
return fp.read()
@pytest.fixture
def file_header_metadata():
return {
'Content-Length': '9001',
'Last-Modified': 'SomeTime',
'Content-Type': 'binary/octet-stream',
'Etag': '"fba9dede5f27731c9771645a39863328"',
'x-amz-server-side-encryption': 'AES256'
}
@pytest.fixture
def file_metadata_headers_object(file_header_metadata):
return S3FileMetadataHeaders('test-path', file_header_metadata)
@pytest.fixture
def file_metadata_object():
content = OrderedDict(Key='my-image.jpg',
LastModified='2009-10-12T17:50:30.000Z',
ETag="fba9dede5f27731c9771645a39863328",
Size='434234',
StorageClass='STANDARD')
return S3FileMetadata(content)
@pytest.fixture
def folder_key_metadata_object():
content = OrderedDict(Key='naptime/',
LastModified='2009-10-12T17:50:30.000Z',
ETag='"fba9dede5f27731c9771645a39863328"',
Size='0',
StorageClass='STANDARD')
return S3FolderKeyMetadata(content)
@pytest.fixture
def folder_metadata_object():
content = OrderedDict(Prefix='photos/')
return S3FolderMetadata(content)
@pytest.fixture
def revision_metadata_object():
content = OrderedDict(
Key='single-version.file',
VersionId='3/L4kqtJl40Nr8X8gdRQBpUMLUo',
IsLatest='true',
LastModified='2009-10-12T17:50:30.000Z',
        ETag='"fba9dede5f27731c9771645a39863328"',
        Size=434234,
        StorageClass='STANDARD',
        Owner=OrderedDict(
ID='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a',
DisplayName='mtd@amazon.com'
)
)
return S3Revision(content)
@pytest.fixture
def create_session_resp():
file_path = 'fixtures/chunked_uploads/create_session_resp.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def generic_http_404_resp():
file_path = 'fixtures/chunked_uploads/generic_http_404_resp.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def generic_http_403_resp():
file_path = 'fixtures/chunked_uploads/generic_http_403_resp.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def list_parts_resp_empty():
file_path = 'fixtures/chunked_uploads/list_parts_resp_empty.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def list_parts_resp_not_empty():
file_path = 'fixtures/chunked_uploads/list_parts_resp_not_empty.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def complete_upload_resp():
file_path = 'fixtures/chunked_uploads/complete_upload_resp.xml'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
@pytest.fixture
def upload_parts_headers_list():
file_path = 'fixtures/chunked_uploads/upload_parts_headers_list.json'
with open(os.path.join(os.path.dirname(__file__), file_path), 'r') as fp:
return fp.read()
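# An illustrative consumer of the fixtures above (expected values inferred
# from the fixture content; `name`/`kind` follow waterbutler's metadata API):
#   def test_file_metadata_object_sketch(file_metadata_object):
#       assert file_metadata_object.name == 'my-image.jpg'
#       assert file_metadata_object.kind == 'file'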
|
openstack/mistral
|
mistral/tests/unit/config.py
|
Python
|
apache-2.0
| 992
| 0
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
def parse_args():
    # Look for .mistral.conf in the project directory by default.
project_dir = '%s/../../..' % os.path.dirname(__file__)
config_file = '%s/.mistral.conf' % os.path.realpath(project_dir)
config_files = [config_file] if os.path.isfile(config_file) else None
cfg.CONF(args=[], default_config_files=config_files)
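# A typical call site (sketch): unit tests invoke this once before exercising
# configuration-dependent code, e.g.
#   from mistral.tests.unit import config
#   config.parse_args()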
|
kekoa428/Interview-Prep
|
words_in_string.py
|
Python
|
gpl-3.0
| 712
| 0.016854
|
"""
Given a string 's' and a dictionary of words 'words', determine if s can be
segmented into a space-separated sequence of one or more dictionary words.
Return True if so, and False otherwise.
For example:
s = "practicemakespermanent",
words = ["makes", "permanent", "practice"].
f(s) // True
"""
s = "practicemakespermanent"
words = ["makes", "permanent", "practice"]
def words_in_string(s):
    # Dynamic programming over prefixes: segmentable[i] is True when s[:i]
    # can be split entirely into dictionary words.
    segmentable = [False] * (len(s) + 1)
    segmentable[0] = True  # the empty prefix is trivially segmentable
    for end in range(1, len(s) + 1):
        for start in range(end):
            if segmentable[start] and s[start:end] in words:
                segmentable[end] = True
                break
    return segmentable[len(s)]
print(words_in_string(s))
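# Worked trace for s = "practicemakespermanent" (22 characters):
#   segmentable[8]  -> True   ("practice")
#   segmentable[13] -> True   ("practice" + "makes")
#   segmentable[22] -> True   ("practice" + "makes" + "permanent")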
|
OpenJUB/jay
|
jay/settings.py
|
Python
|
mit
| 2,511
| 0
|
"""
Django settings for jay project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_forms_bootstrap',
'filters',
'settings',
'users',
'votes',
'core'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'jay.urls'
TEMPLATES = [
{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + "/templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jay.wsgi.application'
# OpenJUB auth
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
'users.ojub_auth.OjubBackend')
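# Backends are tried in order: the stock model backend first, then the
# OpenJUB backend if the first one does not authenticate the user.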
# Default after login redirect
# These are named URL routes
LOGIN_URL = "login"
LOGOUT_URL = "logout"
LOGIN_REDIRECT_URL = "home"
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
|
webcamoid/webcamoid.github.io
|
internal/tasks.py
|
Python
|
agpl-3.0
| 2,105
| 0.004276
|
# -*- coding: utf-8 -*-
import os
import shutil
import sys
import datetime
from invoke import task
from invoke.util import cd
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
CONFIG = {
# Local path configuration (can be absolute or relative to tasks.py)
'deploy_path': '..',
# Github Pages configuration
'github_pages_branch': 'gh-pages',
'commit_message': "'Publish site on {}'".format(datetime.date.today().isoformat()),
    # Remote server configuration for `publish` (placeholder values taken
    # from pelican's stock tasks template; the real target is not recorded
    # in this file)
    'production': 'user@localhost:22',
    'dest_path': '/var/www',
    # Port for `serve`
    'port': 8000,
}
@task
def clean(c):
"""Remove generated files"""
if os.path.isdir(CONFIG['deploy_path']):
shutil.rmtree(CONFIG['deploy_path'])
os.makedirs(CONFIG['deploy_path'])
@task
def build(c):
"""Build local version of site"""
c.run('pelican -s pelicanconf.py')
@task
def rebuild(c):
"""`build` with the delete switch"""
c.run('pelican -d -s pelicanconf.py')
@task
def regenerate(c):
"""Automatically regenerate site upon file modification"""
c.run('pelican -r -s pelicanconf.py')
@task
def serve(c):
"""Serve site at http://localhost:8000/"""
class AddressReuseTCPServer(RootedHTTPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(
CONFIG['deploy_path'],
('', CONFIG['port']),
ComplexHTTPRequestHandler)
sys.stderr.write('Serving on port {port} ...\n'.format(**CONFIG))
server.serve_forever()
@task
def reserve(c):
"""`build`, then `serve`"""
build(c)
serve(c)
@task
def preview(c):
"""Build production version of site"""
c.run('pelican -s publishconf.py')
@task
def publish(c):
"""Publish to production via rsync"""
c.run('pelican -s publishconf.py')
c.run(
'rsync --delete --exclude ".DS_Store" -pthrvz -c '
'{} {production}:{dest_path}'.format(
CONFIG['deploy_path'].rstrip('/') + '/',
**CONFIG))
@task
def gh_pages(c):
"""Publish to GitHub Pages"""
preview(c)
c.run('ghp-import -b {github_pages_branch} '
'-m {commit_message} '
'{deploy_path} -p'.format(**CONFIG))
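# Tasks are run through the `invoke` CLI from this directory, e.g.:
#   invoke clean build
#   invoke reserve    # build, then serve on http://localhost:8000/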
|
Exploit-install/Veil-Pillage
|
modules/impacket/smbexec_shell.py
|
Python
|
gpl-3.0
| 2,025
| 0.004938
|
"""
Execute impacket's smbexec shell on a particular host.
This creates a semi-interactive shell without uploading
a binary, but it leaves a lot of noise in the event logs!
All cred to the awesome Impacket project!
https://code.google.com/p/impacket/
Module built by @harmj0y
"""
from lib import impacket_smbexec
from lib import helpers
class Module:
def __init__(self, targets=None, creds=None, args=None):
        self.name = "Smbexec Shell"
        self.description = ("Execute Impacket's smbexec.py module to create a "
                            "semi-interactive shell on a target without "
                            "uploading any binaries.")
# internal list() that holds one or more targets set by the framework
self.targets = targets
# internal list() that holds one or more cred tuples set by the framework
# [ (username, pw), (username2, pw2), ...]
self.creds = creds
# any relevant text to echo to the output file
self.output = ""
        # user interaction - format is {Option : [Value, Description]}
self.required_options = {"service_name" : ["SystemDiag", "Name of the service created on the box."]}
def run(self):
# assume single set of credentials for this module
username, password = self.creds[0]
        # see if we need to extract a domain from "domain/username"
        domain = ""
        if "/" in username:
            domain, username = username.split("/")
# the service name to create on the box
serviceName = self.required_options["service_name"][0]
executer = impacket_smbexec.CMDEXEC("445/SMB", username, password, domain, None, "SHARE", "C$", serviceName=serviceName)
        print("\n\n [*] Type " + helpers.color("'exit'") + " to exit the shell\n")
for target in self.targets:
executer.run(target)
self.output += "[*] Impacket smbexec.py shell run using creds '"+username+":"+password+"' on "+target+"\n"
|
platformio/platformio-core
|
platformio/commands/device/filters/time.py
|
Python
|
apache-2.0
| 1,381
| 0
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from platformio.commands.device import DeviceMonitorFilter
class Timestamp(DeviceMonitorFilter):
NAME = "time"
def __init__(self, *args, **kwargs):
super(Timestamp, self).__init__(*args, **kwargs)
self._line_started = False
def rx(self, text):
if self._line_started and "\n" not in text:
return text
timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
if not self._line_started:
self._line_started = True
text = "%s > %s" % (timestamp, text)
if text.endswith("\n"):
self._line_started = False
return text[:-1].replace("\n", "\n%s > " % timestamp) + "\n"
return text.replace("\n", "\n%s > " % timestamp)
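# A sketch of the resulting behaviour (timestamps illustrative; derived from
# the rx() logic above):
#   f.rx("hello\n")   -> "14:03:07.123 > hello\n"
#   f.rx("par")       -> "14:03:07.456 > par"   (line started, no newline yet)
#   f.rx("tial\n")    -> "tial\n"               (no new stamp mid-line)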
|
Spirals-Team/apolline-python
|
apolline/Alphasense_ADC/AlphasenseADC.py
|
Python
|
agpl-3.0
| 3,147
| 0.010486
|
#!/usr/bin/env python
"""
Alphasense ADC Driver for Apolline
"""
import argparse
import serial
import time
from influxdb import InfluxDBClient
from influxdb import SeriesHelper
class ADCSensor:
"""
    Alphasense ADC sensor
"""
def __init__(self,database='apolline'):
self.dbname = database
self.parser = argparse.ArgumentParser(description='Apolline agent for Alphasense ADC sensor')
        self.parser.add_argument('--host', type=str, required=False,
default='apolline.lille.inria.fr', help='hostname of Apolline backend')
self.parser.add_argument('--port', type=int, required=False,
default=8086, help='port of Apolline backend')
self.parser.add_argument('--device', type=str, required=False,
default='/dev/ttyUSB1', help='serial device used to measure')
self.parser.add_argument('--location', type=str, required=False,
default='unknown', help='physical location of the sensor')
self.parser.add_argument('--database', type=str, required=False,
default='sandbox', help='remote database used to upload the measurements')
self.parser.add_argument('--frequency', type=float, required=False,
default=0.5, help='data retrieval frequency in seconds')
self.parser.add_argument('--user', type=str, required=True,
help='user login to upload data online')
self.parser.add_argument('--password', type=str, required=True,
help='user password to upload data online')
def configure(self):
args = self.parser.parse_args()
self.location = args.location
self.device = args.device
self.frequency = args.frequency
self.connection = InfluxDBClient(args.host, args.port, args.user, args.password, args.database)
    def run(self):
        self.configure()
        ser = serial.Serial(self.device, 9600, timeout=123)
        try:
            while True:
                self.sense(ser)
                time.sleep(self.frequency)
        finally:
            ser.close()
def sense(self,ser):
class ADCHelper(SeriesHelper):
class Meta:
series_name = 'events.stats.{location}'
fields = ['temperature','Voie_1','Voie_1V','Voie_2','Voie_2V','Voie_3','Voie_3V','Voie_4','Voie_4V','Voie_5','Voie_5V','Voie_6','Voie_6V']
tags = ['location']
client = self.connection
autocommit = False
        try:
            line = ser.readline()
            if not line: return
            line = line.replace("\r\n", "")
            value = line.split(";")
            if len(value) == 15:
                ADCHelper(location=self.location, temperature=value[2], Voie_1=value[3], Voie_1V=value[4], Voie_2=value[5], Voie_2V=value[6], Voie_3=value[7], Voie_3V=value[8], Voie_4=value[9], Voie_4V=value[10], Voie_5=value[11], Voie_5V=value[12], Voie_6=value[13], Voie_6V=value[14])
                ADCHelper.commit()
        except Exception:
            print("Failed to read metrics from ADC sensor on " + self.device)
if __name__ == '__main__':
sensor = ADCSensor()
sensor.run()
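# Example invocation (argument names defined above; values illustrative):
#   python AlphasenseADC.py --user alice --password secret \
#       --device /dev/ttyUSB1 --location lille --frequency 0.5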
|
ric2b/Vivaldi-browser
|
chromium/testing/unexpected_passes_common/expectations_unittest.py
|
Python
|
bsd-3-clause
| 21,813
| 0.003668
|
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import datetime
import os
import sys
import tempfile
import unittest
if sys.version_info[0] == 2:
import mock
else:
import unittest.mock as mock
from pyfakefs import fake_filesystem_unittest
from unexpected_passes_common import data_types
from unexpected_passes_common import expectations
from unexpected_passes_common import unittest_utils as uu
FAKE_EXPECTATION_FILE_CONTENTS = """\
# tags: [ win linux ]
# results: [ Failure RetryOnFailure Skip Pass ]
crbug.com/1234 [ win ] foo/test [ Failure ]
[ linux ] foo/test [ Failure ]
crbug.com/2345 [ linux ] bar/* [ RetryOnFailure ]
crbug.com/3456 [ linux ] some/bad/test [ Skip ]
crbug.com/4567 [ linux ] some/good/test [ Pass ]
"""
SECONDARY_FAKE_EXPECTATION_FILE_CONTENTS = """\
# tags: [ mac ]
# results: [ Failure ]
crbug.com/4567 [ mac ] foo/test [ Failure ]
"""
FAKE_EXPECTATION_FILE_CONTENTS_WITH_TYPO = """\
# tags: [ win linux ]
# results: [ Failure RetryOnFailure Skip ]
crbug.com/1234 [ wine ] foo/test [ Failure ]
[ linux ] foo/test [ Failure ]
crbug.com/2345 [ linux ] bar/* [ RetryOnFailure ]
crbug.com/3456 [ linux ] some/bad/test [ Skip ]
"""
class CreateTestExpectationMapUnittest(unittest.TestCase):
def setUp(self):
self.instance = expectations.Expectations()
self._expectation_content = {}
self._content_patcher = mock.patch.object(
self.instance, '_GetNonRecentExpectationContent')
self._content_mock = self._content_patcher.start()
self.addCleanup(self._content_patcher.stop)
def SideEffect(filepath, _):
return self._expectation_content[filepath]
self._content_mock.side_effect = SideEffect
def testExclusiveOr(self):
"""Tests that only one input can be specified."""
with self.assertRaises(AssertionError):
self.instance.CreateTestExpectationMap(None, None, 0)
with self.assertRaises(AssertionError):
self.instance.CreateTestExpectationMap('foo', ['bar'], 0)
def testExpectationFile(self):
"""Tests reading expectations from an expectation file."""
filename = '/tmp/foo'
self._expectation_content[filename] = FAKE_EXPECTATION_FILE_CONTENTS
expectation_map = self.instance.CreateTestExpectationMap(filename, None, 0)
# Skip expectations should be omitted, but everything else should be
# present.
# yapf: disable
expected_expectation_map = {
filename: {
data_types.Expectation(
'foo/test', ['win'], ['Failure'], 'crbug.com/1234'): {},
data_types.Expectation('foo/test', ['linux'], ['Failure']): {},
data_types.Expectation(
'bar/*', ['linux'], ['RetryOnFailure'], 'crbug.com/2345'): {},
},
}
# yapf: enable
self.assertEqual(expectation_map, expected_expectation_map)
self.assertIsInstance(expectation_map, data_types.TestExpectationMap)
def testMultipleExpectationFiles(self):
"""Tests reading expectations from multiple files."""
filename1 = '/tmp/foo'
filename2 = '/tmp/bar'
expectation_files = [filename1, filename2]
self._expectation_content[filename1] = FAKE_EXPECTATION_FILE_CONTENTS
self._expectation_content[
filename2] = SECONDARY_FAKE_EXPECTATION_FILE_CONTENTS
expectation_map = self.instance.CreateTestExpectationMap(
expectation_files, None, 0)
# yapf: disable
expected_expectation_map = {
expectation_files[0]: {
data_types.Expectation(
'foo/test', ['win'], ['Failure'], 'crbug.com/1234'): {},
data_types.Expectation('foo/test', ['linux'], ['Failure']): {},
data_types.Expectation(
'bar/*', ['linux'], ['RetryOnFailure'], 'crbug.com/2345'): {},
},
expectation_files[1]: {
data_types.Expectation(
'foo/test', ['mac'], ['Failure'], 'crbug.com/4567'): {},
}
}
# yapf: enable
self.assertEqual(expectation_map, expected_expectation_map)
self.assertIsInstance(expectation_map, data_types.TestExpectationMap)
def testIndividualTests(self):
"""Tests reading expectations from a list of tests."""
expectation_map = self.instance.CreateTestExpectationMap(
None, ['foo/test', 'bar/*'], 0)
expected_expectation_map = {
'': {
data_types.Expectation('foo/test', [], ['RetryOnFailure']): {},
data_types.Expectation('bar/*', [], ['RetryOnFailure']): {},
},
}
self.assertEqual(expectation_map, expected_expectation_map)
self.assertIsInstance(expectation_map, data_types.TestExpectationMap)
class GetNonRecentExpectationContentUnittest(unittest.TestCase):
def setUp(self):
self.instance = uu.CreateGenericExpectations()
self._output_patcher = mock.patch(
'unexpected_passes_common.expectations.subprocess.check_output')
self._output_mock = self._output_patcher.start()
self.addCleanup(self._output_patcher.stop)
def testBasic(self):
"""Tests that only expectations that are old enough are kept."""
today_date = datetime.date.today()
yesterday_date = today_date - datetime.timedelta(days=1)
older_date = today_date - datetime.timedelta(days=2)
today_str = today_date.isoformat()
yesterday_str = yesterday_date.isoformat()
older_str = older_date.isoformat()
# pylint: disable=line-too-long
blame_output = """\
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 1)# tags: [ tag1 ]
98637cd80f8c15 (Some R. Author {yesterday_date} 00:00:00 +0000 2)# tags: [ tag2 ]
3fcadac9d861d0 (Some R. Author {older_date} 00:00:00 +0000 3)# results: [ Failure ]
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 4)
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 5)crbug.com/1234 [ tag1 ] testname [ Failure ]
98637cd80f8c15 (Some R. Author {yesterday_date} 00:00:00 +0000 6)[ tag2 ] testname [ Failure ] # Comment
3fcadac9d861d0 (Some R. Author {older_date} 00:00:00 +0000 7)[ tag1 ] othertest [ Failure ]"""
# pylint: enable=line-too-long
blame_output = blame_output.format(today_date=today_str,
yesterday_date=yesterday_str,
older_date=older_str)
self._output_mock.return_value = blame_output.encode('utf-8')
expected_content = """\
# tags: [ tag1 ]
# tags: [ tag2 ]
# results: [ Failure ]
[ tag1 ] othertest [ Failure ]"""
self.assertEqual(self.instance._GetNonRecentExpectationContent('', 1),
expected_content)
def testNegativeGracePeriod(self):
"""Tests that setting a negative grace period disables filtering."""
today_date = datetime.date.today()
yesterday_date = today_date - datetime.timedelta(days=1)
older_date = today_date - datetime.timedelta(days=2)
today_str = today_date.isoformat()
yesterday_str = yesterday_date.isoformat()
older_str = older_date.isoformat()
# pylint: disable=line-too-long
blame_output = """\
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 1)# tags: [ tag1 ]
98637cd80f8c15 (Some R. Author {yesterday_date} 00:00:00 +0000 2)# tags: [ tag2 ]
3fcadac9d861d0 (Some R. Author {older_date} 00:00:00 +0000 3)# results: [ Failure ]
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 4)
5f03bc04975c04 (Some R. Author {today_date} 00:00:00 +0000 5)crbug.com/1234 [ tag1 ] testname [ Failure ]
98637cd80f8c15 (Some R. Author {yesterday_date} 00:00:00 +0000 6)[ tag2 ] testname [ Failure ] # Comment
3fcadac9d861d0 (Some R. Author {older_date} 00:00:00 +0000 7)[ tag1 ] othertest [ Failure ]"""
# pylint: enable=line-too-long
blame_output = blame_output.format(today_date=today_str,
yesterday_date=yesterday_str,
older_date=older_str)
self._output_mock.return_value = blame_output.encode('utf-8')
expected_content = """\
# tags: [ tag1 ]
# tags: [ tag2 ]
# results: [ Failure ]
crbug.com/1234 [ tag1 ] testname [ Failure ]
[ tag2 ] testname [ Failure ] # Comment
[ tag1 ] othertest [ Failure ]"""
    self.assertEqual(self.instance._GetNonRecentExpectationContent('', -1),
                     expected_content)
|
gammu/python-gammu
|
gammu/worker.py
|
Python
|
gpl-2.0
| 10,344
| 0
|
# vim: expandtab sw=4 ts=4 sts=4:
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of python-gammu <https://wammu.eu/python-gammu/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Asynchronous communication to phone.
Mostly you should use only L{GammuWorker} class, others are only helpers
which are used by this class.
"""
import queue
import threading
import gammu
class InvalidCommand(Exception):
"""
Exception indicating invalid command.
"""
def __init__(self, value):
"""
Initializes exception.
@param value: Name of wrong command.
@type value: string
"""
super().__init__()
self.value = value
def __str__(self):
"""
Returns textual representation of exception.
"""
return f'Invalid command: "{self.value}"'
def check_worker_command(command):
"""
Checks whether command is valid.
@param command: Name of command.
@type command: string
"""
if hasattr(gammu.StateMachine, command):
return
raise InvalidCommand(command)
class GammuCommand:
"""
Storage of single command for gammu.
"""
def __init__(self, command, params=None, percentage=100):
"""
Creates single command instance.
"""
check_worker_command(command)
self._command = command
self._params = params
self._percentage = percentage
def get_command(self):
"""
Returns command name.
"""
return self._command
def get_params(self):
"""
Returns command params.
"""
return self._params
def get_percentage(self):
"""
Returns percentage of current task.
"""
return self._percentage
def __str__(self):
"""
Returns textual representation.
"""
if self._params is not None:
return f"{self._command} {self._params}"
else:
return f"{self._command} ()"
class GammuTask:
"""
    Storage of tasks for gammu.
"""
def __init__(self, name, commands):
"""
Creates single command instance.
@param name: Name of task.
@type name: string
@param commands: List of commands to execute.
@type commands: list of tuples or strings
"""
self._name = name
self._list = []
self._pointer = 0
for i in range(len(commands)):
if isinstance(commands[i], tuple):
cmd = commands[i][0]
try:
params = commands[i][1]
except IndexError:
params = None
else:
cmd = commands[i]
params = None
percents = round(100 * (i + 1) / len(commands))
self._list.append(GammuCommand(cmd, params, percents))
def get_next(self):
"""
Returns next command to be executed as L{GammuCommand}.
"""
result = self._list[self._pointer]
self._pointer += 1
return result
def get_name(self):
"""
Returns task name.
"""
return self._name
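    # A small sketch of how percentages are assigned (the method names are
    # real gammu.StateMachine calls; the task itself is illustrative):
    #   task = GammuTask('Info', ['GetManufacturer', 'GetModel'])
    #   task.get_next().get_percentage()  -> 50
    #   task.get_next().get_percentage()  -> 100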
def gammu_pull_device(state_machine):
state_machine.ReadDevice()
class GammuThread(threading.Thread):
"""
Thread for phone communication.
"""
def __init__(self, queue, config, callback, pull_func=gammu_pull_device):
"""
Initialises thread data.
@param queue: Queue with events.
@type queue: queue.Queue object.
@param config: Gammu configuration, same as
L{StateMachine.SetConfig} accepts.
@type config: hash
@param callback: Function which will be called upon operation
completing.
@type callback: Function, needs to accept four params: name of
completed operation, result of it, error code and percentage of
overall operation. This callback is called from different
thread, so please take care of various threading issues in other
modules you use.
"""
super().__init__()
self._kill = False
self._terminate = False
self._sm = gammu.StateMachine()
self._callback = callback
self._queue = queue
self._sm.SetConfig(0, config)
self._pull_func = pull_func
def _do_command(self, name, cmd, params, percentage=100):
"""
Executes single command on phone.
"""
func = getattr(self._sm, cmd)
error = "ERR_NONE"
result = None
try:
if params is None:
result = func()
elif isinstance(params, dict):
result = func(**params)
else:
result = func(*params)
except gammu.GSMError as info:
errcode = info.args[0]["Code"]
error = gammu.ErrorNumbers[errcode]
self._callback(name, result, error, percentage)
def run(self):
"""
Thread body, which handles phone communication. This should not
be used from outside.
"""
start = True
while not self._kill:
try:
if start:
task = GammuTask("Init", ["Init"])
start = False
else:
# Wait at most ten seconds for next command
task = self._queue.get(True, 10)
try:
while True:
cmd = task.get_next()
self._do_command(
task.get_name(),
cmd.get_command(),
cmd.get_params(),
cmd.get_percentage(),
)
except IndexError:
try:
if task.get_name() != "Init":
self._queue.task_done()
except (AttributeError, ValueError):
pass
except queue.Empty:
if self._terminate:
break
# Read the device to catch possible incoming events
try:
self._pull_func(self._sm)
except Exception as ex:
self._callback("ReadDevice", None, ex, 0)
def kill(self):
"""
Forces thread end without emptying queue.
"""
self._kill = True
def join(self, timeout=None):
"""
Terminates thread and waits for it.
"""
self._terminate = True
super().join(timeout)
class GammuWorker:
"""
    Wrapper class for asynchronous communication with Gammu. It spawns
    its own thread and then passes all commands to this thread. When a task
    is done, the caller is notified via callback.
    """
    def __init__(self, callback, pull_func=gammu_pull_device):
        """
        Initializes worker class.
        @param callback: See L{GammuThread.__init__} for description.
        """
        self._thread = None
self._callback = callback
self._config = {}
self._lock = threading.Lock()
self._queue = queue.Queue()
self._pull_func = pull_func
def enqueue_command(self, command, params):
"""
Enqueues command.
        @param command: Command(s) to execute. Each command is a tuple
        containing the function name and its parameters.
        """
|
mylokin/mustache
|
mustache/template.py
|
Python
|
mit
| 1,242
| 0.006441
|
import os
import re
from . import utils
PARTIAL = re.compile(r'(?P<tag>{{>\s*(?P<name>.+?)\s*}})')
PARTIAL_CUSTOM = re.compile(r'^(?P<whitespace>\s*)(?P<tag>{{>\s*(?P<name>.+?)\s*}}(?(1)\r?\n?))', re.M)
# def get_template(path, ext='html', partials=None):
# path = os.path.join(TEMPLATES_DIR, '{}.{}'.format(path, ext))
# with open(path, 'r') as fp:
# template = fp.read()
# return build(template, partials)
def build(template, partials=None):
    # get_template() above is commented out, so default to an empty mapping
    # instead of loading partials from disk.
    if partials is None:
        partials = {}
    template = '{}\n'.format(template)
    for regex in (PARTIAL_CUSTOM, PARTIAL):
        for match in regex.finditer(template):
            substitution = partials.get(match.group('name'), u'')
if substitution:
try:
substitution = '\n'.join('{}{}'.format(match.group('whitespace'), s) if s else s for s in substitution.split('\n'))
                except IndexError:
pass
else:
substitution = substitution[len(match.group('whitespace')):]
template = template.replace(match.group('tag'), substitution)
return utils.purify(template)
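# Illustrative call (the result then passes through utils.purify, so the
# exact output depends on that helper):
#   build('Hello {{> who }}!', partials={'who': 'world'})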
|
zhangyuygss/WSL
|
evaluate/py_demo.py
|
Python
|
bsd-3-clause
| 3,102
| 0.020954
|
import numpy as np
import sys
caffe_root = '/home/guillem/git/caffe/'
sys.path.insert(1, caffe_root+'python/')
import caffe
import cv2
from py_returnCAMmap import py_returnCAMmap
from py_map2jpg import py_map2jpg
import scipy.io
def im2double(im):
return cv2.normalize(im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
## Be aware that since Matlab is 1-indexed and column-major,
## the usual 4 blob dimensions in Matlab are [width, height, channels, num]
## In python the dimensions are [num, channels, width, height]
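## e.g. a batch holding one RGB 224x224 crop has blob shape [1, 3, 224, 224]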
model = 'googlenet'
if model == 'alexnet':
net_weights = 'models/alexnetplusCAM_imagenet.caffemodel'
net_model = 'models/deploy_alexnetplusCAM_imagenet.prototxt'
out_layer = 'fc9'
last_conv = 'conv7'
crop_size = 227
elif model == 'googlenet':
net_weights = 'models/imagenet_googleletCAM_train_iter_120000.caffemodel'
net_model = 'models/deploy_googlenetCAM.prototxt'
out_layer = 'CAM_fc'
crop_size = 224
last_conv = 'CAM_conv'
else:
raise Exception('This model is not defined')
categories = scipy.io.loadmat('categories1000.mat')
# load CAM model and extract features
net = caffe.Net(net_model, net_weights, caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
#transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
weights_LR = net.params[out_layer][0].data # get the softmax layer of the network
# shape: [1000, N] N-> depends on the network
image = cv2.imread('img2.jpg')
image = cv2.resize(image, (256, 256))
# Take center crop.
center = np.array(image.shape[:2]) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-np.array([crop_size, crop_size]) / 2.0,
np.array([crop_size, crop_size]) / 2.0
])
crop = crop.astype(int)
input_ = image[crop[0]:crop[2], crop[1]:crop[3], :]
# extract conv features
net.blobs['data'].reshape(*np.asarray([1,3,crop_size,crop_size])) # run only one image
net.blobs['data'].data[...][0,:,:,:] = transformer.preprocess('data', input_)
out = net.forward()
scores = out['prob']
activation_lastconv = net.blobs[last_conv].data
## Class Activation Mapping
topNum = 5 # generate heatmap for top X prediction results
scoresMean = np.mean(scores, axis=0)
ascending_order = np.argsort(scoresMean)
IDX_category = ascending_order[::-1] # [::-1] to sort in descending order
curCAMmapAll = py_returnCAMmap(activation_lastconv, weights_LR[IDX_category[:topNum],:])
curResult = im2double(image)
for j in range(topNum):
# for one image
curCAMmap_crops = curCAMmapAll[:,:,j]
curCAMmapLarge_crops = cv2.resize(curCAMmap_crops, (256,256))
curHeatMap = cv2.resize(im2double(curCAMmapLarge_crops),(256,256)) # this line is not doing much
curHeatMap = im2double(curHeatMap)
curHeatMap = py_map2jpg(curHeatMap, None, 'jet')
curHeatMap = im2double(image)*0.2+im2double(curHeatMap)*0.7
cv2.imshow(categories['categories'][IDX_category[j]][0][0], curHeatMap)
cv2.waitKey(0)
|
hellowebbooks/hellowebbooks-website
|
blog/migrations/0001_initial.py
|
Python
|
mit
| 5,381
| 0.002602
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-18 10:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.contrib.routable_page.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtailmd.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0019_delete_filter'),
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=80, unique=True)),
],
options={
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
},
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=(wagtail.contrib.routable_page.models.RoutablePageMixin, 'wagtailcore.page'),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LandingPage',
fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image')), ('two_columns', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Left column content')), ('right_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Right column content'))))), ('embedded_video', wagtail.embeds.blocks.EmbedBlock(icon='media'))), blank=True, null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtailmd.utils.MarkdownField()),
('date', models.DateTimeField(default=datetime.datetime.today, verbose_name='Post date')),
('excerpt', wagtailmd.utils.MarkdownField(blank=True, verbose_name='excerpt')),
('categories', modelcluster.fields.ParentalManyToManyField(blank=True, to='blog.BlogCategory')),
('header_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Tag',
fields=[
],
options={
'indexes': [],
'proxy': True,
},
bases=('taggit.tag',),
),
migrations.AddField(
model_name='postpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='blogpagetag',
name='content_object',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_tags', to='blog.PostPage'),
),
migrations.AddField(
model_name='blogpagetag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag'),
),
]
|
vdeluca/tfi
|
geonode/contrib/dynamic/postgis.py
|
Python
|
gpl-3.0
| 10,137
| 0.000197
|
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# Copyright (C) 2008 Neogeo Technologies
#
# This file is part of Opencarto project
#
# Opencarto is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Opencarto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Opencarto. If not, see <http://www.gnu.org/licenses/>.
#
from django import db
from django.contrib.gis.gdal import DataSource, SpatialReference, OGRGeometry
from django.utils.text import slugify
def get_model_field_name(field):
"""Get the field name usable without quotes.
"""
# Remove spaces and strange characters.
field = slugify(field)
# Use underscores instead of dashes.
field = field.replace('-', '_')
# Use underscores instead of semicolons.
field = field.replace(':', '_')
# Do not let it be called id
if field in ('id',):
field += '_'
# Avoid postgres reserved keywords.
if field.upper() in PG_RESERVED_KEYWORDS:
field += '_'
# Do not let it end in underscore
if field[-1:] == '_':
field += 'field'
# Make sure they are not numbers
try:
int(field)
float(field)
field = "_%s" % field
except ValueError:
pass
return field
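# Illustrative inputs/outputs for get_model_field_name:
#   'Objet ID' -> 'objet_id'     (slugified, dashes become underscores)
#   'id'       -> 'id_field'     ('id' is disallowed, trailing '_' padded)
#   'TABLE'    -> 'table_field'  (postgres reserved keyword)
#   '2000'     -> '_2000'        (pure numbers get a leading underscore)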
def transform_geom(wkt, srid_in, srid_out):
proj_in = SpatialReference(int(srid_in))
proj_out = SpatialReference(int(srid_out))
ogr = OGRGeometry(wkt)
if hasattr(ogr, 'srs'):
ogr.srs = proj_in
else:
ogr.set_srs(proj_in)
ogr.transform_to(proj_out)
return ogr.wkt
def get_extent_from_text(points, srid_in, srid_out):
"""Transform an extent from srid_in to srid_out."""
proj_in = SpatialReference(srid_in)
proj_out = SpatialReference(srid_out)
if srid_out == 900913:
if int(float(points[0])) == -180:
points[0] = -179
if int(float(points[1])) == -90:
points[1] = -89
if int(float(points[2])) == 180:
points[2] = 179
if int(float(points[3])) == 90:
points[3] = 89
wkt = 'POINT(%f %f)' % (float(points[0]), float(points[1]))
wkt2 = 'POINT(%f %f)' % (float(points[2]), float(points[3]))
ogr = OGRGeometry(wkt)
ogr2 = OGRGeometry(wkt2)
if hasattr(ogr, 'srs'):
ogr.srs = proj_in
ogr2.srs = proj_in
else:
ogr.set_srs(proj_in)
ogr2.set_srs(proj_in)
ogr.transform_to(proj_out)
ogr2.transform_to(proj_out)
wkt = ogr.wkt
wkt2 = ogr2.wkt
mins = wkt.replace('POINT (', '').replace(')', '').split(' ')
maxs = wkt2.replace('POINT (', '').replace(')', '').split(' ')
mins.append(maxs[0])
mins.append(maxs[1])
return mins
def merge_geometries(geometries_str, sep='$'):
"""Take a list of geometries in a string, and merge it."""
geometries = geometries_str.split(sep)
if len(geometries) == 1:
return geometries_str
else:
pool = OGRGeometry(geometries[0])
for geom in geometries:
pool = pool.union(OGRGeometry(geom))
return pool.wkt
def file2pgtable(infile, table_name, srid=4326):
"""Create table and fill it from file."""
table_name = table_name.lower()
datasource = DataSource(infile)
layer = datasource[0]
    # Build the CREATE TABLE statement
geo_type = str(layer.geom_type).upper()
coord_dim = 0
    # Oddly, MapInfo polygon layers are not detected
if geo_type == 'UNKNOWN' and (
infile.endswith('.TAB') or infile.endswith('.tab') or
infile.endswith('.MIF') or infile.endswith('.mif')):
geo_type = 'POLYGON'
sql = 'BEGIN;'
# Drop table if exists
sql += 'DROP TABLE IF EXISTS %s;' % (table_name)
sql += "CREATE TABLE %s(" % (table_name)
first_feature = True
# Mapping from postgis table to shapefile fields.
mapping = {}
for feature in layer:
# Getting the geometry for the feature.
geom = feature.geom
if geom.geom_count > 1:
if not geo_type.startswith('MULTI'):
geo_type = 'MULTI' + geo_type
if geom.coord_dim > coord_dim:
coord_dim = geom.coord_dim
if coord_dim > 2:
coord_dim = 2
if first_feature:
first_feature = False
fields = []
fields.append('id' + " serial NOT NULL PRIMARY KEY")
fieldnames = []
for field in feature:
field_name = get_model_field_name(field.name)
if field.type == 0: # integer
fields.append(field_name + " integer")
fieldnames.append(field_name)
elif field.type == 2: # float
fields.append(field_name + " double precision")
fieldnames.append(field_name)
elif field.type == 4:
fields.append(field_name + " character varying(%s)" % (
field.width))
fieldnames.append(field_name)
elif field.type == 8 or field.type == 9 or field.type == 10:
fields.append(field_name + " date")
fieldnames.append(field_name)
mapping[field_name] = field.name
sql += ','.join(fields)
sql += ');'
sql += "SELECT AddGeometryColumn('public','%s','geom',%d,'%s',%d);" % \
(table_name, srid, geo_type, coord_dim)
sql += 'END;'
    # The table has been created; now load the data into it
fieldnames.append('geom')
mapping['geom'] = geo_type
# Running the sql
execute(sql)
return mapping
def execute(sql):
    """Turns out running plain SQL within Django is very hard.
    The following code is really weak but gets the job done.
    """
    cursor = db.connections['datastore'].cursor()
    try:
        cursor.execute(sql)
    finally:
        cursor.close()
# Obtained from
# http://www.postgresql.org/docs/9.2/static/sql-keywords-appendix.html
PG_RESERVED_KEYWORDS = ('ALL',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASYMMETRIC',
'AUTHORIZATION',
'BOTH',
'BINARY',
'CASE',
'CAST',
'CHECK',
'COLLATE',
'COLLATION',
'COLUMN',
'CONSTRAINT',
'CREATE',
'CROSS',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'DEFAULT',
'DEFERRABLE',
'DESC',
'DISTINCT',
'DO',
'ELSE',
'END',
'EXCEPT',
'FALSE',
'FETCH',
'FOR',
'FOREIGN',
                        'FREEZE',
                        'FROM',
                        'FULL',
                        'GRANT',
                        'GROUP',
                        'HAVING',
                        'ILIKE',
                        'IN',
                        'INITIALLY',
                        'INNER',
                        'INTERSECT',
                        'INTO',
                        'IS',
                        'ISNULL',
                        'JOIN',
                        'LEADING',
                        'LEFT',
                        'LIKE',
                        'LIMIT',
                        'LOCALTIME',
                        'LOCALTIMESTAMP',
                        'NATURAL',
                        'NOT',
                        'NOTNULL',
                        'NULL',
                        'OFFSET',
                        'ON',
                        'ONLY',
                        'OR',
                        'ORDER',
                        'OUTER',
                        'OVERLAPS',
                        'PLACING',
                        'PRIMARY',
                        'REFERENCES',
                        'RETURNING',
                        'RIGHT',
                        'SELECT',
                        'SESSION_USER',
                        'SIMILAR',
                        'SOME',
                        'SYMMETRIC',
                        'TABLE',
                        'THEN',
                        'TO',
                        'TRAILING',
                        'TRUE',
                        'UNION',
                        'UNIQUE',
                        'USER',
                        'USING',
                        'VARIADIC',
                        'VERBOSE',
                        'WHEN',
                        'WHERE',
                        'WINDOW',
                        'WITH')
|
ingenieroariel/webandgis
|
manage.py
|
Python
|
mit
| 252
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webandgis.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
keras-team/keras
|
keras/feature_column/dense_features_test.py
|
Python
|
apache-2.0
| 43,567
| 0.005624
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import test_util as tf_test_utils # pylint: disable=g-direct-tensorflow-import
from keras.testing_infra import test_combinations
from keras.feature_column import dense_features as df
def _initialized_session(config=None):
sess = tf.compat.v1.Session(config=config)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.tables_initializer())
return sess
class DenseFeaturesTest(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=['graph', 'eager']))
def test_retrieving_input(self):
features = {'a': [0.]}
dense_features = df.DenseFeatures(tf.feature_column.numeric_column('a'))
inputs = self.evaluate(dense_features(features))
self.assertAllClose([[0.]], inputs)
@test_combinations.generate(test_combinations.combine(mode=['eager']))
def test_reuses_variables(self):
sparse_input = tf.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = tf.feature_column.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures([embedding_column])
features = {'a': sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking dense_features on the same features does not create
# additional variables
_ = dense_features(features)
self.assertEqual(1, len(variables))
self.assertIs(variables[0], dense_features.variables[0])
@test_combinations.generate(test_combinations.combine(mode=['eager']))
def test_dense_feature_with_partitioner(self):
sparse_input = tf.SparseTensor(
indices=((0, 0), (1, 0), (2, 0), (3, 0)),
values=(0, 1, 3, 2),
dense_shape=(4, 4))
# Create feature columns (categorical and embedding).
categorical_column = tf.feature_column.categorical_column_with_identity(
key='a', num_buckets=4)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
offset = partition_info._var_offset[0]
del shape # unused
del dtype # unused
if offset == 0:
embedding_values = (
(1, 0), # id 0
(0, 1)) # id 1
else:
embedding_values = (
(1, 1), # id 2
(2, 2)) # id 3
return embedding_values
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures(
[embedding_column], partitioner=tf.compat.v1.fixed_size_partitioner(2))
features = {'a': sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [2, 2], [1, 1]], inputs)
    # Check that two variables were created (one per partition).
    self.assertEqual(2, len(variables))
# Check that invoking dense_features on the same features does not create
# additional variables
_ = dense_features(features)
self.assertEqual(2, len(variables))
self.assertIs(variables[0], dense_features.variables[0])
self.assertIs(variables[1], dense_features.variables[1])
@test_combinations.generate(test_combinations.combine(mode=['eager']))
def test_feature_column_dense_features_gradient(self):
sparse_input = tf.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = tf.feature_column.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
    def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
          (0, 1),  # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = dense_features(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(ValueError,
'feature_columns must not be empty'):
df.DenseFeatures(feature_columns=[])(features={})
def test_should_be_dense_column(self):
with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
df.DenseFeatures(feature_columns=[
tf.feature_column.categorical_column_with_hash_bucket('wire_cast', 4)
])(
features={
'a': [[0]]
})
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
df.DenseFeatures(
feature_columns={'a': tf.feature_column.numeric_column('a')})(
features={
'a': [[0]]
})
def test_bare_column(self):
with tf.Graph().as_default():
      features = {'a': [0.]}
net = df.DenseFeatures(tf.feature_column.numeric_column('a'))(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with tf.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
columns = (tf.feature_column.numeric_column(key) for key in features)
net = df.DenseFeatures(columns)(features)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())
      self.assertAllClose([[0., 1.]], self.evaluate(net))
|
qiankunshe/sky_engine
|
sky/tools/skydoc.py
|
Python
|
bsd-3-clause
| 1,262
| 0.001585
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
import webbrowser
SKY_TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
SKY_ROOT = os.path.dirname(SKY_TOOLS_DIR)
SRC_ROOT = os.path.dirname(SKY_ROOT)
WORKBENCH_DIR = os.path.join(SRC_ROOT, 'sky', 'packages', 'workbench')
SKY_PACKAGE = os.path.join(SRC_ROOT, 'sky', 'packages', 'sky')
DART_SDK = os.path.join(SRC_ROOT, 'third_party', 'dart-sdk', 'dart-sdk', 'bin')
DARTDOC = os.path.join(DART_SDK, 'dartdoc')
PUB_CACHE = os.path.join(SRC_ROOT, 'dart-pub-cache')
def main():
parser = argparse.ArgumentParser(description='Sky Documentation Generator')
parser.add_argument('--open', action='store_true',
help='Open docs after building.')
args = parser.parse_args()
doc_dir = os.path.join(SKY_PACKAGE, 'doc', 'api')
cmd = [
DARTDOC,
'--input', SKY_PACKAGE,
'--output', doc_dir
]
subprocess.check_call(cmd, cwd=WORKBENCH_DIR)
if args.open:
webbrowser.open(os.path.join(doc_dir, 'index.html'))
if __name__ == '__main__':
sys.exit(main())
|
gnmiller/craig-bot
|
craig-bot/lib/python3.6/site-packages/discord/player.py
|
Python
|
mit
| 10,909
| 0.001467
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import threading
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
from .errors import ClientException
from .opus import Encoder as OpusEncoder
log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegPCMAudio',
'PCMVolumeTransformer',
)
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self):
"""Reads 20ms worth of audio.
Subclasses must implement this.
If the audio is complete, then returning an empty
:term:`py:bytes-like object` to signal this is the way to do so.
If :meth:`is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self):
"""Checks if the audio source is already encoded in Opus.
Defaults to ``False``.
"""
return False
def cleanup(self):
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self):
self.cleanup()
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: file-like object
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream):
self.stream = stream
def read(self):
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegPCMAudio(AudioSource):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, BinaryIO]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is True then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If true, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[BinaryIO]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(self, source, *, executable='ffmpeg', pipe=False, stderr=None, before_options=None, options=None):
stdin = None if not pipe else source
args = [executable]
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
self._process = None
try:
self._process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr)
self._stdout = self._process.stdout
except FileNotFoundError:
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException('Popen failed: {0.__class__.__name__}: {0}'.format(exc)) from exc
def read(self):
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def cleanup(self):
proc = self._process
if proc is None:
return
log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
proc.kill()
if proc.poll() is None:
log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
self._process = None
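# Usage sketch (assumptions: ffmpeg is on PATH and 'music.mp3' is a
# hypothetical local file):
#
#     source = FFmpegPCMAudio('music.mp3')
#     # or, piping an already-open binary stream to ffmpeg's stdin:
#     with open('music.mp3', 'rb') as fp:
#         source = FFmpegPCMAudio(fp, pipe=True)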
class PCMVolumeTransformer(AudioSource):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: float
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original, volume=1.0):
if not isinstance(original, AudioSource):
raise TypeError('expected AudioSource not {0.__class__.__name__}.'.format(original))
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original = original
self.volume = volume
@property
def volume(self):
"""Retrieves or sets the volume as a floating point percentage (e.g. 1.0 for 100%)."""
return self._volume
@volume.setter
def volume(self, value):
self._volume = max(value, 0.0)
def cleanup(self):
self.original.cleanup()
def read(self):
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
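# Usage sketch: wrap any non-Opus source to gain runtime volume control.
# 'music.mp3' is a hypothetical file; the effective volume is clamped to
# [0.0, 2.0] by the setter and read() above.
#
#     source = PCMVolumeTransformer(FFmpegPCMAudio('music.mp3'), volume=0.5)
#     source.volume = 1.5  # raise to 150% mid-playback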
class AudioPlayer(threading.Thread):
DELAY = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source, client, *, after=None):
threading.Thread.__init__(self)
self.daemon = True
self.source = source
self.client = client
self.after = after
self._end = threading.Event()
self._resumed = threading.Event()
self._resumed.set() # we are not paused
self._current_error = None
self._connected = client._connected
|
qalhata/Python-Scripts-Repo-on-Data-Science
|
SQL_Det_Pop_Sum_by_Column.py
|
Python
|
gpl-3.0
| 1,516
| 0.00066
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 22 23:03:45 2017
@author: Shabaka
"""
# import pandas
import pandas as pd
# Import Pyplot as plt from matplotlib
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
# Import func
from sqlalchemy.sql import func
from sqlalchemy import MetaData, Table, select
metadata = MetaData()
engine = create_engine('sqlite:///census_nyc.sqlite')
# Open a connection for executing statements
connection = engine.connect()
# Reflect census table from the engine: census
census = Table('census', metadata, autoload=True, autoload_with=engine)
# Build an expression to calculate the sum of pop2008 labeled as population
pop2008_sum = func.sum(census.columns.pop2008).label("population")
# Build a query to select the state and sum of pop2008 as population grouped by
# state: stmt
stmt = select([census.columns.state, pop2008_sum])
# Append group by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys())
# Create a DataFrame from the results: df
df = pd.DataFrame(results)
# Set column names
df.columns = results[0].keys()
# Print the DataFrame
print(df)
# Plot the DataFrame
df.plot.bar()
plt.show()
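# Alternative sketch (assumes the same engine and stmt as above): pandas
# can execute a SQLAlchemy selectable directly, skipping the manual
# fetchall()/column-naming steps.
# df = pd.read_sql(stmt, engine)
# df.plot.bar()
# plt.show()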
|
node13h/droll
|
droll/core/tests/factories.py
|
Python
|
agpl-3.0
| 1,048
| 0
|
# Copyright (C) 2017 Sergej Alikov <sergej.alikov@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import factory
from ..models import Link
from droll.access.tests.factories import UserFactory
class LinkFactory(factory.django.DjangoModelFactory):
class Meta:
model = Link
title = factory.Sequence(lambda n: 'Link nr. {}'.format(n))
user = factory.SubFactory(UserFactory)
url = 'http://www.google.com/'
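# Usage sketch inside a test (hypothetical; assumes a Django test case
# with database access):
#
#     link = LinkFactory()                 # creates and saves one Link
#     links = LinkFactory.create_batch(3)  # three Links, sequential titles
#     draft = LinkFactory.build()          # unsaved instance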
|
django-leonardo/django-constance
|
constance/models.py
|
Python
|
bsd-3-clause
| 1,142
| 0.001751
|
from django.db.models import signals
def create_perm(*args, **kwargs):
"""
Creates a fake content type and permission
to be able to check for permissions
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django import VERSION
if ContentType._meta.installed and Permission._meta.installed:
if VERSION >= (1, 8):
content_type, created = ContentType.objects.get_or_create(
app_label='constance',
model='config')
else:
content_type, created = ContentType.objects.get_or_create(
name='config',
app_label='constance',
model='config')
permission, created = Permission.objects.get_or_create(
name='Can change config',
content_type=content_type,
codename='change_config')
if hasattr(signals, 'post_syncdb'):
signals.post_syncdb.connect(create_perm, dispatch_uid="constance.create_perm")
else:
signals.post_migrate.connect(create_perm, dispatch_uid="constance.create_perm")
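# Usage sketch: once create_perm has run (e.g. after migrate), the fake
# permission behaves like any other Django permission. 'user' is a
# hypothetical authenticated User instance.
#
#     if user.has_perm('constance.change_config'):
#         ...  # allow editing the dynamic settings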
|
balamuruhans/avocado-vt
|
virttest/libvirt_xml/nwfilter_protocols/base.py
|
Python
|
gpl-2.0
| 3,970
| 0
|
"""
Common base classes for filter rule protocols
"""
from six import StringIO
from virttest import xml_utils
from virttest.libvirt_xml import base, xcepts, accessors
class UntypedDeviceBase(base.LibvirtXMLBase):
"""
Base class implementing common functions for all rule protocol XML w/o a
type attr.
"""
__slots__ = ('protocol_tag',)
# Subclasses are expected to hide protocol_tag
def __init__(self, protocol_tag, virsh_instance=base.virsh):
"""
Initialize untyped filter rule instance's basic XML with protocol_tag
"""
super(UntypedDeviceBase, self).__init__(virsh_instance=virsh_instance)
# Just a regular dictionary value
# (Using a property to change element tag won't work)
self['protocol_tag'] = protocol_tag
# setup bare-bones XML
self.xml = u"<%s/>" % protocol_tag
def from_element(self, element):
"""
Stateful helper used by the new_from_element class method.
"""
class_name = self.__class__.__name__
if element.tag != class_name.lower():
raise xcepts.LibvirtXMLError('Refusing to create %s instance '
'from %s tagged element'
% (class_name, element.tag))
# XMLTreeFile only supports element trees
etree = xml_utils.ElementTree.ElementTree(element)
# ET only writes to open file-like objects
xmlstr = StringIO()
# Need element tree string value to initialize LibvirtXMLBase.xml
etree.write(xmlstr, xml_utils.ENCODING)
# Create a new XMLTreeFile object based on string input
self.xml = xmlstr.getvalue()
@classmethod
def new_from_element(cls, element, virsh_instance=base.virsh):
"""
Create a new filter rule XML instance from a single ElementTree
element
"""
# subclasses __init__ only takes virsh_instance parameter
instance = cls(virsh_instance=virsh_instance)
instance.from_element(element)
return instance
@classmethod
def new_from_dict(cls, properties, virsh_instance=base.virsh):
"""
Create a new filter rule XML instance from a dict-like object
"""
instance = cls(virsh_instance=virsh_instance)
for key, value in list(properties.items()):
setattr(instance, key, value)
return instance
class TypedDeviceBase(UntypedDeviceBase):
"""
Base class implementing common functions for all filter rule XML with a
type attr.
"""
__slots__ = ('type_name',)
# Subclasses are expected to hide protocol_tag
def __init__(self, protocol_tag, type_name, virsh_instance=base.virsh):
"""
Initialize Typed filter rule protocol instance's basic XML with
type_name & protocol_tag
"""
# generate getter, setter, deleter for 'type_name' property
accessors.XMLAttribute('type_name', self,
# each rule protocol is its own XML "document"
# because python 2.6 ElementPath is broken
parent_xpath='/',
tag_name=protocol_tag,
attribute='type')
super(TypedDeviceBase, self).__init__(protocol_tag=protocol_tag,
virsh_instance=virsh_instance)
# Calls accessor to modify xml
self.type_name = type_name
@classmethod
def new_from_element(cls, element, virsh_instance=base.virsh):
"""
Hides type_name from superclass new_from_element().
"""
type_name = element.get('type', None)
# subclasses must hide protocol_tag parameter
instance = cls(type_name=type_name,
virsh_instance=virsh_instance)
instance.from_element(element)
return instance
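# Illustrative sketch (hypothetical subclass, not from this module):
# concrete protocols hide protocol_tag and expose only type_name, as
# the comments above describe.
#
#     class MAC(TypedDeviceBase):
#         def __init__(self, type_name='mac', virsh_instance=base.virsh):
#             super(MAC, self).__init__(protocol_tag='mac',
#                                       type_name=type_name,
#                                       virsh_instance=virsh_instance)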
|