| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| google/differentiable-atomistic-potentials | dap/tf/visualize.py | Python | apache-2.0 | 4,530 | 0.001987 |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization utilities for tensorflow graphs."""
import tensorflow as tf
from graphviz import Digraph
import tempfile
import hashlib
import numpy as np
import os
import webbrowser
from IPython.display import clear_output, Image, display, HTML
import time
def tf_to_dot(graph=None, fname=None, format=None):
"""
Create an image from a tensorflow graph.
graph: The tensorflow graph to visualize. Defaults to tf.get_default_graph()
fname: Filename to save the graph image in
format: Optional image extension. If you do not use this, the extension is
derived from the fname.
Returns an org-mode link to the path where the image is.
Adapted from https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html
Note: This can make very large images for complex graphs.
"""
dot = Digraph()
if graph is None:
graph = tf.get_default_graph()
shapes = {'Const': 'circle',
'Placeholder': 'oval'}
for n in graph.as_graph_def().node:
shape = tuple([dim.size for dim
in n.attr['value'].tensor.tensor_shape.dim])
dot.node(n.name, label=f'{n.name} {shape}',
shape=shapes.get(n.op, None))
for i in n.input:
dot.edge(i, n.name)
m = hashlib.md5()
m.update(str(dot).encode('utf-8'))
if fname is None:
fname = 'tf-graph-' + m.hexdigest()
if format is None:
base, ext = os.path.splitext(fname)
fname = base
format = ext[1:] or 'png'
dot.format = format
dot.render(fname)
os.unlink(fname)
print(f'{fname}, {format}')
return f'[[./{fname}.{format}]]'
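# --- Editor's illustrative sketch (not part of the original file) ---
# A hedged example of how tf_to_dot might be called; wrapped in a helper so it
# does not run on import. Assumes TensorFlow 1.x and Graphviz are installed;
# the node names ('a', 'b', 'sum') are made up for illustration.
def _example_tf_to_dot():
    g = tf.Graph()
    with g.as_default():
        a = tf.constant(1.0, name='a')
        b = tf.constant(2.0, name='b')
        tf.add(a, b, name='sum')
    # Renders tf-graph-<md5>.png and returns an org-mode link to it.
    return tf_to_dot(graph=g, format='png')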
# Tensorboard visualizations
# Adapted from https://gist.githubusercontent.com/yaroslavvb/97504b8221a8529e7a51a50915206d68/raw/f1473d2873676c0e885b9fbd363c882a7a83b28a/show_graph
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = f"<stripped {size} bytes>".encode('utf-8')
return strip_def
def show_graph(graph_def=None, browser=True,
width=1200, height=800,
max_const_size=32, ungroup_gradients=False):
"""Open a graph in Tensorboard. By default this is done in a browser. If you set
browser to False, then html will be emitted that shows up in a Jupyter
notebook.
"""
if not graph_def:
graph_def = tf.get_default_graph().as_graph_def()
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
data = str(strip_def)
if ungroup_gradients:
data = data.replace('"gradients/', '"b_')
#print(data)
code = """<style>.container {{ width:100% !important; }}</style>
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(data), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:100%;height:100%;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
if browser:
fh, tmpf = tempfile.mkstemp(prefix='tf-graph-', suffix='.html')
os.close(fh)
with open(tmpf, 'w') as f:
f.write(iframe)
webbrowser.open('file://' + tmpf)
else:
display(HTML(iframe))
| jjconti/ayrton | ayrton/ast_pprinter.py | Python | gpl-3.0 | 18,702 | 0.020426 |
from ast import Module, ImportFrom, Expr, Call, Name, FunctionDef, Assign, Str
from ast import dump, If, Compare, Eq, For, Attribute, Gt, Num, IsNot, BinOp
from ast import NameConstant, Mult, Add, Import, List, Dict, Is, BoolOp, And
from ast import Subscript, Index, Tuple, Lt, Sub, Global, Return, AugAssign
from ast import While, UnaryOp, Not, ClassDef, Mod, Yield, NotEq, Try, Pass
from ast import ExceptHandler, Break, Slice, USub, ListComp, In, Lambda, BitAnd
from ast import BitOr, Or, Delete, Bytes, Raise, NotIn, RShift, GeneratorExp
from ast import Assert, Set, SetComp, LtE, IfExp, FloorDiv, GtE, With, Continue
from ast import YieldFrom, UAdd, LShift, DictComp, Div, Starred, BitXor, Pow
from _ast import arguments, arg as arg_type, keyword as keyword_type
from _ast import alias as alias_type, comprehension, withitem
try:
# python3.5 support
from _ast import AsyncFor, AsyncFunctionDef, AsyncWith, Await
except ImportError:
AsyncFor= AsyncFunctionDef= AsyncWith= Await= object()
def pprint_body (body, level):
for statement in body:
yield ' '*level
for i in pprint_inner (statement, level): yield i
yield '\n'
def pprint_seq (seq, sep=', '):
for index, elem in enumerate (seq):
if type (elem)==str:
yield elem
else:
for i in pprint_inner (elem): yield i
if index<len (seq)-1:
if type (sep)==str:
yield sep
else:
for i in pprint_inner (sep): yield i
def pprint_orelse (orelse, level):
if len (orelse)>0:
yield ' '*level+'else:\n'
for i in pprint_body (orelse, level+1): yield i
def pprint_args (args, defaults):
# TODO: anotations
# args=[arg(arg='a', annotation=None), arg(arg='b', annotation=None)]
# defaults=[Num(n=1)]
d_index= len (args)-len (defaults)
for index, arg in enumerate (args):
yield arg.arg
if index>=d_index:
yield '='
for i in pprint_inner (defaults[index-d_index]): yield i
if index<len (args)-1:
yield ', '
def pprint (node, level=0):
return ''.join (pprint_inner (node, level))
def pprint_inner (node, level=0):
t= type (node)
if t==Add:
yield '+'
elif t==And:
yield ' and '
elif t==Assert:
# Assert(test=..., msg=None)
yield 'assert '
for i in pprint_inner (node.test): yield i
# TODO: msg
elif t==Assign:
# Assign(targets=[Name(id='c', ctx=Store())],
# value=...)
for i in pprint_inner_seq (node.targets): yield i
yield '= '
for i in pprint_inner (node.value): yield i
elif t==AsyncFor:
yield 'async '
# For(target=..., iter=..., body=[...], orelse=[...])
node= For (target=node.target, iter=node.iter, body=node.body, orelse=node.orelse)
for i in pprint_inner (node): yield i
elif t==AsyncFunctionDef:
yield 'async '
# FunctionDef(name='foo', args=arguments(...), body=[ ... ], decorator_list=[], returns=None)
node= FunctionDef (name=node.name, args=node.args, body=node.body, decorator_list=node.decorator_list,
returns=node.returns)
for i in pprint_inner (node): yield i
elif t==AsyncWith:
yield 'async '
# With(items=[...], body=[...])
node= With (items=node.items, body=node.body)
for i in pprint_inner (node): yield i
elif t==Attribute:
# Attribute(value=Name(id='node', ctx=Load()), attr='body', ctx=Load())
for i in pprint_inner (node.value): yield i
yield '.'
yield node.attr
elif t==AugAssign:
# AugAssign(target=Name(id='ans', ctx=Store()), op=Add(), value=Name(id='a', ctx=Load()))
for i in pprint_inner (node.target): yield i
for i in pprint_inner (node.op): yield i
yield '= '
for i in pprint_inner (node.value): yield i
elif t==Await:
# value=Await(value=...)
yield 'await '
for i in pprint_inner (node.value): yield i
elif t==BinOp:
# BUG:
# m= ast.parse ('5*(3+4)')
# ayrton.ast_pprinter.pprint (m)
# 5*3+4
for i in pprint_inner (node.left): yield i
for i in pprint_inner (node.op): yield i
for i in pprint_inner (node.right): yield i
elif t==BitAnd:
yield ' & '
elif t==BitOr:
yield '|'
elif t==BitXor:
yield '^'
elif t==BoolOp:
pprint_seq (node.values, node.op)
elif t==Break:
yield 'break'
elif t==Bytes:
yield repr (node.s)
elif t==Call:
# Call(func=Name(id='foo', ctx=Load()), args=[], keywords=[], starargs=None, kwargs=None)
# TODO: annotations
for i in pprint_inner (node.func): yield i
yield ' ('
for i in pprint_seq (node.args): yield i
if len (node.args)>0 and (len (node.keywords)>0 or
node.starargs is not None or
node.kwargs is not None):
yield ', '
for i in pprint_seq (node.keywords): yield i
if ((len (node.args)>0 or len (node.keywords)>0) and
(node.starargs is not None or node.kwargs is not None)):
yield ', '
if node.starargs is not None:
yield '*'
for i in pprint_inner (node.starargs): yield i
if ((len (node.args)>0 or
len (node.keywords)>0 or
(node.starargs is not None) and node.kwargs is not None)):
yield ', '
if node.kwargs is not None:
yield '**'
for i in pprint_inner (node.kwargs): yield i
yield ')'
elif t==ClassDef:
# ClassDef(name='ToExpand', bases=[Name(id='object', ctx=Load())],
# keywords=[], starargs=None, kwargs=None, body=[...]
yield 'class '
yield node.name
# TODO: more
if len (node.bases)>0:
yield ' ('
for i in pprint_seq (node.bases): yield i
yield ')'
yield ':'
for i in pprint_body (node.body, level+1): yield i
elif t==Compare:
# Compare(left=Name(id='t', ctx=Load()), ops=[Eq()], comparators=[Name(id='Module', ctx=Load())])
# TODO: do properly
for i in pprint_inner (node.left): yield i
for op in node.ops:
for i in pprint_inner (op): yield i
for comparator in node.comparators:
for i in pprint_inner (comparator): yield i
elif t==Continue:
yield 'continue'
elif t==Delete:
yield 'delete '
for i in pprint_seq (node.targets): yield i
elif t==Dict:
yield '{ '
for k, v in zip (node.keys, node.values):
for i in pprint_inner (k): yield i
yield '='
for i in pprint_inner (v): yield i
yield ', '
yield ' }'
elif t==DictComp:
# DictComp(key=Name(id='v', ctx=Load()), value=Name(id='k', ctx=Load()), generators=[comprehension(target=Tuple(elts=[Name(id='k', ctx=Store()), Name(id='v', ctx=Store())], ctx=Store()), iter=Call(func=Name(id='enumerate', ctx=Load()), args=[Name(id='_b32alphabet', ctx=Load())], keywords=[], starargs=None, kwargs=None), ifs=[])])
yield '{ '
for i in pprint_inner (node.key): yield i
yield ': '
for i in pprint_inner (node.value): yield i
yield ' for '
# TODO: more
for i in pprint_inner (node.generators[0]): yield i
yield ' }'
elif t==Div:
yield '/'
elif t==Eq:
yield '=='
elif t==ExceptHandler:
# ExceptHandler(type=Name(id='KeyError', ctx=Load()), name=None, body=[Pass()])
yield ' '*level+'except '
if node.type is not None:
for i in pprint_inner (node.type): yield i
if node.name is not None:
yield ' as '
yield node.name
yield ':'
for i in pprint_body (node.body, level+1): yield i
elif t==Expr:
# Expr(value=...)
for i in pprint_inner (node.value): yield i
| JuezUN/INGInious | inginious/frontend/plugins/problem_bank/pages/api/filter_tasks_api.py | Python | agpl-3.0 | 2,183 | 0 |
import web
from inginious.frontend.plugins.utils.admin_api import AdminApi
from inginious.frontend.plugins.utils import get_mandatory_parameter
class FilterTasksApi(AdminApi):
def API_POST(self):
parameters = web.input()
task_query = get_mandatory_parameter(parameters, "task_query")
limit = int(get_mandatory_parameter(parameters, "limit"))
page = int(get_mandatory_parameter(parameters, "page"))
course_ids = set(bank["courseid"]
for bank in self.database.problem_banks.find())
for course_id, course in self.course_factory.get_all_courses().items():
if self.user_manager.has_admin_rights_on_course(course):
course_ids.add(course_id)
tasks = list(self.database.tasks_cache.aggregate([
{
"$match":
{
"$text": {
"$search": task_query,
"$diacriticSensitive": False,
"$caseSensitive": False
}
}
},
{
"$match":
{
"course_id": {"$in": list(course_ids)}
}
},
{
"$project": {
"course_id": 1,
"task_id": 1,
"task_name": 1,
"task_author": 1,
"task_context": 1,
"tags": 1,
"course_name": 1,
"_id": 0,
"score": {"$meta": "textScore"}
}
},
{
"$sort": {"score": -1}
}
]))
left = limit * (page - 1)
right = left + limit
total_pages = len(tasks) // limit
if len(tasks) % limit != 0 or total_pages == 0:
total_pages += 1
if right >= len(tasks):
tasks = tasks[left:]
else:
tasks = tasks[left:right]
response = {'total_pages': total_pages, "tasks": tasks}
return 200, response
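# --- Editor's illustrative sketch (not part of the plugin) ---
# The pagination arithmetic used in API_POST above, isolated as a hypothetical
# pure helper: total_pages rounds up, and pages are 1-indexed slices of `limit`
# items.
def _paginate(items, limit, page):
    left = limit * (page - 1)
    right = left + limit
    total_pages = len(items) // limit
    if len(items) % limit != 0 or total_pages == 0:
        total_pages += 1
    # Slicing past the end of a list is safe in Python, so this covers the
    # "last, partially filled page" case as well.
    return total_pages, items[left:right]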
| WilliamYi96/Machine-Learning | LeetCode/0154.py | Python | apache-2.0 | 701 | 0.002853 |
class Solution:
def findMin(self, nums):
mlength = len(nums)
if mlength == 0:
return -1
left = 0
right = mlength - 1
while left <= right:
mid = (left + right) >> 1
if mid == mlength - 1:
return nums[0]
if nums[mid] > nums[mid+1]:
return nums[mid+1]
else:
if nums[left] > nums[mid]:
right = mid - 1
elif nums[left] == nums[mid]:
left += 1
elif nums[left] < nums[mid]:
left = mid + 1
return nums[0]
# There are some problems with this file
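# --- Editor's illustrative check (not part of the original solution) ---
# Two cases verified against the implementation above; not exhaustive, and the
# note above about remaining problems still applies (arrays with many
# duplicates can still confuse the search).
def _example_find_min():
    s = Solution()
    assert s.findMin([3, 4, 5, 1, 2]) == 1
    assert s.findMin([2, 2, 2, 0, 1]) == 0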
| mganeva/mantid | scripts/Inelastic/Direct/dgreduce.py | Python | gpl-3.0 | 15,191 | 0.009413 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
""" Empty class temporary left for compatibility with previous interfaces """
from __future__ import (absolute_import, division, print_function)
import Direct.DirectEnergyConversion as DRC
from mantid.simpleapi import *
from mantid.kernel import funcinspect
from mantid import api
# the class which is responsible for data reduction
global Reducer
Reducer = None
# Statement used at debug time to pull changes in DirectEnergyConversion into Mantid
#DRC=reload(DRC)
def getReducer():
# needed on Linux to adhere to correct reference return
global Reducer
return Reducer
def setup(instname=None,reload=False):
"""
setup('mar')
setup instrument reduction parameters from instname_parameter.xml file
if the instrument has already been defined, does nothing unless
reload = True is specified
"""
global Reducer
if instname is None :
instname = config['default.instrument']
if not (Reducer is None or Reducer.prop_man is None):
old_name=Reducer.prop_man.instr_name
if old_name.upper()[0:3] == instname.upper()[0:3] :
if not reload :
return # has been already defined
Reducer = DRC.setup_reducer(instname,reload)
def arb_units(wb_run,sample_run,ei_guess,rebin,map_file='default',monovan_run=None,second_wb=None,**kwargs):
""" One step conversion of run into workspace containing information about energy transfer
Usage:
>>arb_units(wb_run,sample_run,ei_guess,rebin)
>>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)
>>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)
>>arb_units(wb_run Whitebeam run number or file name or workspace
sample_run sample run number or file name or workspace
ei_guess Ei guess
rebin Rebin parameters
mapfile Mapfile -- if absent/'default' the defaults from IDF are used
monovan_run If present will do the absolute units normalization. Number of additional parameters
specified in **kwargs is usually requested for this. If they are absent, program uses defaults,
but the defaults (e.g. sample_mass or sample_rmm ) are usually incorrect for a particular run.
arguments The dictionary containing additional keyword arguments.
The list of allowed additional arguments is defined in InstrName_Parameters.xml file, located in
MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions
with run numbers as input:
>>dgreduce.arb_units(1000,10001,80,[-10,.1,70]) # will run on default instrument
>>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
>>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)
A detector calibration file must be specified if running the reduction with workspaces as input
namely:
>>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file
,diag_remove_zero=False,norm_method='current')
type help() for the list of all available keywords. All available keywords are provided in InstName_Parameters.xml file
Some samples are:
norm_method =[monitor-1],[monitor-2][Current]
background =False , True
fixei =False , True
save_format =['.spe'],['.nxspe'],'none'
detector_van_range =[20,40] in mev
bkgd_range =[15000,19000] :integration range for background tests
second_white - If provided an additional set of tests is performed on this. (default = None)
hardmaskPlus - A file specifying those spectra that should be masked without testing (default=None)
tiny - Minimum threshold for acceptance (default = 1e-10)
large - Maximum threshold for acceptance (default = 1e10)
bkgd_range - A list of two numbers indicating the background range (default=instrument defaults)
diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
diag_van_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=0.0)
diag_remove_zero - If true then zeroes in the vanadium data will count as failed (default = True)
diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
diag_samp_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=3.3)
variation -The number of medians the ratio of the first/second white beam can deviate from
the average by (default=1.1)
bleed_test - If true then the CreatePSDBleedMask algorithm is run
bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
bleed_pixels - If the bleed test is on then this is the number of pixels ignored within the
bleed test diagnostic
print_results - If True then the results are printed to the screen
diag_remove_zero =True, False (default):Diag zero counts in background range
bleed=True , turn bleed correction on and off on by default for Merlin and LET
sum =True,False(default) , sum multiple files
det_cal_file= a valid detector block file and path or a raw file. Setting this
will use the detector calibraion from the specified file NOT the
input raw file
mask_run = RunNumber to use for diag instead of the input run number
one2one =True, False :Reduction will not use a mapping file
hardmaskPlus=Filename :load a hardmarkfile and apply together with diag mask
hardmaskOnly=Filename :load a hardmask and use as only mask
"""
global Reducer
if Reducer is None or Reducer.instrument is None:
raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
# --------------------------------------------------------------------------------------------------------
# Deal with mandatory parameters for this and may be some top level procedures
# --------------------------------------------------------------------------------------------------------
if sample_run:
Reducer.sample_run = sample_run
sample_run = None
try:
n,r=funcinspect.lhs_info('both')
wksp_out=r[0]
except:
wksp_out = "reduced_ws"
#
res = Reducer.convert_to_energy(wb_run,sample_run,ei_guess,rebin,map_file,monovan_run,second_wb,**kwargs)
#
results_name = res.name()
if results_name != wksp_out:
RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)
return res
def runs_are_equal(ws1,ws2):
"""Compare two run numbers, provided either as run numbers,
or as workspaces or as ws names"""
if ws1 == ws2:
return True
#-----------------------------------------------
def get_run_num(name_or_ws):
err = None
try:
| obi-two/Rebelion | data/scripts/templates/object/draft_schematic/community_crafting/component/shared_reinforced_wall_module.py | Python | mit | 480 | 0.045833 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/community_crafting/component/shared_reinforced_wall_module.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| mjbradburn/masters_project | node_modules/neo4j-driver/neokit/neorun.py | Python | apache-2.0 | 6,800 | 0.004118 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: neorun.py <cmd=arg>
--start=path/to/neo4j/home <cmd> [arg]
: start the neo4j server in the folder specified by the path
-v version : download the version provided if no neo4j detected
-n neo4j-version: download this specific neo4j enterprise nightly version from teamcity with basic access auth
-l download-url : download the neo4j provided by this url if no neo4j found
-p new-password : change the default password to this new password
--stop=path/to/neo4j/home : stop a neo4j server
-h : show this help message
Example: neorun.py -h
neorun.py --start=path/to/neo4j/home -v 3.0.1 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.0 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.1
neorun.py --stop=path/to/neo4j/home
"""
import getopt
from sys import argv, stdout, exit
from neoget import neo4j_default_archive, neo4j_archive, download
from neoctl import neo4j_start, neo4j_stop, neo4j_update_default_password
from os import path, rename, getenv
import socket
from time import time, sleep, strftime
KNOWN_HOST = path.join(path.expanduser("~"), ".neo4j", "known_hosts")
NEORUN_START_ARGS_NAME = "NEORUN_START_ARGS"
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
ServerStatus = Enum(["STARTED", "STOPPED" ])
def main():
if len(argv) <= 1:
print_help()
exit(2)
try:
opts, args = getopt.getopt(argv[1:], "hv:n:l:p:", ["start=", "stop="])
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
exit_code = 0
for opt, arg in opts:
if opt == '-h':
print_help()
exit(2)
if opt == "--start":
neo4j_home = path.abspath(arg)
if neo4j_status() == ServerStatus.STARTED:
stdout.write("Failed to start neo4j as a neo4j server is already running on this machine.\n")
exit(2)
# get the opts from env
env = getenv(NEORUN_START_ARGS_NAME)
if env:
stdout.write("WARNING: using env var `NEORUN_START_ARGS=%s`\n" % env)
try:
start_opts, start_args = getopt.getopt(env.split(), "v:n:l:p:")
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
start_opts = opts
# parse the opts under --start
archive_url, archive_name, require_basic_auth = neo4j_default_archive()
password = ''
for start_opt, start_arg in start_opts:
if start_opt == "-p":
password = start_arg
elif start_opt in ['-v', '-n', '-l']:
archive_url, archive_name, require_basic_auth = neo4j_archive(start_opt, start_arg)
exit_code = handle_start(archive_url, archive_name, neo4j_home, require_basic_auth)
if exit_code == 0 and password:
exit_code = neo4j_update_default_password("localhost", 7474, new_password=password) or 0
elif opt == "--stop":
if neo4j_status() == ServerStatus.STOPPED:
stdout.write("Failed to stop server as no neo4j server is running on this machine.\n")
exit(2)
exit_code = neo4j_stop(neo4j_home=arg) or test_neo4j_status(ServerStatus.STOPPED) or 0
if exit_code != 0:
break
exit(exit_code)
def handle_start(archive_url, archive_name, neo4j_home, require_basic_auth):
if not path.exists(neo4j_home):
folder_name=download(archive_url, archive_name, path.dirname(neo4j_home), require_basic_auth)
if not path.exists(neo4j_home):
# the untarred name is different from what the user gives
rename(path.join(path.dirname(neo4j_home), folder_name), neo4j_home)
if path.exists(KNOWN_HOST):
known_host_backup_name = KNOWN_HOST + strftime("%Y%m%d-%H%M%S") + ".backup"
stdout.write("Found an existing known_host file, renaming it to %s.\n" % (known_host_backup_name))
rename(KNOWN_HOST, known_host_backup_name)
exit_code = neo4j_start(neo4j_home) or 0
if exit_code == 0:
exit_code = test_neo4j_status()
return exit_code
# Test if the neo4j server is started (status = STARTED)
# or if the neo4j server is stopped (status = STOPPED) within 4 mins.
# Return 0 if the test succeeds, otherwise 1
def test_neo4j_status(status = ServerStatus.STARTED):
success = False
start_time = time()
timeout = 60 * 4 # in seconds
count = 0
while not success:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
actual_status = s.connect_ex(("localhost", 7474))
if status == ServerStatus.STARTED:
success = True if actual_status == 0 else False
else:
success = True if actual_status != 0 else False
s.close()
current_time = time()
if current_time - start_time > timeout:
# failed to connect to server within timeout
stdout.write("Failed to start server in 4 mins\n")
return 1
count += 1
if count % 10 == 0:
stdout.write(".") # print .... to indicate working on it
sleep(0.1) # sleep for 100ms
# server is started
stdout.write("\n")
return 0
def neo4j_status():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = ServerStatus.STARTED if s.connect_ex(("localhost", 7474)) == 0 else ServerStatus.STOPPED
s.close()
return server_status
def print_help():
print(__doc__)
if __name__ == "__main__":
main()
| UdK-VPT/Open_eQuarter | mole3/stat_corr/window_wall_ratio_east_SDH_by_building_age_correlation.py | Python | gpl-2.0 | 520 | 0.040385 |
# OeQ autogenerated correlation for 'Window/Wall Ratio East in Correlation to the Building Age'
import math
import numpy as np
from . import oeqCorrelation as oeq
def get(*xin):
# OeQ autogenerated correlation for 'Window to Wall Ratio in Eastern Direction'
A_WIN_E_BY_AW= oeq.correlation(
const= -6820.10365041,
a= 14.4414621854,
b= -0.0114555878046,
c= 4.03451500345e-06,
d= -5.32281636263e-10,
mode= "lin")
return dict(A_WIN_E_BY_AW=A_WIN_E_BY_AW.lookup(*xin))
| xalt/xalt | contrib/upgradeDB_From0.7.1.py | Python | lgpl-2.1 | 5,640 | 0.009929 |
#!/usr/bin/env python
# -*- python -*-
#
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
import os, sys, re, MySQLdb
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.realpath(os.path.join(dirNm, "../libexec")))
from XALTdb import XALTdb
from xalt_util import dbConfigFn
import argparse
class CmdLineOptions(object):
""" Command line Options class """
def __init__(self):
""" Empty Ctor """
pass
def execute(self):
""" Specify command line arguments and parse the command line"""
parser = argparse.ArgumentParser()
parser.add_argument("--dbname", dest='dbname', action="store", default = "xalt", help="xalt")
args = parser.parse_args()
return args
def main():
"""
This program upgrades the database used by XALT to the current version
from the version specified in the script name.
"""
args = CmdLineOptions().execute()
configFn = dbConfigFn(args.dbname)
if (not os.path.isfile(configFn)):
dirNm, exe = os.path.split(sys.argv[0])
fn = os.path.join(dirNm, configFn)
if (os.path.isfile(fn)):
configFn = fn
else:
configFn = os.path.abspath(os.path.join(dirNm, "../site", configFn))
xalt = XALTdb(configFn)
db = xalt.db()
try:
conn = xalt.connect()
cursor = conn.cursor()
# If MySQL version < 4.1, comment out the line below
cursor.execute("SET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\"")
cursor.execute("USE "+xalt.db())
idx = 1
print("start")
###########################################################################
# NOTE: New DB schema createDB.py uses unsigned int for some columns, but #
# modifying a signed to unsigned is problematic for columns already #
# referenced as FOREIGN KEYS. Therefor this script does not update #
# those columns. #
###########################################################################
# 1
cursor.execute("""
ALTER TABLE `xalt_link`
MODIFY COLUMN `link_program` varchar(64) NOT NULL,
ADD COLUMN `link_path` varchar(1024) NOT NULL AFTER `link_program`,
ADD COLUMN `link_module_name` varchar(64) AFTER `link_path`,
ADD COLUMN `link_line` blob AFTER `link_module_name`,
ADD INDEX `index_date` (`date`)
""")
print("(%d) upgraded xalt_link table" % idx); idx += 1
# 4
cursor.execute("""
ALTER TABLE `xalt_run`
MODIFY COLUMN `job_id` char(64) NOT NULL,
MODIFY COLUMN `num_cores` int(11) unsigned NOT NULL,
MODIFY COLUMN `job_num_cores` int(11) unsigned NOT NULL,
MODIFY COLUMN `num_nodes` int(11) unsigned NOT NULL,
MODIFY COLUMN `num_threads` smallint(6) unsigned NOT NULL,
MODIFY COLUMN `exit_code` smallint(6) NOT NULL,
ADD COLUMN `cmdline` blob NOT NULL AFTER `cwd`,
ADD INDEX `index_date` (`date` )
""")
print("(%d) upgraded xalt_run table" % idx); idx += 1
# 6
cursor.execute("""
ALTER TABLE `xalt_env_name`
ADD INDEX `a_env_name` (`env_name`)
""")
print("(%d) upgraded xalt_env_name table" % idx)
idx += 1
# 7
cursor.execute("""
ALTER TABLE `join_run_env`
MODIFY COLUMN `join_id` bigint(20) unsigned NOT NULL auto_increment
""")
print("(%d) upgraded join_run_env table" % idx)
idx += 1
#11
cursor.execute("""
ALTER TABLE `join_link_function`
MODIFY COLUMN `join_id` int(11) unsigned NOT NULL auto_increment
""")
print("(%d) upgraded xalt_function table" % idx)
idx += 1
# 12
cursor.execute("""
CREATE TABLE IF NOT EXISTS `xalt_total_env` (
`envT_id` bigint(20) unsigned NOT NULL auto_increment,
`run_id` int(11) NOT NULL,
`env_blob` blob NOT NULL,
PRIMARY KEY (`envT_id`),
FOREIGN KEY (`run_id`) REFERENCES `xalt_run`(`run_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci AUTO_INCREMENT=1
""")
print("(%d) created xalt_env table" % idx); idx += 1
cursor.close()
except MySQLdb.Error as e:
print ("Error %d: %s" % (e.args[0], e.args[1]))
sys.exit (1)
if ( __name__ == '__main__'): main()
| radioprotector/flask-admin | examples/methodview/app.py | Python | bsd-3-clause | 1,160 | 0 |
from flask import Flask, redirect, request
import flask_admin as admin
from flask.views import MethodView
class ViewWithMethodViews(admin.BaseView):
@admin.expose('/')
def index(self):
return self.render('methodtest.html')
@admin.expose_plugview('/_api/1')
class API_v1(MethodView):
def get(self, cls):
return cls.render('test.html', request=request, name="API_v1")
def post(self, cls):
return cls.render('test.html', request=request, name="API_v1")
@admin.expose_plugview('/_api/2')
class API_v2(MethodView):
def get(self, cls):
return cls.render('test.html', request=request, name="API_v2")
def post(self, cls):
return cls.render('test.html', request=request, name="API_v2")
# Create flask app
app = Flask(__name__, template_folder='templates')
# Flask views
@app.route('/')
def index():
return redirect('/admin')
if __name__ == '__main__':
# Create admin interface
admin = admin.Admin(name="Example: MethodView")
admin.add_view(ViewWithMethodViews())
admin.init_app(app)
# Start app
app.run(debug=True)
| sajuptpm/neutron-ipam | neutron/plugins/ml2/driver_api.py | Python | apache-2.0 | 21,394 | 0 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod, abstractproperty
import six
# The following keys are used in the segment dictionaries passed via
# the driver API. These are defined separately from similar keys in
# neutron.extensions.providernet so that drivers don't need to change
# if/when providernet moves to the core API.
#
ID = 'id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
@six.add_metaclass(ABCMeta)
class TypeDriver(object):
"""Define stable abstract interface for ML2 type drivers.
ML2 type drivers each support a specific network_type for provider
and/or tenant network segments. Type drivers must implement this
abstract interface, which defines the API by which the plugin uses
the driver to manage the persistent type-specific resource
allocation state associated with network segments of that type.
Network segments are represented by segment dictionaries using the
NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
above, corresponding to the provider attributes. Future revisions
of the TypeDriver API may add additional segment dictionary
keys. Attributes not applicable for a particular network_type may
either be excluded or stored as None.
"""
@abstractmethod
def get_type(self):
"""Get driver's network type.
:returns network_type value handled by this driver
"""
pass
@abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
@abstractmethod
def validate_provider_segment(self, segment):
"""Validate attributes of a provider network segment.
:param segment: segment dictionary using keys defined above
:raises: neutron.common.exceptions.InvalidInput if invalid
Called outside transaction context to validate the provider
attributes for a provider network segment. Raise InvalidInput
if:
- any required attribute is missing
- any prohibited or unrecognized attribute is present
- any attribute value is not valid
The network_type attribute is present in segment, but
need not be validated.
"""
pass
@abstractmethod
def reserve_provider_segment(self, session, segment):
"""Reserve resource associated with a provider network segment.
:param session: database session
:param segment: segment dictionary using keys defined above
Called inside transaction context on session to reserve the
type-specific resource for a provider network segment. The
segment dictionary passed in was returned by a previous
validate_provider_segment() call.
"""
pass
@abstractmethod
def allocate_tenant_segment(self, session):
"""Allocate resource for a new tenant network segment.
:param session: database session
:returns: segment dictionary using keys defined above
Called inside transaction context on session to allocate a new
tenant network, typically from a type-specific resource
pool. If successful, return a segment dictionary describing
the segment. If tenant network segment cannot be allocated
(i.e. tenant networks not supported or resource pool is
exhausted), return None.
"""
pass
@abstractmethod
def release_segment(self, session, segment):
"""Release network segment.
:param session: database session
:param segment: segment dictionary using keys defined above
Called inside transaction context on session to release a
tenant or provider network's type-specific resource. Runtime
errors are not expected, but raising an exception will result
in rollback of the transaction.
"""
pass
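# --- Editor's illustrative sketch (not part of ml2) ---
# A minimal, hypothetical TypeDriver subclass showing the shape of the abstract
# interface described above; the 'flat-demo' type, the ValueError, and the lack
# of real allocation state are assumptions for illustration only.
class _ExampleFlatTypeDriver(TypeDriver):
    def get_type(self):
        return 'flat-demo'

    def initialize(self):
        pass

    def validate_provider_segment(self, segment):
        if segment.get(PHYSICAL_NETWORK) is None:
            raise ValueError("physical_network is required for flat-demo segments")

    def reserve_provider_segment(self, session, segment):
        pass  # nothing to reserve in this demo driver

    def allocate_tenant_segment(self, session):
        return None  # tenant networks not supported by this demo driver

    def release_segment(self, session, segment):
        pass  # nothing to release in this demo driver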
@six.add_metaclass(ABCMeta)
class NetworkContext(object):
"""Context passed to MechanismDrivers for changes to network resources.
A NetworkContext instance wraps a network resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the network.
Return the current state of the network, as defined by
NeutronPluginBaseV2.create_network and all extensions in the
ml2 plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the network.
Return the original state of the network, prior to a call to
update_network. Method is only valid within calls to
update_network_precommit and update_network_postcommit.
"""
pass
@abstractproperty
def network_segments(self):
"""Return the segments associated with this network resource."""
pass
@six.add_metaclass(ABCMeta)
class SubnetContext(object):
"""Context passed to MechanismDrivers for changes to subnet resources.
A SubnetContext instance wraps a subnet resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the subnet.
Return the current state of the subnet, as defined by
NeutronPluginBaseV2.create_subnet and all extensions in the
ml2 plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the subnet.
Return the original state of the subnet, prior to a call to
update_subnet. Method is only valid within calls to
update_subnet_precommit and update_subnet_postcommit.
"""
pass
@six.add_metaclass(ABCMeta)
class PortContext(object):
"""Context passed to MechanismDrivers for changes to port resources.
A PortContext instance wraps a port resource. It provides helper
methods for accessing other relevant information. Results from
expensive operations are cached so that other MechanismDrivers can
freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the port.
Return the current state of the port, as defined by
NeutronPluginBaseV2.create_port and all extensions in the ml2
plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the port.
Return the original state of the port, prior to a call to
update_port. Method is only valid within calls to
update_port_precommit and update_port_postcommit.
"""
pass
@abstractproperty
def network(self):
"""Return the NetworkContext associated with this port."""
pass
@abstractproperty
def bound_segment(self):
"""Return the currently bound segment dictionary."""
pass
@abstractproperty
def original_bound_segment(self):
"""Return the original bound segment dic
| kawamon/hue | desktop/core/ext-py/pyasn1-0.4.6/pyasn1/codec/ber/decoder.py | Python | apache-2.0 | 58,050 | 0.001602 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
from pyasn1 import debug
from pyasn1 import error
from pyasn1.codec.ber import eoo
from pyasn1.compat.integer import from_bytes
from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null
from pyasn1.type import base
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import tagmap
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['decode']
LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
noValue = base.noValue
class AbstractDecoder(object):
protoComponent = None
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
class AbstractSimpleDecoder(AbstractDecoder):
@staticmethod
def substrateCollector(asn1Object, substrate, length):
return substrate[:length], substrate[length:]
def _createComponent(self, asn1Spec, tagSet, value, **options):
if options.get('native'):
return value
elif asn1Spec is None:
return self.protoComponent.clone(value, tagSet=tagSet)
elif value is noValue:
return asn1Spec
else:
return asn1Spec.clone(value)
class ExplicitTagDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any('')
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, '', **options),
substrate, length
)
head, tail = substrate[:length], substrate[length:]
value, _ = decodeFun(head, asn1Spec, tagSet, length, **options)
if LOG:
LOG('explicit tag container carries %d octets of trailing payload '
'(will be lost!): %s' % (len(_), debug.hexdump(_)))
return value, tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, '', **options),
substrate, length
)
value, substrate = decodeFun(substrate, asn1Spec, tagSet, length, **options)
eooMarker, substrate = decodeFun(substrate, allowEoo=True, **options)
if eooMarker is eoo.endOfOctets:
return value, substrate
else:
raise error.PyAsn1Error('Missing end-of-octets terminator')
explicitTagDecoder = ExplicitTagDecoder()
class IntegerDecoder(AbstractSimpleDecoder):
protoComponent = univ.Integer(0)
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatSimple:
raise error.PyAsn1Error('Simple tag format expected')
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0, **options), tail
value = from_bytes(head, signed=True)
return self._createComponent(asn1Spec, tagSet, value, **options), tail
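# --- Editor's illustrative sketch (not part of this module) ---
# How the public BER decode() entry point (built from per-type decoders such as
# IntegerDecoder above) is typically used; wrapped in a helper so it does not
# run on import.
def _example_decode_integer():
    from pyasn1.codec.ber import decoder as ber_decoder
    value, rest = ber_decoder.decode(bytes((0x02, 0x01, 0x0c)))  # INTEGER 12
    assert int(value) == 12 and not rest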
class BooleanDecoder(IntegerDecoder):
protoComponent = univ.Boolean(0)
def _createComponent(self, asn1Spec, tagSet, value, **options):
return IntegerDecoder._createComponent(
self, asn1Spec, tagSet, value and 1 or 0, **options)
class BitStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.BitString(())
supportConstructedForm = True
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
head, tail = substrate[:length], substrate[length:]
if substrateFun:
return substrateFun(self._createComponent(
asn1Spec, tagSet, noValue, **options), substrate, length)
if not head:
raise error.PyAsn1Error('Empty BIT STRING substrate')
if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
trailingBits = oct2int(head[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
value = self.protoComponent.fromOctetString(
head[1:], internalFormat=True, padding=trailingBits)
return self._createComponent(asn1Spec, tagSet, value, **options), tail
if not self.supportConstructedForm:
raise error.PyAsn1Error('Constructed encoding form prohibited '
'at %s' % self.__class__.__name__)
if LOG:
LOG('assembling constructed serialization')
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
while head:
component, head = decodeFun(head, self.protoComponent,
substrateFun=substrateFun, **options)
trailingBits = oct2int(component[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
bitString = self.protoComponent.fromOctetString(
component[1:], internalFormat=True,
prepend=bitString, padding=trailingBits
)
return self._createComponent(asn1Spec, tagSet, bitString, **options), tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length)
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
while substrate:
component, substrate = decodeFun(substrate, self.protoComponent,
substrateFun=substrateFun,
allowEoo=True, **options)
if component is eoo.endOfOctets:
break
trailingBits = oct2int(component[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
bitString = self.protoComponent.fromOctetString(
component[1:], internalFormat=True,
prepend=bitString, padding=trailingBits
)
else:
raise error.SubstrateUnderrunError('No EOO seen before substrate ends')
return self._createComponent(asn1Spec, tagSet, bitString, **options), substrate
class OctetStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.OctetString('')
supportConstructedForm = True
def va
| waveform80/presentations | concurrency/demo3.py | Python | cc0-1.0 | 390 | 0 |
import zmq
ctx = zmq.Context.instance()
server = ctx.socket(zmq.PUSH)
server.bind('inproc://foo')
clients = [ctx.socket(zmq.PULL) for i in range(10)]
poller = zmq.Poller()
for client in clients:
client.connect('inproc://foo')
poller.register(client, zmq.POLLIN)
for client in clients:
server.send(b'DATA')
for sock, flags in poller.poll(0):
print(sock, repr(sock.recv()))
| diszgaurav/projecture | projecture/projects/python/myproject/myproject/myproject.py | Python | mit | 353 | 0.005666 |
"""myproject
"""
__author__ = 'myproject:author_name'
__email__ = 'myproject:author_email'
#----------------------------------------------------------------------
def hello_world(extend_hello=False):
"""prints hello world
:returns: None
:rtype: None
"""
print 'Hello World!{}'.format(' Beautiful World!' if extend_hello else '')
| wlan0/cattle | tests/integration-v1/cattletest/core/test_compose.py | Python | apache-2.0 | 5,735 | 0 |
from common import * # NOQA
SERVICE = 'com.docker.compose.service'
PROJECT = 'com.docker.compose.project'
NUMBER = 'com.docker.compose.container-number'
def test_container_create_count(client, context):
project, service, c = _create_service(client, context)
assert c.labels['io.rancher.service.deployment.unit'] is not None
assert c.labels['io.rancher.service.launch.config'] == \
'io.rancher.service.primary.launch.config'
assert c.labels['io.rancher.stack_service.name'] == project + '/' + service
assert c.labels['io.rancher.stack.name'] == project
s = find_one(c.services)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.name == service
assert s.type == 'composeService'
assert s.kind == 'composeService'
assert s.state == 'active'
assert s.state == 'active'
selector = 'com.docker.compose.project={}, ' \
'com.docker.compose.service={}'.format(project, service)
assert s.selectorContainer == selector
assert env.name == project
assert env.state == 'active'
assert env.type == 'composeProject'
assert env.kind == 'composeProject'
assert set(env.actions.keys()) == set(['remove'])
assert set(s.actions.keys()) == set(['remove'])
def _create_service(client, context, project=None, service=None):
if project is None:
project = 'p-' + random_str()
if service is None:
service = 's-' + random_str()
c = context.create_container(name='{}_{}_1'.format(service, project),
labels={
SERVICE: service,
PROJECT: project,
NUMBER: '1',
}, networkMode='none')
assert c.state == 'running'
return project, service, c
def test_container_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
c = client.delete(c)
c = client.wait_success(c)
assert c.state == 'removed'
wait_for(lambda: client.reload(map).state != 'active')
map = client.wait_success(map)
assert map.state == 'removed'
s = client.wait_success(s)
env = client.wait_success(env)
assert s.state == 'removed'
assert env.state == 'removed'
def test_container_two_remove(client, context):
project, service, c = _create_service(client, context)
project, service, c = _create_service(client, context, project, service)
s = find_one(c.services)
maps = s.serviceExposeMaps()
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert len(maps) == 2
c = client.delete(c)
c = client.wait_success(c)
assert c.state == 'removed'
wait_for(lambda: len([x for x in s.serviceExposeMaps()
if x.removed is None]) == 1)
s = client.wait_success(s)
env = client.wait_success(env)
assert s.state == 'active'
assert env.state == 'active'
def test_service_two_remove(client, context):
project, service, c = _create_service(client, context)
project, _, _ = _create_service(client, context, project)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert len(env.services()) == 2
assert s.state == 'active'
s = client.delete(s)
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
env = client.wait_success(env)
assert env.state == 'active'
def test_service_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.state == 'active'
s = client.delete(s)
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
env = client.wait_success(env)
assert env.state == 'removed'
def test_env_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.state == 'active'
env = client.delete(env)
env = client.wait_success(env)
assert env.state == 'removed'
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
def test_compose_project_create_required(client, context):
template = 'nginx:\n image: nginx'
assert_required_fields(client.create_compose_project, name=random_str(),
templates={'x': template})
def test_compose_project_create(client, context):
name = random_str()
template = 'nginx:' \
' image: nginx'
project = client.create_compose_project(name=name,
templates={'x': template})
project = client.wait_success(project)
assert project.name == name
assert project.state == 'active'
assert project.type == 'composeProject'
assert project.kind == 'composeProject'
assert project.templates == {'x': template}
| zielmicha/satori | satori.events/satori/events/__init__.py | Python | mit | 1,410 | 0.002128 |
# vim:ts=4:sts=4:sw=4:expandtab
"""Package. Manages event queues.
Writing event-driven code
-------------------------
Event-driven procedures should be written as python coroutines (extended generators).
To call the event API, yield an instance of the appropriate command. You can use
sub-procedures - just yield the appropriate generator (a minor nuisance is that you
cannot have such sub-procedure return a value).
Example
-------
.. code:: python
from satori.events import *
def countdown():
queue = QueueId('any string will do')
mapping = yield Map({}, queue)
yield Attach(queue)
yield Send(Event(left=10))
while True:
q, event = yield Receive()
if event.left == 0:
break
event.left -= 1
yield Send(event)
yield Unmap(mapping)
yield Detach(queue)
"""
from .api import Event, MappingId, QueueId
from .protocol import Attach, Detach
from .protocol import Map, Unmap
from .protocol import Send, Receive
from .protocol import KeepAlive, Disconnect, ProtocolError
from .api import Manager
from .master import Master
from .slave import Slave
from .client2 import Client2
from .slave2 import Slave2
__all__ = (
'Event', 'MappingId', 'QueueId',
'Attach', 'Detach',
'Map', 'Unmap',
'Send', 'Receive',
'KeepAlive', 'ProtocolError',
'Master', 'Slave',
)
| uclouvain/OSIS-Louvain | base/migrations/0277_auto_20180601_1458.py | Python | agpl-3.0 | 682 | 0.001466 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-01 12:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0276_professional_integration'),
]
operations = [
migrations.AlterField(
model_name='learningunit',
name='end_year',
field=models.IntegerField(blank=True, null=True, verbose_name='end_year_title'),
),
migrations.AlterField(
model_name='learningunit',
name='start_year',
field=models.IntegerField(verbose_name='start_year'),
),
]
| Duke-GCB/DukeDSHandoverService | download_service/views.py | Python | mit | 899 | 0.003337 |
from django.http import StreamingHttpResponse, HttpResponseServerError
from download_service.zipbuilder import DDSZipBuilder, NotFoundException, NotSupportedException
from django.contrib.auth.decorators import login_required
from download_service.utils import make_client
from django.http import Http404
@login_required
def dds_project_zip(request, project_id, filename):
client = make_client(request.user)
builder = DDSZipBuilder(project_id, client)
try:
builder.raise_on_filename_mismatch(filename)
response = StreamingHttpResponse(builder.build_streaming_zipfile(), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
except NotFoundException as e:
raise Http404(str(e))
except NotSupportedException as e:
return HttpResponseServerError(content=str(e))
| tpltnt/SimpleCV | SimpleCV/examples/detection/MotionTracker.py | Python | bsd-3-clause | 1,540 | 0.012987 |
#!/usr/bin/python
'''
This SimpleCV example uses a technique called frame differencing to determine
if motion has occurred. You take an initial image, then another, and subtract
one from the other; what is left over is what has changed between those two images.
These are typically blobs on the images, so we do a blob search to count
the number of blobs, and if blobs exist then motion has occurred.
'''
from __future__ import print_function
import sys, time, socket
from SimpleCV import *
cam = Camera() #setup the camera
#settings for the project
min_size = 0.1*cam.getProperty("width")*cam.getProperty("height") #make the threshold adapatable for various camera sizes
thresh = 10 # frame diff threshold
show_message_for = 2 # the number of seconds to show the motion detected message
motion_timestamp = int(time.time())
message_text = "Motion detected"
draw_message = False
lastImg = cam.getImage()
lastImg.show()
while True:
newImg = cam.getImage()
trackImg = newImg - lastImg # diff the images
blobs = trackImg.findBlobs() #use adapative blob detection
now = int(time.time())
#If blobs are found then motion has occurred
if blobs:
motion_timestamp = now
draw_message = True
#See if the time has exceeded to display the message
if (now - motion_timestamp) > show_message_for:
draw_message = False
#Draw the message on the screen
if(draw_message):
newImg.drawText(message_text, 5,5)
print(message_text)
lastImg = newImg # update the image
newImg.show()
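# A minimal, library-agnostic sketch of the same frame-differencing idea using
# only numpy (hypothetical frames; the SimpleCV loop above does the equivalent
# work with image subtraction and findBlobs):
#
#   import numpy as np
#   prev = np.zeros((480, 640), dtype=np.uint8)    # previous grayscale frame
#   curr = np.zeros((480, 640), dtype=np.uint8)    # current grayscale frame
#   diff = np.abs(curr.astype(np.int16) - prev.astype(np.int16))
#   changed = (diff > 10).sum()                    # pixels above the threshold
#   motion_detected = changed > 0.001 * diff.size  # enough pixels changed?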
|
jralls/gramps
|
gramps/plugins/drawreport/fanchart.py
|
Python
|
gpl-2.0
| 33,745
| 0.001245
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2014 Paul Franklin
# Copyright (C) 2012 Nicolas Adenis-Lamarre
# Copyright (C) 2012 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" fanchart report """
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from math import pi, cos, sin, log10, acos
def log2(val):
"""
Calculate the log base 2 of a value.
"""
return int(log10(val)/log10(2))
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (EnumeratedListOption, NumberOption,
PersonOption, BooleanOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.config import config
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.lib import EventType
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# private constants
#
#------------------------------------------------------------------------
FULL_CIRCLE = 0
HALF_CIRCLE = 1
QUAR_CIRCLE = 2
BACKGROUND_WHITE = 0
BACKGROUND_GEN = 1
RADIAL_UPRIGHT = 0
RADIAL_ROUNDABOUT = 1
# Minor offset, only useful for generation 11, to avoid a slight offset
# between the text and the polygon; this can be considered a bad hack.
WEDGE_TEXT_BARRE_OFFSET = 0.0016
#------------------------------------------------------------------------
#
# private functions
#
#------------------------------------------------------------------------
def draw_wedge(doc, style, centerx, centery, radius, start_angle,
end_angle, do_rendering, short_radius=0):
"""
Draw a wedge shape.
"""
while end_angle < start_angle:
end_angle += 360
path = []
degreestoradians = pi / 180.0
radiansdelta = degreestoradians / 2
sangle = start_angle * degreestoradians
eangle = end_angle * degreestoradians
while eangle < sangle:
eangle = eangle + 2 * pi
angle = sangle
if short_radius == 0:
if (end_angle - start_angle) != 360:
path.append((centerx, centery))
else:
origx = (centerx + cos(angle) * short_radius)
origy = (centery + sin(angle) * short_radius)
path.append((origx, origy))
while angle < eangle:
_x_ = centerx + cos(angle) * radius
_y_ = centery + sin(angle) * radius
path.append((_x_, _y_))
angle = angle + radiansdelta
_x_ = centerx + cos(eangle) * radius
_y_ = centery + sin(eangle) * radius
path.append((_x_, _y_))
if short_radius:
_x_ = centerx + cos(eangle) * short_radius
_y_ = centery + sin(eangle) * short_radius
path.append((_x_, _y_))
angle = eangle
while angle >= sangle:
_x_ = centerx + cos(angle) * short_radius
_y_ = centery + sin(angle) * short_radius
path.append((_x_, _y_))
angle -= radiansdelta
if do_rendering:
doc.draw_path(style, path)
delta = (eangle - sangle) / 2.0
rad = short_radius + (radius - short_radius) / 2.0
return ((centerx + cos(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad),
(centery + sin(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad))
#------------------------------------------------------------------------
#
# FanChart
#
#------------------------------------------------------------------------
class FanChart(Report):
def __init__(self, database, options, user):
"""
Create the FanChart object that produces the report.
The arguments are:
database - the Gramps database instance
options - instance of the Options class for this report
user - a gen.user.User instance
This report needs the following parameters (class variables)
that come in the options class.
maxgen - Maximum number of generations to include.
circle - Draw a full circle, half circle, or quarter circle.
background - Background color is generation dependent or white.
radial - Print radial texts roundabout or as upright as possible.
draw_empty - Draw the background when there is no information.
same_style - Use the same style for all generations.
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, self._locale)
self.database = CacheProxyDb(self.database)
self.max_generations = menu.get_option_by_name('maxgen').get_value()
self.circle = menu.get_option_by_name('circle').get_value()
self.background = menu.get_option_by_name('background').get_value()
self.radial = menu.get_option_by_name('radial').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.draw_empty = menu.get_option_by_name('draw_empty').get_value()
self.same_style = menu.get_option_by_name('same_style').get_value()
self.center_person = self.database.get_person_from_gramps_id(pid)
if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
self.graphic_style = []
self.text_style = []
for i in range(0, self.max_generations):
self.graphic_style.append('FC-Graphic' + '%02d' % i)
self.text_style.append('FC-Text' + '%02d' % i)
self.calendar = 0
self.height = 0
self.map = [None] * 2**self.max_generations
self.text = {}
def apply_filter(self, person_handle, index):
"""traverse the ancestors recursively until either the end
of a line is found, or until we reach the maximum number of
generations that we want to deal with"""
if (not person_handle) or (index >= 2**self.max_generations):
return
self.map[index-1] = person_handle
self.text[index-1] = self.get_info(person_handle, log2(index))
person = self.database.get_person_from_handle(person_handle)
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.database.get_family_from_handle(family_handle)
self.apply_filter(family.get_father_handle(),
|
Reat0ide/plugin.video.pelisalacarta
|
pelisalacarta/channels/itastreaming.py
|
Python
|
gpl-3.0
| 11,292
| 0.021891
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
#------------------------------------------------------------
import selenium
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
import urlparse,urllib2,urllib,re,xbmcplugin,xbmcgui,xbmcaddon,xbmc
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
import cookielib
import requests
import os.path
__channel__ = "itastreaming"
__category__ = "F"
__type__ = "generic"
__title__ = "itastreaming"
__language__ = "IT"
COOKIEFILE = "/Users/arturo/itacookie.lwp"
h = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:37.0) Gecko/20100101 Firefox/37.0'}
baseUrl = "http://itastreaming.co"
def createCookies():
if not os.path.isfile(COOKIEFILE):
print "File not exists"
#get cookies!
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:37.0) Gecko/20100101 Firefox/37.0")
browser = webdriver.PhantomJS(executable_path='/bin/phantomjs',desired_capabilities = dcap, service_log_path=os.path.devnull)
browser.get(baseUrl)
time.sleep(10)
a = browser.get_cookies()
print 'Got cloudflare cookies:\n'
browser.close()
b = cookielib.MozillaCookieJar()
for i in a:
# create the cf_session_cookie
ck = cookielib.Cookie(name=i['name'], value=i['value'], domain=i['domain'], path=i['path'], secure=i['secure'], rest=False, version=0,port=None,port_specified=False,domain_specified=False,domain_initial_dot=False,path_specified=True,expires=i['expiry'],discard=True,comment=None,comment_url=None,rfc2109=False)
b.set_cookie(ck)
# save into a file
print b
b.save(filename=COOKIEFILE, ignore_discard=True, ignore_expires=False)
else:
print "found it, do nothing!"
b = True
return b
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.itastreaming mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__ , action="movies", title="ultimi film inseriti..." , url="http://itastreaming.co" ))
itemlist.append( Item(channel=__channel__ , action="search", title="Cerca Film"))
itemlist.append( Item(channel=__channel__ , action="movies", title="animazione" , url="http://itastreaming.co/genere/animazione" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="avventura" , url="http://itastreaming.co/genere/avventura" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="azione" , url="http://itastreaming.co/genere/azione" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="biografico" , url="http://itastreaming.co/genere/biografico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="comico" , url="http://itastreaming.co/genere/comico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="commedia" , url="http://itastreaming.co/genere/commedia" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="documentario" , url="http://itastreaming.co/genere/documentario" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="drammatico" , url="http://itastreaming.co/genere/drammatico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="erotico" , url="http://itastreaming.co/genere/erotico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="fantascienza" , url="http://itastreaming.co/genere/fantascienza" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="fantasy" , url="http://itastreaming.co/genere/fantasy" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="gangstar" , url="http://itastreaming.co/genere/gangstar" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="giallo" , url="http://itastreaming.co/genere/giallo" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="guerra" , url="http://itastreaming.co/genere/guerra" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="horror" , url="http://itastreaming.co/genere/horror" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="musical" , url="http://itastreaming.co/genere/musical" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="romantico" , url="http://itastreaming.co/genere/romantico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="storico" , url="http://itastreaming.co/genere/storico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="thriller" , url="http://itastreaming.co/genere/thriller" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="western" , url="http://itastreaming.co/genere/western" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD" , url="http://itastreaming.co/qualita/hd" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="DVD-RIP" , url="http://itastreaming.co/qualita/dvdripac3" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="CAM" , url="http://itastreaming.co/qualita/cam" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD-MD" , url="http://itastreaming.co/qualita/hd-md" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD-TS" , url="http://itastreaming.co/qualita/hd-ts" ))
return itemlist
#searching for films
def search(item, text):
createCookies()
itemlist = []
text = text.replace(" ", "%20")
item.url = "http://itastr
|
eaming.co/?s=" + text
try:
biscotto = cookielib.MozillaCookieJar()
biscotto.load(COOKIEFILE)
data = requests.get(item.url, cookies=biscotto, headers=h)
data = data.text.encode('utf-8')
data = data.replace('–','-').replace('’',' ')
pattern = '<img class="imx" style="margin-top:0px;" src="?([^>"]+)"?.*?alt="?([^>"]+)"?.*?'
pattern += '<h3><a href="?([^>"]+)"?.*?</h3>'
matches = re.compile(pattern,re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
#thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
thumbnail = scrapthumb(title)
itemlist.append(Item(channel=__channel__, action="grabing", title=title, url=url, thumbnail=thumbnail, folder=True))
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# the "movies" action is used to extract the titles
def movies(item):
createCookies()
itemlist = []
biscotto = cookielib.MozillaCookieJar()
biscotto.load(COOKIEFILE)
data = requests.get(item.url, cookies=biscotto, headers=h)
data = data.text.encode('utf-8')
data = data.replace('–','-').replace('’',' ')
patron = '<div class="item">\s*'
patron += '<a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?'
patron += '<div class="img">\s*'
patron += '<img.*?src="([^>"]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
if not matches:
print "Coockies expired!, delete it"
os.remove(COOKIEFILE)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = scrapthumb(title)
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="grabing", title=title , url=url , thumbnail=thumbnail , plot=scrapedplot , folder=True) )
#next page
patternpage = '<a rel="nofollow" class="previouspostslink\'" href="(.*?)">Seguente \›</a>'
matches = re.compile(patternpage,re.DOTALL).findall(data)
#print matches
if not matches:
patternpa
|
kartikeys98/coala
|
coalib/bearlib/aspects/Spelling.py
|
Python
|
agpl-3.0
| 2,014
| 0
|
from coalib.bearlib.aspects import Root, Taste
@Root.subaspect
class Spelling:
"""
How words should be written.
"""
class docs:
example = """
'Tihs si surly som incoreclt speling.
`Coala` is always written with a lowercase `c`.
"""
example_language = 'reStructuredText'
importance_reason = """
Words should always be written as they are supposed to be;
standardisation facilitates communication.
"""
fix_suggestions = """
Use the correct spelling for the misspelled words.
"""
@Spelling.subaspect
class DictionarySpelling:
"""
Valid language's words spelling.
"""
class docs:
example = """
|
This is toatly wonrg.
"""
example_language = 'reStructuredText'
importance_reason = """
Good spelling facilitates communication and avoids confusion. By
following the same rules for spelling words, we can all understand
the text we read. Poor spelling distracts the reader and they lose
focus.
"""
fix_suggestions = """
You can use a spell-checker to fix this for you or just ensure
yourself that things are well written.
"""
@Spelling.subaspect
class OrgSpecificWordSpelling:
"""
Organisations like coala specified words' spelling.
"""
class docs:
example = """
`Coala` is always written with a lower case c, also at the beginning
of the sentence.
"""
example_language = 'reStructuredText'
importance_reason = """
There are words you want to be written as you want, like your
organisation's name.
"""
fix_suggestions = """
Simply make sure those words match with what is provided by the
organisation.
"""
specific_word = Taste[list](
'Represents the regex of the specific word to check.',
(('c[o|O][a|A][l|L][a|A]',), ), default=list())
|
smerritt/swift
|
doc/source/conf.py
|
Python
|
apache-2.0
| 8,171
| 0
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2010-2012 OpenStack Foundation.
#
# Swift documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import logging
import os
from swift import __version__
import sys
# NOTE(amotoki): Our current doc build job uses an older version of
# liberasurecode which comes from Ubuntu 16.04.
# pyeclib emits a warning message if liberasurecode <1.3.1 is used [1] and
# this causes the doc build failure if warning-is-error is enabled in Sphinx.
# As a workaround we suppress the warning message from pyeclib until we use
# a newer version of liberasurecode in our doc build job.
# [1] https://github.com/openstack/pyeclib/commit/d163972b
logging.getLogger('pyeclib').setLevel(logging.ERROR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.extend([os.path.abspath('../swift'), os.path.abspath('..'),
os.path.abspath('../bin')])
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'openstackdocstheme']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swift'
copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['swift.']
# -- Options for HTML output -----------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Swift.tex', u'Swift Documentation',
u'Swift Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# -- Options for openstackdocstheme -------------------------------------------
repository_name = 'openstack/swift'
bug_project = 'swift'
bug_tag = ''
|
Spycho/aimmo
|
aimmo-game-worker/simulation/location.py
|
Python
|
agpl-3.0
| 529
| 0
|
class Location(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, direction):
return Location(self.x + direction.x, self.y + direction.y)
def __sub__(self, direction):
return Location(self.x - direction.x, self.y - direction.y)
def __repr__(self):
return 'Location({}, {})'.format(self.x, self.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
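# A small usage sketch (values are made up): any object exposing `x` and `y`
# works as a direction, and __hash__/__eq__ make Location usable as a set key.
#
#   a = Location(1, 2)
#   b = Location(0, 1)
#   assert a + b == Location(1, 3)
#   assert a - b == Location(1, 1)
#   visited = {a, b}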
|
xuru/pyvisdk
|
pyvisdk/esxcli/handlers/ha_cli_handler_storage_core_device_world.py
|
Python
|
mit
| 877
| 0.010262
|
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class StorageCoreDeviceWorld(Base):
'''
Operations on worlds pertaining to the pluggable storage architectures' logical devices on the system.
'''
moid = 'ha-cli-handler-storage-core-device-world'
def list(self, device=None):
'''
Get a list of the worlds that are currently using devices on the ESX host.
:param device: string, Filter the output of the command to limit the output to a specific device. This device name can be any of the UIDs registered for a device.
:returns: vim.EsxCLI.storage.core.device.world.list.ScsiDeviceWorld[]
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.storage.core.device.world.List',
device=device,
)
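# A hypothetical usage sketch (how the handler instance is obtained depends on
# the surrounding pyvisdk ESXCLI machinery; the device UID below is made up):
#
#   worlds = handler.list(device="naa.600508b1001c3a80")  # limit to one device
#   all_worlds = handler.list()                           # or list worlds for every device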
|
adrienemery/auv-control-pi
|
navio/rcinput.py
|
Python
|
mit
| 484
| 0.004132
|
class RCInput():
CHANNEL_COUNT = 14
channels = []
def __init__(self):
for i in range(0, self.CHANNEL_COUNT):
try:
f = open("/sys/kernel/rcio/rcin/ch%d" % i, "r")
self.channels.append(f)
except:
print ("Can't open file /sys/kernel/rcio/rcin/ch%d" % i)
def read(self, ch):
value = self.channels[ch].read()
position = self.channels[ch].seek(0, 0)
return value[:-1]
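# A minimal usage sketch (assumes the Navio rcio sysfs files are present):
#
#   rc = RCInput()
#   throttle = int(rc.read(2))            # raw pulse width for channel 2, e.g. "1500"
#   for ch in range(RCInput.CHANNEL_COUNT):
#       print("ch%d = %s" % (ch, rc.read(ch)))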
| |
billiob/papyon
|
papyon/msnp2p/webcam.py
|
Python
|
gpl-2.0
| 9,665
| 0.001242
|
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2008 Richard Spiers <richard.spiers@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from papyon.msnp2p.constants import *
from papyon.msnp2p.SLP import *
from papyon.msnp2p.transport import *
from papyon.msnp2p.session import P2PSession
from papyon.util.decorator import rw_property
import papyon.util.element_tree as ElementTree
import struct
import gobject
import logging
import base64
import os
import random
from papyon.media import MediaCall, MediaCandidate, MediaCandidateEncoder, \
MediaSessionMessage, MediaStreamDescription
from papyon.media.constants import MediaStreamDirection, MediaSessionType
__all__ = ['WebcamSession']
logger = logging.getLogger("papyon.msnp2p.webcam")
class WebcamSession(P2PSession, MediaCall):
def __init__(self, producer, session_manager, peer, peer_guid,
euf_guid, message=None):
if producer:
type = MediaSessionType.WEBCAM_SEND
else:
type = MediaSessionType.WEBCAM_RECV
P2PSession.__init__(self, session_manager, peer, peer_guid, euf_guid,
ApplicationID.WEBCAM, message)
MediaCall.__init__(self, type)
self._producer = producer
self._answered = False
self._sent_syn = False
self._session_id = self._generate_id(9999)
self._xml_needed = False
@property
def producer(self):
return self._producer
def invite(self):
self._answered = True
context = "{B8BE70DE-E2CA-4400-AE03-88FF85B9F4E8}"
context = context.decode('ascii').encode('utf-16_le')
self._invite(context)
def accept(self):
self._answered = True
temp_application_id = self._application_id
self._application_id = 0
self._accept()
self._application_id = temp_application_id
self.send_binary_syn()
def reject(self):
self._answered = True
self._decline(603)
def end(self, reason=None):
if not self._answered:
self.reject()
else:
context = '\x74\x03\x00\x81'
self._close(context, reason)
self.dispose()
def dispose(self):
MediaCall.dispose(self)
self._dispatch("on_call_ended")
self._dispose()
def on_media_session_prepared(self, session):
if self._xml_needed:
self._send_xml()
def _on_invite_received(self, message):
if self._producer:
stream = self.media_session.create_stream("video",
MediaStreamDirection.SENDING, False)
self.media_session.add_stream(stream)
def _on_bye_received(self, message):
self.dispose()
def _on_session_accepted(self):
self._dispatch("on_call_accepted")
def _on_session_rejected(self, message):
self._dispatch("on_call_rejected", message)
self.dispose()
def _on_data_blob_received(self, blob):
blob.data.seek(0, os.SEEK_SET)
data = blob.data.read()
data = unicode(data[10:], "utf-16-le").rstrip("\x00")
if not self._sent_syn:
self.send_binary_syn() #Send 603 first ?
if data == 'syn':
self.send_binary_ack()
elif data == 'ack' and self._producer:
self._send_xml()
elif '<producer>' in data or '<viewer>' in data:
self._handle_xml(data)
elif data.startswith('ReflData'):
refldata = data.split(':')[1]
str = ""
for i in range(0, len(refldata), 2):
str += chr(int(refldata[i:i+2], 16))
print "Got ReflData :", str
|
def send_data(self, data):
message_bytes = data.encode("utf-16-le") + "\x00\x00"
id = (self._generate_id() << 8) | 0x80
header = struct.pack("<LHL", id, 8, len(message_bytes))
self._send_data(header + message_bytes)
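# For illustration: the header built above is 10 bytes, a little-endian 4-byte
# id whose low byte is forced to 0x80, a 2-byte constant 8, and a 4-byte payload
# length. With a made-up id and the 'syn' payload:
#
#   payload = 'syn'.encode('utf-16-le') + '\x00\x00'       # 8 bytes
#   header = struct.pack('<LHL', (42 << 8) | 0x80, 8, 8)   # 10 bytes
#   # the full message sent over the P2P transport is header + payload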
def send_binary_syn(self):
self.send_data('syn')
self._sent_syn = True
def send_binary_ack(self):
self.send_data('ack')
def send_binary_viewer_data(self):
self.send_data('receivedViewerData')
def _send_xml(self):
if not self.media_session.prepared:
self._xml_needed = True
return
logger.info("Send XML for session %i", self._session_id)
self._xml_needed = False
message = WebcamSessionMessage(session=self.media_session,
id=self._session_id, producer=self._producer)
self.send_data(str(message))
def _handle_xml(self, data):
message = WebcamSessionMessage(body=data, producer=self._producer)
initial = not self._producer
self.media_session.process_remote_message(message, initial)
self._session_id = message.id
logger.info("Received XML data for session %i", self._session_id)
if self._producer:
self.send_binary_viewer_data()
else:
self._send_xml()
class WebcamCandidateEncoder(MediaCandidateEncoder):
def __init__(self):
MediaCandidateEncoder.__init__(self)
def encode_candidates(self, desc, local_candidates, remote_candidates):
for candidate in local_candidates:
desc.ips.append(candidate.ip)
desc.ports.append(candidate.port)
desc.rid = int(local_candidates[0].foundation)
desc.sid = int(local_candidates[0].username)
def decode_candidates(self, desc):
local_candidates = []
remote_candidate = []
for ip in desc.ips:
for port in desc.ports:
candidate = MediaCandidate()
candidate.foundation = str(desc.rid)
candidate.component_id = 1
candidate.username = str(desc.sid)
candidate.password = ""
candidate.ip = ip
candidate.port = port
candidate.transport = "TCP"
candidate.priority = 1
local_candidates.append(candidate)
return local_candidates, remote_candidate
class WebcamSessionMessage(MediaSessionMessage):
def __init__(self, session=None, body=None, id=0, producer=False):
self._id = id
self._producer = producer
MediaSessionMessage.__init__(self, session, body)
@property
def id(self):
return self._id
@property
def producer(self):
return self._producer
def _create_stream_description(self, stream):
return WebcamStreamDescription(stream, self._id, self._producer)
def _parse(self, body):
tree = ElementTree.fromstring(body)
self._id = int(tree.find("session").text)
desc = self._create_stream_description(None)
self.descriptions.append(desc)
for node in tree.findall("tcp/*"):
if node.tag == "tcpport":
desc.ports.append(int(node.text))
elif node.tag.startswith("tcpipaddress"):
desc.ips.append(node.text)
desc.rid = tree.find("rid").text
return self._descriptions
def __str__(self):
tag = self.producer and "producer" or "viewer"
desc = self._descriptions[0]
body = "<%s>" \
"<version>2.0</version>" \
"<rid>%s</rid>" \
"<session>%u</session>" \
"<ctypes>0</ctypes>" \
"<cpu>2010</cpu>" % (tag, desc.rid, de
|
tarnheld/ted-editor
|
hm/apkhm.py
|
Python
|
unlicense
| 3,380
| 0.012722
|
from skimage import measure
import numpy as np
import struct
import math as m
from PIL import Image
from simplify import simplify
import argparse
parser = argparse.ArgumentParser(description='convert apk heightmaps to floating point tiff')
parser.add_argument('file', type=str, help='the apk heightmap file')
args = parser.parse_args()
hdr=b'\x33\x13\x26\xc3\x33\x13\x26\x43\x02\x00\x20\xc1\x33\x13\xa1\x43'
with open(args.file, mode='rb') as file:
raw = file.read()
print(struct.unpack_from("<4xIII",raw,0x1020))
print(struct.unpack_from("<ffff",raw,0x1030))
t,w,h = struct.unpack_from("<4xIII",raw,0x1020)
e1,e2,e3,e4 = struct.unpack_from("<ffff",raw,0x1030)
dt = np.dtype("half")
dt = dt.newbyteorder('<')
img = np.frombuffer(raw,dtype=dt,offset=0x1040,count=w*h)
print (img.shape)
img = img.reshape((w,h))
imin = np.amin(img)
imax = np.amax(img)
extents = np.array((e1,e2,e3,e4))
np.savez_compressed(args.file, extents = extents, heightmap=img)
fimg = img.astype(np.float32)
fimg.reshape((w*h,1))
pimg = Image.frombytes('F',(w,h), fimg.tostring(),'raw','F;32NF')
pimg.save(args.file + ".tif")
hmin = e1 * (1-imin) + e2 * imin
hmax = e1 * (1-imax) + e2 * imax
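# For illustration (made-up numbers): with extents e1 = 0.0 and e2 = 100.0 and a
# normalised minimum of imin = 0.25, hmin = 0.0 * (1 - 0.25) + 100.0 * 0.25 = 25.0.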
contours = []
hstep = 2.5
nc = m.ceil((hmax-hmin)/hstep)
for i in range(nc):
hgt = imin + i*hstep/(hmax-hmin)
npc = measure.find_contours(img, hgt)
cs = []
for c in npc:
c = simplify(c,5,True)
cs.append(c)
cs = np.array(cs)
contours.append(cs)
|
np.savez_compressed(args.file+"-contours", *contours)
# mi,ma = float(np.amin(img)),float(np.amax(img))
# print("contour",mi,ma)
# for i in range(50):
# d = float(mi*(1-i/50)+ma*i/50)
# print("contour",d)
# npc = measure.find_contours(img, d)
# for n,c in enumerate(npc):
# contours = [((x[1]-512)/1024*3499.99975586*2,(x[0]-512)/1024*3499.99975586*2) for x in c]
# if norm(c[-1] - c[0]) < 0.01:
# self.canvas.create_polygon(contours,fill="",outline='red',tag="contour")
# else:
# self.canvas.create_line(contours,fill='green',tag="contour")
# except FileNotFoundError:
# print("file not found!")
# return
# try:
# self.img = Image.open(path)
# except:
# try:
# with open(path, mode='rb') as file:
# raw = file.read()
# self.img = Image.frombytes("F",(1024,1024),raw,"raw","F;16")
# print(self.img.getpixel((4,4)))
# f = 1.0 / 2**8
# self.img = self.img.point(lambda x: x * f)
# print(self.img.getpixel((4,4)))
# self.img = self.img.resize((8192,8192))
# self.img = self.img.filter(ImageFilter.CONTOUR)
# except FileNotFoundError:
# print("file not found!")
# return
# self.ix =2*3499.99975586
# f = self.ix/2049.0
# print (f)
# #self.img = self.img.transform((int(self.ix),int(self.ix)),Image.AFFINE,data=(f,0,0,0,f,0))
# self.img = self.img.resize((int(self.ix),int(self.ix)))
# self.simg = self.img
# self.pimg = ImageTk.PhotoImage(self.img)
# self.imgcid = self.canvas.create_image(-2048, -2048, image=self.pimg, anchor=tk.NW)
|
Varriount/Colliberation
|
libs/twisted/internet/task.py
|
Python
|
mit
| 24,723
| 0.002346
|
# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
@author: Jp Calderone
"""
__metaclass__ = type
import time
from zope.interface import implements
from twisted.python import reflect
from twisted.python.failure import Failure
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
class LoopingCall:
"""Call a function repeatedly.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
@ivar f: The function to call.
@ivar a: A tuple of arguments to pass the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
@ivar clock: A provider of
L{twisted.internet.interfaces.IReactorTime}. The default is
L{twisted.internet.reactor}. Feel free to set this to
something else, but it probably ought to be set *before*
calling L{start}.
@type running: C{bool}
@ivar running: A flag which is C{True} while C{f} is scheduled to be called
(or is currently being called). It is set to C{True} when L{start} is
called and set to C{False} when L{stop} is called or if C{f} raises an
exception. In either case, it will be C{False} by the time the
C{Deferred} returned by L{start} fires its callback or errback.
@type _expectNextCallAt: C{float}
@ivar _expectNextCallAt: The time at which this instance most recently
scheduled itself to run.
@type _realLastTime: C{float}
@ivar _realLastTime: When counting skips, the time at which the skip
counter was last invoked.
@type _runAtStart: C{bool}
@ivar _runAtStart: A flag indicating whether the 'now' argument was passed
to L{LoopingCall.start}.
"""
call = None
running = False
deferred = None
interval = None
_expectNextCallAt = 0.0
_runAtStart = False
starttime = None
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
from twisted.internet import reactor
self.clock = reactor
def withCount(cls, countCallable):
"""
An alternate constructor for L{LoopingCall} that makes available the
number of calls which should have occurred since it was last invoked.
Note that this number is an C{int} value; It represents the discrete
number of calls that should have been made. For example, if you are
using a looping call to display an animation with discrete frames, this
number would be the number of frames to advance.
The count is normally 1, but can be higher. For example, if the reactor
is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
returned from a previous call is not fired before an interval has
elapsed, or if the callable itself blocks for longer than an interval,
preventing I{itself} from being called.
@param countCallable: A callable that will be invoked each time the
resulting LoopingCall is run, with an integer specifying the number
of calls that should have been invoked.
@type countCallable: 1-argument callable which takes an C{int}
@return: An instance of L{LoopingCall} with call counting enabled,
which provides the count as the first positional argument.
@rtype: L{LoopingCall}
@since: 9.0
"""
def counter():
now = self.clock.seconds()
lastTime = self._realLastTime
if lastTime is None:
lastTime = self.starttime
if self._runAtStart:
lastTime -= self.interval
self._realLastTime = now
lastInterval = self._intervalOf(lastTime)
thisInterval = self._intervalOf(now)
count = thisInterval - lastInterval
return countCallable(count)
self = cls(counter)
self._realLastTime = None
return self
withCount = classmethod(withCount)
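# A short usage sketch (names are illustrative): the callable receives how
# many intervals have elapsed since it last ran, which matters when the
# reactor falls behind.
#
#   def advance(frame_count):
#       animation.skip_forward(frame_count)   # hypothetical animation object
#
#   call = LoopingCall.withCount(advance)
#   call.start(1.0 / 30.0)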
def _intervalOf(self, t):
"""
Determine the number of intervals passed as of the given point in
time.
@param t: The specified time (from the start of the L{LoopingCall}) to
be measured in intervals
@return: The C{int} number of intervals which have passed as of the
given point in time.
"""
elapsedTime = t - self.starttime
intervalNum = int(elapsedTime / self.interval)
return intervalNum
def start(self, interval, now=True):
"""
Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returned a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError, "interval must be >= 0"
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self.clock.seconds()
self._expectNextCallAt = self.starttime
self.interval = interval
self._runAtStart = now
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def reset(self):
"""
Skip the next iteration and reset the timer.
@since: 11.1
"""
assert self.running, ("Tried to reset a LoopingCall that was "
"not running.")
if self.call is not None:
self.call.cancel()
self.call = None
self._expectNextCallAt = self.clock.seconds()
self._reschedule()
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
"""
Schedule the next iteration of this looping call.
|
"""
if self.interval == 0:
self.call = self.clock.callLater(0, self)
return
currentTime = self.clock.seconds()
# Find how long is left until the interval comes around again.
untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
# Make sure it is in the future, in case more than one interval worth
# of time passed since the previous call was made.
nextTime = max(
self._expectNextCallAt + self.interval, currentTime + untilNextTime)
# If the interval falls on the current time exactly, skip it and
# schedule the call for the next interval.
if nextTime == currentTime:
nextTime += self.interval
self._expectNextCallAt = nextTime
self.call = self.clock.callLater(nex
|
google-research/jax3d
|
jax3d/projects/nesf/nerfstatic/utils/types_test.py
|
Python
|
apache-2.0
| 2,544
| 0.008648
|
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax3d.projects.nesf.nerfstatic.utils.types."""
import jax.numpy as jnp
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
def test_bounding_box_simple():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 10, 10]),
direction=jnp.asarray([1, 1, 1]),
scene_id=None)
assert bbox.intersect_rays(rays) == (-10, -9)
def test_bounding_box_zero_dir():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 0.5, 0.5]),
direction=jnp.asarray([1, 0, 0]),
scene_id=None)
assert bbox.intersect_rays(rays) == (-10, -9)
def test_bounding_box_no_intersection():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 10, 10]),
direction=jnp.asarray([1, 0, 0]),
scene_id=None)
i = bbox.intersect_rays(rays)
assert i[1] < i[0]
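# For reference, a minimal numpy sketch of the slab method these tests exercise
# (not the jax3d implementation; zero direction components are left unhandled):
def _slab_intersect(min_corner, max_corner, origin, direction):
  t1 = (min_corner - origin) / direction
  t2 = (max_corner - origin) / direction
  t_near = np.max(np.minimum(t1, t2))  # latest entry across the three slabs
  t_far = np.min(np.maximum(t1, t2))   # earliest exit across the three slabs
  return t_near, t_far                 # no intersection when t_far < t_near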
def test_point_cloud():
h, w = 6, 8
normalize = lambda x: x / np.linalg.norm(x, axis=-1, keepdims=True)
rays = types.Rays(scene_id=np.zeros((h, w, 1), dtype=np.int32),
origin=np.random.rand(h, w, 3),
direction=normalize(np.random.randn(h, w, 3)))
views = types.Views(rays=rays,
depth=np.random.rand(h, w, 1),
semantics=np.random.randint(0, 5, size=(h, w, 1)))
# Construct point cloud.
point_cloud = views.point_cloud
# Only valid points.
assert np.all(point_cloud.points >= -1)
assert np.all(point_cloud.points <= 1)
# Size matches expected value.
assert (point_cloud.size ==
point_cloud.points.shape[0] ==
point_cloud.semantics.shape[0])
|
tbabej/astropy
|
astropy/coordinates/builtin_frames/ecliptic.py
|
Python
|
bsd-3-clause
| 6,080
| 0.000987
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..representation import SphericalRepresentation
from ..baseframe import BaseCoordinateFrame, TimeFrameAttribute
from .utils import EQUINOX_J2000
__all__ = ['GeocentricTrueEcliptic', 'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic']
class GeocentricTrueEcliptic(BaseCoordinateFrame):
"""
Geocentric ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth (necessary for transformation to
non-geocentric systems).
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
stringent accuracy tests. We recommend you test to "known-good" cases
to ensure these frames are what you are looking for. (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
lon : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``lon`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object from the geocenter.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
class BarycentricTrueEcliptic(BaseCoordinateFrame):
"""
Barycentric ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
stringent accuracy tests. We recommend you test to "known-good" cases
to ensure these frames are what you are looking for. (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
l : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``l`` must also be given and
``representation`` must be None).
r : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object from the sun's center.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
class HeliocentricTrueEcliptic(BaseCoordinateFrame):
"""
Heliocentric ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
stringent accuracy tests. We recommend you test to "known-good" cases
to ensure these frames are what you are looking for. (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
l : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``l`` must also be given and
``representation`` must be None).
r : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object from the sun's center.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
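# A short usage sketch (values are illustrative), following the Parameters
# sections documented above:
#
#   import astropy.units as u
#   geo = GeocentricTrueEcliptic(lon=45*u.deg, lat=10*u.deg)
#   helio = HeliocentricTrueEcliptic(l=45*u.deg, b=10*u.deg, equinox='J2010')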
|
FreedomBen/terminator
|
terminatorlib/prefseditor.py
|
Python
|
gpl-2.0
| 57,238
| 0.002009
|
#!/usr/bin/python
"""Preferences Editor for Terminator.
Load a UIBuilder config file, display it,
populate it with our current config, then optionally read that back out and
write it to a config file
"""
import os
import gtk
from util import dbg, err
import config
from keybindings import Keybindings, KeymapError
from translation import _
from encoding import TerminatorEncoding
from terminator import Terminator
from plugin import PluginRegistry
def color2hex(widget):
"""Pull the colour values out of a Gtk ColorPicker widget and return them
as 8bit hex values, since its default behaviour is to give 16bit values"""
widcol = widget.get_color()
return('#%02x%02x%02x' % (widcol.red>>8, widcol.green>>8, widcol.blue>>8))
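# For example, a 16-bit channel value of 0xffff becomes 0xffff >> 8 == 0xff, so a
# pure-white Gtk colour is returned as '#ffffff'.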
# FIXME: We need to check that we have represented all of Config() below
class PrefsEditor:
"""Class implementing the various parts of the preferences editor"""
config = None
registry = None
plugins = None
keybindings = None
window = None
builder = None
layouteditor = None
previous_layout_selection = None
previous_profile_selection = None
colorschemevalues = {'black_on_yellow': 0,
'black_on_white': 1,
'grey_on_black': 2,
'green_on_black': 3,
'white_on_black': 4,
'orange_on_black': 5,
'ambience': 6,
'solarized_light': 7,
'solarized_dark': 8,
'custom': 9}
colourschemes = {'grey_on_black': ['#aaaaaa', '#000000'],
'black_on_yellow': ['#000000', '#ffffdd'],
'black_on_white': ['#000000', '#ffffff'],
'white_on_black': ['#ffffff', '#000000'],
'green_on_black': ['#00ff00', '#000000'],
'orange_on_black': ['#e53c00', '#000000'],
'ambience': ['#ffffff', '#300a24'],
'solarized_light': ['#657b83', '#fdf6e3'],
'solarized_dark': ['#839496', '#002b36']}
palettevalues = {'tango': 0,
'linux': 1,
'xterm': 2,
'rxvt': 3,
'ambience': 4,
'solarized': 5,
'custom': 6}
palettes = {'tango': '#000000:#cc0000:#4e9a06:#c4a000:#3465a4:\
#75507b:#06989a:#d3d7cf:#555753:#ef2929:#8ae234:#fce94f:#729fcf:\
#ad7fa8:#34e2e2:#eeeeec',
'linux': '#000000:#aa0000:#00aa00:#aa5500:#0000aa:\
#aa00aa:#00aaaa:#aaaaaa:#555555:#ff5555:#55ff55:#ffff55:#5555ff:\
#ff55ff:#55ffff:#ffffff',
'xterm': '#000000:#cd0000:#00cd00:#cdcd00:#1e90ff:\
#cd00cd:#00cdcd:#e5e5e5:#4c4c4c:#ff0000:#00ff00:#ffff00:#4682b4:\
#ff00ff:#00ffff:#ffffff',
|
'rxvt': '#000000:#cd0000:#00cd00:#cdcd00:#0000cd:\
#cd00cd:#00cdcd:#faebd7:#404040:#ff0000:#00ff00:#ffff00:#0000ff:\
#ff00ff:#00ffff:#ffffff',
'ambience': '#2e3436:#cc0000:#4e9a06:#c4a000:\
#3465a4:#75507b:#06989a:#d3d7cf:#555753:#ef2929:#8ae234:#fce94f:\
#729fcf:#ad7fa8:#34e2e2:#eeeeec',
'solarized': '#073642:#dc322f:#859900:#b58900:\
#268bd2:#d33682:#2aa198:#eee8d5:#002b36:#cb4b16:#586e75:#657b83:\
#839496:#6c71c4:#93a1a1:#fdf6e3'}
keybindingnames = {
|
'zoom_in' : 'Increase font size',
'zoom_out' : 'Decrease font size',
'zoom_normal' : 'Restore original font size',
'new_tab' : 'Create a new tab',
'cycle_next' : 'Focus the next terminal',
'cycle_prev' : 'Focus the previous terminal',
'go_next' : 'Focus the next terminal',
'go_prev' : 'Focus the previous terminal',
'go_up' : 'Focus the terminal above',
'go_down' : 'Focus the terminal below',
'go_left' : 'Focus the terminal left',
'go_right' : 'Focus the terminal right',
'rotate_cw' : 'Rotate terminals clockwise',
'rotate_ccw' : 'Rotate terminals counter-clockwise',
'split_horiz' : 'Split horizontally',
'split_vert' : 'Split vertically',
'close_term' : 'Close terminal',
'copy' : 'Copy selected text',
'paste' : 'Paste clipboard',
'toggle_scrollbar' : 'Show/Hide the scrollbar',
'search' : 'Search terminal scrollback',
'close_window' : 'Close window',
'resize_up' : 'Resize the terminal up',
'resize_down' : 'Resize the terminal down',
'resize_left' : 'Resize the terminal left',
'resize_right' : 'Resize the terminal right',
'move_tab_right' : 'Move the tab right',
'move_tab_left' : 'Move the tab left',
'toggle_zoom' : 'Maximise terminal',
'scaled_zoom' : 'Zoom terminal',
'next_tab' : 'Switch to the next tab',
'prev_tab' : 'Switch to the previous tab',
'switch_to_tab_1' : 'Switch to the first tab',
'switch_to_tab_2' : 'Switch to the second tab',
'switch_to_tab_3' : 'Switch to the third tab',
'switch_to_tab_4' : 'Switch to the fourth tab',
'switch_to_tab_5' : 'Switch to the fifth tab',
'switch_to_tab_6' : 'Switch to the sixth tab',
'switch_to_tab_7' : 'Switch to the seventh tab',
'switch_to_tab_8' : 'Switch to the eighth tab',
'switch_to_tab_9' : 'Switch to the ninth tab',
'switch_to_tab_10' : 'Switch to the tenth tab',
'full_screen' : 'Toggle fullscreen',
'reset' : 'Reset the terminal',
'reset_clear' : 'Reset and clear the terminal',
'hide_window' : 'Toggle window visibility',
'group_all' : 'Group all terminals',
'ungroup_all' : 'Ungroup all terminals',
'group_tab' : 'Group terminals in tab',
'ungroup_tab' : 'Ungroup terminals in tab',
'new_window' : 'Create a new window',
'new_terminator' : 'Spawn a new Terminator process',
'broadcast_off' : 'Don\'t broadcast key presses',
'broadcast_group' : 'Broadcast key presses to group',
'broadcast_all' : 'Broadcast key events to all',
'insert_number' : 'Insert terminal number',
'insert_padded' : 'Insert zero padded terminal number',
'edit_window_title': 'Edit window title'
}
def __init__ (self, term):
self.config = config.Config()
self.term = term
self.builder = gtk.Builder()
self.keybindings = Keybindings()
try:
# Figure out where our library is on-disk so we can open our
(head, _tail) = os.path.split(config.__file__)
librarypath = os.path.join(head, 'preferences.glade')
gladefile = open(librarypath, 'r')
gladedata = gladefile.read()
except Exception, ex:
print "Failed to find preferences.glade"
print ex
return
self.builder.add_from_string(gladedata)
self.window = self.builder.ge
|
robwebset/script.ebooks
|
resources/lib/kiehinen/ebook.py
|
Python
|
gpl-2.0
| 8,604
| 0.002092
|
from struct import unpack, pack, calcsize
from mobi_languages import LANGUAGES
from lz77 import uncompress
def LOG(*args):
pass
MOBI_HDR_FIELDS = (
("id", 16, "4s"),
("header_len", 20, "I"),
("mobi_type", 24, "I"),
("encoding", 28, "I"),
("UID", 32, "I"),
("generator_version", 36, "I"),
("reserved", 40, "40s"),
("first_nonbook_idx", 80, "I"),
("full_name_offs", 84, "I"),
("full_name_len", 88, "I"),
("locale_highbytes", 92, "H"),
("locale_country", 94, "B"),
("locale_language", 95, "B"),
("input_lang", 96, "I"),
("output_lang", 100, "I"),
("format_version", 104, "I"),
("first_image
|
_idx", 108, "I"),
("huff/cdic_record", 112, "I"),
("huff/cdic_count", 116, "I"),
("datp_record", 120, "I"),
("datp_count", 124, "I"),
("exth_flags", 128, "I"),
("unknowni@132", 132, "32s"),
("unknown@164", 164, "I"),
("drm_offs", 168, "I"),
("drm_count", 172, "I"),
("drm_size", 176, "I"),
("drm_flags", 180, "I"),
("unknown@184", 184, "I"),
("unknown@188", 188, "I"),
("unknown@192", 192, "H"),
("last_image_record", 194, "H"),
("unknown@196", 196, "I"),
("fcis_record", 200, "I"),
("unknown@204", 204, "I"),
("flis_record", 208, "I"),
("unknown@212", 212, "I"),
("extra_data_flags", 242, "H")
)
EXTH_FMT = ">4x2I"
'''4x = "EXTH", I = hlen, I = record count'''
EXTH_RECORD_TYPES = {
1: 'drm server id',
2: 'drm commerce id',
3: 'drm ebookbase book id',
100: 'author', # list
101: 'publisher', # list
102: 'imprint',
103: 'description',
104: 'isbn', # list
105: 'subject', # list
106: 'publication date',
107: 'review',
108: 'contributor', # list
109: 'rights',
110: 'subjectcode', # list
111: 'type',
112: 'source',
113: 'asin',
114: 'version number', # int
115: 'sample', # int (or bool)?
116: 'start reading',
117: 'adult',
118: 'retail price',
119: 'retail price currency',
201: 'cover offset', # int
202: 'thumbnail offset', # int
203: 'has fake cover', # bool?
208: 'watermark',
209: 'tamper proof keys',
401: 'clipping limit', # int
402: 'publisher limit',
404: 'ttsflag',
501: 'cde type',
502: 'last update time',
503: 'updated title'
}
PRC_HDRFMT = '>H2xIHHI' # Compression,unused,Len,Count,Size,Pos
def parse_palmdb(filename):
import palm
db = palm.Database(filename)
return db
class Book:
def __init__(self, fn):
self.filename = fn
# Set some fields to defaults
self.title = fn
self.author = "??"
self.language = "??"
# Rob Addition: Description
self.description = ""
self.is_a_book = False
f = open(fn)
d = f.read(68)
f.close()
encodings = {
1252: 'cp1252',
65001: 'utf-8'
}
supported_types = ('BOOKMOBI', 'TEXtREAd')
self.type = d[60:68]
if self.type not in supported_types:
LOG(1, "Unsupported file type %s" % (self.type))
return None
try:
db = parse_palmdb(fn)
except:
return None
self.is_a_book = True
# now we have a better guess at the title, use it for now
self.title = db.name
self.records = db.records
rec0 = self.records[0].data
#LOG(5,repr(rec0))
if self.type == 'BOOKMOBI':
LOG(3, "This is a MOBI book")
self.mobi = {}
for field, pos, fmt in MOBI_HDR_FIELDS:
end = pos + calcsize(fmt)
if (end > len(rec0) or
("header_len" in self.mobi
and end > self.mobi["header_len"])):
continue
LOG(4, "field: %s, fmt: %s, @ [%d:%d], data: %s" % (
field, fmt, pos, end, repr(rec0[pos:end])))
(self.mobi[field], ) = unpack(">%s" % fmt, rec0[pos:end])
LOG(3, "self.mobi: %s" % repr(self.mobi))
# Get and decode the book name
if self.mobi['locale_language'] in LANGUAGES:
lang = LANGUAGES[self.mobi['locale_language']]
if self.mobi['locale_country'] == 0:
LOG(2, "Book language: %s" % lang[0][1])
self.language = "%s (%s)" % (lang[0][1], lang[0][0])
elif self.mobi['locale_country'] in lang:
country = lang[self.mobi['locale_country']]
LOG(2, "Book language is %s (%s)" % (
lang[0][1], country[1]))
self.language = "%s (%s-%s)" % (
lang[0][1],
lang[0][0],
country[0]
)
pos = self.mobi['full_name_offs']
end = pos + self.mobi['full_name_len']
self.title = rec0[pos:end].decode(encodings[self.mobi['encoding']])
LOG(2, "Book name: %s" % self.title)
if self.mobi['id'] != 'MOBI':
LOG(0, "Mobi header missing!")
return None
if (0x40 & self.mobi['exth_flags']): # check for EXTH
self.exth = parse_exth(rec0, self.mobi['header_len'] + 16)
LOG(3, "EXTH header: %s" % repr(self.exth))
if 'author' in self.exth:
self.author = ' & '.join(self.exth['author'])
else:
self.author = "n/a"
self.rawdata = d
if (('updated title' in self.exth) and
(type(self.exth['updated title']) is str)):
self.title = ' '.join(self.exth['updated title'])
if 'description' in self.exth:
self.description = ' <P> '.join(self.exth['description'])
elif self.type == 'TEXtREAd':
LOG(2, "This is an older MOBI book")
self.rawdata = d
compression, data_len, rec_count, rec_size, pos = unpack(
PRC_HDRFMT, rec0[:calcsize(PRC_HDRFMT)])
LOG(3, "compression %d, data_len %d, rec_count %d, rec_size %d" %
(compression, data_len, rec_count, rec_size))
if compression == 2:
data = uncompress(self.records[1].data)
else:
data = self.records[1].data
from BeautifulSoup import BeautifulSoup
soup = BeautifulSoup(data)
self.metadata = soup.fetch("dc-metadata")
try:
self.title = soup.fetch("dc:title")[0].getText()
self.author = soup.fetch("dc:creator")[0].getText()
self.language = soup.fetch("dc:language")[0].getText()
except:
self.title, self.author, self.language = ("Unknown", "Unknown",
"en-us")
try:
self.description = soup.fetch("dc:description")[0].getText()
except:
pass
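    # A minimal usage sketch (the file name below is hypothetical): once the
    # constructor above has run, the parsed metadata is available as plain
    # attributes.
    #
    #   book = Book('/path/to/some.mobi')
    #   if book.is_a_book:
    #       LOG(2, "%s by %s [%s]" % (book.title, book.author, book.language))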
def to_html(self):
last_idx = (
self.mobi['first_image_idx'] if 'mobi' in self.__dict__ else -1)
return ''.join([uncompress(x.data) for x in self.records[1:last_idx]])
def parse_exth(data, pos):
ret = {}
n = 0
if (pos != data.find('EXTH')):
LOG(0, "EXTH header not found where it should be @%d" % pos)
return None
else:
end = pos + calcsize(EXTH_FMT)
(hlen, count) = unpack(EXTH_FMT, data[pos:end])
LOG(4, "pos: %d, EXTH header len: %d, record count: %d" % (
pos, hlen, count))
pos = end
while n < count:
end = pos + calcsize(">2I")
t, l = unpack(">2I", data[pos:end])
v = data[end:pos + l]
if l - 8 == 4:
v = unpack(">I", v)[0]
if t in EXTH_RECORD_TYPES:
rec = EXTH_RECORD_TYPES[t]
|
PBR/chebi2gene
|
chebi2gene.py
|
Python
|
bsd-3-clause
| 17,793
| 0.001124
|
#!/usr/bin/python
"""
Small web application to retrieve information from uniprot and itag for
a given compound.
The idea is that, for one compound, we can find out which reactions it is
involved in and which proteins take part in those reactions. For each of
these proteins we can then find whether there are genes, including genes
from tomato, associated with them.
"""
from flask import Flask, Response, render_template, request, redirect, url_for
from flaskext.wtf import Form, TextField
import ConfigParser
import datetime
import json
import os
import rdflib
import urllib
CONFIG = ConfigParser.ConfigParser()
CONFIG.readfp(open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'chebi2gene.cfg')))
# Address of the sparql server to query.
SERVER = CONFIG.get('chebi2gene', 'sparql_server')
# Create the application.
APP = Flask(__name__)
APP.secret_key = CONFIG.get('chebi2gene', 'secret_key')
# Stores in which graphs are the different source of information.
GRAPHS = {option: CONFIG.get('graph', option) for option in CONFIG.options('graph')}
print GRAPHS
class ChebiIDForm(Form):
""" Simple text field form to input the chebi identifier or the
name of the protein.
"""
chebi_id = TextField('Chebi ID or molecule name')
def convert_to_uniprot_id(data):
""" Converts from RHEA Uniprot URI to Uniprot ID.
@param data, a dictionary of String: [String] where the keys are
reaction ID and the values are protein URI.
@return, a dictionary of String: [String] where the keys are
reaction ID and the values are protein ID.
"""
for key in data:
proteins = data[key]
proteins2 = []
for protein in proteins:
prot_id = protein.rsplit(':', 1)[1]
proteins2.append(prot_id.strip())
data[key] = proteins2
return data
def get_exact_chebi_from_search(name):
""" Search the chebi database for molecule having the given string
in their name. The data returned contains the chebi identifier, the
name and synonyms of the molecule in chebi.
@param name, a string, name of the molecule to search in chebi.
@return, a dictionary containing all the molecule found for having
the input string in their name. The data structure returned is like:
{string: {'name': string, 'syn': [String]}}, where the keys are the
chebi identifier and the values are dictionaries containing the
name of the molecules and a list of its synonym.
"""
query = '''
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX obo:<http://purl.obolibrary.org/obo#>
SELECT DISTINCT ?id ?name ?syn
FROM <%(chebi)s>
WHERE {
{
?id rdfs:label ?name .
?id obo:Synonym ?syn .
FILTER (
regex(?name, "%(search)s", "i")
)
}
} ORDER BY ?id
''' % {'search': name, 'chebi': GRAPHS['chebi']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
molecules = {}
for entry in data_js['results']['bindings']:
chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1]
if chebi_id in molecules:
molecules[chebi_id]['syn'].append(entry['syn']['value'])
else:
molecules[chebi_id] = {
'name': [entry['name']['value']],
'syn': [entry['syn']['value']]
}
return molecules
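# An illustrative sketch of how the returned structure is typically consumed
# (the search term is a hypothetical example; sparql_query and the SPARQL
# endpoint must be reachable for this to return anything):
#
#   molecules = get_exact_chebi_from_search('caffeine')
#   if molecules:
#       for chebi_id, info in molecules.items():
#           print chebi_id, info['name'], len(info['syn'])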
def get_extended_chebi_from_search(name):
""" Search the chebi database for molecule having the given string
in their name or in their synonyms. The data returned contains the
chebi identifier, the name and synonyms of the molecule in chebi.
@param name, a string, name of the molecule to search in chebi.
@return, a dictionary containing all the molecule found for having
the input string in their name or in their synonyms.
The data structure returned is like:
{string: {'name': string, 'syn': [String]}}, where the keys are the
chebi identifier and the values are dictionaries containing the
name of the molecules and a list of its synonym.
"""
query = '''
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX obo:<http://purl.obolibrary.org/obo#>
SELECT DISTINCT ?id ?name ?syn
FROM <%(chebi)s>
WHERE {
{
?id rdfs:label ?name .
?id obo:Synonym ?syn .
FILTER (
regex(?name, "%(search)s", "i")
|| regex(?syn, "%(search)s", "i")
)
}
} ORDER BY ?id
''' % {'search': name, 'chebi': GRAPHS['chebi']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
molecules = {}
for entry in data_js['results']['bindings']:
chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1]
if chebi_id in molecules:
molecules[chebi_id]['syn'].append(entry['syn']['value'])
else:
molecules[chebi_id] = {
'name': [entry['name']['value']],
'syn': [entry['syn']['value']]
}
return molecules
def get_genes_of_proteins(data):
""" Returns the genes associated with proteins.
@param name, a dictionary where the keys are reactions identifier
and the values lists of proteins identifier.
@return, a dictionary containing all the genes related with the
proteins specified.
The data structure returned is like:
{string: [String]}, where the keys are the uniprot identifier and
the values are list of gene identifier associated with the protein.
"""
genes = {}
for key in data:
proteins = data[key]
# Let's make sure the identifiers are unique
proteins = list(set(proteins))
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX pos:<http://pbr.wur.nl/POSITION#>
SELECT DISTINCT ?prot ?name ?sca ?start ?stop ?desc
FROM <%(itag)s>
WHERE{
?gene gene:Protein ?prot .
FILTER (
?prot IN (
<http://purl.uniprot.org/uniprot/%(prot)s>
)
)
?gene gene:Position ?pos .
?pos pos:Scaffold ?sca .
?gene gene:Description ?desc .
?gene gene:FeatureName ?name .
?pos pos:Start ?start .
?pos pos:Stop ?stop .
} ORDER BY ?name
''' % {'prot': '>,\n<http://purl.uniprot.org/uniprot/'.join(
proteins), 'itag': GRAPHS['itag']}
data_js = sparql_query(query, SERVER)
for entry in data_js['results']['bindings']:
prot_id = entry['prot']['value'].rsplit('/', 1)[1]
gene = {}
for var in ['name', 'sca', 'start', 'stop', 'desc']:
gene[var] = entry[var]['value']
gene['sca'] = gene['sca'].rsplit('#', 1)[1]
if prot_id in genes:
genes[prot_id].append(gene)
else:
genes[prot_id] = [gene]
return genes
def get_pathways_of_proteins(data):
""" Returns the pathways associated with proteins.
@param name, a dictionary where the keys are reactions identifier
and the values lists of proteins.
@return, a dictionary containing all the pathways related with the
proteins specified.
The data structure returned is like:
{string: [String]}, where the keys are the uniprot identifier and
the values are list of pathways associated with the protein.
"""
pathways = {}
for key in data:
proteins = data[key]
# Let's make sure the identifiers are unique
proteins = list(set(proteins))
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX uniprot:<http://purl.uniprot.org/core/>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?prot ?desc
FROM <%(uniprot)s>
WHERE {
?prot uniprot:annotation ?annot .
?annot rdfs:seeAlso ?url .
        ?annot rdfs:comment ?desc .
FILTER (
?prot IN (
<http://purl.uniprot.org/uniprot
|
valeth/apex-sigma
|
sigma/plugins/searches/google/google.py
|
Python
|
gpl-3.0
| 1,478
| 0.003392
|
import aiohttp
import discord
import random
from config import GoogleAPIKey
from config import GoogleCSECX
async def google(cmd, message, args):
if not args:
await message.channel.send(cmd.help())
return
else:
search = ' '.join(args)
url = 'https://www.googleapis.com/customsearch/v1?q=' + search + '&cx=' + GoogleCSECX + '&key=' + GoogleAPIKey
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
results = await data.json()
google_colors = [0x4285f4, 0x34a853, 0xfbbc05, 0xea4335, 0x00a1f1, 0x7cbb00, 0xffbb00, 0xf65314]
embed_color = random.choice(google_colors)
try:
title = results['items'][0]['title']
url = results['items'][0]['link']
embed = discord.Embed(color=embed_color)
            embed.set_author(name='Google', icon_url='https://avatars2.githubusercontent.com/u/1342004?v=3&s=400',
                             url='https://www.google.com/search?q=' + search)
embed.add_field(name=title, value='[**Link Here**](' + url + ')')
await message.channel.send(None, embed=embed)
except Exception as e:
cmd.log.error(e)
embed = discord.Embed(color=0xDB0000, title='❗ Daily Limit Reached.')
embed.set_footer(text='Google limits this API feature, and we hit that limit.')
await message.channel.send(None, embed=embed)
|
jpbarrette/moman
|
finenight/python/iadfa.py
|
Python
|
mit
| 4,303
| 0.006739
|
from fsa import *
from nameGenerator import *
class IncrementalAdfa(Dfa):
"""This class is an Acyclic Deterministic Finite State Automaton
    constructed from a list of words.
"""
def __init__(self, words, nameGenerator = None, sorted = False):
if nameGenerator is None:
nameGenerator = IndexNameGenerator()
self.nameGenerator = nameGenerator
if sorted:
self.createFromSortedListOfWords(words)
else:
self.createFromArbitraryListOfWords(words)
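    # A minimal usage sketch (the word list is hypothetical): feeding an
    # already-sorted list lets createFromSortedListOfWords() merge equivalent
    # states via the register while the automaton is being built.
    #
    #   adfa = IncrementalAdfa(['cat', 'cats', 'dog', 'dogs'], sorted=True)
    #   final_states = adfa.finalStates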
def getCommonPrefix(self, word):
        stateName = self.startState
        index = 0
        nextStateName = stateName
        while nextStateName is not None:
symbol = word[index]
stateName = nextStateName
if symbol in self.states[stateName].transitions:
nextStateName = self.states[stateName].transitions[symbol]
index += 1
else:
nextStateName = None
return (stateName, word[index:])
def hasChildren(self, stateName):
okay = False
if [s for s in list(self.states[stateName].transitions.values()) if s]:
okay = True
return okay
def addSuffix(self, stateName, currentSuffix):
lastState = stateName
while len(currentSuffix) > 0:
newStateName = self.nameGenerator.generate()
symbol = currentSuffix[0]
currentSuffix = currentSuffix[1:]
self.states[stateName].transitions[symbol] = newStateName
self.states[newStateName] = State(newStateName)
stateName = newStateName
self.finalStates.append(stateName)
def markedAsRegistered(self, stateName):
return stateName in self.register
def markAsRegistered(self, stateName):
self.register[stateName] = True
def equivalentRegisteredState(self, stateName):
        equivalentState = None
        for state in list(self.register.keys()):
            if self.areEquivalents(state, stateName):
                equivalentState = state
        return equivalentState
def lastChild(self, stateName):
input = list(self.states[stateName].transitions.keys())
input.sort()
return (self.states[stateName].transitions[input[-1]], input[-1])
def replaceOrRegister(self, stateName):
#childName = self.finalStates[-1]
childName, lastSymbol = self.lastChild(stateName)
if not self.markedAsRegistered(childName):
if self.hasChildren(childName):
self.replaceOrRegister(childName)
equivalentState = self.equivalentRegisteredState(childName)
if equivalentState is not None:
self.deleteBranch(childName)
self.states[stateName].transitions[lastSymbol] = equivalentState
else:
self.markAsRegistered(childName)
def deleteBranch(self, child):
childs = [child]
while len(childs) > 0:
nextChilds = []
for child in childs:
nextChilds += [s for s in list(self.states[child].transitions.values()) if not self.markedAsRegistered(s)]
self.states.pop(child)
if child in self.finalStates:
self.finalStates.remove(child)
childs = nextChilds
def createFromSortedListOfWords(self, words):
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
lastWord = None
for word in words:
if word.endswith('\n'):
word = word[:-1]
lastStateName, currentSuffix = self.getCommonPrefix(word)
if self.hasChildren(lastStateName):
self.replaceOrRegister(lastStateName)
self.addSuffix(lastStateName, currentSuffix)
self.replaceOrRegister(self.startState)
def createFromArbitraryListOfWords(self, words):
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
|
alfredodeza/pytest
|
src/_pytest/setupplan.py
|
Python
|
mit
| 818
| 0
|
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setupplan",
"--setup-plan",
action="store_true",
help="show what fixtures and tests would be execute
|
d but "
"don't execute anything.",
|
)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
# Will return a dummy fixture if the setuponly option is provided.
if request.config.option.setupplan:
my_cache_key = fixturedef.cache_key(request)
fixturedef.cached_result = (None, my_cache_key, None)
return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setupplan:
config.option.setuponly = True
config.option.setupshow = True
|
markalansmith/draftmim
|
web/draftmim/core.py
|
Python
|
apache-2.0
| 189
| 0
|
from draftmim import app
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.restless import APIManager
db = SQLAlchemy(app)
api_manager = APIManager(app, flask_sqlalchemy_db=db)
|
jairideout/q2cli
|
q2cli/dev.py
|
Python
|
bsd-3-clause
| 1,137
| 0
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import click
@click.group(help='Utilities for developers and advanced users.')
def dev():
pass
@dev.command(name='refresh-cache',
short_help='Refresh CLI cache.',
help="Refresh the CLI cache. Use this command if you are "
"developing a plugin, or q2cli itself, and want your "
             "changes to take effect in the CLI. A refresh of the cache "
             "is necessary because package versions do not typically "
"change each time an update is made to a package's code. "
"Setting the environment variable Q2CLIDEV to any value "
"will always refresh the cache when a command is run.")
def refresh_cache():
import q2cli.cache
q2cli.cache.CACHE.refresh()
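# A minimal usage sketch, assuming the q2cli entry point is exposed as the
# `qiime` command (an assumption; the entry-point name is not defined in this
# file):
#
#   qiime dev refresh-cache
#
# Alternatively, setting the Q2CLIDEV environment variable (see the help text
# above) refreshes the cache on every invocation.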
|
Didou09/tofu
|
tofu/imas2tofu/_comp_mesh.py
|
Python
|
mit
| 6,909
| 0
|
# Built-in
import os
import warnings
# Common
import numpy as np
# #############################################################################
# Triangular meshes
# #############################################################################
def tri_checkformat_NodesFaces(nodes, indfaces, ids=None):
# Check mesh type
if indfaces.shape[1] == 3:
mtype = 'tri'
elif indfaces.shape[1] == 4:
mtype = 'quad'
else:
msg = ("Mesh seems to be neither triangular nor quadrilateral\n"
+ " => unrecognized mesh type, not implemented yet")
raise Exception(msg)
# Check indexing !!!
indmax = int(np.nanmax(indfaces))
if indmax == nodes.shape[0]:
indfaces = indfaces - 1
elif indmax > nodes.shape[0]:
msg = ("There seems to be an indexing error\n"
+ "\t- np.max(indfaces) = {}".format(indmax)
+ "\t- nodes.shape[0] = {}".format(nodes.shape[0]))
raise Exception(msg)
# Check for duplicates
nnodes = nodes.shape[0]
nfaces = indfaces.shape[0]
nodesu, indnodesu = np.unique(nodes, axis=0, return_index=True)
facesu, indfacesu = np.unique(indfaces, axis=0, return_index=True)
facesuu = np.unique(facesu)
lc = [nodesu.shape[0] != nnodes,
facesu.shape[0] != nfaces,
facesuu.size != nnodes or np.any(facesuu != np.arange(0, nnodes))]
if any(lc):
msg = "Non-valid mesh in {}:\n".format(ids)
if lc[0]:
noddup = [ii for ii in range(0, nnodes) if ii not in indnodesu]
msg += (" Duplicate nodes: {}\n".format(nnodes - nodesu.shape[0])
+ "\t- nodes.shape: {}\n".format(nodes.shape)
+ "\t- unique nodes.shape: {}\n".format(nodesu.shape)
+ "\t- duplicate nodes indices: {}\n".format(noddup))
if lc[1]:
            dupf = [ii for ii in range(0, nfaces) if ii not in indfacesu]
            msg += ("  Duplicate faces: {}\n".format(nfaces - facesu.shape[0])
+ "\t- faces.shape: {}\n".format(indfaces.shape)
+ "\t- unique faces.shape: {}".format(facesu.shape)
+ "\t- duplicate facess indices: {}\n".format(dupf))
if lc[2]:
nfu = facesuu.size
nodnotf = [ii for ii in range(0, nnodes) if ii not in facesuu]
fnotn = [ii for ii in facesuu if ii < 0 or ii >= nnodes]
msg += (" Non-bijective nodes indices vs faces:\n"
+ "\t- nb. nodes: {}\n".format(nnodes)
+ "\t- nb. unique nodes index in faces: {}\n".format(nfu)
+ "\t- nodes not in faces: {}\n".format(nodnotf)
+ "\t- faces ind not in nodes: {}\n".format(fnotn))
raise Exception(msg)
# Test for unused nodes
facesu = np.unique(indfaces)
c0 = np.all(facesu >= 0) and facesu.size == nnodes
if not c0:
indnot = [ii for ii in range(0, nnodes) if ii not in facesu]
msg = ("Some nodes not used in mesh of ids {}:\n".format(ids)
+ " - unused nodes indices: {}".format(indnot))
warnings.warn(msg)
# Convert to triangular mesh if necessary
if mtype == 'quad':
# Convert to tri mesh (solution for unstructured meshes)
indface = np.empty((indfaces.shape[0]*2, 3), dtype=int)
indface[::2, :] = indfaces[:, :3]
indface[1::2, :-1] = indfaces[:, 2:]
indface[1::2, -1] = indfaces[:, 0]
indfaces = indface
mtype = 'quadtri'
ntri = 2
else:
ntri = 1
# Check orientation
x, y = nodes[indfaces, 0], nodes[indfaces, 1]
orient = ((y[:, 1] - y[:, 0])*(x[:, 2] - x[:, 1])
- (y[:, 2] - y[:, 1])*(x[:, 1] - x[:, 0]))
indclock = orient > 0.
if np.any(indclock):
nclock, ntot = indclock.sum(), indfaces.shape[0]
msg = ("Some triangles not counter-clockwise\n"
+ " (necessary for matplotlib.tri.Triangulation)\n"
+ " => {}/{} triangles reshaped".format(nclock, ntot))
warnings.warn(msg)
(indfaces[indclock, 1],
indfaces[indclock, 2]) = indfaces[indclock, 2], indfaces[indclock, 1]
return indfaces, mtype, ntri
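# A minimal sketch of the quad-to-tri conversion and orientation check above,
# using hypothetical test data (a single unit-square quadrilateral):
#
#   nodes = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   indfaces = np.array([[1, 2, 3, 4]])    # 1-based indices, as in some files
#   indfaces, mtype, ntri = tri_checkformat_NodesFaces(nodes, indfaces)
#   # indfaces now has shape (2, 3): the quad is split into two triangles,
#   # mtype == 'quadtri', ntri == 2, and any clockwise triangle has two of
#   # its vertices swapped so matplotlib.tri.Triangulation accepts it.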
# #############################################################################
# Rectangular meshes
# #############################################################################
def _rect_checkRZ(aa, name='R', shapeRZ=None):
if aa.ndim == 1 and np.any(np.diff(aa) < 0.):
msg = "{} must be increasing!".format(name)
raise Exception(msg)
elif aa.ndim == 2:
        lc = [np.all(np.diff(aa[0, :]) > 0.),
              np.all(np.diff(aa[:, 0]) > 0.)]
if np.sum(lc) != 1:
msg = "{} must have exactly one dim increasing".format(name)
raise Exception(msg)
if lc[0]:
aa = aa[0, :]
if shapeRZ[1] is None:
shapeRZ[1] = name
if shapeRZ[1] != name:
msg = ("Inconsistent shapeRZ[1]\n"
+ "\t- expected: [{}, ...]\n".format(name)
+ "\t- provided: {}".format(shapeRZ))
raise Exception(msg)
else:
aa = aa[:, 0]
if shapeRZ[0] is None:
shapeRZ[0] = name
assert shapeRZ[0] == name
return aa, shapeRZ
def rect_checkformat(R, Z, datashape=None,
shapeRZ=None, ids=None):
if R.ndim not in [1, 2] or Z.ndim not in [1, 2]:
msg = ""
raise Exception(msg)
shapeu = np.unique(np.r_[R.shape, Z.shape])
if shapeRZ is None:
shapeRZ = [None, None]
# Check R, Z
R, shapeRZ = _rect_checkRZ(R, name='R', shapeRZ=shapeRZ)
Z, shapeRZ = _rect_checkRZ(Z, name='Z', shapeRZ=shapeRZ)
if datashape is not None:
if None in shapeRZ:
pass
shapeRZ = tuple(shapeRZ)
if shapeRZ == ('R', 'Z'):
datashape_exp = (R.size, Z.size)
elif shapeRZ == ('Z', 'R'):
datashape_exp = (Z.size, R.size)
else:
msg = "Inconsistent data shape !"
raise Exception(msg)
if datashape != datashape_exp:
msg = ("Inconsistent data shape\n"
+ "\t- shapeRZ = {}\n".format(shapeRZ)
+ "\t- datashape expected: {}\n".format(datashape_exp)
+ "\t- datashape provided: {}\n".format(datashape))
raise Exception(msg)
if None not in shapeRZ:
shapeRZ = tuple(shapeRZ)
if shapeRZ not in [('R', 'Z'), ('Z', 'R')]:
msg = ("Wrong value for shapeRZ:\n"
+ "\t- expected: ('R', 'Z') or ('Z', 'R')\n"
+ "\t- provided: {}".format(shapeRZ))
raise Exception(msg)
return R, Z, shapeRZ, 0
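# A minimal sketch with hypothetical 1-D grids (when a datashape is passed
# with 1-D inputs, shapeRZ must be supplied explicitly, since neither array
# reveals the axis ordering by itself):
#
#   R = np.linspace(1.0, 2.0, 3)
#   Z = np.linspace(-1.0, 1.0, 4)
#   R, Z, shapeRZ, _ = rect_checkformat(R, Z, datashape=(3, 4),
#                                       shapeRZ=['R', 'Z'])
#   # shapeRZ == ('R', 'Z'), so data arrays are expected as (R.size, Z.size)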
|
mgraupe/acq4
|
setup.py
|
Python
|
mit
| 7,496
| 0.007204
|
DESCRIPTION = """\
ACQ4 is a python-based platform for experimental neurophysiology.
It includes support for standard electrophysiology, multiphoton imaging,
scanning laser photostimulation, and many other experimental techniques. ACQ4 is
highly modular and extensible, allowing support to be added for new types of
devices, techniques, user-interface modules, and analyses.
"""
setupOpts = dict(
name='acq4',
description='Neurophysiology acquisition and analysis platform',
    long_description=DESCRIPTION,
    license='MIT',
    url='http://www.acq4.org',
author='Luke Campagnola',
author_email='luke.campagnola@gmail.com',
)
from setuptools import setup
import distutils.dir_util
import distutils.sysconfig
import os, sys, re
from subprocess import check_output
## generate list of all sub-packages
path = os.path.abspath(os.path.dirname(__file__))
n = len(path.split(os.path.sep))
subdirs = [i[0].split(os.path.sep)[n:] for i in os.walk(os.path.join(path, 'acq4')) if '__init__.py' in i[2]]
allPackages = ['.'.join(p) for p in subdirs]
## Make sure build directory is clean before installing
buildPath = os.path.join(path, 'build')
if os.path.isdir(buildPath):
distutils.dir_util.remove_tree(buildPath)
## Determine current version string
initfile = os.path.join(path, 'acq4', '__init__.py')
init = open(initfile).read()
m = re.search(r'__version__ = (\S+)\n', init)
if m is None or len(m.groups()) != 1:
raise Exception("Cannot determine __version__ from init file: '%s'!" % initfile)
version = m.group(1).strip('\'\"')
initVersion = version
# If this is a git checkout, try to generate a more descriptive version string
try:
if os.path.isdir(os.path.join(path, '.git')):
def gitCommit(name):
commit = check_output(['git', 'show', name], universal_newlines=True).split('\n')[0]
assert commit[:7] == 'commit '
return commit[7:]
# Find last tag matching "acq4-.*"
tagNames = check_output(['git', 'tag'], universal_newlines=True).strip().split('\n')
while True:
if len(tagNames) == 0:
raise Exception("Could not determine last tagged version.")
lastTagName = tagNames.pop()
if re.match(r'acq4-.*', lastTagName):
break
# is this commit an unchanged checkout of the last tagged version?
lastTag = gitCommit(lastTagName)
head = gitCommit('HEAD')
if head != lastTag:
branch = re.search(r'\* (.*)', check_output(['git', 'branch'], universal_newlines=True)).group(1)
version = version + "-%s-%s" % (branch, head[:10])
# any uncommitted modifications?
modified = False
status = check_output(['git', 'status', '-s'], universal_newlines=True).strip().split('\n')
for line in status:
if line.strip() != '' and line[:2] != '??':
modified = True
break
if modified:
version = version + '+'
sys.stderr.write("Detected git commit; will use version string: '%s'\n" % version)
except:
version = initVersion
sys.stderr.write("This appears to be a git checkout, but an error occurred "
"while attempting to determine a version string for the "
"current commit.\nUsing the unmodified version string "
"instead: '%s'\n" % version)
sys.excepthook(*sys.exc_info())
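# For illustration: on a git checkout whose HEAD differs from the last
# 'acq4-*' tag, the code above yields a string shaped like
# '0.9.2-develop-0123456789', with a trailing '+' appended when uncommitted
# modifications are present (all values here are hypothetical).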
print("__init__ version: %s current version: %s" % (initVersion, version))
if 'upload' in sys.argv and version != initVersion:
print("Base version does not match current; stubbornly refusing to upload.")
exit()
import distutils.command.build
class Build(distutils.command.build.build):
def run(self):
ret = distutils.command.build.build.run(self)
# If the version in __init__ is different from the automatically-generated
# version string, then we will update __init__ in the build directory
global path, version, initVersion
if initVersion == version:
return ret
initfile = os.path.join(path, self.build_lib, 'acq4', '__init__.py')
if not os.path.isfile(initfile):
sys.stderr.write("Warning: setup detected a git install and attempted "
"to generate a descriptive version string; however, "
"the expected build file at %s was not found. "
"Installation will use the original version string "
"%s instead.\n" % (initfile, initVersion)
)
else:
data = open(initfile, 'r').read()
open(initfile, 'w').write(re.sub(r"__version__ = .*", "__version__ = '%s'" % version, data))
# If this is windows, we need to update acq4.bat to reference the correct python executable.
if sys.platform == 'win32':
runner = os.path.join(path, self.build_scripts, 'acq4.bat')
runcmd = "%s -m acq4" % sys.executable
data = open(runner, 'r').read()
open(runner, 'w').write(re.sub(r'python -m acq4', runcmd, data))
return ret
# copy config tree to system location
# if sys.platform == 'win32':
# dataRoot = os.path.join(os.environ['ProgramFiles'], 'acq4')
# elif sys.platform == 'darwin':
# dataRoot = 'Library/Application Support/acq4'
# else:
# dataRoot = '/etc/acq4'
# instead, just install config example to same path as package.
if sys.platform == 'win32':
#dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix, '')
dataRoot = 'Lib/site-packages/acq4'
else:
#dataRoot = 'python%d.%d/site-packages/acq4' % (sys.version_info.major, sys.version_info.minor)
dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix+'/', '') + '/acq4'
dataFiles = []
configRoot = os.path.join(path, 'config')
for subpath, _, files in os.walk(configRoot):
endPath = subpath[len(path):].lstrip(os.path.sep)
files = [os.path.join(endPath, f) for f in files]
dataFiles.append((os.path.join(dataRoot, endPath), files))
# print dataFiles[-1]
packageData = []
pkgRoot = os.path.join(path, 'acq4')
for subpath, _, files in os.walk(pkgRoot):
for f in files:
addTo = None
for ext in ['.png', '.cache', '.h', '.hpp', '.dll']:
if f.endswith(ext):
packageData.append(os.path.join(subpath, f)[len(pkgRoot):].lstrip(os.path.sep))
if sys.platform == 'win32':
scripts = ['bin/acq4.bat']
else:
scripts = ['bin/acq4']
setup(
version=version,
cmdclass={'build': Build},
packages=allPackages,
package_dir={},
package_data={'acq4': packageData},
data_files=dataFiles,
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering",
],
install_requires = [
'numpy',
'scipy',
'h5py',
'pillow',
],
scripts = scripts,
**setupOpts
)
|
Strangemother/python-state-machine
|
scatter/__init__.py
|
Python
|
mit
| 36
| 0
|
from root import *
version = 'v0.1'
|
Zomboided/VPN-Manager
|
managefiles.py
|
Python
|
gpl-2.0
| 11,679
| 0.007192
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module is a bunch of functions that are called from the settings
# menu to manage various files groups.
import xbmcaddon
import xbmcgui
import xbmcvfs
import datetime
import os
import sys
from libs.vpnproviders import removeGeneratedFiles, cleanPassFiles, providers, usesUserKeys, usesMultipleKeys, getUserKeys
from libs.vpnproviders import getUserCerts, getVPNDisplay, getVPNLocation, removeDownloadedFiles, isAlternative, resetAlternative
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
from libs.vpnplatform import getLogPath, getUserDataPath, writeVPNLog, copySystemdFiles, addSystemd, removeSystemd, generateVPNs
from libs.common import resetVPNConnections, isVPNConnected, disconnectVPN, suspendConfigUpdate, resumeConfigUpdate, dnsFix, getVPNRequestedProfile
from libs.common import resetVPNProvider, setAPICommand
from libs.ipinfo import resetIPServices
try:
from libs.generation import generateAll
except:
pass
action = sys.argv[1]
debugTrace("-- Entered managefiles.py with parameter " + action + " --")
if not getID() == "":
addon = xbmcaddon.Addon(getID())
addon_name = getName()
# Reset the ovpn files
if action == "ovpn":
if getVPNRequestedProfile() == "":
if xbmcgui.Dialog().yesno(addon_name, "Resetting the VPN provider will disconnect and reset all VPN connections, and then remove any files that have been created. Continue?"):
suspendConfigUpdate()
# Disconnect so that live files are not being modified
resetVPNConnections(addon)
infoTrace("managefiles.py", "Resetting the VPN provider")
# Delete the generated files, and reset the locations so it can be selected again
removeGeneratedFiles()
# Delete any values that have previously been validated
vpn_provider = getVPNLocation(addon.getSetting("vpn_provider"))
if isAlternative(vpn_provider): resetAlternative(vpn_provider)
# Reset the IP service error counts, etc
resetIPServices()
addon = xbmcaddon.Addon(getID())
resetVPNProvider(addon)
addon = xbmcaddon.Addon(getID())
resumeConfigUpdate()
xbmcgui.Dialog().ok(addon_name, "Reset the VPN provider. Validate a connection to start using a VPN again.")
else:
xbmcgui.Dialog().ok(addon_name, "Connection to VPN being attempted and has been aborted. Try again in a few seconds.")
setAPICommand("Disconnect")
# Generate the VPN provider files
if action == "generate":
# Only used during development to create location files
generateAll()
xbmcgui.Dialog().ok(addon_name, "Regenerated some or all of the VPN location files.")
# Delete all of the downloaded VPN files
if action == "downloads":
debugTrace("Deleting all downloaded VPN files")
removeDownloadedFiles()
xbmcgui.Dialog().ok(addon_name, "Deleted all of the downloaded VPN files. They'll be downloaded again if required.")
# Copy the log file
elif action == "log":
log_path = ""
dest_path = ""
try:
log_path = getLogPath()
start_dir = ""
dest_folder = xbmcgui.Dialog().browse(0, "Select folder to copy log file into", "files", "", False, False, start_dir, False)
dest_path = "kodi " + datetime.datetime.now().strftime("%y-%m-%d %H-%M-%S") + ".log"
dest_path = dest_folder + dest_path.replace(" ", "_")
# Write VPN log to log before copying
writeVPNLog()
debugTrace("Copying " + log_path + " to " + dest_path)
addon = xbmcaddon.Addon(getID())
infoTrace("managefiles.py", "Copying log file to " + dest_path + ". Using version " + addon.getSetting("version_number"))
xbmcvfs.copy(log_path, dest_path)
if not xbmcvfs.exists(dest_path): raise IOError('Failed to copy log ' + log_path + " to " + dest_path)
dialog_message = "Copied log file to: " + dest_path
except:
errorTrace("managefiles.py", "Failed to copy log from " + log_path + " to " + dest_path)
if xbmcvfs.exists(log_path):
dialog_message = "Error copying log, try copying it to a different location."
else:
            dialog_message = "Could not find the kodi.log file."
errorTrace("managefiles.py", dialog_message + " " + log_path + ", " + dest_path)
xbmcgui.Dialog().ok("Log Copy", dialog_message)
# Delete the user key and cert files
elif action == "user":
if addon.getSetting("1_vpn_validated") == "" or xbmcgui.Dialog().yesno(addon_name, "Deleting key and certificate files will disconnect and reset all VPN connections. Connections must be re-validated before use. Continue?"):
# Disconnect so that live files are not being modified
if isVPNConnected(): resetVPNConnections(addon)
# Select the provider
provider_list = []
for provider in providers:
if usesUserKeys(provider):
provider_list.append(getVPNDisplay(provider))
provider_list.sort()
index = xbmcgui.Dialog().select("Select VPN provider", provider_list)
provider_display = provider_list[index]
provider = getVPNLocation(provider_display)
# Get the key/cert pairs for that provider and offer up for deletion
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) > 0 or len(user_certs) > 0:
still_deleting = True
while still_deleting:
if len(user_keys) > 0 or len(user_certs) > 0:
# Build a list of things to display. We should always have pairs, but if
# something didn't copy or the user has messed with the dir this will cope
all_user = []
single_pair = "user [I](Same key and certificate used for all connections)[/I]"
for key in user_keys:
list_item = os.path.basename(key)
list_item = list_item.replace(".key", "")
if list_item == "user": list_item = single_pair
all_user.append(list_item)
for cert in user_certs:
list_item = os.path.basename(cert)
list_item = list_item.replace(".crt", "")
if list_item == "user": list_item = single_pair
if not list_item in all_user: all_user.append(list_item)
all_user.sort()
# Offer a delete all option if there are multiple keys
all_item = "[I]Delete all key and certificate files[/I]"
if usesMultipleKeys(provider):
all_user.append(all_item)
# Add in a finished option
|
govenius/plotbridge
|
examples/gnuplot_with_direction/expected_output/gnuplot.interactive.py
|
Python
|
gpl-2.0
| 3,554
| 0.019977
|
#!/usr/bin/python
import os
import sys
import time
import logging
import subprocess
replot_poll_period = 1
plot_script_extension = '.gnuplot'
plot_script = None
for f in os.listdir('.'):
if f.endswith(plot_script_extension):
plot_script = f
break
assert plot_script != None, 'No file ending with "%s" found in current directory.' % plot_script_extension
# Check if the plot script is already being plotted
# by another instance of the script
lock_file = plot_script + '.lock'
def refresh_lock():
with open(lock_file, 'w') as lock:
lock.write( "%.3f" % (time.time()) )
def exit_if_locked():
try:
with open(lock_file, 'r') as f:
# the lockfile contains a timestamp
if float(f.read()) > time.time() - max(3, 2*replot_poll_period):
logging.warn("It seems that the file (%s) is already being plotted. Exiting...",
plot_script)
exit()
except IOError:
return # lock doesn't exist
def print_plot_script(fname):
    with open(fname, 'r') as f:
print '---start of %s---' % fname
for i,l in enumerate(f.read().split('\n')): print '%3d %s' % (i,l)
print '---end of %s---\n' % fname
sys.stdout.flush()
exit_if_locked() # technically, this and the lock file creation should be done atomically...
try:
refresh_lock()
file_to_monitor = plot_script
    files_that_must_exist = [ plot_script ]
plotted_once = False
# Watch directory for changes and replot when necessary.
# Use simple polling of st_mtime since it works on Linux and Windows
# and the polling period is reasonably slow (~seconds).
gp = None
while not plotted_once or gp.poll() == None: # keep polling as long as gnuplot is alive
if all( os.path.exists(ff) for ff in files_that_must_exist):
if not plotted_once:
# Initial plot
plot_last_changed_time = os.path.getmtime(file_to_monitor)
tex_last_changed_time = 0
print_plot_script(plot_script)
gp = subprocess.Popen(['gnuplot', plot_script, '-'],
stdin=subprocess.PIPE, stdout=sys.stdout, stderr=sys.stderr)
plotted_once = True
print "Replotting every %g seconds (if plot script modification time changes)..." % replot_poll_period
print "Hit <ctrl> + C to exit."
else:
# Update plot if the plot script was modified
try:
plot_changed_time = os.path.getmtime(file_to_monitor)
if plot_changed_time != plot_last_changed_time and gp != None:
#logging.debug('Plot changed. Reloading plot script.')
gp.stdin.write('load "%s"\n' % plot_script)
plot_last_changed_time = plot_changed_time
# compile .tex to PDF
tex_to_watch = 'output.tex'
if os.path.isfile(tex_to_watch):
try:
tex_changed_time = os.path.getmtime(tex_to_watch)
if tex_changed_time != tex_last_changed_time:
tex_last_changed_time = tex_changed_time
with open('pdflatex.out', 'w') as log_file:
subprocess.call(['pdflatex', '-halt-on-error', 'output'], stdin=None, stdout=log_file, stderr=log_file)
except:
print 'Call to pdflatex failed. See pdflatex.out.'
except OSError:
pass # the plot script does not exist which is normal if the plot was overwritten.
time.sleep(replot_poll_period)
refresh_lock()
finally:
try: os.remove(lock_file)
except: pass
print "The plot engine has terminated. Exiting."
|
dmnfarrell/epitopemap
|
modules/pepdata/pmbec.py
|
Python
|
apache-2.0
| 2,894
| 0.001382
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from os.path import join
from .static_data import MATRIX_DIR
def read_coefficients(
key_type='row',
verbose=True,
filename=join(MATRIX_DIR, 'pmbec.mat')):
"""
Parameters
------------
filename : str
Location of PMBEC coefficient matrix
key_type : str
'row' : every key is a single amino acid,
which maps to a dictionary for that row
'pair' : every key is a tuple of amino acids
'pair_string' : every key is a string of two amino acid characters
verbose : bool
Print rows of matrix as we read them
"""
d = {}
if key_type == 'row':
def add_pair(row_letter, col_letter, value):
if row_letter not in d:
d[row_letter] = {}
d[row_letter][col_letter] = value
elif key_type == 'pair':
def add_pair(row_letter, col_letter, value):
d[(row_letter, col_letter)] = value
else:
assert key_type == 'pair_string', \
"Invalid dictionary key type: %s" % key_type
def add_pair(row_letter, col_letter, value):
d["%s%s" % (row_letter, col_le
|
tter)] = value
with open(
|
filename, 'r') as f:
lines = [line for line in f.read().split('\n') if len(line) > 0]
header = lines[0]
if verbose:
print(header)
residues = [x for x in header.split(' ') if len(x) == 1 and x != ' ' and x != '\t']
assert len(residues) == 20
if verbose:
print(residues)
for line in lines[1:]:
cols = [
x
for x in line.split(' ')
if len(x) > 0 and x != ' ' and x != '\t'
]
assert len(cols) == 21, "Expected 20 values + letter, got %s" % cols
row_letter = cols[0]
for i, col in enumerate(cols[1:]):
col_letter = residues[i]
assert col_letter != ' ' and col_letter != '\t'
value = float(col)
add_pair(row_letter, col_letter, value)
return d
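# An illustrative sketch of the three key layouts (the amino-acid pair 'A'/'C'
# is a hypothetical example; the bundled pmbec.mat must exist under MATRIX_DIR):
#
#   d = read_coefficients(key_type='row', verbose=False)
#   value = d['A']['C']            # nested dict: row letter, then column
#
#   d = read_coefficients(key_type='pair', verbose=False)
#   value = d[('A', 'C')]          # tuple of amino acids as the key
#
#   d = read_coefficients(key_type='pair_string', verbose=False)
#   value = d['AC']                # two-character string as the key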
if __name__ == '__main__':
d = read_coefficients(key_type='pair_string')
print("PMBEC matrix")
for k in sorted(d):
print(k, d[k])
|
openstack/designate
|
designate/tests/unit/agent/backends/test_knot2.py
|
Python
|
apache-2.0
| 7,785
| 0
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import call
from oslo_concurrency import processutils
from designate.backend.agent_backend import impl_knot2
from designate import exceptions
import designate.tests
from designate.tests.unit.agent import backends
class Knot2AgentBackendTestCase(designate.tests.TestCase):
def setUp(self):
super(Knot2AgentBackendTestCase, self).setUp()
self.backend = impl_knot2.Knot2Backend('foo')
self.backend._execute_knotc = mock.Mock()
def test_start_backend(self):
self.backend.start()
def test_stop_backend(self):
self.backend.stop()
def test_create_zone(self):
zone = backends.create_dnspy_zone('example.org')
self.backend.create_zone(zone)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-set', 'zone[example.org]',
expected_error='duplicate identifier'),
call('conf-commit'),
call('zone-refresh', 'example.org')
])
def test_create_zone_already_there(self):
self.backend._execute_knotc.return_value = 'duplicate identifier'
zone = backends.create_dnspy_zone('example.org')
self.backend.create_zone(zone)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-set', 'zone[example.org]',
expected_error='duplicate identifier'),
call('conf-commit'),
call('zone-refresh', 'example.org')
])
def test_start_minidns_to_knot_axfr(self):
self.backend._start_minidns_to_knot_axfr('foo')
self.backend._execute_knotc.assert_called_with('zone-refresh', 'foo')
@mock.patch('oslo_concurrency.lockutils.lock')
def test_modify_zone(self, mock_lock):
self.backend._modify_zone('blah', 'bar')
self.assertEqual(3, self.backend._execute_knotc.call_count)
self.backend._execute_knotc.assert_called_with('conf-commit')
@mock.patch('oslo_concurrency.lockutils.lock')
def test_modify_zone_exception(self, mock_lock):
# Raise an exception during the second call to _execute_knotc
self.backend._execute_knotc.side_effect = [None, exceptions.Backend,
None]
self.assertRaises(
exceptions.Backend,
self.backend._modify_zone, 'blah', 'bar'
)
self.assertEqual(3, self.backend._execute_knotc.call_count)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('blah', 'bar'),
call('conf-abort'),
])
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial(self, mock_execute):
result = (
'[example.com.] type: slave | serial: 20 | next-event: idle | '
'auto-dnssec: disabled]'
)
mock_execute.return_value = result, ''
serial = self.backend.find_zone_serial('example.com')
self.assertEqual(20, serial)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_zone_not_found(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError(
'error: [example.com.] (no such zone found)'
)
serial = self.backend.find_zone_serial('example.com')
self.assertIsNone(serial)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_unexpected_output(self, mock_execute):
mock_execute.return_value = 'bogus output', ''
self.assertRaises(
exceptions.Backend,
self.backend.find_zone_serial, 'example.com'
)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_error(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError('blah')
self.assertRaises(
exceptions.Backend,
self.backend.find_zone_serial, 'example.com'
)
def test_update_zone(self):
zone = backends.create_dnspy_zone('example.org')
self.backend.update_zone(zone)
self.backend._execute_knotc.assert_called_once_with(
'zone-refresh', 'example.org'
)
def test_delete_zone(self):
self.backend.delete_zone('example.org')
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-unset', 'zone[example.org]',
expected_error='invalid identifier'),
call('conf-commit'),
])
def test_delete_zone_already_gone(self):
self.backend._execute_knotc.return_value = 'duplicate identifier'
self.backend.delete_zone('example.org')
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-unset', 'zone[example.org]',
expected_error='invalid identifier'),
call('conf-commit'),
])
class Knot2AgentExecuteTestCase(designate.tests.TestCase):
def setUp(self):
super(Knot2AgentExecuteTestCase, self).setUp()
self.backend = impl_knot2.Knot2Backend('foo')
def test_init(self):
self.assertEqual('knotc', self.backend._knotc_cmd_name)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_ok(self, mock_execute):
mock_execute.return_value = ('OK', '')
self.backend._execute_knotc('a1', 'a2')
mock_execute.assert_called_with('knotc', 'a1', 'a2')
self.assertEqual(1, mock_execute.call_count)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_expected_error(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.backend._execute_knotc('a1', 'a2', expected_error='xyz')
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_expected_output(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.backend._execute_knotc('a1', 'a2', expected_output='xyz')
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_with_error(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.assertRaises(
exceptions.Backend,
self.backend._execute_knotc, 'a1', 'a2'
)
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_raising_exception(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError
self.assertRaises(
exceptions.Backend,
self.backend._execute_knotc, 'a1', 'a2'
)
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
|
Heathckliff/SU2
|
SU2_PY/SU2/io/config.py
|
Python
|
lgpl-2.1
| 30,190
| 0.016827
|
#!/usr/bin/env python
## \file config.py
# \brief python package for config
# \author T. Lukaczyk, F. Palacios
# \version 4.0.1 "Cardinal"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
import numpy as np
from ..util import bunch, ordered_bunch, switch
from .tools import *
from config_options import *
try:
from collections import OrderedDict
except ImportError:
from ..util.ordered_dict import OrderedDict
inf = 1.0e20
# ----------------------------------------------------------------------
# Configuration Class
# ----------------------------------------------------------------------
class Config(ordered_bunch):
""" config = SU2.io.Config(filename="")
Starts a config class, an extension of
ordered_bunch()
use 1: initialize by reading config file
config = SU2.io.Config('filename')
use 2: initialize from dictionary or bunch
config = SU2.io.Config(param_dict)
use 3: initialize empty
config = SU2.io.Config()
Parameters can be accessed by item or attribute
ie: config['MESH_FILENAME'] or config.MESH_FILENAME
Methods:
read() - read from a config file
write() - write to a config file (requires existing file)
dump() - dump a raw config file
unpack_dvs() - unpack a design vector
diff() - returns the difference from another config
dist() - computes the distance from another config
"""
_filename = 'config.cfg'
def __init__(self,*args,**kwarg):
# look for filename in inputs
if args and isinstance(args[0],str):
filename = args[0]
args = args[1:]
elif kwarg.has_key('filename'):
filename = kwarg['filename']
del kwarg['filename']
else:
filename = ''
# initialize ordered bunch
super(Config,self).__init__(*args,**kwarg)
# read config if it exists
if filename:
try:
self.read(filename)
except IOError:
print 'Could not find config file: %s' % filename
except:
print 'Unexpected error: ',sys.exc_info()[0]
raise
self._filename = filename
def read(self,filename):
""" reads from a config file """
konfig = read_config(filename)
self.update(konfig)
def write(self,filename=''):
""" updates an existing config file """
if not filename: filename = self._filename
assert os.path.exists(filename) , 'must write over an existing config file'
write_config(filename,self)
def dump(self,filename=''):
""" dumps all items in the config bunch, without comments """
if not filename: filename = self._filename
dump_config(filename,self)
def __getattr__(self,k):
try:
return super(Config,self).__getattr__(k)
except AttributeError:
raise AttributeError , 'Config parameter not found'
def __getitem__(self,k):
try:
return super(Config,self).__getitem__(k)
except KeyError:
raise KeyError , 'Config parameter not found: %s' % k
def unpack_dvs(self,dv_new,dv_old=None):
""" updates config with design variable vectors
will scale according to each DEFINITION_DV scale parameter
Modifies:
DV_KIND
DV_MARKER
DV_PARAM
DV_VALUE_OLD
DV_VALUE_NEW
Inputs:
dv_new - list or array of new dv values
dv_old - optional, list or array of old dv values, defaults to zeros
"""
dv_new = copy.deepcopy(dv_new)
dv_old = copy.deepcopy(dv_old)
# handle unpacking cases
def_dv = self['DEFINITION_DV']
n_dv = len(def_dv['KIND'])
if not dv_old: dv_old = [0.0]*n_dv
assert len(dv_new) == len(dv_old) , 'unexpected design vector length'
# handle param
param_dv = self['DV_PARAM']
# apply scale
dv_scales = def_dv['SCALE']
dv_new = [ dv_new[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
dv_old = [ dv_old[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
# Change the parameters of the design variables
self['DV_KIND'] = def_dv['KIND']
param_dv['PARAM'] = def_dv['PARAM']
param_dv['FFDTAG'] = def_dv['FFDTAG']
self.update({ 'DV_MARKER' : def_dv['MARKER'][0] ,
'DV_VALUE_OLD' : dv_old ,
'DV_VALUE_NEW' : dv_new })
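    # A worked sketch of the scaling above, with hypothetical numbers: if
    # DEFINITION_DV['SCALE'] is [0.5, 2.0], then
    #
    #   config.unpack_dvs([1.0, 1.0])
    #
    # writes DV_VALUE_NEW = [0.5, 2.0] and DV_VALUE_OLD = [0.0, 0.0]
    # (the default old values, scaled the same way).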
def __eq__(self,konfig):
return super(Config,self).__eq__(konfig)
def __ne__(self,konfig):
return super(Config,self).__ne__(konfig)
def local_files(self):
""" removes path prefix from all *_FILENAME params
"""
for key,value in self.iteritems():
if key.split('_')[-1] == 'FILENAME':
self[key] = os.path.basename(value)
def diff(self,konfig):
""" compares self to another config
Inputs:
konfig - a second config
Outputs:
config_diff - a config containing only the differing
keys, each with values of a list of the different
config values.
for example:
config_diff.MATH_PROBLEM = ['DIRECT','CONTINUOUS_ADJOINT']
"""
keys = set([])
keys.update( self.keys() )
keys.update( konfig.keys() )
konfig_diff = Config()
for key in keys:
value1 = self.get(key,None)
value2 = konfig.get(key,None)
if not value1 == value2:
konfig_diff[key] = [value1,value2]
return konfig_diff
def dist(self,konfig,keys_check='ALL'):
""" calculates a distance to another config
Inputs:
konfig - a second config
keys_check - optional, a list of keys to check
Outputs:
distance - a float
Currently only works for DV_VALUE_NEW and DV_VALUE_OLD
Returns a large value otherwise
"""
konfig_diff = self.diff(konfig)
if keys_check == 'ALL':
keys_check = ko
jtrobec/pants | tests/python/pants_test/bin/test_goal_runner.py | Python | apache-2.0 | 756 | 0.005291
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pkg_resources import WorkingSet
from pants.base.exceptions import BuildConfigurationError
from pants.bin.goal_runner import OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
def test_invalid_version():
  options_bootstrapper = OptionsBootstrapper(args=['--pants-version=99.99.9999'])
  with pytest.raises(BuildConfigurationError):
    OptionsInitializer(options_bootstrapper, WorkingSet()).setup()
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractTokyoESPScans.py | Python | bsd-3-clause | 230 | 0.030435
def extractTokyoESPScans(item):
"""
Tokyo ESP Scans
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
skevy/django | tests/modeltests/files/models.py | Python | bsd-3-clause | 1,085 | 0.001843
"""
42. Storing files according to a custom storage system
``FileField`` and its variations can take a ``storage`` argument to specify how
and where files should be stored.
"""
import random
import tempfile
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
class Storage(models.Model):
def custom_upload_to(self, filename):
return 'foo'
def random_upload_to(self, filename):
# This returns a different result each time,
        # to make sure it only gets called once.
return '%s/%s' % (random.randint(100, 999), filename)
normal = models.FileField(storage=temp_storage, upload_to='tests')
custom = models.FileField(storage=temp_storage, upload_to=custom_upload_to)
random = models.FileField(storage=temp_storage, upload_to=random_upload_to)
    default = models.FileField(storage=temp_storage, upload_to='tests', default='tests/default.txt')
ssebastianj/pywebtasks | pywebtasks/__init__.py | Python | mit | 546 | 0
# -*- coding: utf-8 -*-
__title__ = 'pywebtask'
__version__ = '0.1.8'
__build__ = 0x000108
__author__ = 'Sebastián José Seba'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Sebastián José Seba'
from .webtasks import run, run_file
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
jb-old/chii | quoth/uwsgi_app.py | Python | unlicense | 148 | 0.006757
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
christophercrouzet/hienoi | hienoi/gui.py | Python | mit | 20,220 | 0
"""Graphical user interface."""
import collections
import ctypes
import sdl2
import hienoi.renderer
from hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData
from hienoi._vectors import Vector2i, Vector2f, Vector4f
class NavigationAction(object):
"""Enumerator for the current nagivation action.
Attributes
----------
NONE
MOVE
ZOOM
"""
NONE = 0
MOVE = 1
ZOOM = 2
_Handles = collections.namedtuple(
'_Handles', (
'window',
'renderer',
))
_GLHandles = collections.namedtuple(
'_GLHandles', (
'context',
))
_RGBMasks = collections.namedtuple(
'_RGBMasks', (
'red',
'green',
'blue',
))
_FIT_VIEW_REL_PADDING = 2.0
if sdl2.SDL_BYTEORDER == sdl2.SDL_LIL_ENDIAN:
_RGB_MASKS = _RGBMasks(red=0x000000FF, green=0x0000FF00, blue=0x00FF0000)
else:
_RGB_MASKS = _RGBMasks(red=0x00FF0000, green=0x0000FF00, blue=0x000000FF)
class GUI(object):
"""GUI.
Parameters
----------
window_title : str
Title for the window.
window_position : hienoi.Vector2i
Initial window position.
window_size : hienoi.Vector2i
Initial window size.
window_flags : int
SDL2 window flags.
view_aperture_x : float
Initial length in world units to be shown on the X axis.
view_zoom_range : hienoi.Vector2f
Zoom value range for the view.
mouse_wheel_step : float
Coefficient value for each mouse wheel step.
grid_density : float
See :attr:`GUI.grid_density`.
grid_adaptive_threshold : float
See :attr:`GUI.grid_adaptive_threshold`.
show_grid : bool
See :attr:`GUI.show_grid`.
background_color : hienoi.Vector4f
See :attr:`GUI.background_color`.
grid_color : hienoi.Vector4f
See :attr:`GUI.grid_color`.
grid_origin_color : hienoi.Vector4f
See :attr:`GUI.grid_origin_color`.
particle_display : int
See :attr:`GUI.particle_display`.
point_size : int
See :attr:`GUI.point_size`.
edge_feather : float
See :attr:`GUI.edge_feather`.
stroke_width : float
See :attr:`GUI.stroke_width`.
initialize_callback : function
Callback function to initialize any GUI state.
It takes a single argument ``gui``, an instance of this class.
on_event_callback : function
Callback function ran during the event polling.
It takes 3 arguments: ``gui``, an instance of this class,
``data``, some data to pass back and forth between the caller and this
callback function, and ``event``, the event fired.
renderer : dict
Keyword arguments for the configuration of the renderer. See the
parameters for the class :class:`hienoi.renderer.Renderer`.
Attributes
----------
view_position : hienoi.Vector2f
Position of the view (camera).
view_zoom : float
Current zoom value for the view.
grid_density : float
Density of the grid.
A density of 10.0 means that there are around 10 grid divisions
displayed on the X axis. A grid division unit represents a fixed length
in world units, meaning that the actual grid density changes depending
on the view's zoom.
show_grid : bool
True to show the grid.
background_color : hienoi.Vector4f
Color for the background.
grid_color : hienoi.Vector4f
Color for the grid.
grid_origin_color : hienoi.Vector4f
Color for the origin axis of the grid.
particle_display : int
Display mode for the particles. Available values are enumerated in the
:class:`~hienoi.ParticleDisplay` class.
point_size : int
Size of the particles in pixels when the display mode is set to
:attr:`~hienoi.ParticleDisplay.POINT`.
edge_feather : float
Feather fall-off in pixels to apply to objects drawn with displays such
as :attr:`~hienoi.ParticleDisplay.CIRCLE` or
:attr:`~hienoi.ParticleDisplay.DISC`.
stroke_width : float
Width of the stroke in pixels to apply to objects drawn with displays
such as :attr:`~hienoi.ParticleDisplay.CIRCLE`.
quit : bool
``True`` to signal to the application that it should quit.
has_view_changed : bool
``True`` if the view state has just been changed following an event. It
is reset to ``False`` whenever :meth:`poll_events` is called.
user_data : object
Attribute reserved for any user data.
"""
def __init__(self,
window_title='hienoi',
window_position=Vector2i(sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED),
window_size=Vector2i(800, 600),
window_flags=sdl2.SDL_WINDOW_RESIZABLE,
view_aperture_x=100.0,
view_zoom_range=Vector2f(1e-6, 1e+6),
mouse_wheel_step=0.01,
grid_density=10.0,
grid_adaptive_threshold=3.0,
show_grid=True,
background_color=Vector4f(0.15, 0.15, 0.15, 1.0),
grid_color=Vector4f(0.85, 0.85, 0.85, 0.05),
grid_origin_color=Vector4f(0.85, 0.25, 0.25, 0.25),
particle_display=ParticleDisplay.DISC,
point_size=4,
edge_feather=2.0,
stroke_width=0.0,
initialize_callback=None,
on_event_callback=None,
renderer=None):
renderer = {} if renderer is None else renderer
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
raise RuntimeError(sdl2.SDL_GetError().decode())
renderer_info = hienoi.renderer.get_info()
if renderer_info.api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
renderer_info.major_version)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
renderer_info.minor_version)
if renderer_info.profile == GLProfile.CORE:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
self._handles = _create_handles(window_title, window_position,
window_size, window_flags,
renderer_info)
self._renderer = hienoi.renderer.Renderer(**renderer)
self._initial_view_aperture_x = view_aperture_x
self._view_zoom_range = view_zoom_range
self._mouse_wheel_step = mouse_wheel_step
self._grid_adaptive_threshold = grid_adaptive_threshold
self._on_event_callback = on_event_callback
self._listen_for_navigation = False
self._is_view_manipulated = False
self.view_position = Vector2f(0.0, 0.0)
self._view_zoom = 1.0
self.grid_density = grid_density
self.show_grid = show_grid
self.background_color = background_color
self.grid_color = grid_color
self.grid_origin_color = grid_origin_color
self.particle_display = particle_display
self.point_size = point_size
self.edge_feather = edge_feather
self.stroke_width = stroke_width
self._navigation_action = NavigationAction.NONE
self.quit = False
self.user_data = UserData()
if initialize_callback:
initialize_callback(self)
@property
def view_zoom(self):
return self._view_zoom
@view_zoom.setter
def view_zoom(self, value):
self._view_zoom = max(self._view_zoom_range[0],
min(self._view_zoom_range[1], value))
@property
def navigation_action(self):
return self._navigation_action
@property
def has_view_changed(self):
return self._has_view_changed
def poll_events(self, scene_state, data=None):
"""Process each event in the queue.
Parameters
--------
hcosta/escueladevideojuegos.net-backend-django | edv/reddit/migrations/0005_auto_20170520_2005.py | Python | gpl-3.0 | 788 | 0.001271
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-20 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0004_auto_20170520_1931'),
]
operations = [
migrations.AddField(
model_name='question',
name='announce',
field=models.BooleanField(default=False, verbose_name='Anuncio'),
),
migrations.AddField(
model_name='question',
            name='label',
            field=models.IntegerField(choices=[(0, ''), (1, 'Ayuda!'), (2, 'Resuelta'), (3, 'Discusión'), (4, 'Tutorial'), (5, 'Ejemplo'), (6, 'Recurso'), (7, 'Juego')], default=0, verbose_name='Etiqueta'),
),
]
ChengchenZhao/DrSeq2 | ceas_lib/annotator.py | Python | gpl-3.0 | 75,107 | 0.022807
"""Module Description
Copyright (c) 2008 H. Gene Shin <shin@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: H. Gene Shin
@contact: shin@jimmy.harvard.edu
"""
# ------------------------------------
# Python modules
# ------------------------------------
import sys,time,re,operator,copy,sqlite3,warnings
import itertools
from array import *
import bisect
# ------------------------------------
# My own Python modules
# ------------------------------------
from CEAS.inout import *
from CEAS.tables import *
from CEAS.sampler import *
from CEAS.corelib import *
import CEAS.Prob as Prob
#-------------------------------------
# classes
#-------------------------------------
class Annotator:
"""Annotator Class
This class annotates a list of genome coordinates and gives a summary of annotation.
1. Annotater.annotate() annotates a list of genome coordinates (locations) based on a gene annotation table (e.g., refSeq provided by UCSC).
2. Annotatoer.summarize() summarizes the annotation in a table (See tables.py)
"""
def __init__(self):
"""Constructor"""
pass
    def annotate(self,genome_coordinates=None,gene_table=None,roi=None,prom=(1000, 2000, 3000),biprom=(2500, 5000), down=(1000, 2000, 3000), gene_div=(3,5),quantize=True):
"""Annotate given coordinates based on the given gene table."""
# get the chromsomes of the gene table and genome coordinates
try:
chroms_gc=genome_coordinates.keys()
chroms_gt=gene_table.get_chroms()
chroms_gt,chroms_gc=set(chroms_gt),set(chroms_gc)
chroms=chroms_gt.intersection(chroms_gc)
            chroms=list(chroms)
chroms.sort()
num_coordinates={}
num_genes={}
for chrom in chroms:
num_coordinates[chrom]=len(genome_coordinates[chrom])
num_genes[chrom]=len(gene_table[chrom][gene_table[chrom].keys()[0]])
except AttributeError:
raise Exception('Genome coordinates and gene table must be given for genome annotation')
#initialize with an empty dictionary
table=AnnotTable()
#iterate over the chromosomes
for chrom in chroms:
genes=gene_table[chrom]
num_genes_this_chr=num_genes[chrom]
coordinates=genome_coordinates[chrom]
num_coordinates_this_chr=num_coordinates[chrom]
table.init_table(chrom)
#initialize the promoter distances. This will be used in obtaining bidirectional promoter too.
prom_dists=[[0,0] for i in xrange(num_coordinates_this_chr)]
# get the nearest genes to set the searching range for promoter and downstream
nearest_genes=self.find_nearest_genes(genes)
# point begin
pointerBeg=0
maxprom = max(prom[-1], biprom[-1])
maxdown = down[-1]
for i in xrange(0,num_genes_this_chr):
# get the strand of the gene
try:
strand=genes['strand'][i]
except KeyError:
raise Exception("'strand' must be included in the gene annotation table for running CEAS")
# get the beginning and end point of search
# the beginning and end points are the end of the previous gene and the beginning of the next gene.
beg,end=0,0
try:
if strand=='+':
beg=max(genes['txStart'][i]-maxprom, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxdown, nearest_genes['after'][i])
else:
beg=max(genes['txStart'][i]-maxdown, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxprom, nearest_genes['after'][i])
except KeyError: # check the gene annotation table has necessary columns
raise Exception("'txStart' and 'txEnd' must be included in the gene annotation table for running CEAS")
### test block-out ###
# set search index j to the begining point of the last gene. This makes sure that we include isoforms
j=pointerBeg
###
### test block-in ###
#j = bisect.bisect_left(coordinates, beg)
###
if coordinates[j]>end: continue
### test block-out ###
# two while loops to detect the annotation start coordinate for the current gene.
while j>0 and coordinates[j]>=beg:
j-=1
while j<num_coordinates_this_chr and coordinates[j]<beg:
if j>=table.size(chrom)[0]:
table.add_row(chrom,[coordinates[j]]+[0]*table.get_column_num())
j+=1
###
# if get to the end of chromosome, then break
if j==num_coordinates_this_chr: break
### test block-out
# save the current start point for the next gene
pointerBeg=j
###
# otherwise, get the annotations of the probes related with the current gene
while j<num_coordinates_this_chr and (coordinates[j]>=beg and coordinates[j]<=end):
# get the annotation and update the entire annotation table
single_annot=self.annotate_single(coordinates[j],strand,genes['txStart'][i],genes['txEnd'][i],\
genes['cdsStart'][i],genes['cdsEnd'][i],genes['exonStarts'][i],genes['exonEnds'][i],prom,down,gene_div,biprom)
self.update_annot_table(table,single_annot,coordinates,prom_dists,chrom,j,biprom)
j+=1
# quantize promoter, bipromoter and downstream
if quantize:
table[chrom]['promoter']=ordinate2(table[chrom]['promoter'],prom)
table[chrom]['bipromoter']=ordinate2(table[chrom]['bipromoter'],biprom)
table[chrom]['downstream']=ordinate2(table[chrom]['downstream'],down)
if roi:
roichroms = roi.get_chroms()
for chrom in chroms:
table[chrom]['roi']=[0]*len(genome_coordinates[chrom])
if chrom in roichroms:
self.do_annotation_roi(genome_coordinates[chrom], table[chrom], roi[chrom])
return table
def annotate_single(self,coordinate,strand,txStart,txEnd,cdsStart,cdsEnd,exonStarts,exonEnds,prom,down,gene_div,biprom):
"""Annotate a single genome coordinate
Parameters:
1. coordinate: a list of genome locations to annotate
2. strand: the strand (+/-) of the gene
3. txStart: transcription start
4. txEnd: transcription end
5. cdsStart: translation start
6. cdsEnd: translation end
7. exonStarts: exon start locations
8. exonEnds: exon end locations
9. prom: promoter lengths (e.g., (1000, 2000, 3000))
10. down: downstream lengths (e.g., (1000, 2000, 3000))
11. gene: the number of divisions of a gene (eg, (3,5))
"""
# container of the annotation for a single location.
single_annot=None
maxprom = max(prom[-1], biprom[-1])
maxdown = down[-1]
# get the annotation for the location
# + strand
if strand=='+':
# promoter
if coordinate<txStart and coordinate>=txStart-maxprom: # txStart-promo <= prob < txStart
single_annot=['promoter',txStart-coordinate]
# downstre
smdabdoub/phylotoast | bin/PCoA.py | Python | mit | 10,870 | 0.004416
#!/usr/bin/env python
import argparse
from collections import OrderedDict
import itertools
import sys
from phylotoast import util, graph_util as gu
errors = []
try:
from palettable.colorbrewer.qualitative import Set3_12
except ImportError as ie:
errors.append("No module named palettable")
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
except ImportError as ie:
errors.append(ie)
if len(errors) != 0:
for item in errors:
print("Import Error:", item)
sys.exit()
def handle_program_options():
parser = argparse.ArgumentParser(description="Create a 2D or 3D PCoA plot. By default"
", this script opens a window with the plot "
"displayed if you want to change certain aspects of "
"the plot (such as rotate the view in 3D mode). If "
"the -o option is specified, the plot will be saved "
"directly to an image without the initial display "
"window.")
parser.add_argument("-i", "--coord_fp", required=True,
help="Input principal coordinates filepath (i.e. resulting file "
"from principal_coordinates.py) [REQUIRED].")
parser.add_argument("-m", "--map_fp", required=True,
help="Input metadata mapping filepath [REQUIRED].")
    parser.add_argument("-g", "--group_by", required=True,
                        help="Any mapping categories, such as treatment type, that will "
                             "be used to group the data in the output iTol table. For example,"
" one category with three types will result in three data columns"
" in the final output. Two categories with three types each will "
"result in six data columns. Default is no categories and all the"
" data will be treated as a single group.")
parser.add_argument("-d", "--dimensions", default=2, type=int, choices=[2, 3],
help="Choose whether to plot 2D or 3D.")
parser.add_argument("-c", "--colors", default=None,
help="A column name in the mapping file containing hexadecimal "
"(#FF0000) color values that will be used to color the groups. "
"Each sample ID must have a color entry.")
parser.add_argument("-s", "--point_size", default=100, type=int,
help="Specify the size of the circles representing each of the "
"samples in the plot")
parser.add_argument("--pc_order", default=[1, 2], type=int, nargs=2,
help="Choose which Principle Coordinates are displayed and in "
"which order, for example: 1 2. This option is only used when a "
"2D plot is specified.")
parser.add_argument("--x_limits", type=float, nargs=2,
help="Specify limits for the x-axis instead of automatic setting "
"based on the data range. Should take the form: --x_limits -0.5 "
"0.5")
parser.add_argument("--y_limits", type=float, nargs=2,
help="Specify limits for the y-axis instead of automatic setting "
"based on the data range. Should take the form: --y_limits -0.5 "
"0.5")
parser.add_argument("--z_limits", type=float, nargs=2,
help="Specify limits for the z-axis instead of automatic setting "
"based on the data range. Should take the form: --z_limits -0.5 "
"0.5")
parser.add_argument("--z_angles", type=float, nargs=2, default=[-134.5, 23.],
help="Specify the azimuth and elevation angles for a 3D plot.")
parser.add_argument("-t", "--title", default="", help="Title of the plot.")
parser.add_argument("--figsize", default=[14, 8], type=int, nargs=2,
help="Specify the 'width height' in inches for PCoA plots. "
"Default figure size is 14x8 inches")
parser.add_argument("--font_size", default=12, type=int,
help="Sets the font size for text elements in the plot.")
parser.add_argument("--label_padding", default=15, type=int,
help="Sets the spacing in points between the each axis and its "
"label.")
parser.add_argument("--legend_loc", default="best", choices=['best','upper right','upper left',
'lower left', 'lower right', 'right', 'center left', 'center right',
'lower center', 'upper center', 'center', 'outside', 'none'],
help="Sets the location of the Legend. Default: best.")
parser.add_argument("--annotate_points", action="store_true",
help="If specified, each graphed point will be labeled with its "
"sample ID.")
parser.add_argument("--ggplot2_style", action="store_true",
help="Apply ggplot2 styling to the figure.")
parser.add_argument("-o", "--out_fp", default=None,
help="The path and file name to save the plot under. If specified"
", the figure will be saved directly instead of opening a window "
"in which the plot can be viewed before saving.")
return parser.parse_args()
def main():
args = handle_program_options()
try:
with open(args.coord_fp):
pass
except IOError as ioe:
err_msg = "\nError in input principal coordinates filepath (-i): {}\n"
sys.exit(err_msg.format(ioe))
try:
with open(args.map_fp):
pass
except IOError as ioe:
err_msg = "\nError in input metadata mapping filepath (-m): {}\n"
sys.exit(err_msg.format(ioe))
with open(args.coord_fp) as F:
pcd = F.readlines()
pcd = [line.split("\t") for line in pcd]
map_header, imap = util.parse_map_file(args.map_fp)
data_gather = util.gather_categories(imap, map_header,
args.group_by.split(","))
categories = OrderedDict([(condition, {"pc1": [], "pc2": [], "pc3": []})
for condition in data_gather.keys()])
bcolors = itertools.cycle(Set3_12.hex_colors)
if not args.colors:
colors = [bcolors.next() for _ in categories]
else:
colors = util.color_mapping(imap, map_header,
args.group_by, args.colors)
colors = colors.values()
parsed_unifrac = util.parse_unifrac(args.coord_fp)
pco = args.pc_order
if args.dimensions == 3:
pco.append(3)
pc1v = parsed_unifrac["varexp"][pco[0] - 1]
pc2v = parsed_unifrac["varexp"][pco[1] - 1]
if args.dimensions == 3:
pc3v = parsed_unifrac["varexp"][pco[2] - 1]
for sid, points in parsed_unifrac["pcd"].items():
for condition, dc in data_gather.items():
if sid in dc.sids:
cat = condition
break
categories[cat]["pc1"].append((sid, points[pco[0] - 1]))
categories[cat]["pc2"].append((sid, points[pco[1] - 1]))
if args.dimensions == 3:
categories[cat]["pc3"].append((sid, points[pco[2] - 1]))
axis_str = "PC{} (Percent Explained Variance {:.3f}%)"
# initialize plot
fig = plt.figure(figsize=args.figsize)
if args.dimensions == 3:
ax = fig.add_subplot(111, projection="3d")
ax.view_init(elev=args.z_angles[1], azim=args.z_angles[0])
ax.set_zlabel(axis_str.format(3, pc3v), labelpad=args.label_padding)
if args.z_limits:
ax.set_zlim(args.z_limits)
else:
ax = fig.add_subplot(111)
# plot data
for i, cat in enumerate(categories):
if args.dimensions == 3:
jellybean4/yosaipy2 | yosaipy2/core/event/event_bus.py | Python | apache-2.0 | 1,390 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from blinker import signal, Namespace, NamedSignal
from yosaipy2.core.event.abcs import EventBus
from typing import Dict
from functools import wraps
class BlinkerEventBus(EventBus):
def __init__(self):
# type: (str) -> None
self.AUTO_TOPIC = "blinker_eventbus_auto_topic"
self._signals = {} # type: Dict[NamedSignal]
def send_message(self, topic_name, **kwargs):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
        sig.send(None, **kwargs)
def subscribe(self, func, topic_name):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
callback = self._adapter(func, topic_name)
sig.connect(callback)
def unsubscribe(self, listener, topic_name):
pass
@staticmethod
def _adapter(func, topic_name):
@wraps(func)
def callback(sender, **kwargs):
func(topic=topic_name, **kwargs)
return callback
def isSubscribed(self, listener, topic_name):
if topic_name not in self._signals:
return False
return True
event_bus = BlinkerEventBus()
lantius/ndb-key | repro.py | Python | mit | 1,545 | 0.005825
import webapp2
from google.appengine.ext import db
from google.appengine.ext import ndb
from db_class import DerivedClass as OldDerivedClass
from ndb_class import BaseClass as NewBaseClass
from ndb_class import DerivedClass as NewDerivedClass
class Repro(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
# Create a derived object using google.appengine.ext.db
obj = OldDerivedClass(name='foo', version='bar')
db_key = obj.put()
self.response.write('%s, %d\n' % (db_key.kind(), db_key.id()))
# Attempt to load using the converted key
ndb_key = ndb.Key.from_old_key(db_key)
try:
ndb_key.get()
except ndb.KindError:
self.response.write('failed (KindError): %s\n' % str(ndb_key))
# Attempt to create a new key using the ndb derived class
        derived_key = ndb.Key(NewDerivedClass, ndb_key.id())
obj = derived_key.get()
if not obj:
self.response.write('failed (None): %s\n' % str(derived_key))
        # Attempt to create a new key using the ndb base class
base_key = ndb.Key(NewBaseClass, ndb_key.id())
obj = derived_key.get()
if not obj:
self.response.write('failed (None): %s\n' % str(base_key))
# Manually create a new key using the ndb derived class name
force_key = ndb.Key('DerivedClass', ndb_key.id())
try:
force_key.get()
except ndb.KindError:
self.response.write('failed (KindError): %s\n' % str(force_key))
application = webapp2.WSGIApplication([('/', Repro)], debug=True)
nii-cloud/dodai-compute | nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py | Python | apache-2.0 | 2,067 | 0
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
fixed_ips = Table(
"fixed_ips",
meta,
Column(
"id",
Integer(),
primary_key=True,
nullable=False))
#
# New Tables
#
# None
#
# Tables to alter
#
# None
#
# Columns to add to existing tables
#
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
# Add columns to existing tables
fixed_ips.create_column(fixed_ips_addressV6)
fixed_ips.create_column(fixed_ips_netmaskV6)
fixed_ips.create_column(fixed_ips_gatewayV6)
valery-barysok/skyscanner-python-sdk | docs/conf.py | Python | apache-2.0 | 8,466 | 0.005315
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# skyscanner documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import skyscanner
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Skyscanner Python SDK'
copyright = u'2015, Ardy Dedase'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = skyscanner.__version__
# The full version, including alpha/beta/rc tags.
release = skyscanner.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skyscannerdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'skyscanner.tex',
u'Skyscanner Python SDK Documentation',
u'Ardy Dedase', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'skyscanner',
u'Skyscanner Python SDK Documentation',
[u'Ardy Dedase'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'skyscanner',
u'Skyscanner Python SDK Documentation',
u'Ardy Dedase',
'skyscanner',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo
opentechinstitute/commotion-router-test-suite | tests/__init__.py | Python | agpl-3.0 | 69 | 0
""" Initializat
|
ion code related to Commotion Rout
|
er UI unit tests"""
stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_rack_rack.py | Python | apache-2.0 | 1,055 | 0
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show rack --rack`."""
from aquilon.aqdb.model import Rack
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
class CommandShowRackRack(BrokerCommand):
    required_parameters = ["rack"]
    def render(self, session, rack, **arguments):
return Rack.get_unique(session, rack, compel=True)
tidalf/plugin.audio.qobuz | resources/lib/qobuz/extension/kooli/kooli/script/kooli-xbmc-service.py | Python | gpl-3.0 | 4,243 | 0.002593
'''
qobuz.extension.kooli.script.kooli-xbmc-service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:part_of: kodi-qobuz
:copyright: (c) 2012-2018 by Joachim Basmaison, Cyril Leclerc
:license: GPLv3, see LICENSE for more details.
'''
from os import path as P
import SocketServer
import socket
import sys
import threading
import time
base_path = P.abspath(P.dirname(__file__))
try:
import kooli as _ # pylint:disable=E0401
except ImportError:
sys.path.append(P.abspath(P.join(base_path, P.pardir, P.pardir)))
from kooli import log
from kooli import qobuz_lib_path
from kodi_six import xbmc # pylint:disable=E0401
from kooli.application import application, shutdown_server, qobuzApp
from kooli.monitor import Monitor
from qobuz import config
from qobuz.api import api
from qobuz.api.user import current as user
from qobuz.debug import getLogger
from qobuz.gui.util import notify_warn
import qobuz.gui.util as gui
logger = getLogger(__name__)
def my_finish(self):
if not self.wfile.closed:
try:
self.wfile.flush()
except socket.error:
# A final socket error may have occurred here, such as
# the local error ECONNABORTED.
pass
try:
self.wfile.close()
self.rfile.close()
except socket.error:
pass
SocketServer.StreamRequestHandler.finish = my_finish # Ugly monkey patching
def is_empty(obj):
if obj is None:
return True
if isinstance(obj, basestring):
if obj == '':
return True
return False
def is_authentication_set():
username = config.app.registry.get('username')
password = config.app.registry.get('password')
if not is_empty(username) and not is_empty(password):
return True
return False
def is_service_enable():
return config.app.registry.get('enable_scan_feature', to='bool')
@application.before_request
def shutdown_request():
if monitor.abortRequested:
shutdown_server()
return None
class KooliService(threading.Thread):
name = 'httpd'
def __init__(self, port=33574):
threading.Thread.__init__(self)
self.daemon = True
self.port = port
self.running = False
self.threaded = True
self.processes = 2
self.alive = True
def stop(self):
shutdown_server()
self.alive = False
def run(self):
while self.alive:
if not is_authentication_set():
gui.notify_warn('Authentication not set',
'You need to enter credentials')
elif not user.logged:
if not api.login(
username=qobuzApp.registry.get('username'),
password=qobuzApp.registry.get('password')):
gui.notify_warn('Login failed', 'Invalid credentials')
else:
try:
application.run(port=self.port,
threaded=True,
processes=0,
debug=False,
use_reloader=False,
use_debugger=False,
use_evalex=True,
passthrough_errors=False)
except Exception as e:
logger.error('KooliService port: %s Error: %s',
self.port, e)
raise e
time.sleep(1)
if __name__ == '__main__':
monitor = Monitor()
if is_service_enable():
monitor.add_service(KooliService())
else:
        notify_warn('Qobuz service / HTTPD',
'Service is disabled from configuration')
monitor.start_all_service()
alive = True
while alive:
abort = False
try:
abort = monitor.abortRequested
except Exception as e:
logger.error('Error while getting abortRequested %s', e)
if abort:
alive = False
continue
        xbmc.sleep(1000)
monitor.stop_all_service()
wldisaster/Alice | blog/admin.py | Python | gpl-3.0 | 339 | 0.009063
from django.contrib import admin
from .models import Post, Category, Tag
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
# Register the newly added PostAdmin
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
Mustapha90/IV16-17 | tango_with_django_project/dev_settings.py | Python | gpl-3.0 | 375 | 0.002667
# -*- coding: utf-8 -*-
from .common_settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dz(#w(lfve24ck!!yrt3l7$jfdoj+fgf+ru@w)!^gn9aq$s+&y'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
caioariede/pyintercept | setup.py | Python | mit | 1,498 | 0
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
VERSION = '0.4.1'
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='pyintercept',
version=VERSION,
description="Intercept function calls from Python scripts",
author="Caio Ariede",
    author_email="caio.ariede@gmail.com",
url="http://github.com/caioariede/pyintercept",
license="MIT",
zip_safe=False,
platforms=["any"],
packages=find_packages(),
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
include_package_data=True,
install_requires=[
'byteplay',
],
tests_require=[
'pytest',
'uncompyle6',
],
test_suite='py.test',
cmdclass={'test': PyTest},
)
Mirio/captcha2upload | setup.py | Python | bsd-2-clause | 647 | 0.001546
from distutils.core import setup
setup(
name='captcha2upload',
packages=['captcha2upload'],
package_dir={'captcha2upload': 'src/captcha2upload'},
version='0.2',
install_requires=['requests'],
    description='Upload your image and solve captche using the 2Captcha '
'Service',
author='Alessandro Sbarbati',
author_email='miriodev@gmail.com',
url='https://github.com/Mirio/captcha2upload',
download_url='https://github.com/Mirio/captcha2upload/tarball/0.1',
keywords=['2captcha', 'captcha', 'Image Recognition'],
    classifiers=["Topic :: Scientific/Engineering :: Image Recognition"],
)
alshedivat/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py | Python | apache-2.0 | 4,199 | 0.009288
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndFilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _map_and_filter_fusion_test_cases():
"""Generates test cases for the MapAndFilterFusion optimization."""
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
is_odd = lambda x: math_ops.equal(x % 2, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
functions = [identity, increment, minus_five, increment_and_square]
filters = [take_all, is_zero, is_odd, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
  tests.append(("Multi1", lambda x: (x, x),
                lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
    get_next = iterator.get_next()
with self.cached_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if sess.run(b):
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(*_map_and_filter_fusion_test_cases())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map", "FilterByLastComponent"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with captured inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map",
"Filter"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
if __name__ == "__main__":
test.main()
jjmontesl/cubetl | cubetl/template/__init__.py | Python | mit | 1,587 | 0.00189
# CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from cubetl.core import Node, Component
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TemplateRendererBase(Node):
def __init__(self, template):
self.template = template
def render(self, ctx, data):
raise NotImplementedError()
def process(self, ctx, m):
        #template = ctx.interpolate(self.template, m)
result = self.render(ctx, {'m': m})
m['templated'] = result
yield m
ESOedX/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_export_olx.py | Python | agpl-3.0 | 3,608 | 0.000554
"""
Tests for exporting OLX content.
"""
from __future__ import absolute_import
import shutil
import tarfile
import unittest
from six import StringIO
from tempfile import mkdtemp
import ddt
import six
from django.core.management import CommandError, call_command
from path import Path as path
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestArgParsingCourseExportOlx(unittest.TestCase):
"""
Tests for parsing arguments for the `export_olx` management command
"""
def test_no_args(self):
"""
Test export command with no arguments
"""
if six.PY2:
errstring = "Error: too few arguments"
else:
errstring = "Error: the following arguments are required: course_id"
        with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx')
@ddt.ddt
class TestCourseExportOlx(ModuleStoreTestCase):
"""
    Test exporting OLX content from a course or library.
"""
def test_invalid_course_key(self):
"""
Test export command with an invalid course key.
"""
errstring = "Unparsable course_id"
with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx', 'InvalidCourseID')
def test_course_key_not_found(self):
"""
Test export command with a valid course key that doesn't exist.
"""
errstring = "Invalid course_id"
with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx', 'x/y/z')
def create_dummy_course(self, store_type):
"""Create small course."""
course = CourseFactory.create(default_store=store_type)
self.assertTrue(
modulestore().has_course(course.id),
u"Could not find course in {}".format(store_type)
)
return course.id
def check_export_file(self, tar_file, course_key):
"""Check content of export file."""
names = tar_file.getnames()
dirname = "{0.org}-{0.course}-{0.run}".format(course_key)
self.assertIn(dirname, names)
# Check if some of the files are present, without being exhaustive.
self.assertIn("{}/about".format(dirname), names)
self.assertIn("{}/about/overview.html".format(dirname), names)
self.assertIn("{}/assets/assets.xml".format(dirname), names)
self.assertIn("{}/policies".format(dirname), names)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_export_course(self, store_type):
test_course_key = self.create_dummy_course(store_type)
tmp_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, tmp_dir)
filename = tmp_dir / 'test.tar.gz'
call_command('export_olx', '--output', filename, six.text_type(test_course_key))
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file, test_course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_export_course_stdout(self, store_type):
test_course_key = self.create_dummy_course(store_type)
out = StringIO()
call_command('export_olx', six.text_type(test_course_key), stdout=out)
out.seek(0)
output = out.read()
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file, test_course_key)
bsmr-eve/Pyfa | eos/effects/subsystembonuscaldarioffensive3remoteshieldboosterheat.py | Python | gpl-3.0 | 459 | 0.004357
# subsystemBonusCaldariOffensive3RemoteShieldBoosterHeat
#
# Used by:
# Subsystem: Tengu Offensive - Support Processor
type = "passive"
def handler(fit, src, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Emission Systems"),
"overloadSelfDurationBonus", src.getModifiedItemAttr("subsystemBonusCaldariOffensive3"),
skill="Caldari Offensive Systems")
111t8e/h2o-2 | py/testdir_single_jvm/test_GLM2_syn_2659x1049.py | Python | apache-2.0 | 1,417 | 0.012703
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
params = {
'response': 1049,
'family': 'binomial',
'beta_epsilon': 0.0001,
'alpha': 1.0,
'lambda': 1e-05,
'n_folds': 1,
'max_iter': 20,
}
class Basic(unittest.TestCase):
def tearDown(self):
        h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_syn_2659x1049(self):
csvFilename = "syn_2659x1049.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
def test_GLM2_syn_2659x1049x2enum(self):
csvFilename = "syn_2659x1049x2enum.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
leapcode/soledad | tests/e2e/test_incoming_mail_pipeline.py | Python | gpl-3.0 | 1,604 | 0
# This script does the following:
#
# - create a user using bonafide and and invite code given as an environment
# variable.
#
# - create and upload an OpenPGP key manually, as that would be
# a responsibility of bitmask-dev.
#
# - send an email to the user using sendmail, with a secret in the body.
#
# - start a soledad client using the created user.
#
# - download pending blobs. There should be only one.
#
# - look inside the blob, parse the email message.
#
# - compare the token in the incoming message with the token in the sent
# message and succeed if the tokens are the same.
#
# - delete the user (even if the test failed). (TODO)
import pytest
from utils import get_session
from utils import gen_key
from utils import put_key
from utils import send_email
from utils import get_incoming_fd
from utils import get_received_secret
@pytest.inlineCallbacks
def test_incoming_mail_pipeline(soledad_client, tmpdir):
# create a user and login
session = yield get_session(tmpdir)
# create a OpenPGP key and upload it
key = gen_key(session.username)
yield put_key(session.uuid, session.token, str(key.pubkey))
# get a soledad client for that user
client = soledad_client(
uuid=session.uuid,
passphrase='123',
token=session.token)
# send the email
sent_secret = send_email(session.username)
# check the incoming blob and compare sent and received secrets
fd = yield get_incoming_fd(client)
received_secret = get_received_secret(key, fd)
assert sent_secret == received_secret
# TODO: delete user in the end
|
jasonsahl/LS-BSR
|
tools/compare_BSR.py
|
Python
|
gpl-3.0
| 3,059
| 0.012422
|
#!/usr/bin/env python
"""compares BSR values between two groups in a BSR matrix
Numpy and BioPython need to be installed. Python version must be at
least 2.7 to use collections"""
from optparse import OptionParser
import subprocess
from ls_bsr.util import prune_matrix
from ls_bsr.util import compare_values
from ls_bsr.util import find_uniques
import sys
import os
def test_file(option, opt_str, value, parser):
try:
with open(value): setattr(parser.values, option.dest, value)
except IOError:
print('%s file cannot be opened' % option)
sys.exit()
def add_headers(infile, outfile, lower, upper):
file_out = open(outfile, "w")
file_out.write("marker"+"\t"+"group1_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group_1"+"\t"+">="+str(lower)+"\t"+"group2_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group2"+"\t"+">="+str(lower)+"\n")
with open(infile) as my_file:
for line in my_file:
file_out.write(line)
file_out.close()
def main(matrix,group1,group2,fasta,upper,lower):
prune_matrix(matrix,group1,group2)
compare_values("group1_pruned.txt","group2_pruned.txt",upper,lower)
subprocess.check_call("paste group1_out.txt group2_out.txt > groups_combined.txt", shell=True)
find_uniques("groups_combined.txt",fasta)
add_headers("groups_combined.txt","groups_combined_header.txt",lower,upper)
os.system("rm group1_out.txt group2_out.txt")
if __name__ == "__main__":
usage="usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-b", "--bsr_matrix", dest="matrix",
help="/path/to/bsr_matrix [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-f", "--fasta", dest="fasta",
help="/path/to/ORF_fasta_file [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-1", "--group_1_ids", dest="group1",
help="new line separated file with group1 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
    parser.add_option("-2", "--group_2_ids", dest="group2",
help="new line separated file with group2 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-u", "--upper_bound", dest="upper",
help="upper bound for BSR comparisons, defaults to 0.8",
default="0.8", type="float")
parser.add_option("-l", "--lower_bound", dest="lower",
                      help="lower bound for BSR comparisons, defaults to 0.4",
default="0.4", type="float")
options, args = parser.parse_args()
mandatories = ["matrix", "group1", "group2", "fasta"]
for m in mandatories:
if not options.__dict__[m]:
print("\nMust provide %s.\n" %m)
parser.print_help()
exit(-1)
main(options.matrix,options.group1,options.group2,options.fasta,options.upper,options.lower)
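# A minimal usage sketch (not part of the original script; file names below are
# hypothetical): the script is normally driven from the command line, e.g.
#
#   python compare_BSR.py -b bsr_matrix.txt -f ORFs.fasta -1 group1_ids.txt -2 group2_ids.txt
#
# which is equivalent to calling main() directly with the default bounds:
#
#   main("bsr_matrix.txt", "group1_ids.txt", "group2_ids.txt", "ORFs.fasta", 0.8, 0.4)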
|
sistason/kinksorter2
|
src/kinksorter_app/management/commands/kink_besteffortsync.py
|
Python
|
gpl-3.0
| 2,449
| 0.004492
|
from os import path, access, W_OK, R_OK
import argparse
import logging
from django.core.management.base import BaseCommand, CommandError
from kinksorter_app.functionality.movie_handling import merge_movie, recognize_movie
from kinksorter_app.models import Movie, PornDirectory
from kinksorter_app.functionality.directory_handling import PornDirectoryHandler, get_target_porn_directory
from kinksorter_app.functionality.sorting import TargetSorter
logger = logging.getLogger(__name__)
def argcheck_dir(string):
if path.isdir(string) and access(string, W_OK) and access(string, R_OK):
return path.abspath(string)
raise argparse.ArgumentTypeError('{} is no directory or isn\'t writeable'.format(string))
class Command(BaseCommand):
logging.basicConfig(level=logging.DEBUG)
help = "Syncs a source directory into a destination directory"
    def add_arguments(self, parser):
parser.add_argument('src_directory', type=argcheck_dir)
parser.add_argument('dst_directory', type=argcheck_dir)
def handle(self, *args, **options):
src_dir = options['src_directory']
dst_dir = options['dst_directory']
logger.info("Start")
        if PornDirectory.objects.filter(id=0).exists():
dst_handler = PornDirectoryHandler(0)
else:
dst_handler = PornDirectoryHandler(None, init_path=dst_dir, name="dest", id_=0)
dst_handler.scan() # only scan initially, since the merged files get added to the db
if PornDirectory.objects.filter(path=src_dir).exists():
            PornDirectory.objects.filter(path=src_dir).delete()  # don't keep the src directory, to force resyncs
src_handler = PornDirectoryHandler(None, init_path=src_dir, name="src")
else:
src_handler = PornDirectoryHandler(None, init_path=src_dir, name="src")
src_handler.scan()
for movie in src_handler.directory.movies.all():
if not movie.scene_id:
recognize_movie(movie, None)
if not movie.scene_id:
# if it was not recognized during first run, recognize with extensive=True again
recognize_movie(movie, None, extensive=True)
if not dst_handler.directory.movies.filter(scene_id=movie.scene_id).exists():
merge_movie(movie.id)
ts = TargetSorter("move", list(dst_handler.directory.movies.all()))
ts.sort()
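# A minimal usage sketch (not part of the original command; paths are hypothetical):
# as a Django management command, this module is invoked by its file name, e.g.
#
#   python manage.py kink_besteffortsync /data/incoming /data/library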
|
hanyassasa87/ns3-802.11ad
|
doc/manual/source/conf.py
|
Python
|
gpl-2.0
| 10,879
| 0.001287
|
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 26 00:00:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
#
#class CustomLatexFormatter(LatexFormatter):
# def __init__(self, **options):
# super(CustomLatexFormatter, self).__init__(**options)
# self.verboptions = r"formatcom=\footnotesize"
#
#PygmentsBridge.latex_formatter = CustomLatexFormatter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3 project'
copyright = u'2006-2019'
#author = u'test'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = u'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'est vtest'
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuf
|
OddBloke/sphinx-git
|
tests/test_git_changelog.py
|
Python
|
gpl-3.0
| 11,748
| 0
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import six
from bs4 import BeautifulSoup
from git import InvalidGitRepositoryError, Repo
from mock import ANY, call
from nose.tools import (
assert_equal,
assert_greater,
assert_in,
assert_less_equal,
assert_not_in,
assert_raises,
)
from sphinx_git import GitChangelog
from . import MakeTestableMixin, TempDirTestCase
class TestableGitChangelog(MakeTestableMixin, GitChangelog):
pass
class ChangelogTestCase(TempDirTestCase):
def setup(self):
super(ChangelogTestCase, self).setup()
self.changelog = TestableGitChangelog()
self.changelog.state.document.settings.env.srcdir = self.root
class TestNoRepository(ChangelogTestCase):
def test_not_a_repository(self):
assert_raises(InvalidGitRepositoryError, self.changelog.run)
class TestWithRepository(ChangelogTestCase):
def _set_username(self, username):
config_writer = self.repo.config_writer()
config_writer.set_value('user', 'name', username)
config_writer.release()
def setup(self):
super(TestWithRepository, self).setup()
self.repo = Repo.init(self.root)
self._set_username('Test User')
def test_no_commits(self):
assert_raises(ValueError, self.changelog.run)
def test_single_commit_produces_single_item(self):
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(1, len(bullet_list.findAll('list_item')))
def test_single_commit_message_and_user_display(self):
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
        children = list(item.childGenerator())
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal('Test User', par_children[2].text)
def test_single_commit_message_and_user_display_with_non_ascii_chars(self):
self._set_username('þéßþ Úßéë')
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
list_markup = BeautifulSoup(six.text_type(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal(u'þéßþ Úßéë', par_children[2].text)
def test_single_commit_time_display(self):
before = datetime.now().replace(microsecond=0)
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
after = datetime.now()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item.paragraph
children = list(item.childGenerator())
timestamp = datetime.strptime(children[4].text, '%Y-%m-%d %H:%M:%S')
assert_less_equal(before, timestamp)
assert_greater(after, timestamp)
def test_single_commit_default_detail_setting(self):
self.repo.index.commit(
'my root commit\n\nadditional information\nmore info'
)
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(2, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal('Test User', par_children[2].text)
assert_equal(
str(children[1]),
'<paragraph>additional information\nmore info</paragraph>'
)
def test_single_commit_preformmated_detail_lines(self):
self.repo.index.commit(
'my root commit\n\nadditional information\nmore info'
)
self.changelog.options.update({'detailed-message-pre': True})
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(2, len(children))
assert_equal(
str(children[1]),
'<literal_block xml:space="preserve">additional information\n'
'more info</literal_block>'
)
def test_more_than_ten_commits(self):
for n in range(15):
self.repo.index.commit('commit #{0}'.format(n))
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(10, len(bullet_list.findAll('list_item')))
for n, child in zip(range(15, 5), bullet_list.childGenerator()):
assert_in('commit #{0}'.format(n), child.text)
assert_not_in('commit #4', bullet_list.text)
def test_specifying_number_of_commits(self):
for n in range(15):
self.repo.index.commit('commit #{0}'.format(n))
self.changelog.options.update({'revisions': 5})
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(5, len(bullet_list.findAll('list_item')))
for n, child in zip(range(15, 10), bullet_list.childGenerator()):
assert_in('commit #{0}'.format(n), child.text)
assert_not_in('commit #9', bullet_list.text)
def test_specifying_a_rev_list(self):
self.repo.index.commit('before tag')
commit = self.repo.index.commit('at tag')
self.repo.index.commit('after tag')
self.repo.index.commit('last commit')
self.repo.create_tag('testtag', commit)
self.changelog.options.update({'rev-list': 'testtag..'})
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(2, len(bullet_list.findAll('list_item')))
children = list(bullet_list.childGenerator())
first_element = children[0]
second_element = children[1]
assert_in('last commit', first_element.text)
assert_in('after tag', second_element.text)
def test_warning_given_if_rev_list_and_revisions_both_given(self):
self.repo.index.commit('a commit')
self.changelog.options.update({'rev-list': 'HEAD', 'revisions': 12})
nodes = self.changelog.run()
assert_equal(
1, self.changelog.state.document.reporter.warning.call_count
)
def test_line_number_displayed_in_multiple_option_warning(self):
self.repo.index.commit('a commit')
self.changelog.options.update({'rev-list': 'HEAD', 'revisions': 12})
nodes = self.changelog.run()
document_reporter = self.changelog.state.document.reporter
assert_equal(
[call(ANY, line=self.changelog.lineno)],
document_reporter.warning.call_args_list
)
def test_name_filter(self):
self.repo.index.commit('initial')
for file_name in ['abc.txt', 'bcd.txt', 'abc.other', 'atxt']:
full_path = os.path.join(self.repo.working_tree_dir, file_name)
f = open(full_path, 'w+')
|
threemeninaboat3247/kuchinawa
|
kuchinawa/__init__.py
|
Python
|
mit
| 687
| 0.020378
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 03:24:37 2017
@author: Yuki
"""
import os,sys,logging
ENTRYPOINT=__path__[0]
ICONPATH=os.path.join(ENTRYPOINT,'Icons','logo.png')
KUCHINAWA='Kuchinawa'
from kuchinawa.Compile import compileUi
from kuchinawa.Thread import Main
#change the multiprocessing's context to 'spawn'
try:
import multiprocessing
    multiprocessing.set_start_method('spawn')
except RuntimeError:
print('The context of multiprocessing is already set.')
def run_sample():
'''Run a sample program'''
from PyQt5.QtWidgets import QApplication
from kuchinawa.Examples import SinCos
app = QApplication([])
s=SinCos.Sample()
sys.exit(app.exec_())
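# A minimal usage sketch (not part of the original module): after installing the
# package, the bundled example can be launched from an interactive session with
#
#   import kuchinawa
#   kuchinawa.run_sample()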
|
krarkrrrc/vidpager
|
db/DbTools.py
|
Python
|
gpl-3.0
| 2,503
| 0.029165
|
import sys
import sqlite3
import CONST
import re
"""
Provides the low-level functions to insert, query and update the db
"""
def init():
con = sqlite3.connect( CONST.db_name )
# asr value is auto-speech-recognition rendered captions, either 0 (false) or 1 (true)
con.execute( '''CREATE TABLE IF NOT EXISTS subtitles
( urlid text, title text, captions text, timestamps text, asr integer, ROWID INTEGER PRIMARY KEY )''' )
return con
def insert( *args, table='subtitles', **kwargs ):
"""
    Takes 5 arguments in the following order: String urlid, String title, String captions, String timestamps, String asr
"""
con = init()
try:
with con:
            con.execute( "INSERT INTO " + table + " VALUES ( ?, ?, ?, ?, ?, NULL )",
                         ( args[0], args[1], args[2], args[3], args[4] ) )
except sqlite3.IntegrityError:
print( "Error inserting into db" )
def get_rowid_from_urlid( urlid ):
"""
Returns a row id to select columns from
"""
con = init()
try:
with con:
rowid = str( con.execute( "SELECT rowid FROM subtitles WHERE urlid =:urlid", {"urlid": urlid} ).fetchone()[0] )
except sqlite3.IntegrityError:
print( "Error in get_rowid_from_urlid" )
#print( "rowid = " + str( rowid ) )
return rowid
def get_column_from_rowid( rowid, column ):
con = init()
try:
with con:
column_data = str( con.execute( "SELECT " + column + " FROM subtitles WHERE rowid = " + rowid + ";" ).fetchone()[0] )
except sqlite3.IntegrityError:
print( "Error in get_column_from_rowid" )
return column_data
def get_column_from_urlid( urlid, column ):
return get_column_from_rowid( get_rowid_from_urlid( urlid ), column )
def parse_subtitles( subtitles ):
# match[0] is timestamp, [1] is caption
    matches = re.findall( r'(\d\d:\d\d:\d\d\.\d\d\d\s-->\s\d\d:\d\d:\d\d\.\d\d\d)\\n([\w\s\d\\\,\.\;\:\$\!\%\)\(\?\/\'\"\-]+)\\n\\n', subtitles )
captions = ""
timestamps = ""
count = 0
for match in matches:
captions += '<' + str( count ) + '>' + match[1]
timestamps += '<' + str( count ) + '>' + match[0]
count += 1
return { 'captions' : captions, 'timestamps' : timestamps }
def insert_raw_subtitles( urlid, raw_subs, title ):
subs = str( raw_subs )[2:-1]
parsed_subs = parse_subtitles( subs )
insert( urlid, title, parsed_subs['captions'], parsed_subs['timestamps'], '0')
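# A minimal usage sketch (not part of the original module; the sample bytes are
# hypothetical but follow the WebVTT-style layout assumed by parse_subtitles):
#
#   raw = b"00:00:01.000 --> 00:00:04.000\nhello world\n\n"
#   subs = str(raw)[2:-1]            # same normalisation used by insert_raw_subtitles
#   parse_subtitles(subs)
#   # -> {'captions': '<0>hello world', 'timestamps': '<0>00:00:01.000 --> 00:00:04.000'}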
|
TuSimple/simpledet
|
config/RepPoints/reppoints_moment_dcn_r101v1b_fpn_multiscale_2x.py
|
Python
|
apache-2.0
| 7,766
| 0.000644
|
from models.RepPoints.builder import RepPoints as Detector
from models.dcn.builder import DCNResNetFPN as Backbone
from models.RepPoints.builder import RepPointsNeck as Neck
from models.RepPoints.builder import RepPointsHead as Head
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
        # normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0)
normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
# normalizer = NormalizeParam.normalizer
normalizer = normalizer_factory(type="fixbn")
depth = 101
num_c3_block = 0
num_c4_block = 3
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class HeadParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
class point_generate:
num_points = 9
scale = 4
stride = (8, 16, 32, 64, 128)
transform = "moment"
class head:
conv_channel = 256
point_conv_channel = 256
mean = None
std = None
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
class point_target:
target_scale = 4
num_pos = 1
class bbox_target:
pos_iou_thr = 0.5
neg_iou_thr = 0.5
min_pos_iou = 0.0
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class regress_target:
class_agnostic = None
mean = None
std = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
head = Head(HeadParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["gn"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 35
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3
iter = 2000
class TestScaleParam:
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
@staticmethod
def add_resize_info(roidb):
ms_roidb = []
for r_ in roidb:
for short, long in zip(TestScaleParam.short_ranges, TestScaleParam.long_ranges):
r = r_.copy()
r["resize_long"] = long
r["resize_short"] = short
ms_roidb.append(r)
return ms_roidb
class TestParam:
min_det_score = 0.05 # filter appended boxes
max_det_per_image = 100
process_roidb = TestScaleParam.add_resize_info
def process_output(x, y):
return x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class RandResizeParam:
short = None # generate on the fly
long = None
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
class RandCropParam:
mode = "center" # random or center
short = 800
long = 1333
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class RandPadParam:
short = 1200
long = 2000
max_num_gt = 100
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, \
RandResize2DImageBbox, RandCrop2DImageBbox, Resize2DImageByRoidb, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.retinanet.input import Norm2DImage
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
RandResize2DImageBbox(RandResizeParam),
RandCrop2DImageBbox(RandCropParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
Resize2DImageByRoidb(),
Pad2DImageBbox(RandPadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric as cls_metric
import core.detection_metric as box_metric
cls_acc_metric = cls_metric.FGAccMetric(
"FGAcc",
["cls_loss_output", "point_refine_labels_output"],
[]
)
box_init_l1_metric = box_metric.L1(
"InitL1",
["pts_init_loss_output", "points_init_labels_output"],
[]
)
box_refine_l1_metric = box_metric.L1(
"RefineL1",
["pts_refine_loss_output", "point_refine_labels_output"],
[]
)
metric_list = [cls_acc_metric, box_init_l1_metric, box_refine_l1_metric]
return General, KvstoreParam, HeadParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
|
thomazs/geraldo
|
site/newsite/django_1_0/tests/regressiontests/modeladmin/models.py
|
Python
|
lgpl-3.0
| 31,422
| 0.002005
|
# coding: utf-8
from datetime import date
from django.db import models
from django.contrib.auth.models import User
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
def __unicode__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, related_name='main_concerts')
opening_band = models.ForeignKey(Band, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField()
pub_date = models.DateTimeField()
band = models.ForeignKey(Band)
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel)
__test__ = {'API_TESTS': """
>>> from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
>>> from django.contrib.admin.sites import AdminSite
None of the following tests really depend on the content of the request, so
we'll just pass in None.
>>> request = None
# the sign_date is not 100 percent accurate ;)
>>> band = Band(name='The Doors', bio='', sign_date=date(1965, 1, 1))
>>> band.save()
Under the covers, the admin system will initialize ModelAdmin with a Model
class and an AdminSite instance, so let's just go ahead and do that manually
for testing.
>>> site = AdminSite()
>>> ma = ModelAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name', 'bio', 'sign_date']
# form/fields/fieldsets interaction ##########################################
fieldsets_add and fieldsets_change should return a special data structure that
is used in the templates. They should generate the "right thing" whether we
have specified a custom form, the fields argument, or nothing at all.
Here's the default case. There are no custom form_add/form_change methods,
no fields argument, and no fieldsets argument.
>>> ma = ModelAdmin(Band, site)
>>> ma.get_fieldsets(request)
[(None, {'fields': ['name', 'bio', 'sign_date']})]
>>> ma.get_fieldsets(request, band)
[(None, {'fields': ['name', 'bio', 'sign_date']})]
If we specify the fields argument, fieldsets_add and fieldsets_change should
just stick the fields into a formsets structure and return it.
>>> class BandAdmin(ModelAdmin):
... fields = ['name']
>>> ma = BandAdmin(Band, site)
>>> ma.get_fieldsets(request)
[(None, {'fields': ['name']})]
>>> ma.get_fieldsets(request, band)
[(None, {'fields': ['name']})]
If we specify fields or fieldsets, it should exclude fields on the Form class
to the fields specified. This may cause errors to be raised in the db layer if
required model fields aren't in fields/fieldsets, but that's preferable to
ghost errors where you have a field in your Form class that isn't being
displayed because you forgot to add it to fields/fieldsets
>>> class BandAdmin(ModelAdmin):
... fields = ['name']
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name']
>>> ma.get_form(request, band).base_fields.keys()
['name']
>>> class BandAdmin(ModelAdmin):
... fieldsets = [(None, {'fields': ['name']})]
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name']
>>> ma.get_form(request, band).base_fields.keys()
['name']
If we specify a form, it should use it allowing custom validation to work
properly. This won't, however, break any of the admin widgets or media.
>>> from django import forms
>>> class AdminBandForm(forms.ModelForm):
... delete = forms.BooleanField()
...
... class Meta:
... model = Band
>>> class BandAdmin(ModelAdmin):
... form = AdminBandForm
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name', 'bio', 'sign_date', 'delete']
>>> type(ma.get_form(request).base_fields['sign_date'].widget)
<class 'django.contrib.admin.widgets.AdminDateWidget'>
If we need to override the queryset of a ModelChoiceField in our custom form
make sure that RelatedFieldWidgetWrapper doesn't mess that up.
>>> band2 = Band(name='The Beetles', bio='', sign_date=date(1962, 1, 1))
>>> band2.save()
>>> class AdminConcertForm(forms.ModelForm):
... class Meta:
... model = Concert
...
... def __init__(self, *args, **kwargs):
... super(AdminConcertForm, self).__init__(*args, **kwargs)
... self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
>>> class ConcertAdmin(ModelAdmin):
... form = AdminConcertForm
>>> ma = ConcertAdmin(Concert, site)
>>> form = ma.get_form(request)()
>>> print form["main_band"]
<select name="main_band" id="id_main_band">
<option value="" selected="selected">---------</option>
<option value="1">The Doors</option>
</select>
>>> band2.delete()
# radio_fields behavior ################################################
First, without any radio_fields specified, the widgets for ForeignKey
and fields with choices specified ought to be a basic Select widget.
ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
they need to be handled properly when type checking. For Select fields, all of
the choices lists have a first entry of dashes.
>>> cma = ModelAdmin(Concert, site)
>>> cmafa = cma.get_form(request)
>>> type(cmafa.base_fields['main_band'].widget.widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['main_band'].widget.choices)
[(u'', u'---------'), (1, u'The Doors')]
>>> type(cmafa.base_fields['opening_band'].widget.widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['opening_band'].widget.choices)
[(u'', u'---------'), (1, u'The Doors')]
>>> type(cmafa.base_fields['day'].widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['day'].widget.choices)
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
>>> type(cmafa.base_fields['transport'].widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['transport'].widget.choices)
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
Now specify all the fields as radio_fields. Widgets should now be
RadioSelect, and the choices list should have a first entry of 'None' if
blank=True for the model field. Finally, the widget should have the
'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
>>> class ConcertAdmin(ModelAdmin):
... radio_fields = {
... 'main_band': HORIZONTAL,
... 'opening_band': VERTICAL,
... 'day': VERTICAL,
... 'transport': HORIZONTAL,
... }
>>> cma = ConcertAdmin(Concert, site)
>>> cmafa = cma.get_form(request)
>>> type(cmafa.base_fields['main_band'].widget.widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['main_band'].widget.attrs
{'class': 'radiolist inline'}
>>> list(cmafa.base_fields['main_band'].widget.choices)
[(1, u'The Doors')]
>>> type(cmafa.base_fields['opening_band'].widget.widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['opening_band'].widget.attrs
{'class': 'radiolist'}
>>> list(cmafa.base_fields['opening_band'].widget.choices)
[(u'', u'None'), (1, u'The Doors')]
>>> type(cmafa.base_fields['day'].widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['day'].widget.attrs
{'class': 'radiolist'}
>>> list(cmafa.base_fields['day'].widget.choices)
[(1, 'Fri'), (2, 'Sat')]
>>> type(cmafa.base_fields['transport'].widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['transport'].widget.attrs
{'class': 'radiolist inline'}
>>> list(cmafa.base_fields['transport'].widget.choices)
[('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
>>> band.delete()
# ModelAdmin Option Validation ################################################
>>> fr
|
lmotta/Roam
|
src/configmanager/editorwidgets/listwidget.py
|
Python
|
gpl-2.0
| 5,997
| 0.001501
|
import os
from functools import partial
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayer
from qgis.gui import QgsExpressionBuilderDialog
from roam.api.utils import layer_by_name
from configmanager.models import QgsLayerModel, QgsFieldModel
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_listwidget_config import Ui_Form
class ListWidgetConfig(Ui_Form, ConfigWidget):
description = 'Select an item from a predefined list'
def __init__(self, parent=None):
super(ListWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.allownull = False
self.orderby = False
self.orderbyCheck.hide()
self.layerRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 0))
self.listRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 1))
self.layermodel = QgsLayerModel(watchregistry=False)
self.layermodel.layerfilter = [QgsMapLayer.VectorLayer]
self.fieldmodel = QgsFieldModel()
self.blockSignals(True)
        self.layerCombo.setModel(self.layermodel)
self.keyCombo.setModel(self.fieldmodel)
self.valueCombo.setModel(self.fieldmodel)
self.filterButton.pressed.connect(self.define_filter)
self.fieldmodel.setLayerFilter(self.layerCombo.view().selectionModel())
self.reset()
self.blockSignals(False)
def define_filter(self):
layer = self.layerCombo.currentText()
if not layer:
return
layer = layer_by_name(layer)
dlg = QgsExpressionBuilderDialog(layer, "List filter", self)
text = self.filterText.toPlainText()
dlg.setExpressionText(text)
if dlg.exec_():
self.filterText.setPlainText(dlg.expressionText())
def reset(self):
self.listtype = 'layer'
self.listText.setPlainText('')
self.orderby = False
self.allownull = False
self.filterText.setPlainText('')
self.layerCombo.setCurrentIndex(-1)
self.keyCombo.setCurrentIndex(-1)
self.valueCombo.setCurrentIndex(-1)
def widgetchanged(self):
self.widgetdirty.emit(self.getconfig())
@property
def allownull(self):
return self.allownullCheck.isChecked()
@allownull.setter
def allownull(self, value):
self.allownullCheck.setChecked(value)
@property
def orderby(self):
return self.orderbyCheck.isChecked()
@orderby.setter
def orderby(self, value):
self.orderbyCheck.setChecked(value)
@property
def list(self):
return [item for item in self.listText.toPlainText().split('\n')]
@property
def filter(self):
return self.filterText.toPlainText()
@property
def layer(self):
return self.layerCombo.currentText()
@property
def key(self):
index_key = self.fieldmodel.index(self.keyCombo.currentIndex(), 0)
fieldname_key = self.fieldmodel.data(index_key, QgsFieldModel.FieldNameRole)
return fieldname_key
@property
def value(self):
index_value = self.fieldmodel.index(self.valueCombo.currentIndex(), 0)
return self.fieldmodel.data(index_value, QgsFieldModel.FieldNameRole)
def getconfig(self):
config = {}
config['allownull'] = self.allownull
config['orderbyvalue'] = self.orderby
if self.layerRadio.isChecked():
subconfig = {}
# TODO Grab the data here and not just the text
subconfig['layer'] = self.layer
subconfig['key'] = self.key
subconfig['value'] = self.value
subconfig['filter'] = self.filter
config['layer'] = subconfig
else:
config['list'] = {}
config['list']['items'] = self.list
return config
def blockSignals(self, bool):
for child in self.findChildren(QWidget):
child.blockSignals(bool)
super(ListWidgetConfig, self).blockSignals(bool)
def setconfig(self, config):
self.blockSignals(True)
self.allownull = config.get('allownull', True)
self.orderby = config.get('orderbyvalue', False)
#Clear the widgets
self.listText.setPlainText('')
self.keyCombo.clear()
self.valueCombo.clear()
self.filterText.clear()
self.layermodel.refresh()
# Rebind all the values
if 'list' in config:
subconfig = config.get('list', {})
self.listRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(1)
listitems = subconfig.get('items', [])
itemtext = '\n'.join(listitems)
self.listText.setPlainText(itemtext)
else:
self.layerRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(0)
subconfig = config.get('layer', {})
layer = subconfig.get('layer', '') or ''
key = subconfig.get('key', '') or ''
value = subconfig.get('value', '') or ''
filter = subconfig.get('filter', None)
index = self.layerCombo.findData(layer, Qt.DisplayRole)
if index > -1:
self.layerCombo.setCurrentIndex(index)
index = self.layermodel.index(index, 0)
self.fieldmodel.updateLayer(index, None)
keyindex = self.keyCombo.findData(key.lower(), QgsFieldModel.FieldNameRole)
if keyindex > -1:
self.keyCombo.setCurrentIndex(keyindex)
valueindex = self.valueCombo.findData(value.lower(), QgsFieldModel.FieldNameRole)
if valueindex > -1:
self.valueCombo.setCurrentIndex(valueindex)
self.filterText.setPlainText(filter)
self.allownullCheck.setChecked(self.allownull)
self.orderbyCheck.setChecked(self.orderby)
self.blockSignals(False)
|
tiangolo/fastapi
|
docs_src/header_params/tutorial003.py
|
Python
|
mit
| 216
| 0
|
from typing import List, Optional
from fastapi import FastAPI, Header
app = FastAPI()
@app.get("/items/")
async def read_items(x_token: Optional[List[str]] = Header(None)):
    return {"X-Token values": x_token}
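# A minimal usage sketch (not part of the tutorial file): because x_token is declared
# as Optional[List[str]], a client may send the X-Token header more than once, e.g.
#
#   GET /items/ HTTP/1.1
#   X-Token: foo
#   X-Token: bar
#
# and the path operation receives both values as a list, responding with something
# like {"X-Token values": ["foo", "bar"]} (the order of the values may vary).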
|
rlishtaba/py-algorithms
|
py_algorithms/challenges/challenges.py
|
Python
|
mit
| 1,505
| 0
|
import re
class Challenges:
@staticmethod
def first_factorial(number: int) -> int:
"""
Iterative approach
:param number: an input, first factorial of a number
:return: factorial
"""
found = 1
step = 2
        while step <= number:
found *= step
step += 1
return found
@staticmethod
def longest_word(sentence: str) -> str:
"""
Detect longest word in a sentence
:param sentence:
:return:
"""
        trimmed = re.compile('[^a-zA-Z0-9 ]').sub('', sentence)
chunks = trimmed.split(' ')
longest = 0
index = -1
for i, x in enumerate(chunks):
if len(x) > longest:
longest = len(x)
index = i
return chunks[index]
@staticmethod
def letter_mutation(string):
"""
Coderbyte challenge: Letter Changes
:param string: a sentence
:return: str, transformed sentence
"""
alphabet = list(range(97, 123))
alphabet_len = len(alphabet) - 1
ret = ''
vowels = list('aeiou')
for x in list(string):
r = x
if ord(x) in alphabet:
if ord(x) == alphabet[alphabet_len]:
r = chr(alphabet[0])
else:
r = chr(ord(x) + 1)
if r in vowels:
r = r.upper()
ret += r
return ret
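# A minimal usage sketch (not part of the original module):
#
#   Challenges.first_factorial(5)             # -> 120
#   Challenges.longest_word("fun&!! time")    # -> 'time'
#   Challenges.letter_mutation("hello*3")     # -> 'Ifmmp*3'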
|
tdhooper/starstoloves
|
starstoloves/lib/user/user_repository.py
|
Python
|
gpl-2.0
| 564
| 0.007092
|
import sys
from starstoloves.models import User as UserModel
from starstoloves import model_repository
from starstoloves.lib.track import lastfm_track_repository
from .user import User
def from_session_key(session_key):
user_model, created = UserModel.objects.get_or_create(session_key=session_key)
return User(
session_key=session_key,
repository=sys.modules[__name__],
);
def delete(user):
try:
user_model = model_repository.from_user(user)
user_model.delete()
except UserModel.DoesNotExist:
pass;
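# A minimal usage sketch (not part of the original module; assumes a Django request
# with an active session and the starstoloves models configured):
#
#   user = from_session_key(request.session.session_key)
#   ...
#   delete(user)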
|
romonzaman/newfies-dialer
|
newfies/dnc/constants.py
|
Python
|
mpl-2.0
| 748
| 0
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import ugettext_lazy as _
from django_lets_go.utils import Choice
class DNC_COLUMN_NAME(Choice):
id = _('ID')
name = _('name')
date = _('date')
contacts = _('contacts')
class DNC_CONTACT_COLUMN_NAME(Choice):
id = _('ID')
dnc = _('DNC')
phone_number = _('phone number')
date = _('date')
|
fossilet/ansible
|
lib/ansible/module_utils/facts.py
|
Python
|
gpl-3.0
| 132,636
| 0.004976
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import array
import shlex
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
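# A minimal usage sketch (not part of the original module): the decorator relies on
# SIGALRM, so it only bounds code running in the main thread on POSIX systems. A
# hypothetical fact-gathering helper could be wrapped like this:
#
#   @timeout(5, "mount enumeration timed out")
#   def get_mount_facts():
#       ...   # raises TimeoutError if it runs longer than 5 seconds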
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
('/etc/slackware-version', 'Slackware'),
('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
)
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
]
def __init__(self, load_on_init=True):
self.facts = {}
if load_on_init:
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
            self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
if module.get_bin_path('getconf'):
rc, out, err = module.run_command([module.get_bin_path('getconf'),
'MACHINE_ARCHITECTURE'])
data = out.split('\n')
self.facts['architecture'] = data[0]
else:
rc,
|
humancompatibleai/imitation
|
src/imitation/data/buffer.py
|
Python
|
mit
| 13,770
| 0.001888
|
import dataclasses
from typing import Dict, Mapping, Optional, Tuple
import numpy as np
from stable_baselines3.common import vec_env
from imitation.data import types
class Buffer:
"""A FIFO ring buffer for NumPy arrays of a fixed shape and dtype.
Supports random sampling with replacement.
"""
capacity: int
"""The number of data samples that can be stored in this buffer."""
sample_shapes: Dict[str, Tuple[int, ...]]
"""The shapes of each data sample stored in this buffer."""
_arrays: Dict[str, np.ndarray]
"""The underlying NumPy arrays (which actually store the data)."""
_n_data: int
"""The number of samples currently stored in this buffer.
An integer in `range(0, self.capacity + 1)`. This attribute is the return
value of `self.size()`.
"""
_idx: int
"""The index of the first row that new data should be written to.
An integer in `range(0, self.capacity)`.
"""
def __init__(
self,
        capacity: int,
        sample_shapes: Mapping[str, Tuple[int, ...]],
dtypes: Mapping[str, np.dtype],
):
"""Constructs a Buffer.
Args:
capacity: The number of samples that can be stored.
sample_shapes: A dictionary mapping string keys to the shape of
samples associated with that key.
dtypes (`np.dtype`-like): A dictionary mapping string keys to the dtype
of samples associated with that key.
Raises:
KeyError: `sample_shapes` and `dtypes` have different keys.
"""
if sample_shapes.keys() != dtypes.keys():
raise KeyError("sample_shape and dtypes keys don't match")
self.capacity = capacity
self.sample_shapes = {k: tuple(shape) for k, shape in sample_shapes.items()}
self._arrays = {
k: np.zeros((capacity,) + shape, dtype=dtypes[k])
for k, shape in self.sample_shapes.items()
}
self._n_data = 0
self._idx = 0
@classmethod
def from_data(
cls,
data: Dict[str, np.ndarray],
capacity: Optional[int] = None,
truncate_ok: bool = False,
) -> "Buffer":
"""Constructs and return a Buffer containing the provided data.
Shapes and dtypes are automatically inferred.
Args:
data: A dictionary mapping keys to data arrays. The arrays may differ
in their shape, but should agree in the first axis.
capacity: The Buffer capacity. If not provided, then this is automatically
set to the size of the data, so that the returned Buffer is at full
capacity.
truncate_ok: Whether to error if `capacity` < the number of samples in
`data`. If False, then only store the last `capacity` samples from
`data` when overcapacity.
Examples:
            In the following examples, suppose the arrays in `data` are length-1000.
`Buffer` with same capacity as arrays in `data`::
Buffer.from_data(data)
`Buffer` with larger capacity than arrays in `data`::
Buffer.from_data(data, 10000)
            `Buffer` with smaller capacity than arrays in `data`. Without
`truncate_ok=True`, `from_data` will error::
Buffer.from_data(data, 5, truncate_ok=True)
Raises:
ValueError: `data` is empty.
ValueError: `data` has items mapping to arrays differing in the
length of their first axis.
"""
data_capacities = [arr.shape[0] for arr in data.values()]
data_capacities = np.unique(data_capacities)
if len(data) == 0:
raise ValueError("No keys in data.")
if len(data_capacities) > 1:
raise ValueError("Keys map to different length values")
if capacity is None:
capacity = data_capacities[0]
sample_shapes = {k: arr.shape[1:] for k, arr in data.items()}
dtypes = {k: arr.dtype for k, arr in data.items()}
buf = cls(capacity, sample_shapes, dtypes)
buf.store(data, truncate_ok=truncate_ok)
return buf
def store(self, data: Dict[str, np.ndarray], truncate_ok: bool = False) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Args:
data: A dictionary mapping keys `k` to arrays with shape
`(n_samples,) + self.sample_shapes[k]`, where `n_samples` is less
than or equal to `self.capacity`.
truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`. Otherwise, store only the final
`self.capacity` transitions.
Raises:
ValueError: `data` is empty.
ValueError: If `n_samples` is greater than `self.capacity`.
ValueError: data is the wrong shape.
"""
expected_keys = set(self.sample_shapes.keys())
missing_keys = expected_keys.difference(data.keys())
unexpected_keys = set(data.keys()).difference(expected_keys)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys {missing_keys}")
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected keys {unexpected_keys}")
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
if len(n_samples) > 1:
raise ValueError("Keys map to different length values.")
n_samples = n_samples[0]
if n_samples == 0:
raise ValueError("Trying to store empty data.")
if n_samples > self.capacity:
if not truncate_ok:
raise ValueError("Not enough capacity to store data.")
else:
data = {k: arr[-self.capacity :] for k, arr in data.items()}
for k, arr in data.items():
if arr.shape[1:] != self.sample_shapes[k]:
raise ValueError(f"Wrong data shape for {k}")
new_idx = self._idx + n_samples
if new_idx > self.capacity:
n_remain = self.capacity - self._idx
# Need to loop around the buffer. Break into two "easy" calls.
self._store_easy({k: arr[:n_remain] for k, arr in data.items()})
assert self._idx == 0
self._store_easy({k: arr[n_remain:] for k, arr in data.items()})
else:
self._store_easy(data)
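    # A minimal wraparound sketch (hypothetical numbers, not part of the original
    # class): with capacity=3 and _idx=2, storing two samples writes the first row at
    # index 2, wraps to index 0 for the second row, and leaves _idx == 1 afterwards.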
def _store_easy(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Requires that `size(data) <= self.capacity - self._idx`, where `size(data)` is
the number of rows in every array in `data.values()`. Updates `self._idx`
to be the insertion point of the next call to `_store_easy` call,
looping back to `self._idx = 0` if necessary.
Also updates `self._n_data`.
Args:
data: Same as in `self.store`'s docstring, except with the additional
constraint `size(data) <= self.capacity - self._idx`.
"""
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
assert len(n_samples) == 1
n_samples = n_samples[0]
assert n_samples <= self.capacity - self._idx
idx_hi = self._idx + n_samples
for k, arr in data.items():
self._arrays[k][self._idx : idx_hi] = arr
self._idx = idx_hi % self.capacity
self._n_data = min(self._n_data + n_samples, self.capacity)
def sample(self, n_samples: int) -> Dict[str, np.ndarray]:
"""Uniformly sample `n_samples` samples from the buffer with replacement.
Args:
n_samples: The number of samples to randomly sample.
Returns:
samples (np.ndarray): An array with shape
`(n_samples) + self.sample_shape`.
Raises:
ValueError: The buffer is empty.
"""
if self.size() == 0:
raise ValueError("Buffer is empty")
ind = np.random.randint
|
emilybache/texttest-runner
|
src/main/python/storytext/lib/storytext/javarcptoolkit/describer.py
|
Python
|
mit
| 2,499
| 0.008003
|
from storytext.javaswttoolkit import describer as swtdescriber
from org.eclipse.core.internal.runtime import InternalPlatform
from org.eclipse.ui.forms.widgets import ExpandableComposite
import os
from pprint import pprint
class Describer(swtdescriber.Describer):
swtdescriber.Describer.stateWidgets = [ ExpandableComposite ] + swtdescriber.Describer.stateWidgets
swtdescriber.Describer.ignoreChildren = (ExpandableComposite,) + swtdescriber.Describer.ignoreChildren
def buildImages(self):
swtdescriber.Describer.buildImages(self)
self.buildImagesFromBundles()
def buildImagesFromBundles(self):
allImageTypes = [ "gif", "png", "jpg" ]
allImageTypes += [ i.upper() for i in allImageTypes ]
cacheFile = os.path.join(os.getenv("STORYTEXT_HOME"), "osgi_bundle_image_types")
cacheExists = os.path.isfile(cacheFile)
bundleImageTypes = eval(open(cacheFile).read()) if cacheExists else {}
for bundle in InternalPlatform.getDefault().getBundleContext().getBundles():
usedTypes = []
name = bundle.getSymbolicName()
imageTypes = bundleImageTypes.get(name, allImageTypes)
for imageType in imageTypes:
self.logger.debug("Searching bundle " + name + " for images of type " + imageType)
images = bundle.findEntries("/", "*." + imageType, True)
if images and images.hasMoreElements():
self.storeAllImages(images)
usedTypes.append(imageType)
if not cacheExists:
bundleImageTypes[name] = usedTypes
if not cacheExists:
f = open(cacheFile, "w")
pprint(bundleImageTypes, f)
f.close()
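# The cache file written above ends up holding a pprint'ed dict mapping each
# bundle's symbolic name to the image types actually found in it, e.g.
# (illustrative values only): {'org.eclipse.ui': ['gif', 'png'], 'org.eclipse.debug.ui': []}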
def storeAllImages(self, entries):
while entries.hasMoreElements():
url = entries.nextElement()
self.storeImageData(url)
def getExpandableCompositeState(self, widget):
return widget.isExpanded()
def getExpandableCompositeDescription(self, widget):
state = self.getExpandableCompositeState(widget)
self.widgetsWithState[widget] = state
desc = "Expandable '" + widget.getText() + "' "
desc += "(expanded)" if state else "(collapsed)"
if state:
clientDesc = self.getDescription(widget.getClient())
desc += "\n " + clientDesc.replace("\n", "\n ")
return desc
|
Polychart/builder
|
server/polychartQuery/csv/__init__.py
|
Python
|
agpl-3.0
| 65
| 0
|
#!/usr/bin/env python
from connection import Conn as Connection
| |
karrtikr/ete
|
ete3/tools/ete_extract.py
|
Python
|
gpl-3.0
| 2,246
| 0.00089
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
from .common import src_tree_iterator
DESC = ""
def populate_args(extract_args_p):
extract_args = extract_args_p.add_argument_group('TREE EDIT OPTIONS')
extract_args.add_argument("--orthologs", dest="orthologs",
nargs="*",
help="")
extract_args.add_argument("--duplications", dest="duplications",
action="store_true",
help="")
def run(args):
from .. import Tree, PhyloTree
for nw in src_tree_iterator(args):
if args.orthologs is not None:
t = PhyloTree(nw)
for e in t.get_descendant_evol_events():
print(e.in_seqs, e.out_seqs)
|
dwlehman/blivet
|
blivet/devices/md.py
|
Python
|
lgpl-2.1
| 22,939
| 0.000915
|
# devices/md.py
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import os
import six
from gi.repository import BlockDev as blockdev
from ..devicelibs import mdraid, raid
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .raid import RaidDevice
class MDRaidArrayDevice(ContainerDevice, RaidDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
_packages = ["mdadm"]
_devDir = "/dev/md"
_formatClassName = property(lambda s: "mdmember")
_formatUUIDAttr = property(lambda s: "mdUuid")
def __init__(self, name, level=None, major=None, minor=None, size=None,
memberDevices=None, totalDevices=None,
uuid=None, fmt=None, exists=False, metadataVersion=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword uuid: the device UUID
:type uuid: str
:keyword level: the device's RAID level
:type level: any valid RAID level descriptor
:keyword int memberDevices: the number of active member devices
:keyword int totalDevices: the total number of member devices
:keyword metadataVersion: the version of the device's md metadata
:type metadataVersion: str (eg: "0.90")
:keyword minor: the device minor (obsolete?)
:type minor: int
"""
# pylint: disable=unused-argument
# These attributes are used by _addParent, so they must be initialized
# prior to instantiating the superclass.
self._memberDevices = 0 # the number of active (non-spare) members
self._totalDevices = 0 # the total number of members
# avoid attribute-defined-outside-init pylint warning
self._level = None
super(MDRaidArrayDevice, self).__init__(name, fmt=fmt, uuid=uuid,
exists=exists, size=size,
parents=parents,
sysfsPath=sysfsPath)
try:
self.level = level
except errors.DeviceError as e:
# Could not set the level, so set loose the parents that were
# added in superclass constructor.
for dev in self.parents:
dev.removeChild()
raise e
self.uuid = uuid
self._totalDevices = util.numeric_type(totalDevices)
self.memberDevices = util.numeric_type(memberDevices)
self.chunkSize = mdraid.MD_CHUNK_SIZE
if not self.exists and not isinstance(metadataVersion, str):
self.metadataVersion = "default"
else:
self.metadataVersion = metadataVersion
if self.parents and self.parents[0].type == "mdcontainer" and self.type != "mdbiosraidarray":
raise errors.DeviceError("A device with mdcontainer member must be mdbiosraidarray.")
if self.exists and self.mdadmFormatUUID and not flags.testing:
# this is a hack to work around mdadm's insistence on giving
# really high minors to arrays it has no config entry for
with open("/etc/mdadm.conf", "a") as c:
c.write("ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID))
@property
def mdadmFormatUUID(self):
""" This array's UUID, formatted for external use.
:returns: the array's UUID in mdadm format, if available
:rtype: str or NoneType
"""
formatted_uuid = None
if self.uuid is not None:
try:
formatted_uuid = blockdev.md.get_md_uuid(self.uuid)
except blockdev.MDRaidError:
pass
return formatted_uuid
@property
def level(self):
""" Return the raid level
:returns: raid level value
:rtype: an object that represents a RAID level
"""
return self._level
@property
def _levels(self):
""" Allowed RAI
|
D level for this
|
type of device."""
return mdraid.RAID_levels
@level.setter
def level(self, value):
""" Set the RAID level and enforce restrictions based on it.
:param value: new raid level
:param type: object
:raises :class:`~.errors.DeviceError`: if value does not describe
a valid RAID level
:returns: None
"""
try:
level = self._getLevel(value, self._levels)
except ValueError as e:
raise errors.DeviceError(e)
self._level = level
@property
def createBitmap(self):
""" Whether or not a bitmap should be created on the array.
If the array is sufficiently small, a bitmap yields no benefit.
If the array has no redundancy, a bitmap is just pointless.
"""
try:
return self.level.has_redundancy() and self.size >= Size(1000) and self.format.type != "swap"
except errors.RaidError:
# If has_redundancy() raises an exception then this device has
# a level for which the redundancy question is meaningless. In
# that case, creating a write-intent bitmap would be a meaningless
# action.
return False
def getSuperBlockSize(self, raw_array_size):
"""Estimate the superblock size for a member of an array,
given the total available memory for this array and raid level.
:param raw_array_size: total available for this array and level
:type raw_array_size: :class:`~.size.Size`
:returns: estimated superblock size
:rtype: :class:`~.size.Size`
"""
return blockdev.md.get_superblock_size(raw_array_size,
version=self.metadataVersion)
@property
def size(self):
"""Returns the actual or estimated size depending on whether or
not the array exists.
"""
if not self.exists or not self.mediaPresent:
try:
size = self.level.get_size([d.size for d in self.devices],
self.memberDevices,
self.chunkSize,
self.getSuperBlockSize)
except (blockdev.MDRaidError, errors.RaidError) as e:
log.info("could not calculate
|
keras-team/keras-io
|
examples/vision/siamese_contrastive.py
|
Python
|
apache-2.0
| 11,646
| 0.001717
|
"""
Title: Image similarity estimation using a Siamese Network with a contrastive loss
Author: Mehdi
Date created: 2021/05/06
Last modified: 2021/05/06
Description: Similarity learning using a siamese network trained with a contrastive loss.
"""
"""
## Introduction
[Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network)
are neural networks which share weights between two or more sister networks,
each producing embedding vectors of its respective inputs.
In supervised similarity learning, the networks are then trained to maximize the
contrast (distance) between embeddings of inputs of different classes, while minimizing the distance between
embeddings of similar classes, resulting in embedding spaces that reflect
the class segmentation of the training inputs.
"""
"""
## Setup
"""
import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
"""
## Hyperparameters
"""
epochs = 10
batch_size = 16
margin = 1  # Margin for contrastive loss.
"""
## Load the MNIST dataset
"""
(x_train_val, y_train_val), (x_test, y_test) = keras.datasets.mnist.load_data()
# Change the data type to a floating point format
x_train_val = x_train_val.astype("float32")
x_test = x_test.astype("float32")
"""
## Define training and validation sets
"""
# Keep 50% of train_val in validation set
x_train, x_val = x_train_val[:30000], x_train_val[30000:]
y_train, y_val = y_train_val[:30000], y_train_val[30000:]
del x_train_val, y_train_val
"""
## Create pairs of images
We will train the model to differentiate between digits of different classes. For
example, digit `0` needs to be differentiated from the rest of the
digits (`1` through `9`), digit `1` - from `0` and `2` through `9`, and so on.
To carry this out, we will select N random images from class A (for example,
for digit `0`) and pair them with N random images from another class B
(for example, for digit `1`). Then, we can repeat this process for all classes
of digits (until digit `9`). Once we have paired digit `0` with other digits,
we can repeat this process for the remaining classes for the rest of the digits
(from `1` until `9`).
"""
def make_pairs(x, y):
"""Creates a tuple containing image pairs with corresponding label.
Arguments:
x: List containing images, each index in this list corresponds to one image.
y: List containing labels, each label with datatype of `int`.
Returns:
Tuple containing two numpy arrays as (pairs_of_samples, labels),
where pairs_of_samples' shape is (2 * len(x), 2, n_features_dims) and
labels are a binary array of shape (2 * len(x),).
"""
num_classes = max(y) + 1
digit_indices = [np.where(y == i)[0] for i in range(num_classes)]
pairs = []
labels = []
for idx1 in range(len(x)):
# add a matching example
x1 = x[idx1]
label1 = y[idx1]
idx2 = random.choice(digit_indices[label1])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [1]
# add a non-matching example
label2 = random.randint(0, num_classes - 1)
while label2 == label1:
label2 = random.randint(0, num_classes - 1)
idx2 = random.choice(digit_indices[label2])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [0]
return np.array(pairs), np.array(labels).astype("float32")
# make train pairs
pairs_train, labels_train = make_pairs(x_train, y_train)
# make validation pairs
pairs_val, labels_val = make_pairs(x_val, y_val)
# make test pairs
pairs_test, labels_test = make_pairs(x_test, y_test)
"""
We get:
**pairs_train.shape = (60000, 2, 28, 28)**
- We have 60,000 pairs
- Each pair contains 2 images
- Each image has shape `(28, 28)`
"""
"""
Split the training pairs
"""
x_train_1 = pairs_train[:, 0] # x_train_1.shape is (60000, 28, 28)
x_train_2 = pairs_train[:, 1]
"""
Split the validation pairs
"""
x_val_1 = pairs_val[:, 0] # x_val_1.shape = (60000, 28, 28)
x_val_2 = pairs_val[:, 1]
"""
Split the test pairs
"""
x_test_1 = pairs_test[:, 0] # x_test_1.shape = (20000, 28, 28)
x_test_2 = pairs_test[:, 1]
"""
## Visualize pairs and their labels
"""
def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False):
"""Creates a plot of pairs and labels, and prediction if it's test dataset.
Arguments:
pairs: Numpy Array, of pairs to visualize, having shape
(Number of pairs, 2, 28, 28).
to_show: Int, number of examples to visualize (default is 6)
`to_show` must be an integral multiple of `num_col`.
Otherwise it will be trimmed if it is greater than num_col,
and incremented if it is less than num_col.
num_col: Int, number of images in one row - (default is 3)
For test and train respectively, it should not exceed 3 and 7.
predictions: Numpy Array of predictions with shape (to_show, 1) -
(default is None)
Must be passed when test=True.
test: Boolean telling whether the dataset being visualized is
train dataset or test dataset - (default False).
Returns:
None.
"""
# Define num_row
# If to_show % num_col != 0
# trim to_show,
# to trim to_show limit num_row to the point where
# to_show % num_col == 0
#
# If to_show//num_col == 0
# then it means num_col is greater than to_show
# increment to_show
# to increment to_show set num_row to 1
num_row = to_show // num_col if to_show // num_col != 0 else 1
# `to_show` must be an integral multiple of `num_col`
# we found num_row and we have num_col
# to increment or decrement to_show
# to make it integral multiple of `num_col`
# simply set it equal to num_row * num_col
to_show = num_row * num_col
# Plot the images
fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))
for i in range(to_show):
# If the number of rows is 1, the axes array is one-dimensional
if num_row == 1:
ax = axes[i % num_col]
else:
ax = axes[i // num_col, i % num_col]
ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
else:
ax.set_title("Label: {}".format(labels[i]))
if test:
plt.tight_layout(rect=(0, 0, 1.9, 1.9), w_pad=0.0)
else:
plt.tight_layout(rect=(0, 0, 1.5, 1.5))
plt.show()
"""
Inspect training pairs
"""
visualize(pairs_train[:-1], labels_train[:-1], to_show=4, num_col=4)
"""
Inspect validation pairs
"""
visualize(pairs_val[:-1], labels_val[:-1], to_show=4, num_col=4)
"""
Inspect test pairs
"""
visualize(pairs_test[:-1], labels_test[:-1], to_show=4, num_col=4)
"""
## Define the model
There will be two input layers, each leading to its own network, which
produces embeddings. A `Lambda` layer then merges them using an
[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) and the
merged output is fed to the final network.
"""
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
"""Find the Euclidean distance between two vectors.
Arguments:
vects: List containing two tensors of same length.
Returns:
Tensor containing euclidean distance
(as floating point value) between vectors.
"""
x, y = vects
sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
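"""
The model below is trained (later in the original example) with a contrastive
loss. As a reference, a minimal sketch of the standard contrastive loss
(Hadsell et al., 2006) is shown here; it is not part of the original example.
It uses label 1 to mean "matching pair", as produced by `make_pairs`, and a
margin matching the `margin` hyperparameter defined above.
"""


def contrastive_loss_sketch(y_true, y_pred, margin=1.0):
    """Standard contrastive loss: pull matching pairs together, push
    non-matching pairs at least `margin` apart.

    `y_pred` is the predicted Euclidean distance between the two embeddings.
    """
    squared_distance = tf.math.square(y_pred)
    margin_term = tf.math.square(tf.math.maximum(margin - y_pred, 0.0))
    return tf.math.reduce_mean(
        y_true * squared_distance + (1.0 - y_true) * margin_term
    )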
input = layers.Input((28, 28, 1))
x = tf.keras.layers.BatchNormalization()(input)
x = layers.Conv2D(4, (5, 5), activation="tanh")(x)
x = layers.AveragePooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(16, (5, 5), activation="tanh")(x)
x = layers.AveragePooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = tf.keras.layers.BatchNorma
|
tensorflow/hub
|
tensorflow_hub/compressed_module_resolver.py
|
Python
|
apache-2.0
| 3,271
| 0.006114
|
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to resolve TF-Hub Module stored in compressed TGZ format."""
import hashlib
import urllib
import tensorflow as tf
from tensorflow_hub import resolver
LOCK_FILE_TIMEOUT_SEC = 10 * 60 # 10 minutes
_COMPRESSED_FORMAT_QUERY = ("tf-hub-format", "compressed")
def _module_dir(handle):
"""Returns the directory where to cache the module."""
cache_dir = resolver.tfhub_cache_dir(use_temp=True)
return resolver.create_local_module_dir(
cache_dir,
hashlib.sha1(handle.encode("utf8")).hexdigest())
def _is_tarfile(filename):
"""Returns true if 'filename' is TAR file."""
return filename.endswith((".tar", ".tar.gz", ".tgz"))
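# Illustrative behaviour of the helpers above (comments only, not part of the
# original file): _is_tarfile("m.tar.gz") and _is_tarfile("m.tgz") return True,
# while _is_tarfile("m.zip") returns False; _module_dir() maps a handle such as
# "https://tfhub.dev/google/foo/1" to <tfhub_cache_dir>/<sha1 of the handle>.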
class HttpCompressedFileResolver(resolver.HttpResolverBase):
"""Resolves HTTP handles by downloading and decompressing them to local fs."""
def is_supported(self, handle):
# HTTP(S) handles are assumed to point to tarfiles.
if not self.is_http_protocol(handle):
return False
# AUTO defaults to COMPRESSED
load_format = resolver.model_load_format()
return load_format in [
resolver.ModelLoadFormat.COMPRESSED.value,
resolver.ModelLoadFormat.AUTO.value
]
def __call__(self, handle):
module_dir = _module_dir(handle)
def download(handle, tmp_dir):
"""Fetch a module via HTTP(S), handling redirect and download headers."""
request = urllib.request.Request(
self._append_compressed_format_query(handle))
response = self._call_urlopen(request)
return resolver.DownloadManager(handle).download_and_uncompress(
response, tmp_dir)
return resolver.atomic_download(handle, download, module_dir,
self._lock_file_timeout_sec())
def _lock_file_timeout_sec(self):
# This method is provided as a convenience to simplify testing.
return LOCK_FILE_TIMEOUT_SEC
def _append_compressed_format_query(self, handle):
return self._append_format_query(handle, _COMPRESSED_FORMAT_QUERY)
class GcsCompressedFileResolver(resolver.Resolver):
"""Resolves GCS handles by downloading and decompressing them to local fs."""
def is_supported(self, handle):
return handle.startswith("gs://") and _is_tarfile(handle)
def __call__(self, handle):
module_dir = _module_dir(handle)
def download(handle, tmp_dir):
return resolver.DownloadManager(handle).download_and_uncompress(
tf.compat.v1.gfile.GFile(handle, "rb"), tmp_dir)
return resolver.atomic_download(handle, download, module_dir,
LOCK_FILE_TIMEOUT_SEC)
|
jart/tensorflow
|
tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py
|
Python
|
apache-2.0
| 3,539
| 0.004521
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import get_single_element
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class GetSingleElementTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("Zero", 0, 1),
("Five", 5, 1),
("Ten", 10, 1),
("Empty", 100, 1, errors.InvalidArgumentError, "Dataset was empty."),
("MoreThanOne", 0, 2, errors.InvalidArgumentError,
"Dataset had more than one element."),
)
def testGetSingleElement(self, skip, take, error=None, error_msg=None):
skip_t = array_ops.placeholder(dtypes.int64, shape=[])
take_t = array_ops.placeholder(dtypes.int64, shape=[])
def make_sparse(x):
x_1d = array_ops.reshape(x, [1])
x_2d = array_ops.reshape(x, [1, 1])
return sparse_tensor.SparseTensor(x_2d, x_1d, x_1d)
dataset = dataset_ops.Dataset.range(100).skip(skip_t).map(
lambda x: (x * x, make_sparse(x))).take(take_t)
element = get_single_element.get_single_element(dataset)
with self.test_session() as sess:
if error is None:
dense_val, sparse_val = sess.run(
element, feed_dict={
skip_t: skip,
take_t: take
})
self.assertEqual(skip * skip, dense_val)
self.assertAllEqual([[skip]], sparse_val.indices)
self.assertAllEqual([skip], sparse_val.values)
self.assertAllEqual([skip], sparse_val.dense_shape)
else:
with self.assertRaisesRegexp(error, error_msg):
sess.run(element, feed_dict={skip_t: skip, take_t: take})
@parameterized.named_parameters(
("SumZero", 0),
("SumOne", 1),
("SumFive", 5),
("SumTen", 10),
)
def testReduceDataset(self, stop):
def init_fn(_):
return np.int64(0)
def reduce_fn(state, value):
return state + value
def finalize_fn(state):
return state
sum_reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
stop_t = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(stop_t)
element = get_single_element.reduce_dataset(dataset, sum_reducer)
with self.test_session() as sess:
value = sess.run(element, feed_dict={stop_t: stop})
self.assertEqual(stop * (stop - 1) / 2, value)
if __name__ == "__main__":
test.main()
|
gltn/stdm
|
stdm/third_party/sqlalchemy/dialects/sqlite/json.py
|
Python
|
gpl-2.0
| 2,292
| 0
|
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
.. versionadded:: 1.3
.. seealso::
JSON1_
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
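# Illustrative formatting behaviour (comments only, not part of the original
# file): JSONIndexType renders the int index 2 as '$[2]' and the string key
# "attr" as '$."attr"'; JSONPathType renders the path [1, "attr"] as '$[1]."attr"'.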
|
afunTW/moth-graphcut
|
src/view/graphcut_app.py
|
Python
|
mit
| 11,346
| 0.003552
|
import logging
import os
import sys
import tkinter
from tkinter import ttk
sys.path.append('../..')
import cv2
from src.image.imnp import ImageNP
from src.support.tkconvert import TkConverter
from src.view.template import TkViewer
from src.view.tkfonts import TkFonts
from src.view.tkframe import TkFrame, TkLabelFrame
from src.view.ttkstyle import TTKStyle, init_css
LOGGER = logging.getLogger(__name__)
THRESHOLD_OPTION = [(u'手動', 'manual'), ('Mean Adaptive', 'mean'), ('Gaussian Adaptive', 'gaussian')]
class GraphCutViewer(TkViewer):
def __init__(self):
super().__init__()
self._im_w, self._im_h = 800, 533
self._init_window(zoom=False)
self._init_style()
self._init_frame()
self._init_menu()
def _init_style(self):
init_css()
theme = 'default'
if os.name == 'posix':
theme = 'alt'
TTKStyle('H4Padding.TLabelframe', theme=theme, background='gray82')
TTKStyle('H4Padding.TLabelframe.Label', theme=theme, font=('', 16), background='gray82')
TTKStyle('H2BlackBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='black')
TTKStyle('H2RedBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='red')
self.font = TkFonts()
# init frame
def _init_frame(self):
# root
self.frame_root = TkFrame(self.root, bg='white')
self.frame_root.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_root, 0, 1, 2)
self.set_all_grid_columnconfigure(self.frame_root, 0)
# head
self.frame_head = TkFrame(self.frame_root, bg='white')
self.frame_head.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_head, 0)
self.set_all_grid_columnconfigure(self.frame_head, 0)
# body
self.frame_body = TkFrame(self.frame_root, bg='black')
self.frame_body.grid(row=1, column=0, sticky='news')
self.set_all_grid_columnconfigure(self.frame_body, 0, 1)
self.set_all_grid_rowconfigure(self.frame_body, 0)
# body > panel
self.frame_panel = TkFrame(self.frame_body, bg='light pink')
self.frame_panel.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_panel, 0)
self.set_all_grid_columnconfigure(self.frame_panel, 0)
# body > display
self.frame_display = TkFrame(self.frame_body, bg='royal blue')
self.frame_display.grid(row=0, column=1, sticky='news')
self.set_all_grid_rowconfigure(self.frame_display, 0)
self.set_all_grid_columnconfigure(self.frame_display, 0)
# footer
self.frame_footer = TkFrame(self.frame_root, bg='gray82')
self.frame_footer.grid(row=2, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_footer, 0, 1)
self.set_all_grid_columnconfigure(self.frame_footer, 0)
# footer > panel setting
self.frame_panel_setting = ttk.LabelFrame(self.frame_footer, text=u'輸入圖片選項: ', style='H4Padding.TLabelframe')
self.frame_panel_setting.grid(row=0, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_panel_setting, 0, 1)
self.set_all_grid_columnconfigure(self.frame_panel_setting, 0)
# footer > panel setting > template option
self.frame_template_options = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_template_options.grid(row=0, column=0, sticky='news')
# footer > panel setting > gamma
self.frame_gamma = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_gamma.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_gamma, 0)
self.set_all_grid_columnconfigure(self.frame_gamma, 0)
# footer > display setting
self.frame_display_setting = ttk.LabelFrame(self.frame_footer, text=u'輸出圖片選項: ', style='H4Padding.TLabelframe')
self.frame_display_setting.grid(row=1, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_display_setting, 0)
self.set_all_grid_columnconfigure(self.frame_display_setting, 0)
# footer > display setting > threshold options
self.frame_threshold_options = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_threshold_options.grid(row=0, column=0, sticky='news')
# footer > display setting > manual threshold
self.frame_manual_threshold = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_manual_threshold.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_manual_threshold, 0)
self.set_all_grid_columnconfigure(self.frame_manual_threshold, 0)
self._init_widget_head()
self._init_widget_body()
self._init_widget_footer()
# init head widget
def _init_widget_head(self):
self.set_all_grid_rowconfigure(self.frame_head, 0, 1)
self.label_state = ttk.Label(self.frame_head, text=u'現在模式: N/A', style='H2.TLabel')
self.label_state.grid(row=0, column=0, sticky='w')
self.label_resize = ttk.Label(self.frame_head, text=u'原有尺寸 N/A-> 顯示尺寸 N/A', style='H2.TLabel')
self.label_resize.grid(row=1, column=0, sticky='w')
# init body widget
def _init_widget_body(self):
# panel
self.set_all_grid_rowconfigure(self.frame_panel, 0, 1)
self.label_panel = ttk.Label(self.frame_panel, text='Input Panel', style='H2.TLabel')
self.label_panel.grid(row=0, column=0, sticky='ns')
self.photo_panel = ImageNP.generate_checkboard((self._im_h, self._im_w), block_size=10)
self.photo_panel = TkConverter.ndarray_to_photo(self.photo_panel)
self.label_panel_image = ttk.Label(self.frame_panel, image=self.photo_panel)
self.label_panel_image.grid(row=1, column=0, sticky='ns')
# display
self.label_display = ttk.Label(self.frame_display, text='Display', style='H2.TLabel')
self.label_display.grid(row=0, column=0, columnspan=3)
self.set_all_grid_rowconfigure(self.frame_display, 0, 1, 2)
self.set_all_grid_columnconfigure(self.frame_display, 0, 1, 2)
self.photo_small = ImageNP.generate_checkboard((self._im_h//2, self._im_w//3), 10)
self.photo_small = TkConverter.ndarray_to_photo(self.photo_small)
self.photo_large = ImageNP.generate_checkboard((self._im_h, self._im_w//3), 10)
self.photo_large = TkConverter.ndarray_to_photo(self.photo_large)
self.label_fl_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_fl_image.grid(row=1, column=0)
self.label_fr_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_fr_image.grid(row=1, column=1)
self.label_bl_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_bl_image.grid(row=2, column=0)
self.label_br_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_br_image.grid(row=2, column=1)
self.label_body_image = ttk.Label(self.frame_display, image=self.photo_large)
self.label_body_image.grid(row=1, column=2, rowspan=2)
# init footer widget
def _init_widget_footer(self):
# input panel template option
self.label_template = ttk.Label(self.frame_template_options, text=u'過濾方式: ', style='H5.TLabel')
self.label_template.grid(row=0, column=0, sticky='w')
self.val_checkbtn_floodfill = tkinter.StringVar()
self.checkbtn_floodfill = ttk.Checkbutton(
self.frame_template_options,
text=u'floodfill',
variable=self.val_checkbtn_floodfill,
onvalue='on', offvalue='off',
style='H5.TCheckbutton'
)
self.checkbtn_floodfill.grid(row=0, column=1, sticky='w')
# input panel gamma
self.label_gamma = ttk.Label(self.frame_gamma, text=u'調整對比 ({:.2f}): '.format(1.), style='H5.TLabel')
self.label_gamma.grid(row=0, column=0, sticky='w')
|
krahman/BuildingMachineLearningSystemsWithPython
|
ch02/seeds_threshold.py
|
Python
|
mit
| 921
| 0
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from load import load_dataset
import numpy as np
from threshold import learn_model, apply_model, accuracy
features, labels = load_dataset('seeds')
# Turn the labels into a binary array
labels = (labels == 'Canadian')
error = 0.0
for fold in range(10):
training = np.ones(len(features), bool)
# numpy magic to make an array with 10% of 0s starting at fold
training[fold::10] = 0
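# e.g. with fold == 1, indices 1, 11, 21, ... are held out for testing
# in this round (illustrative note, not in the original file)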
# whatever is not training is for testing
testing = ~training
model = learn_model(features[training], labels[training])
test_error = accuracy(features[testing], labels[testing], model)
error += test_error
error /= 10.0
print('Ten fold cross-validated error was {0:.1%}.'.format(error))
|
fujy/ROS-Project
|
src/rbx2/rbx2_tasks/nodes/patrol_smach_iterator.py
|
Python
|
mit
| 9,741
| 0.01314
|
#!/usr/bin/env python
""" patrol_smach_iterator.py - Version 1.0 2013-10-23
Control a robot using SMACH to patrol a square area a specified number of times
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import smach
from smach import StateMachine, Iterator
from smach_ros import SimpleActionState, IntrospectionServer
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import radians, pi
class main():
def __init__(self):
rospy.init_node('patrol_smach', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
self.init()
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
# Wait up to 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move_base action server")
# Track success rate of getting to the goal locations
self.n_succeeded = 0
self.n_aborted = 0
self.n_preempted = 0
self.n_patrols = 2
# Turn the waypoints into SMACH states
nav_states = list()
for waypoint in self.waypoints:
nav_goal = MoveBaseGoal()
nav_goal.target_pose.header.frame_id = 'map'
nav_goal.target_pose.pose = waypoint
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0),
server_wait_timeout=rospy.Duration(10.0))
nav_states.append(move_base_state)
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0))
# Initialize the top level state machine
self.sm = StateMachine(outcomes=['succeeded','aborted','preempted'])
with self.sm:
# Initialize the iterator
self.sm_patrol_iterator = Iterator(outcomes = ['succeeded','preempted','aborted'],
input_keys = [],
it = lambda: range(0, self.n_patrols),
output_keys = [],
it_label = 'index',
exhausted_outcome = 'succeeded')
with self.sm_patrol_iterator:
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted','continue'])
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'continue','aborted':'continue','preempted':'continue'})
# Close the sm_patrol machine and add it to the iterator
Iterator.set_contained_state('PATROL_STATE', self.sm_patrol, loop_outcomes=['continue'])
# Close the top level state machine
StateMachine.add('PATROL_ITERATOR', self.sm_patrol_iterator, {'succeeded':'succeeded', 'aborted':'aborted'})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm, '/SM_ROOT')
intro_server.start()
# Execute the state machine
sm_outcome = self.sm.execute()
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def move_base_result_cb(self, userdata, status, result):
if status == actionlib.GoalStatus.SUCCEEDED:
self.n_succeeded += 1
elif status == actionlib.GoalStatus.ABORTED:
self.n_aborted += 1
elif status == actionlib.GoalStatus.PREEMPTED:
self.n_preempted += 1
try:
rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
except:
pass
def init(self):
# How big is the square we want the robot to patrol?
self.square_size = rospy.get_param("~square_size", 1.0) # meters
# How many times should we execute the patrol loop
self.n_patrols = rospy.get_param("~n_patrols", 3) # number of times to patrol the square
# Create a list to hold the target quaternions (orientations)
quaternions = list()
# First define the corner orientations as Euler angles
euler_angles = (pi/2, pi, 3*pi/2, 0)
# Then convert the angles to quaternions
for angle in euler_angles:
q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
q = Quaternion(*q_angle)
quaternions.append(q)
# Create a list to hold the waypoint poses
self.waypoints = list()
# Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
self.waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
self.waypoints.append(Pose(Point(self.square_size, 0.0, 0.0), quaternions[0]))
self.waypoints.append(Pose(Point(self.square_size, self.square_size, 0.0), quaternions[1]))
self.waypoints.append(Pose(Point(0.0, self.square_size, 0.0), quaternions[2]))
# Initialize the waypoint visualization markers for RViz
self.init_waypoint_markers()
# Set a visualization marker at each waypoint
for waypoint in self.waypoints:
p = Point()
p = waypoint.position
self.waypoint_markers.points.append(p)
# Publisher to manually control the robot (e.g. to stop it)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
rospy.loginfo("Starting SMACH test")
# Publish the waypoint markers
self.marker_pub.publish(self.waypoint_markers)
rospy.sleep(1)
self.marker_pub.publish(self.waypoint_markers)
de
|
adamgreig/agg-kicad
|
scripts/check_mod.py
|
Python
|
mit
| 4,233
| 0
|
"""
check_mod.py
Copyright 2015 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Check all footprint files in a directory against a set of consistency rules.
"""
from __future__ import print_function, division
import sys
import os
import glob
from decimal import Decimal
import argparse
from sexp import parse as sexp_parse
def checkrefval(mod, errs):
for fp_text in (node for node in mod if node[0] == "fp_text"):
if fp_text[1] not in ("reference",
|
"value"):
continue
layer = [n for n in fp_text if n[0] == "layer"][0]
if layer[1] != "F.Fab":
errs.append("Value and Reference fields must be on F.Fab")
if fp_text[1] == "reference" and fp_text[2] != "REF**":
errs.append("Reference field must contain REF**")
if fp_text[1] == "value" and not mod[1].startswith(fp_text[2]):
errs.append("Value field must contain module name")
def checkfont(mod, errs):
for fp_text in (node for node in mod if node[0] == "fp_text"):
effects = [n for n in fp_text if n[0] == "effects"][0]
font = [n for n in effects if n[0] == "font"][0]
size = [n for n in font if n[0] == "size"][0]
thickness = [n for n in font if n[0] == "thickness"][0]
if (Decimal(size[1]) != 1 or Decimal(size[2]) != 1):
errs.append("Font must all be 1mm x 1mm size")
if Decimal(thickness[1]) != Decimal("0.15"):
errs.append("Font must be 0.15mm line thickness")
def checklines(mod, errs, check_layers, check_width):
line_types = ("fp_line", "fp_circle", "fp_arc", "fp_poly", "fp_curve")
for line in (node for node in mod if node[0] in line_types):
layer = [n for n in line if n[0] == "layer"][0]
width = [n for n in line if n[0] == "width"][0]
if layer[1] in check_layers:
if Decimal(width[1]) != Decimal(check_width):
errs.append("Lines on {} must be {}mm wide"
.format(check_layers, check_width))
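# Shape of the nodes handled above (an illustrative sketch, assuming sexp_parse
# returns nested lists of strings):
#   ["fp_line", ["start", "0", "0"], ["end", "25.4", "0"],
#    ["layer", "F.SilkS"], ["width", "0.15"]]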
def checkctyd(mod, errs):
found_ctyd = False
for ctyd in (node for node in mod if node[0] == "fp_line"):
layer = [n for n in ctyd if n[0] == "layer"][0]
width = [n for n in ctyd if n[0] == "width"][0]
start = [n for n in ctyd if n[0] == "start"][0]
end = [n for n in ctyd if n[0] == "end"][0]
ctyd_layers = ("F.CrtYd", "B.CrtYd")
if layer[1] in ctyd_layers:
found_ctyd = True
if Decimal(width[1]) != Decimal("0.01"):
errs.append("Courtyard lines must be 0.01mm wide")
if (Decimal(start[1]) % Decimal("0.05") != 0
or Decimal(start[2]) % Decimal("0.05") != 0
or Decimal(end[1]) % Decimal("0.05") != 0
or Decimal(end[2]) % Decimal("0.05") != 0):
errs.append("Courtyard lines must lie on a 0.05mm grid")
if not found_ctyd:
errs.append("No courtyard found")
def checkmod(path, verbose=False):
errs = []
with open(path) as f:
mod = sexp_parse(f.read())
checkrefval(mod, errs)
checkfont(mod, errs)
checklines(mod, errs, ("F.SilkS", "B.SilkS"), "0.15")
checklines(mod, errs, ("F.Fab", "B.Fab"), "0.01")
checkctyd(mod, errs)
if len(errs) == 0:
if verbose:
print("Checked '{}': OK".format(path))
return True
else:
print("Checked '{}': Error:".format(path), file=sys.stderr)
for err in errs:
print(" " + err, file=sys.stderr)
print("", file=sys.stderr)
return False
def main(prettypath, verbose=False):
ok = True
for f in glob.glob(os.path.join(prettypath, "*.kicad_mod")):
result = checkmod(f, verbose)
if not result:
ok = False
return ok
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("prettypath", type=str,
help="Path to footprints")
parser.add_argument("--verbose", action="store_true",
help="Print out every footprint checked even if OK")
args = vars(parser.parse_args())
result = main(**args)
sys.exit(0 if result else 1)
|