| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
| kornai/4lang | src/fourlang/lexicon.py | Python | mit | 15,540 | 0.000193 |
import copy
import cPickle
import json
import logging
import os
import sys
from nltk.corpus import stopwords as nltk_stopwords
from pymachine.definition_parser import read_defs
from pymachine.machine import Machine
from pymachine.control import ConceptControl
from pymachine.utils import MachineGraph, MachineTraverser
from utils import get_cfg, ensure_dir
import networkx as nx
import csv
import traceback
class Lexicon():
"""A mapping from lemmas to machines"""
@staticmethod
def build_from_4lang(cfg):
fn = cfg.get("machine", "definitions")
primitive_fn = cfg.get("machine", "primitives")
primitives = set(
[line.decode('utf-8').strip() for line in open(primitive_fn)])
logging.info('parsing 4lang definitions...')
pn_index = 1 if cfg.get("deps", "lang") == 'hu' else 0
definitions = read_defs(file(fn), pn_index)
#logging.info('parsed {0} entries, done!'.format(len(definitions)))
logging.info('lowercasing binaries...')
for pn, machines in definitions:
for m in machines:
for node in MachineTraverser.get_nodes(
m, keep_upper=True, names_only=False):
node.printname_ = node.printname_.lower()
logging.info('done!')
lexicon = Lexicon.create_from_dict(definitions, primitives, cfg)
return lexicon
@staticmethod
def load_from_binary(file_name):
logging.info('loading lexicon from {0}...'.format(file_name))
data = cPickle.load(file(file_name))
machines_dump, ext_machines_dump = map(
lambda s: json.loads(data[s]), ("def", "ext"))
cfg, primitives = data['cfg'], data['prim']
lexicon = Lexicon.create_from_dumps(machines_dump, ext_machines_dump,
primitives, cfg)
logging.info('done!')
return lexicon
def save_to_binary(self, file_name):
logging.info('saving lexicon to {0}...'.format(file_name))
data = {
"def": json.dumps(Lexicon.dump_machines(self.lexicon)),
"ext": json.dumps(Lexicon.dump_machines(self.ext_lexicon)),
"prim": self.primitives,
"cfg": self.cfg}
with open(file_name, 'w') as out_file:
cPickle.dump(data, out_file)
logging.info('done!')
@staticmethod
def create_from_dumps(machines_dump, ext_machines_dump, primitives, cfg):
"""builds the lexicon from dumps created by Lexi
|
con.dump_machines"""
lexicon = Lexicon(cfg)
lexicon.primitives = primitives
for word, dumped_def_graph in machines_dump.iteritems():
new_machine = Machine(word, ConceptControl())
lexicon.add_def_graph(word, new_machine, dumped_def_graph)
lexicon.add(word, new_machine, external=False)
for word, dumped_def_graph in ext_machines_dump.iteritems():
new_machine = Machine(word, ConceptControl())
lexicon.add_def_graph(word, new_machine, dumped_def_graph)
lexicon.add(word, new_machine, external=True)
return lexicon
def add_def_graph(self, word, word_machine, dumped_def_graph,
allow_new_base=False, allow_new_ext=False):
node2machine = {}
graph = MachineGraph.from_dict(dumped_def_graph)
for node in graph.nodes_iter():
pn = "_".join(node.split('_')[:-1])
if pn == word:
node2machine[node] = word_machine
else:
if not pn:
logging.warning(u"empty pn in node: {0}, word: {1}".format(
node, word))
node2machine[node] = self.get_machine(pn, new_machine=True)
for node1, adjacency in graph.adjacency_iter():
machine1 = node2machine[node1]
for node2, edges in adjacency.iteritems():
machine2 = node2machine[node2]
for i, attributes in edges.iteritems():
part_index = attributes['color']
machine1.append(machine2, part_index)
@staticmethod
def dump_definition_graph(machine, seen=set()):
graph = MachineGraph.create_from_machines([machine])
return graph.to_dict()
@staticmethod
def dump_machines(machines):
"""processes a map of lemmas to machines and dumps them to lists
of strings, for serialization"""
dump = {}
for word, machine_set in machines.iteritems():
if len(machine_set) > 1:
raise Exception("cannot dump lexicon with ambiguous \
printname: '{0}'".format(word))
machine = next(iter(machine_set))
# logging.info('dumping this: {0}'.format(
# MachineGraph.create_from_machines([machine]).to_dot()))
try:
dump[word] = Lexicon.dump_definition_graph(machine)
except:
traceback.print_exc()
logging.warning('skipping word {0}'.format(word))
return dump
@staticmethod
def create_from_dict(word2machine, primitives, cfg):
lexicon = Lexicon(cfg)
lexicon.lexicon = dict(word2machine)
lexicon.primitives = primitives
return lexicon
def __init__(self, cfg):
self.cfg = cfg
self.lexicon = {}
self.ext_lexicon = {}
self.oov_lexicon = {}
self._known_words = None
self.expanded = set()
self.expanded_lexicon = {}
self.stopwords = set(nltk_stopwords.words('english'))
self.stopwords.add('as') # TODO
self.stopwords.add('root') # TODO
self.full_graph = None
self.shortest_path_dict = None
def get_words(self):
return set(self.lexicon.keys()).union(set(self.ext_lexicon.keys()))
def known_words(self):
if self._known_words is None:
self._known_words = self.get_words()
return self._known_words
def add(self, printname, machine, external=True, oov=False):
if printname in self.oov_lexicon:
assert oov is False
del self.oov_lexicon[printname]
lexicon = self.oov_lexicon if oov else (
self.ext_lexicon if external else self.lexicon)
self._add(printname, machine, lexicon)
def _add(self, printname, machine, lexicon):
if printname in lexicon:
raise Exception("duplicate word in lexicon: '{0}'".format(lexicon))
lexicon[printname] = set([machine])
def get_expanded_definition(self, printname):
machine = self.expanded_lexicon.get(printname)
if machine is not None:
return machine
machine = copy.deepcopy(self.get_machine(printname))
self.expand_definition(machine)
self.expanded_lexicon[printname] = machine
return machine
def get_machine(self, printname, new_machine=False, allow_new_base=False,
allow_new_ext=False, allow_new_oov=True):
"""returns the lowest level (base < ext < oov) existing machine
for the printname. If none exist, creates a new machine in the lowest
level allowed by the allow_* flags. Will always create new machines
for uppercase printnames"""
# returns a new machine without adding it to any lexicon
if new_machine:
return Machine(printname, ConceptControl())
# TODO
if not printname:
return self.get_machine("_empty_")
if printname.isupper():
# return self.get_machine(printname, new_machine=True)
return self.get_machine(
printname=printname.lower(), new_machine=new_machine,
allow_new_base=allow_new_base, allow_new_ext=allow_new_ext,
allow_new_oov=allow_new_oov)
machines = self.lexicon.get(
printname, self.ext_lexicon.get(
printname, self.oov_lexicon.get(printname, set())))
if len(machines) == 0:
# logging.info(
# u'creating new machine for unknown word: "{0}"'.format(
#
|
| xdnian/pyml | code/ch02/perceptron.py | Python | mit | 1,571 | 0.000637 |
import numpy as np
class Perceptron(object):
"""Perceptron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
errors_ : list
Number of misclassifications in every epoch.
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""Fit training data.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
""
|
"
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, -1)
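A minimal usage sketch for the Perceptron class above (not part of the original file; the toy feature matrix and labels are illustrative only):
import numpy as np
# Hypothetical, linearly separable toy data: two features, labels in {-1, 1}
X = np.array([[2.0, 3.0], [1.0, 1.5], [-1.0, -2.0], [-2.0, -1.0]])
y = np.array([1, 1, -1, -1])
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
print(ppn.errors_)                         # misclassifications per epoch
print(ppn.predict(np.array([1.5, 2.0])))   # expected: 1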
|
| ivanlyon/exercises | test/test_k_hnumbers.py | Python | mit | 1,101 | 0.001817 |
import io
import unittest
from unittest.mock import patch
from kattis import k_hnumbers
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input(self):
'''Run and assert problem statement sample input and output.'''
inputs = []
inputs.append('21')
inputs.append('85')
inputs.append('789')
inputs.append('0')
inputs = '\n'.join(inputs) + '\n'
outputs = []
outputs.append('21 0')
outputs.append('85 5')
outputs.append('789 62')
outputs = '\n'.join(outputs) + '\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_hnumbers.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
|
| iulian787/spack | var/spack/repos/builtin/packages/kcov/package.py | Python | lgpl-2.1 | 1,192 | 0.001678 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kcov(CMakePackage):
"""Code coverage tool for compiled programs, Python and Bash which uses
debugging information to collect and report data without special
compilation options"""
homepage = "http://simonkagstrom.github.io/kcov/index.html"
url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz"
version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')
depends_on('cmake@2.8.4:', type='build')
depends_on('zlib')
depends_on('curl')
def cmake_args(self):
# Necessary at least on macOS, fixes linking error to LLDB
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb
return ['-DSPECIFY_RPATH=ON']
@run_after('install')
@on_package_attributes(run_tests=True)
def test_install(self):
# The help message exits with an exit code of 1
kcov = Executable(self.prefix.bin.kcov)
kcov('-h', ignore_errors=1)
|
| xorpaul/shinken | shinken/webui/plugins_skonf/action/action.py | Python | agpl-3.0 | 2,125 | 0.001882 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
### Will be populated by the UI with its own value
app = None
# We will need external commands here
import time
from shinken.external_command import ExternalCommand, ExternalCommandManager
# Our page
def get_page(cmd=None):
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
return {'status': 401, 'text': 'Invalid session'}
now = int(time.time())
print "Ask us an /action page", cmd
elts = cmd.split('/')
cmd_name = elts[0]
cmd_args = elts[1:]
print "Got command", cmd_name
print "And args", cmd_args
# Check if the command exist in the external command list
if cmd_name not in ExternalCommandManager.commands:
return {'status': 404, 'text': 'Unknown command %s' % cmd_name}
extcmd = '[%d] %s' % (now, ';'.join(elts))
print "Got the; form", extcmd
# Ok, if good, we can launch the command
extcmd = extcmd.decode('utf8', 'replace')
e = ExternalCommand(extcmd)
print "Creating the command", e.__dict__
app.push_external_command(e)
return {'status': 200, 'text': 'Command launched'}
pages = {get_page: {'routes': ['/action/:cmd#.+#']}}
|
| runiq/modeling-clustering | find-correct-cluster-number/clustering_run.py | Python | bsd-2-clause | 6,261 | 0.003035 |
# TODO:
# - Figure out when to use previous runs' information
# - merge this module's parse_clustermerging and
# newick.parse_clustermerging
from StringIO import StringIO
from os import remove
import shutil
import os.path as op
import subprocess as sp
import sys
import numpy as np
CM_FILE = 'ClusterMerging.txt'
class ClusteringRun(object):
def __init__(self, prmtop=None, mask='@CA,C,O,N', start_n_clusters=2,
n_clusters=50, ptraj_trajin_fn='ptraj_trajin', cm_fn=CM_FILE,
cn_fns=None, prefix='c', log_fn=None, no_ssr_sst=False,
use_cpptraj=False):
self.prmtop = prmtop
self.mask = mask
self.start_n_clusters = start_n_clusters
self.n_clusters = n_clusters
self.ptraj_trajin_fn = ptraj_trajin_fn
self.cm_fn = cm_fn
self.no_ssr_sst = no_ssr_sst
if use_cpptraj:
self._ptraj_prg = 'cpptraj'
else:
self._ptraj_prg = 'ptraj'
if cn_fns is None:
self.cn_fns = {}
else:
self.cn_fns = cn_fns
self.prefix = prefix
self.log_fn = log_fn
# No clustering run necessary if all of the following conditions
# apply:
# - ClusterMerging.txt exists
# - self.cn_fns is not empty
# - every file in self.cn_fns exists
# Incidentally, fuck yeah generators. They make code look like
# Lisp, however. http://xkcd.com/297/
if all((op.exists(self.cm_fn),
self.cn_fns,
all(op.exists(fn) for fn in self.cn_fns.itervalues()))):
pass
else:
self.cluster()
pass
self.n_decoys = self._get_n_decoys()
def _get_n_decoys(self):
with open(self.cm_fn) as cm_file:
for i, _ in enumerate(cm_file, start=1):
# i is the number of nodes in the clustering tree and is
# always 1 lower than the number of decoys if we cluster
# up to a single cluster
# Did that just make sense to anyone?
pass
self.n_decoys = i + 1
return self.n_decoys
def _cluster(self, script, append=False):
def _run_ptraj(script_fn):
# God I hate plumbing
writemode = 'a' if append else 'w'
if self.log_fn is None:
log_fh = self.log_fn
else:
log_fh = open(self.log_fn, writemode)
with log_fh as logfile:
return sp.check_call([self._ptraj_prg, self.prmtop,
script_fn], stdout=logfile, stderr=logfile)
try:
remove('ptraj_script')
except OSError as e:
if e.errno == 2:
pass
shutil.copy(self.ptraj_trajin_fn, 'ptraj_script')
with open('ptraj_script', 'a') as ptraj_script:
ptraj_script.write(script)
return _run_ptraj(script_fn='ptraj_script')
def _run_c1(self):
ptraj_single = ('cluster out c1 representative none average none all '
'none averagelinkage clusters 1 rms {mask}'.format(mask=self.mask))
self._cluster(script=ptraj_single)
self.cn_fns[1] = '{prefix}1.txt'.format(prefix=self.prefix)
return self.cn_fns
def _run_cn(self):
clusterstring = ('cluster out c{n} representative none average none all '
'none ReadMerge clusters {n} rms {mask}')
ptraj_full = ('\n'.join(clusterstring.format(n=i, mask=self.mask) for i
in xrange(self.start_n_clusters, self.n_clusters+1)))
self._cluster(script=ptraj_full)
self.cn_fns.update({i: 'c{n}.txt'.format(n=i) for i in
xrange(self.start_n_clusters, self.n_clusters+1)})
return self.cn_fns
def _parse_clustermerging(self, reverse=True):
clustermerging = np.genfromtxt(self.cm_fn,
# - pSF values are only computed for the last 50 records
# - The results for n_cluster = 1 are not helpful
skip_header=self.n_decoys - 51, skip_footer=1,
dtype=[('n', 'i8'), ('rmsd', 'f8'), ('dbi', 'f8'), ('psf', 'f8')],
usecols=(0, 3, 4, 5), invalid_raise=False,
converters={0: lambda s: int(s.rstrip(':')) + self.n_decoys + 1})
if reverse:
step = -1
else:
step = 1
self._n = clustermerging['n'][::step]
self._dbi = clustermerging['dbi'][::step]
self._psf = clustermerging['psf'][::step]
self._rmsd = clustermerging['rmsd'][::step]
def _get_ssr_ssts(self):
ssr_ssts = []
for i, fn in sorted(self.cn_fns.iteritems()):
with open(fn) as fh:
for line in fh:
if line.startswith('#SSR/SST: '):
ssr_sst_pre = line.split()[1]
# '-nan' is the value for a single cluster
if ssr_sst_pre != '-nan':
ssr_sst = float(ssr_sst_pre)
ssr_ssts.append(ssr_sst)
self._ssr_sst = np.array(ssr_ssts)
def cluster(self):
if self.n_clusters == 1:
self._run_c1()
elif (self.n_clusters > 1 and op.exists(self.cm_fn) and
not self.no_ssr_sst):
# If we don't want to plot SSR/SST values, the subsequent
# clustering runs aren't necessary, because all other
# metrics are present in ClusterMerging.txt already
self._run_cn()
else:
print "No valid cluster number specified"
sys.exit(1)
def gather_metrics(self):
self._parse_clustermerging()
if self.no_ssr_sst:
imetrics = zip(self._n, self._rmsd, self._dbi, self._psf)
self.metrics = np.rec.fromrecords(imetrics,
names=('n', 'rmsd', 'dbi', 'psf'))
else:
self._get_ssr_ssts()
imetrics = zip(self._n, self._rmsd, self._dbi, self._psf,
self._ssr_sst)
self.metrics = np.rec.fromrecords(imetrics,
names=('n', 'rmsd', 'dbi', 'psf', 'ssr_sst'))
return self.metrics
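A hedged usage sketch for ClusteringRun, assuming ptraj/cpptraj is installed and that a previous run has already written ClusterMerging.txt to the working directory (the topology file name below is a placeholder):
run = ClusteringRun(prmtop='system.prmtop',          # placeholder topology file
                    ptraj_trajin_fn='ptraj_trajin',  # file with trajin commands
                    start_n_clusters=2, n_clusters=50,
                    use_cpptraj=True)
metrics = run.gather_metrics()
print(metrics['dbi'])   # Davies-Bouldin index for each cluster count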
|
| the-zebulan/CodeWars | tests/kyu_6_tests/test_write_number_in_expanded_form.py | Python | mit | 394 | 0 |
import unittest
from katas.kyu_6.write_number_in_expanded_form import expanded_form
class ExpandedFormTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(expanded_form(12), '10 + 2')
def test_equal_2(self):
self.assertEqual(expanded_form(42), '40 + 2')
def test_equal_3(self):
self.assertEqual(expanded_form(70304), '70000 + 300 + 4')
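A hedged reference implementation that would satisfy the assertions above (a sketch only; the kata's actual solution file is not part of this snippet):
def expanded_form(num):
    digits = str(num)
    # keep each non-zero digit, padded with the zeros of its place value
    parts = [d + '0' * (len(digits) - i - 1)
             for i, d in enumerate(digits) if d != '0']
    return ' + '.join(parts)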
| |
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/freestyle/style_modules/nature.py | Python | gpl-3.0 | 1,715 | 0.001749 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : nature.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Uses the NatureUP1D predicate to select the lines
# of a given type (among Nature.SILHOUETTE, Nature.CREASE, Nature.SUGGESTIVE_CONTOURS,
# Nature.BORDERS).
# The suggestive contours must have been enabled in the
# options dialog to appear in the View Map.
from freestyle import ChainSilhouetteIterator, IncreasingColorShader, \
IncreasingThicknessShader, Nature, Operators, TrueUP1D
from PredicatesU1D import pyNatureUP1D
from logical_operators import NotUP1D
Operators.select(pyNatureUP1D(Nature.SILHOUETTE))
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(pyNatureUP1D(Nature.SILHOUETTE)))
shaders_list = [
IncreasingThicknessShader(3, 10),
IncreasingColorShader(0.0, 0.0, 0.0, 1, 0.8, 0, 0, 1),
]
Operators.create(TrueUP1D(), shaders_list)
|
| ragarwal6397/sqoot | sqoot/tests/__init__.py | Python | mit | 757 | 0.002642 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2014 Rajat Agarwal
import os, sys
import unittest
import sqoot
if 'PUBLIC_API_KEY' in os.environ and 'PRIVATE_API_KEY' in os.environ:
PUBLIC_API_KEY = os.environ['PUBLIC_API_KEY']
PRIVATE_API_KEY = os.environ['PRIVATE_API_KEY']
else:
try:
from _creds import *
except ImportError:
print "Please create a creds.py file
|
in this package, based upon creds.example.py"
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append('/home/ragarwal/sqoot')
class BaseEndpointTestCase(unittest.TestCase):
def setUp(self):
self.api = sqoot.Sqoot(
privateApiKey=PRIVATE_API_KEY,
publicApiKey=PUBLIC_API_KEY,
)
|
| zielmicha/satori | satori.core/satori/core/wsgi.py | Python | mit | 343 | 0.005831 |
import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
|
| intel-ctrlsys/actsys | actsys/control/provisioner/provisioner.py | Python | apache-2.0 | 3,948 | 0.001773 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Intel Corp.
#
"""
Interface for all resource control plugins.
"""
from abc import ABCMeta, abstractmethod
from ..plugin import DeclareFramework
@DeclareFramework('provisioner')
class Provisioner(object, metaclass=ABCMeta):
PROVISIONER_KEY = "provisioner"
PROVISIONER_IMAGE_KEY = "image"
PROVISIONER_BOOTSTRAP_KEY = "provisioner_bootstrap"
PROVISIONER_FILE_KEY = "provisioner_files"
PROVISIONER_KARGS_KEY = "provisioner_kernel_args"
PROVISIONER_UNSET_KEY = "UNDEF"
@abstractmethod
def add(self, device):
"""
Attempts to add a device to the provisioner. Does nothing if the device is already added.
:param device:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def delete(self, device):
"""
Attempts to remove a device from the provisioner. Does nothing if the device isn't already there.
:param device:
:return: Updated device with the correct fields removed
"""
pass
@abstractmethod
def set_ip_address(self, device, ip_address, interface="eth0"):
"""
Mutate the device to include this ip_address.
Save it to the DataStore
And set it in the provisioner
:param device:
:param ip_address:
:param interface:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def set_hardware_address(self, device, hardware_address, interface="eth0"):
"""
Same as Provisioner.set_ip_address
:param device:
:param hardware_address:
:param interface:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def set_image(self, device, image):
"""
Set an image (already known by the provisioner) to a given device.
:param device:
:param image:
:param kernel:
:param network_interface:
:return: Updated device with the new fields applied
:raise: ProvisionException, the image specified is not known to the provisioner
"""
pass
@abstractmethod
def set_bootstrap(self, device, bootstrap):
"""
:param device:
:param bootstrap:
:return: Updated device with the new fields applied
:raise: ProvisionException, the bootstrap specified is not known to the provisioner
"""
pass
@abstractmethod
def set_files(self, device, files):
"""
:param device:
:param files:
:return: Updated device with the new fields applied
:raise: ProvisionException, the file(s) specified is not known to the provisioner
"""
pass
@abstractmethod
def set_kernel_args(self, device, args):
"""
:param device:
:param args:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def list(self):
"""
List all devices that the provisioner knows about.
does this come the DataStore or Warewulf?
:return: return the list of device names
"""
pass
@abstractmethod
def list_images(self):
"""
List all the images this provisioner knows about.
:return: list of known images (names only)
"""
pass
class ProvisionerException(Exception):
"""
A staple Exception thrown by the Provisioner
"""
def __init__(self, msg, command_output=None):
super(ProvisionerException, self).__init__()
self.msg = msg
if command_output is not None:
self.cmd_stdout = command_output.stdout
self.cmd_stderr = command_output.stderr
self.cmd_return_code = command_output.return_code
def __str__(self):
return repr(self.msg)
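A minimal sketch of a concrete plugin against the interface above; the class name and the in-memory behaviour are assumptions for illustration, not part of actsys:
class InMemoryProvisioner(Provisioner):
    """Illustrative plugin: records settings directly on plain device dicts."""
    def __init__(self):
        self._devices = {}
        self._images = {"default"}   # hypothetical known image
    def add(self, device):
        self._devices.setdefault(device["hostname"], device)   # "hostname" key is assumed
        return device
    def delete(self, device):
        self._devices.pop(device.get("hostname"), None)
        return device
    def set_ip_address(self, device, ip_address, interface="eth0"):
        device["ip_address"] = {interface: ip_address}
        return device
    def set_hardware_address(self, device, hardware_address, interface="eth0"):
        device["hardware_address"] = {interface: hardware_address}
        return device
    def set_image(self, device, image):
        if image not in self._images:
            raise ProvisionerException("image not known: %s" % image)
        device[self.PROVISIONER_IMAGE_KEY] = image
        return device
    def set_bootstrap(self, device, bootstrap):
        device[self.PROVISIONER_BOOTSTRAP_KEY] = bootstrap
        return device
    def set_files(self, device, files):
        device[self.PROVISIONER_FILE_KEY] = files
        return device
    def set_kernel_args(self, device, args):
        device[self.PROVISIONER_KARGS_KEY] = args
        return device
    def list(self):
        return list(self._devices)
    def list_images(self):
        return sorted(self._images)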
|
| davideuler/foursquared | util/gen_parser.py | Python | apache-2.0 | 4,392 | 0.006831 |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodesForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extraneous } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
# CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
if __name__ == '__main__':
main()
|
| shaded-enmity/feaders | feader/utils.py | Python | mit | 173 | 0.023121 |
#!/usr/bin/python -tt
from graph.node import Node, FileNode
def endswith(s, pats):
return any(s.endswith(p) for p in pats)
|
def create_graph(files):
return Node(None)
|
| wolverineav/neutron | neutron/extensions/external_net.py | Python | apache-2.0 | 2,051 | 0 |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
class ExternalNetworkInUse(nexception.InUse):
message = _("External network %(net_id)s cannot be updated to be made "
"non-external, since it has existing gateway ports")
# For backward compatibility the 'router' prefix is kept.
EXTERNAL = 'router:external'
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {EXTERNAL: {'allow_post': True,
'allow_put': True,
'default': False,
'is_visible': True,
'convert_to': attr.convert_to_boolean,
'enforce_policy': True,
'required_by_policy': True}}}
class External_net(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron external network"
@classmethod
def get_alias(cls):
return "external-net"
@classmethod
def get_description(cls):
return _("Adds external network attribute to network resource.")
@classmethod
def get_updated(cls):
return "2013-01-14T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
| ShaguptaS/moviepy | setup.py | Python | mit | 419 | 0.016706 |
#!/usr/bin/env python
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(name='moviepy',
version='0.2.1.6.9',
author='Zulko 2013',
description='Module for script-based video editing',
long_description=open('README.rst').read(),
license='LICENSE.txt',
keywords="movie editing film mixing script-based",
packages= find_packages(exclude='docs'))
| |
| Vanuan/gpx_to_road_map | converters/location.py | Python | apache-2.0 | 4,821 | 0.009334 |
#
# Location-related classes for simplification of GPS traces.
# Author: James P. Biagioni (jbiagi1@uic.edu)
# Company: University of Illinois at Chicago
# Created: 5/16/11
#
import os
class Location:
def __init__(self, id, latitude, longitude, time):
self.id = id
self.latitude = latitude
self.longitude = longitude
self.orig_latitude = latitude
self.orig_longitude = longitude
self.time = time
self.prev_location = None
self.next_location = None
def __str__(self):
location_string = str(self.id) + "," + str(self.latitude) + "," + str(self.longitude) + "," + str(self.time)
if (self.prev_location is not None):
location_string += "," + str(self.prev_location.id)
else:
location_string += ",None"
if (self.next_location is not None):
location_string += "," + str(self.next_location.id)
else:
location_string += ",None"
return location_string
class Trip:
def __init__(self):
self.locations = []
def add_location(self, bus_location):
self.locations.append(bus_location)
@property
def num_locations(self):
return len(self.locations)
@property
def start_time(self):
return self.locations[0].time
@property
def end_time(self):
return self.locations[-1].time
@property
def time_span(self):
return (self.locations[-1].time - self.locations[0].time)
class TripLoader:
@staticmethod
def get_all_trips(trips_path):
# storage for all trips
all_trips = []
# get trip filenames
trip_filenames = os.listdir(trips_path)
# iterate through all trip filenames
for trip_filename in trip_filenames:
# if filename starts with "trip_"
if (trip_filename.startswith("trip_") is True):
# load trip from file
curr_trip = TripLoader.load_trip_from_file(trips_path + trip_filename)
# add trip to all_trips list
all_trips.append(curr_trip)
# return all trips
return all_trips
@staticmethod
def load_trip_from_file(trip_filename):
# create new trip object
new_trip = Trip()
# create new trip locations dictionary
new_trip_locations = {} # indexed by location id
# open trip file
trip_file = open(trip_filename, 'r')
prev_location = None
# read through trip file, a line at a time
for trip_location in trip_file:
# parse out location elements
location_elements = trip_location.strip('\n').split(',')
# create new location object
new_location = Location(str(location_elements[0]), float(location_elements[1]), float(location_elements[2]), float(location_elements[3]))
# store new trip location
new_trip_locations[new_location.id] = new_location
if prev_location:
new_location.prev_location_id = prev_location.id
prev_location.next_location_id = new_location.id
else:
new_location.prev_location_id = "None"
prev_location = new_location
# store prev/next_location id
#new_location.prev_location_id = "None"#str(location_elements[4])
#new_location.next_location_id = "None"#str(location_elements[5])
#new_location.prev_location_id = str(location_elements[4])
#new_location.next_location_id = str(location_elements[5])
# add new location to trip
new_trip.add_location(new_location)
new_location.prev_location_id = prev_location.id
prev_location.next_location_id = new_location.id
# close trip file
trip_file.close()
# iterate through trip locations, and connect pointers
for trip_location in new_trip.locations:
# connect prev_location pointer
if (trip_location.prev_location_id != "None"):
trip_location.prev_location = new_trip_locations[trip_location.prev_location_id]
else:
trip_location.prev_location = None
# connect next_location pointer
if (trip_location.next_location_id != "None"):
trip_location.next_location = new_trip_locations[trip_location.next_location_id]
else:
trip_location.next_location = None
# return new trip
return new_trip
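A small usage sketch for the Location and Trip classes above (coordinates and timestamps are made up for illustration):
trip = Trip()
points = [(41.87, -87.65, 0.0), (41.88, -87.64, 30.0), (41.89, -87.63, 60.0)]
for i, (lat, lon, t) in enumerate(points):
    trip.add_location(Location(str(i), lat, lon, t))
print(trip.num_locations)   # 3
print(trip.time_span)       # 60.0, seconds between first and last fix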
|
| Programmica/python-gtk3-tutorial | conf.py | Python | cc0-1.0 | 7,069 | 0.006649 |
# -*- coding: utf-8 -*-
#
# Python GTK+ 3 Tutorial documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 29 18:42:04 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python GTK+ 3 Tutorial'
copyright = u'2012, Andrew Steele'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonGTK3Tutorialdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PythonGTK3Tutorial.tex', u'Python GTK+ 3 Tutorial Documentation',
u'Andrew Steele', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythongtk3tutorial', u'Python GTK+ 3 Tutorial Documentation',
[u'Andrew Steele'], 1)
]
|
| postlund/home-assistant | homeassistant/components/config/entity_registry.py | Python | apache-2.0 | 4,955 | 0.001816 |
"""HTTP views to interact with the entity registry."""
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.components.websocket_api.decorators import (
async_response,
require_admin,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
async def async_setup(hass):
"""Enable the Entity Registry views."""
hass.components.websocket_api.async_register_command(websocket_list_entities)
hass.components.websocket_api.async_register_command(websocket_get_entity)
hass.components.websocket_api.async_register_command(websocket_update_entity)
hass.components.websocket_api.async_register_command(websocket_remove_entity)
return True
@async_response
@websocket_api.websocket_command({vol.Required("type"): "config/entity_registry/list"})
async def websocket_list_entities(hass, connection, msg):
"""Handle list registry entries command.
Async friendly.
"""
registry = await async_get_registry(hass)
connection.send_message(
websocket_api.result_message(
msg["id"], [_entry_dict(entry) for entry in registry.entities.values()]
)
)
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/get",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_get_entity(hass, connection, msg):
"""Handle get entity registry entry command.
Async friendly.
"""
registry = await async_get_registry(hass)
entry = registry.entities.get(msg["entity_id"])
if entry is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
connection.send_message(websocket_api.result_message(msg["id"], _entry_dict(entry)))
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/update",
vol.Required("entity_id"): cv.entity_id,
# If passed in, we update value. Passing None will remove old value.
vol.Optional("name"): vol.Any(str, None),
vol.Optional("icon"): vol.Any(str, None),
vol.Optional("new_entity_id"): str,
# We only allow setting disabled_by user via API.
vol.Optional("disabled_by"): vol.Any("user", None),
}
)
async def websocket_update_entity(hass, connection, msg):
"""Handle update entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
changes = {}
for key in ("name", "icon", "disabled_by"):
if key in msg:
changes[key] = msg[key]
if "new_entity_id" in msg and msg["new_entity_id"] != msg["entity_id"]:
changes["new_entity_id"] = msg["new_entity_id"]
if hass.states.get(msg["new_entity_id"]) is not None:
connection.send_message(
websocket_api.error_message(
msg["id"], "invalid_info", "Entity is already registered"
)
)
return
try:
if changes:
entry = registry.async_update_entity(msg["entity_id"], **changes)
except ValueError as err:
connection.send_message(
websocket_api.error_message(msg["id"], "invalid_info", str(err))
)
else:
connection.send_message(
websocket_api.result_message(msg["id"], _entry_dict(entry))
)
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/remove",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_remove_entity(hass, connection, msg):
"""Handle remove entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
registry.async_remove(msg["entity_id"])
connection.send_message(websocket_api.result_message(msg["id"]))
@callback
def _entry_dict(entry):
"""Convert entry to API format."""
return {
"config_entry_id": entry.config_entry_id,
"device_id": entry.device_id,
"disabled_by": entry.disabled_by,
"entity_id": entry.entity_id,
"name": entry.name,
"icon": entry.icon,
"platform": entry.platform,
"original_name": entry.original_name,
"original_icon": entry.original_icon,
}
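For reference, a client message matching the update schema above could look like the following (a hedged example; the entity id and field values are illustrative):
update_msg = {
    "id": 24,                                  # websocket message id
    "type": "config/entity_registry/update",
    "entity_id": "light.kitchen",
    "name": "Kitchen Ceiling",                 # None would clear the override
    "new_entity_id": "light.kitchen_ceiling",
    "disabled_by": None,
}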
|
| ostrokach/elaspic | elaspic/cli/elaspic_run.py | Python | mit | 7,164 | 0.002233 |
"""ELASPIC RUN
"""
import os
import os.path as op
import logging
import argparse
from elaspic import conf, pipeline
logger = logging.getLogger(__name__)
def validate_args(args):
if args.config_file and not os.path.isfile(args.config_file):
raise Exception('The configuration file {} does not exist!'.format(args.config_file))
if ((args.uniprot_id is None and args.structure_file is None) or
(args.uniprot_id is not None and args.structure_file is not None)):
raise Exception("""\
One of '-u' ('--uniprot_id') or '-p' ('--structure_file') must be specified!""")
if (args.uniprot_id and (
(args.config_file is None) and
(args.pdb_dir is None or args.blast_db_dir is None or
args.archive_dir is None))):
raise Exception("""\
When using the database pipeline, \
you must either provide a configuration file ('-c', '--config_file') or \
'--pdb_dir', '--blast_db_dir', and '--archive_dir'.""")
if args.sequence_file and not args.structure_file:
raise Exception("""\
A template PDB file must be specified using the '--structure_file' option, \
when you specify a target sequence using the '--sequence_file' option!""")
def elaspic(args):
validate_args(args)
# Read configurations
if args.config_file is not None:
conf.read_configuration_file(args.config_file)
elif args.uniprot_id:
conf.read_configuration_file(
DATABASE={
'connection_string': args.connection_string
},
EXTERNAL_DIRS={
'pdb_dir': args.pdb_dir,
'blast_db_dir': args.blast_db_dir,
'archive_dir': args.archive_dir,
})
elif args.structure_file:
unique_temp_dir = op.abspath(op.join(os.getcwd(), '.elaspic'))
os.makedirs(unique_temp_dir, exist_ok=True)
conf.read_configuration_file(
DEFAULT={
'unique_temp_dir': unique_temp_dir
},
EXTERNAL_DIRS={
'pdb_dir': args.pdb_dir,
'blast_db_dir': args.blast_db_dir,
'archive_dir': args.archive_dir
})
if args.uniprot_id:
# Run database pipeline
if args.uniprot_domain_pair_ids:
logger.debug('uniprot_domain_pair_ids: %s', args.uniprot_domain_pair_ids)
uniprot_domain_pair_ids_asint = (
[int(x) for x in args.uniprot_domain_pair_ids.split(',') if x]
)
else:
uniprot_domain_pair_ids_asint = []
# Run database pipeline
from elaspic import database_pipeline
pipeline = database_pipeline.DatabasePipeline(
args.uniprot_id, args.mutations,
run_type=args.run_type,
uniprot_domain_pair_ids=uniprot_domain_pair_ids_asint
)
pipeline.run()
elif args.structure_file:
# Run local pipeline
from elaspic import standalone_pipeline
pipeline = standalone_pipeline.StandalonePipeline(
args.structure_file, args.sequence_file, args.mutations,
mutation_format=args.mutation_format,
run_type=args.run_type,
)
pipeline.run()
def configure_run_parser(sub_parsers):
help = "Run ELASPIC"
description = help + ""
example = r"""
Examples
--------
$ elaspic run -p 4DKL.pdb -m A_M6A -n 1
$ elaspic run -u P00044 -m M1A -c config_file.ini
$ elaspic run -u P00044 -m M1A \
--connection_string=mysql://user:pass@localhost/elaspic \
--pdb_dir=/home/pdb/data/data/structures/divided/pdb \
--blast_db_dir=/home/ncbi/blast/db \
--archive_dir=/home/elaspic
"""
parser = sub_parsers.add_parser(
'run',
help=help,
description=description,
epilog=example,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-c', '--config_file', nargs='?', type=str,
help='ELASPIC configuration file.')
parser.add_argument(
'--connection_string', nargs='?', type=str,
help=('SQLAlchemy formatted string describing the connection to the database.'))
parser.add_argument(
'--pdb_dir', nargs='?', type=str,
help=("Folder containing PDB files in split format (e.g. 'ab/pdb1ab2.ent.gz')."))
parser.add_argument(
'--blast_db_dir', nargs='?', type=str,
help=("Folder containing NCBI `nr` and `pdbaa` databases."))
parser.add_argument(
'--archive_dir', nargs='?', type=str,
help=('Folder containing precalculated ELASPIC data.'))
parser.add_argument(
'-v', '--verbose', action='count',
help=('Specify verbosity level.'))
parser.add_argument(
'-u', '--uniprot_id',
help="The Uniprot ID of the protein that you want to mutate (e.g. 'P28223')."
"This option relies on a local elaspic database, which has to be specified "
"in the configuration file.")
parser.add_argument(
'-p', '--structure_file',
help="Full filename (including path) of the PDB file that you wish to mutate.")
parser.add_argument(
'-s', '--sequence_file',
help="Full filename (including path) of the FASTA file containing the sequence that you "
"wish to model. If you choose this option, you also have to specify "
"a template PDB file using the '--pdb-file' option.")
parser.add_argument(
'-m', '--mutations', nargs='?', default=[''],
help="Mutation(s) that you wish to evaluate.\n"
"If you used '--uniprot_id', mutations must be provided using uniprot coordinates "
"(e.g. 'D172E,R173H' or 'A_V10I').\n"
"If you used '--structure_file', mutations must be provided using the chain "
"and residue id (e.g. 'A_M1C,B_C20P' to mutate a residue with id '1' on chain A "
"to Cysteine, and residue with id '20' on chain B to Proline).\n"
"If you used '--sequence_file', mutations must be provided using the chain "
"and residue INDEX (e.g. '1_M1C,2_C20P' to mutate the first residue in sequence 1 "
"to Cysteine, and the 20th residue in sequence 2 to Proline).")
parser.add_argument(
'-n', '--mutation_format', nargs='?', default=None,
help="Mutation format:\n"
" 1. {pdb_chain}_{pdb_mutation},...\n"
" 2. {pdb_chain}_{sequence_mutation},...\n"
" 3. {sequence_pos}_{sequence_mutation}... (default)\n\n"
"If `sequence_file` is None, this does not matter "
"(always {pdb_chain}_{pdb_mutation})."
)
parser.add_argument(
'-i', '--uniprot_domain_pair_ids', nargs='?', default='',
help="List of uniprot_domain_pair_ids to analyse "
"(useful if you want to restrict your analysis to only a handful of domains).")
parser.add_argument(
'-t', '--run_type', nargs='?', type=str, default='all',
choices=sorted(pipeline.Pipeline._valid_run_types),
help='Type of analysis to perform.')
parser.set_defaults(func=elaspic)
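A minimal sketch of how this sub-parser could be wired into a top-level CLI; the wiring below is an assumption, and only configure_run_parser and elaspic come from the file above:
import argparse

def make_parser():
    top = argparse.ArgumentParser(prog='elaspic')
    sub_parsers = top.add_subparsers(title='command', dest='command')
    configure_run_parser(sub_parsers)
    return top

if __name__ == '__main__':
    args = make_parser().parse_args()
    args.func(args)   # dispatches to elaspic(args) via set_defaults above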
|
| huntxu/neutron | neutron/tests/fullstack/base.py | Python | apache-2.0 | 4,357 | 0 |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from concurrent import futures
import os
from oslo_config import cfg
from oslo_log import log as logging
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
from neutron.tests import base as tests_base
from neutron.tests.common import helpers
from neutron.tests.common import net_helpers
from neutron.tests.fullstack.resources import client as client_resource
from neutron.tests import tools
from neutron.tests.unit import testlib_api
# This is the directory from which infra fetches log files for fullstack tests
DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(),
'dsvm-fullstack-logs')
ROOTDIR = os.path.dirname(__file__)
LOG = logging.getLogger(__name__)
class BaseFullStackTestCase(testlib_api.MySQLTestCaseMixin,
testlib_api.SqlTestCase):
"""Base test class for full-stack tests."""
BUILD_WITH_MIGRATIONS = True
def setUp(self, environment):
super(BaseFullStackTestCase, self).setUp()
tests_base.setup_test_logging(
cfg.CONF, DEFAULT_LOG_DIR, '%s.txt' % self.get_name())
# NOTE(zzzeek): the opportunistic DB fixtures have built for
# us a per-test (or per-process) database. Set the URL of this
# database in CONF as the full stack tests need to actually run a
# neutron server against this database.
_orig_db_url = cfg.CONF.database.connection
cfg.CONF.set_override(
'connection', str(self.engine.url), group='database')
self.addCleanup(
cfg.CONF.set_override,
"connection", _orig_db_url, group="database"
)
# NOTE(ihrachys): seed should be reset before environment fixture below
# since the latter starts services that may rely on generated port
# numbers
tools.reset_random_seed()
# configure test runner to use rootwrap
self.setup_rootwrap()
config.setup_privsep()
self.environment = environment
self.environment.test_name = self.get_name()
self.useFixture(self.environment)
self.client = self.environment.neutron_server.client
self.safe_client = self.useFixture(
client_resource.ClientFixture(self.client))
def get_name(self):
class_name, test_name = self.id().split(".")[-2:]
return "%s.%s" % (class_name, test_name)
def _assert_ping_during_agents_restart(
self, agents, src_namespace, ips, restart_timeout=10,
ping_timeout=1, count=10):
with net_helpers.async_ping(
src_namespace, ips, timeout=ping_timeout,
count=count) as done:
LOG.debug("Restarting agents")
executor = futures.ThreadPoolExecutor(max_workers=len(agents))
restarts = [agent.restart(executor=executor)
for agent in agents]
futures.wait(restarts, timeout=restart_timeout)
self.assertTrue(all([r.done() for r in restarts]))
LOG.debug("Restarting agents - done")
# It is necessary to give agents time to initialize
# because some crucial steps (e.g. setting up bridge flows)
# happen only after RPC is established
agent_names = ', '.join({agent.process_fixture.process_name
for agent in agents})
common_utils.wait_until_true(
done,
timeout=count * (ping_timeout + 1),
exception=RuntimeError("Could not ping the other VM, "
"re-starting %s leads to network "
"disruption" % agent_names))
|
| ovnicraft/server-tools | base_locale_uom_default/tests/test_res_lang.py | Python | agpl-3.0 | 1,605 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestResLang(TransactionCase):
def setUp(self):
super(TestResLang, self).setUp()
self.lang = self.env.ref('base.lang_en')
self.env.user.lang = self.lang.code
self.uom = self.env.ref('product.product_uom_dozen')
self.lang.default_uom_ids = [(6, 0, self.uom.ids)]
def test_check_default_uom_ids_fail(self):
"""It should not allow multiple UoMs of the same category."""
with self.assertRaises(ValidationError):
self.lang.default_uom_ids = [
(4, self.env.ref('product.product_uom_unit').id),
]
def test_check_default_uom_ids_pass(self):
"""It should allow multiple UoMs of different categories."""
self.lang.default_uom_ids = [
(4, self.env.ref('product.product_uom_kgm').id),
]
self.assertEqual(len(self.lang.default_uom_ids), 2)
def test_default_uom_by_category_exist(self):
"""It should return the default UoM if existing."""
self.assertEqual(
self.env['res.lang'].default_uom_by_category('Unit'),
self.uom,
)
def test_default_uom_by_category_no_exist(self):
"""It should return empty recordset when no default UoM."""
self.assertEqual(
self.env['res.lang'].default_uom_by_category('Volume'),
self.env['product.uom'].browse(),
)
|
| Acrisel/acris | acris/acris_example/resource_pool_callback.py | Python | mit | 2,420 | 0.013636 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from acris import resource_pool as rp
from acris import threaded
import queue
from datetime import datetime
from acris import traced_method
traced=traced_method(print, True)
class MyResource1(rp.Resource): pass
class MyResource2(rp.Resource): pass
rp1=rp.ResourcePool('RP1', resource_cls=MyResource1, policy={'resource_limit': 2, }).load()
rp2=rp.ResourcePool('RP2', resource_cls=MyResource2, policy={'resource_limit': 1, }).load()
class Callback(object):
def __init__(self, notify_queue):
self.q=notify_queue
def __call__(self, ticket=None):
self.q.put(ticket)
@threaded
def worker_callback(name, rp):
    print('[ %s ] %s getting resource' % (str(datetime.now()), name))
notify_queue=queue.Queue()
callback=Callback(notify_queue)
r=rp.get(callback=callback)
if not r:
print('[ %s ] %s doing work before resource available' % (str(datetime.now()), name,))
print('[ %s ] %s waiting for resources' % (str(datetime.now()), name,))
        ticket=notify_queue.get()
r=rp.get(ticket=ticket)
print('[ %s ] %s doing work (%s)' % (str(datetime.now()), name, repr(r)))
time.sleep(2)
print('[ %s ] %s returning (%s)' % (str(datetime.now()), name, repr(r)))
rp.put(*r)
r1=worker_callback('>>> w11-callback', rp1)
r2=worker_callback('>>> w21-callback', rp2)
r3=worker_callback('>>> w22-callback', rp2)
r4=worker_callback('>>> w12-callback', rp1)
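# To summarize the flow demonstrated above: rp.get(callback=...) hands back the
# resources straight away if any are free; otherwise it returns a falsy value
# and later invokes the callback with a ticket, which the worker pulls off the
# notify queue and exchanges for the resources via rp.get(ticket=ticket).
# Because RP2 was created with resource_limit=1, w21-callback and w22-callback
# end up running their two-second work sections one after the other.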
|
djcf/error-reloader-extension
|
tests/badwebserver_jsonly.py
|
Python
|
gpl-2.0
| 4,289
| 0.021217
|
import socket, sys, time, argparse
parser = argparse.ArgumentParser(description="This bad server accepts an HTTP connection and replies with a valid HTML document which links to assets. However, attempts to load the assets should result in a net::ERR_EMPTY_RESPONSE.")
parser.add_argument("-p", "--port", type=int, help="The port to listen for new connections on.", default=8080)
parser.add_argument("-t", "--tries", type=int, help="The number of attempts before asset requests will be responded to successfully", default=5)
args = parser.parse_args()
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('localhost', args.port))
serversocket.listen(5)
print "The bad web server is listening on port %s. Requests for the HTML index will always be replied to. Assets requests will be responded to after %s unsuccessful attempts.\n" % (args.port, args.tries)
response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Bad Web Server</title>
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.8.0/jquery.min.js"></script>
<script src="/script.js" id="script01"></script>
<script type="text/javascript">
function refresh_location_hints() {
$('#for_script01').val($('#script01').attr('src'));
$('#for_css01').val($('#css01').attr('href'));
$('#for_img01').val($('#img01').attr('src'));
$('#img01').attr('alt', $('#img01').attr('src'));
setTimeout(function() {
refresh_location_hints();
}, 1000);
}
$(document).ready(function() {
setTimeout(function() {
refresh_location_hints();
        }, 1000);
});
</script>
<style>
input { width: 600px; }
</style>
</head>
<body>
<header>
<h1>About Bad Web Server</h1>
    <p>The bad web server will correctly transfer a valid HTML5 document to the browser when the browser requests the resource identified as '/'. The page will also request images, stylesheets and javascript resources from the server - but these should all result in the browser encountering a socket error and triggering a net::ERR_EMPTY_RESPONSE. The javascript will correctly load after the 5th attempt and display an alert to the user when it loads correctly, as will the CSS resource. We also import JQuery to dynamically hint at the current location of each failed resource for testing.</p>
</header>
<article>
<input type="text" id="for_script01"> External Script (#script01) URL<br>
</article>
</body>
</html>"""
js_response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/javascript
alert("Javascript resource ('#script_01') loaded successfully after %s attempts");""" % args.tries
css_response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/stylesheet
* { margin: 5px; padding: 5px; }
body { background-color: #00ff00; color: #555555; }"""
css_requests = js_requests = 0
while True:
#accept connections from outside
(clientsocket, address) = serversocket.accept()
chunks = []
bytes_recd = 0
chunk = ""
while "\r\n\r\n" not in chunk:
chunk = clientsocket.recv(min(2048 - bytes_recd, 2048))
if chunk == '':
raise RuntimeError("socket connection broken (but not by me)")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
header = ''.join(chunks)
print "Received: " + header
request_line = header.split("\r\n")[0]
resource_marker = request_line.split()[1]
    if resource_marker == "/" or resource_marker == "/index.html" or resource_marker == "/index.htm":
print "^ INDEX - WILL REPLY ^"
clientsocket.send(response_text);
clientsocket.shutdown(0)
elif ".css" in resource_marker:
css_requests += 1
if css_requests > args.tries:
css_requests = 0
print "^ FINAL CSS REQUEST - WILL REPLY ^"
clientsocket.send(css_response_text)
clientsocket.shutdown(0)
else:
print "^ CSS REQUEST #%s - WILL NOT REPLY ^" % css_requests
elif ".js" in resource_marker:
js_requests += 1
if js_requests > args.tries:
js_requests = 0
print "^ FINAL JS REQUEST - WILL REPLY ^"
clientsocket.send(js_response_text)
clientsocket.shutdown(0)
else:
print "^ JS REQUEST #%s - WILL NOT REPLY ^" % js_requests
else:
print "^ WILL NOT REPLY ^"
print "\n"
clientsocket.close()
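# A minimal, hypothetical way to exercise the server above from another shell
# (default port 8080 assumed): '/' is served immediately, while asset requests
# such as '/script.js' produce an empty reply until the configured number of
# unsuccessful attempts has been exceeded.
#
#     $ curl http://localhost:8080/              # HTML document returned
#     $ curl http://localhost:8080/script.js     # curl: (52) Empty reply from server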
|
miztiik/scrapy-Demos
|
aCloudGuru/aCloudGuru/settings.py
|
Python
|
mit
| 3,160
| 0.00981
|
# -*- coding: utf-8 -*-
# Scrapy settings for aCloudGuru project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'aCloudGuru'
SPIDER_MODULES = ['aCloudGuru.spiders']
NEWSPIDER_MODULE = 'aCloudGuru.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'aCloudGuru (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'aCloudGuru.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/choroplethmapbox/_hovertemplate.py
|
Python
|
mit
| 488
| 0.002049
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="choroplethmapbox", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
cxxgtxy/tensorflow
|
tensorflow/python/ops/numpy_ops/np_array_ops.py
|
Python
|
apache-2.0
| 63,472
| 0.010398
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import math
import numbers
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.util import nest
newaxis = np_export.np_export_constant(__name__, 'newaxis', np.newaxis)
@np_utils.np_doc('empty')
def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
return zeros(shape, dtype)
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
return zeros_like(a, dtype)
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
dtype = (
np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type())
if isinstance(shape, np_arrays.ndarray):
shape = shape.data
  return np_arrays.tensor_to_ndarray(array_ops.zeros(shape, dtype=dtype))
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
if isinstance(a, np_arrays.ndarray):
a = a.data
if dtype is None:
# We need to let np_utils.result_type decide the dtype, not tf.zeros_like
dtype = np_utils.result_type(a)
else:
# TF and numpy has different interpretations of Python types such as
# `float`, so we let `np_utils.result_type` decide.
    dtype = np_utils.result_type(dtype)
dtype = dtypes.as_dtype(dtype) # Work around b/149877262
return np_arrays.tensor_to_ndarray(array_ops.zeros_like(a, dtype))
@np_utils.np_doc('ones')
def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
if dtype:
dtype = np_utils.result_type(dtype)
if isinstance(shape, np_arrays.ndarray):
shape = shape.data
return np_arrays.tensor_to_ndarray(array_ops.ones(shape, dtype=dtype))
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
if isinstance(a, np_arrays.ndarray):
a = a.data
if dtype is None:
dtype = np_utils.result_type(a)
else:
dtype = np_utils.result_type(dtype)
return np_arrays.tensor_to_ndarray(array_ops.ones_like(a, dtype))
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
if dtype:
dtype = np_utils.result_type(dtype)
if not M:
M = N
# Making sure N, M and k are `int`
N = int(N)
M = int(M)
k = int(k)
if k >= M or -k >= N:
# tf.linalg.diag will raise an error in this case
return zeros([N, M], dtype=dtype)
if k == 0:
return np_arrays.tensor_to_ndarray(linalg_ops.eye(N, M, dtype=dtype))
# We need the precise length, otherwise tf.linalg.diag will raise an error
diag_len = min(N, M)
if k > 0:
if N >= M:
diag_len -= k
elif N + k > M:
diag_len = M - k
elif k <= 0:
if M >= N:
diag_len += k
elif M - k > N:
diag_len = N + k
diagonal_ = array_ops.ones([diag_len], dtype=dtype)
return np_arrays.tensor_to_ndarray(
array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k))
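# Worked examples of the diag_len branches above (values traced by hand from
# this code, not taken from the NumPy docs): eye(3, 5, k=2) keeps
# diag_len = 3 because N < M and N + k == M, so three ones are placed along the
# diagonal starting at column 2; eye(5, 3, k=2) instead hits the N >= M branch
# and shrinks diag_len to 3 - 2 = 1, leaving a single one at position (0, 2).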
@np_utils.np_doc('identity')
def identity(n, dtype=float):
return eye(N=n, M=n, dtype=dtype)
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
if not isinstance(shape, np_arrays.ndarray):
shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
shape = atleast_1d(shape).data
fill_value = asarray(fill_value, dtype=dtype)
return np_arrays.tensor_to_ndarray(
array_ops.broadcast_to(fill_value.data, shape))
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a).data
dtype = dtype or np_utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return np_arrays.tensor_to_ndarray(
array_ops.broadcast_to(fill_value.data, array_ops.shape(a)))
def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Main implementation of np.array()."""
if isinstance(val, np_arrays.ndarray):
result_t = val.data
else:
result_t = val
if not isinstance(result_t, ops.Tensor):
if not dtype:
dtype = np_utils.result_type(result_t)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
def maybe_data(x):
if isinstance(x, np_arrays.ndarray):
return x.data
return x
# Handles lists of ndarrays
result_t = nest.map_structure(maybe_data, result_t)
# EagerTensor conversion complains about "mixed types" when converting
# tensors with no dtype information. This is because it infers types based
# on one selected item in the list. So e.g. when converting [2., 2j]
# to a tensor, it will select float32 as the inferred type and not be able
# to convert the list to a float 32 tensor.
# Since we have some information about the final dtype we care about, we
# supply that information so that convert_to_tensor will do best-effort
# conversion to that dtype first.
result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
result_t = math_ops.cast(result_t, dtype=dtype)
elif dtype:
result_t = math_ops.cast(result_t, dtype)
if copy:
result_t = array_ops.identity(result_t)
if ndmin == 0:
return np_arrays.tensor_to_ndarray(result_t)
ndims = array_ops.rank(result_t)
def true_fn():
old_shape = array_ops.shape(result_t)
new_shape = array_ops.concat(
[array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
return array_ops.reshape(result_t, new_shape)
result_t = np_utils.cond(
np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
return np_arrays.tensor_to_ndarray(result_t)
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Since Tensors are immutable, a copy is made only if val is placed on a
different devi
|
kalyons11/kevin
|
kevin/playground/gpa.py
|
Python
|
mit
| 2,017
| 0.000496
|
# Quick script to calculate GPA given a class list file.
# Class list file should be a csv with COURSE_ID,NUM_UNITS,GRADE
# GRADE should be LETTER with potential modifiers after that
# registrar.mit.edu/classes-grades-evaluations/grades/calculating-gpa
import argparse
import pandas as pd
def get_parser():
# Get the argument parser for this script
parser = argparse.ArgumentParser()
    parser.add_argument('-F', '--filename', help='Filename for grades')
return parser
class GPACalculator:
def __init__(self, fname):
# Load file via pandas
self.__data = pd.read_csv(
fname,
header=None,
names=['course', 'units', 'grade']
)
def calc_gpa(self):
# Map grades to grade points
grade_points = self.__data.grade.apply(self.__grade_point_mapper)
# Multiply pointwise by units
grade_points_weighted = grade_points * self.__data.units
# Sum weighted units
weighted_units_sum = grade_points_weighted.sum()
# Divide by total units
gpa_raw = weighted_units_sum / self.__data.units.sum()
# Round to nearest tenth
return round(gpa_raw, 1)
def __grade_point_mapper(self, grade):
# Maps a string letter grade to a numerical value
# MIT 5.0 scale
grade_map = {
'A': 5,
'B': 4,
'C': 3,
'D': 2,
'F': 0,
}
first_char = grade[0].upper()
try:
return grade_map[first_char]
        except KeyError:
raise ValueError('Invalid grade {grade}'.format(grade=grade))
if __name__ == '__main__':
# Set up argument parsing
parser = get_parser()
args = parser.parse_args()
# Make sure filename is present
if not args.filename:
raise ValueError('Must provide filename via -F, --filename')
# Create calculator
calc = GPACalculator(args.filename)
# Execute and print
gpa = calc.calc_gpa()
print(gpa)
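# A minimal, hypothetical end-to-end example for the script above (the file
# name and course numbers are invented). Given a grades.csv containing
#
#     6.006,12,A
#     8.02,12,B
#     21M.011,9,A-
#
# the weighted grade points are 12*5 + 12*4 + 9*5 = 153 over 33 units,
# 153 / 33 = 4.636..., which rounds to the printed result:
#
#     $ python gpa.py -F grades.csv
#     4.6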
|
kbrebanov/ansible
|
lib/ansible/module_utils/network/nxos/nxos.py
|
Python
|
gpl-3.0
| 14,622
| 0.001299
|
#
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
nxos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE'])),
'use_ssl': dict(type='bool'),
'validate_certs': dict(type='bool'),
'timeout': dict(type='int'),
'transport': dict(default='cli', choices=['cli', 'nxapi'])
}
nxos_argument_spec = {
'provider': dict(type='dict', options=nxos_provider_spec),
}
nxos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9),
'use_ssl': dict(removed_in_version=2.9, type='bool'),
'validate_certs': dict(removed_in_version=2.9, type='bool'),
'timeout': dict(removed_in_version=2.9, type='int'),
'transport': dict(removed_in_version=2.9, choices=['cli', 'nxapi'])
}
nxos_argument_spec.update(nxos_top_spec)
def get_provider_argspec():
return nxos_provider_spec
def check_args(module, warnings):
pass
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in nxos_provider_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
if is_nxapi(module):
conn = Nxapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
def exec_command(self, command):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
        cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err))
try:
cfg = to_text(out, errors='surrogate_or_strict').strip()
except UnicodeError as e:
self._module.fail_json(msg=u'Failed to decode config: %s' % to_text(out))
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
responses = list()
for item in to_list(commands):
if item['output'] == 'json' and not is_json(item['command']):
cmd = '%s | json' % item['command']
elif item['output'] == 'text' and is_json(item['command']):
cmd = item['command'].rsplit('|', 1)[0]
else:
cmd = item['command']
rc, out, err = self.exec_command(cmd)
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
self._module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
if check_rc and rc != 0:
self._module.fail_json(msg=to_text(err))
if not check_rc and rc != 0:
try:
out = self._module.from_json(err)
except ValueError:
out = to_text(err).strip()
else:
try:
out = self._module.from_json(out)
except ValueError:
out = to_text(out).strip()
if item['output'] == 'json' and out != '' and isinstance(out, string_types):
self._module.fail_json(msg='failed to retrieve output of %s in json format' % item['command'])
responses.append(out)
return responses
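    # For illustration (hypothetical input): run_commands above expects items of
    # the form {'command': 'show version', 'output': 'json'}; with output 'json'
    # the command is rewritten to 'show version | json' before execution (unless
    # it already asks for json output, which the is_json check guards against),
    # and the module fails if the device still returns a plain string instead of
    # parsed JSON.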
def load_config(self, config, return_error=False, opts=None):
"""Sends configuration commands to the remote device
"""
if opts is None:
opts = {}
rc, out, err = self.exec_command('configure')
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err))
msgs = []
for cmd in config:
rc, out, err = self.exec_command(cmd)
if opts.get('ignore_timeout') and rc == 1:
msgs.append(err)
return msgs
elif rc != 0:
self._module.fail_json(msg=to_text(err))
elif out:
msgs.append(out)
self.exec_command('end')
return msgs
class Nxapi:
OUTPUT_TO_COMMAND_TYPE = {
'text': 'cli_show_ascii',
'json': 'cli_show',
'bash': 'bash',
'config': 'cli_conf'
}
def __init__(self, module):
self._module = module
self._nxapi_auth = None
self._device_configs = {}
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
host = self._module.params['host']
port = self._module.params['port']
if self._module.params['use_ssl']:
proto = 'https'
port = port or 443
else:
proto = 'http'
port = port or 80
self._url = '%s://%s:%s/ins' % (proto, host, port)
def _error(self, msg, **kwargs):
self._nxapi_auth = None
if 'url' not in kwargs:
kwargs['url'] = self._url
self._module.fail_json(msg=msg, **kwargs)
def _request_builder(se
|
bovee/needletail
|
bench/benchmark.py
|
Python
|
mit
| 1,291
| 0.002324
|
#!/usr/bin/env python
"""
Unscientific benchmarking of this versus the --release rust
implementation below using the %timeit Ipython magic (times in sec)
n_kmers, py_runtime, rust_runtime
6594204, 14.4, 0.578
Both give identical counts on the files tested (and printing kmers out
and diff'ing the results gives no difference)
"""
from __future__ import print_function
import sys
from Bio.SeqIO import parse
from Bio.Seq import reverse_complement
def slid_win(seq, size=4, overlapping=True):
"""Returns a sliding window along self.seq."""
itr = iter(seq)
    if overlapping:
buf = ''
for _ in range(size):
buf += next(itr)
for l in itr:
yield buf
buf = buf[1:] + l
yield buf
else:
buf = ''
for l in itr:
buf += l
if len(buf) == size:
yield buf
buf = ''
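# For example (hypothetical call): list(slid_win('ACGTA', size=4)) yields
# ['ACGT', 'CGTA'], while list(slid_win('ACGTA', size=4, overlapping=False))
# yields only ['ACGT'] and silently drops the trailing 'A' that never fills a
# complete window.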
filename = sys.argv[1]
n_total = 0
n_canonical = 0
for s in parse(filename, 'fastq'):
uppercase_seq = str(s.upper().seq)
for kmer in slid_win(uppercase_seq, 4):
canonical = min(kmer, reverse_complement(kmer))
if canonical == 'CAGC':
n_canonical += 1
n_total += 1
# print(canonical)
print(n_total, n_canonical)
|
ctasims/Dive-Into-Python-3
|
examples/alphameticstest.py
|
Python
|
mit
| 2,452
| 0.00367
|
from alphametics import solve
import unittest
class KnownValues(unittest.TestCase):
def test_out(self):
'''TO + GO == OUT'''
self.assertEqual(solve('TO + GO == OUT'), '21 + 81 == 102')
def test_too(self):
'''I + DID == TOO'''
self.assertEqual(solve('I + DID == TOO'), '9 + 191 == 200')
def test_mom(self):
'''AS + A == MOM'''
self.assertEqual(solve('AS + A == MOM'), '92 + 9 == 101')
def test_best(self):
'''HES + THE == BEST'''
self.assertEqual(solve('HES + THE == BEST'), '426 + 842 == 1268')
def test_late(self):
'''NO + NO + TOO == LATE'''
self.assertEqual(solve('NO + NO + TOO == LATE'), '74 + 74 + 944 == 1092')
def test_onze(self):
'''UN + UN + NEUF == ONZE'''
self.assertEqual(solve('UN + UN + NEUF == ONZE'), '81 + 81 + 1987 == 2149')
def test_deux(self):
'''UN + DEUX + DEUX + DEUX + DEUX == NEUF'''
self.assertEqual(solve('UN + DEUX + DEUX + DEUX + DEUX == NEUF'), '25 + 1326 + 1326 + 1326 + 1326 == 5329')
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
devilry/devilry-django
|
devilry/devilry_statistics/tests/test_api/api_test_mixin.py
|
Python
|
bsd-3-clause
| 2,945
| 0.003056
|
from django.conf import settings
from model_bakery import baker
from rest_framework.test import APIRequestFactory, force_authenticate
class ApiTestMixin:
"""
Mixin class for API tests.
    Can be used for ViewSets too. Just override :meth:`.get_as_view_kwargs` - see the example in the docs
for that method.
"""
apiview_class = None
def get_default_requestuser(self):
return None
def make_user(self, shortname='user@example.com', **kwargs):
return baker.make(settings.AUTH_USER_MODEL, shortname=shortname, **kwargs)
def make_superuser(self, shortname='super@example.com', **kwargs):
return self.make_user(shortname=shortname, is_superuser=True, **kwargs)
def get_as_view_kwargs(self):
"""
The kwargs for the ``as_view()``-method of the API view class.
If you are writing tests for a ViewSet, you have to override this
to define what action you are testing (list, retrieve, update, ...), like this::
def get_as_view_kwargs(self):
return {
'get': 'list'
}
"""
return {}
def add_authenticated_user_to_request(self, request, requestuser):
if requestuser:
force_authenticate(request, requestuser)
def make_request(self, method, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
factory = APIRequestFactory()
request = getattr(factory, method)(api_url, format='json', data=data)
viewkwargs = viewkwargs or {}
if requestuser:
request.user = requestuser or self.get_default_requestuser()
response = self.apiview_class.as_view(**self.get_as_view_kwargs())(request, **viewkwargs)
response.render()
return response
def make_get_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        return self.make_request(method='get', viewkwargs=viewkwargs,
api_url=api_url, data=data,
requestuser=requestuser)
def make_post_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
return self.make_request(method='post', viewkwargs=viewkwargs,
api_url=api_url, data=data,
requestuser=requestuser)
    def make_put_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
return self.make_request(method='put', viewkwargs=viewkwargs,
api_url=api_url, data=data,
requestuser=requestuser)
def make_delete_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
return self.make_request(method='delete', viewkwargs=viewkwargs,
api_url=api_url, data=data,
requestuser=requestuser)
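# A minimal, hypothetical sketch of a test case built on the mixin above (the
# ViewSet under test, the Django TestCase base class and the expected status
# code are invented for illustration; apiview_class, get_as_view_kwargs,
# make_superuser and make_get_request are the hooks defined above):
#
#     class TestStatisticsApi(ApiTestMixin, test.TestCase):
#         apiview_class = StatisticsViewSet
#
#         def get_as_view_kwargs(self):
#             return {'get': 'list'}
#
#         def test_list_as_superuser(self):
#             response = self.make_get_request(requestuser=self.make_superuser())
#             self.assertEqual(200, response.status_code)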
|
a25kk/apm
|
src/apm.sitetheme/apm/sitetheme/tests/test_setup.py
|
Python
|
mit
| 1,143
| 0
|
# -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from apm.buildout.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of apm.buildout into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if apm.buildout is installed with portal_quickinstaller."""
        self.assertTrue(self.installer.isProductInstalled('apm.buildout'))
    def test_uninstall(self):
        """Test if apm.buildout is cleanly uninstalled."""
self.installer.uninstallProducts(['apm.buildout'])
self.assertFalse(self.installer.isProductInstalled('apm.buildout'))
# browserlayer.xml
def test_browserlayer(self):
"""Test that IApmBuildoutLayer is registered."""
from apm.buildout.interfaces import IApmBuildoutLayer
from plone.browserlayer import utils
self.failUnless(IApmBuildoutLayer in utils.registered_layers())
|
ProfessionalIT/customers
|
bambinocampones/src/bambinocampones/website/forms.py
|
Python
|
mit
| 144
| 0
|
from django.forms import ModelForm
from .models import Depoimento
class DepoimentoForm(ModelForm):
    class Meta:
model = Depoimento
|
janelia-flyem/pydvid
|
pydvid/gui/contents_browser.py
|
Python
|
bsd-3-clause
| 15,264
| 0.013037
|
"""
This module implements a simple widget for viewing the list of datasets and nodes in a DVID instance.
Requires PyQt4. To see a demo of it in action, start up your dvid server and run this::
$ python pydvid/gui/contents_browser.py localhost:8000
"""
import socket
import httplib
import collections
from PyQt4.QtGui import QPushButton, QDialog, QVBoxLayout, QGroupBox, QTreeWidget, \
QTreeWidgetItem, QSizePolicy, QListWidget, QListWidgetItem, \
QDialogButtonBox, QLineEdit, QLabel, QComboBox, QMessageBox, \
QHBoxLayout
from PyQt4.QtCore import Qt, QStringList, QSize, QEvent
import pydvid.general
class ContentsBrowser(QDialog):
"""
    Displays the contents of a DVID server, listing all datasets and the volumes/nodes within each dataset.
The user's selected dataset, volume, and node can be accessed via the `get_selection()` method.
If the dialog is constructed with mode='specify_new', then the user is asked to provide a new data name,
and choose the dataset and node to which it will belong.
**TODO:**
* Show more details in dataset list (e.g. shape, axes, pixel type)
* Show more details in node list (e.g. date modified, parents, children)
* Gray-out nodes that aren't "open" for adding new volumes
"""
def __init__(self, suggested_hostnames, mode='select_existing', parent=None):
"""
Constructor.
suggested_hostnames: A list of hostnames to suggest to the user, e.g. ["localhost:8000"]
mode: Either 'select_existing' or 'specify_new'
parent: The parent widget.
"""
super( ContentsBrowser, self ).__init__(parent)
self._suggested_hostnames = suggested_hostnames
self._mode = mode
self._current_dset = None
self._repos_info = None
self._hostname = None
# Create the UI
self._init_layout()
VolumeSelection = collections.namedtuple( "VolumeSelection", "hostname dataset_uuid data_name node_uuid" )
def get_selection(self):
"""
Get the user's current (or final) selection.
Returns a VolumeSelection tuple.
"""
node_uuid = self._get_selected_node()
dset_uuid, data_name = self._get_selected_data()
if self._mode == "specify_new":
data_name = str( self._new_data_edit.text() )
return ContentsBrowser.VolumeSelection(self._hostname, dset_uuid, data_name, node_uuid)
def _init_layout(self):
"""
Create the GUI widgets (but leave them empty).
"""
hostname_combobox = QComboBox(parent=self)
self._hostname_combobox = hostname_combobox
hostname_combobox.setEditable(True)
hostname_combobox.setSizePolicy( QSizePolicy.Expanding, QSizePolicy.Maximum )
hostname_combobox.installEventFilter(self)
for hostname in self._suggested_hostnames:
hostname_combobox.addItem( hostname )
self._connect_button = QPushButton("Connect", parent=self, clicked=self._handle_new_hostname)
hostname_layout = QHBoxLayout()
hostname_layout.addWidget( hostname_combobox )
hostname_layout.addWidget( self._connect_button )
hostname_groupbox = QGroupBox("DVID Host", parent=self)
hostname_groupbox.setLayout( hostname_layout )
hostname_groupbox.setSizePolicy( QSizePolicy.Preferred, QSizePolicy.Maximum )
data_treewidget = QTreeWidget(parent=self)
data_treewidget.setHeaderLabels( ["Data"] ) # TODO: Add type, shape, axes, etc.
data_treewidget.setSizePolicy( QSizePolicy.Preferred, QSizePolicy.Preferred )
data_treewidget.itemSelectionChanged.connect( self._handle_data_selection )
data_layout = QVBoxLayout()
data_layout.addWidget( data_treewidget )
data_groupbox = QGroupBox("Data Volumes", parent=self)
data_groupbox.setLayout( data_layout )
node_listwidget = QListWidget(parent=self)
node_listwidget.setSizePolicy( QSizePolicy.Preferred, QSizePolicy.Preferred )
node_listwidget.itemSelectionChanged.connect( self._update_display )
node_layout = QVBoxLayout()
node_layout.addWidget( node_listwidget )
node_groupbox = QGroupBox("Nodes", parent=self)
node_groupbox.setLayout( node_layout )
new_data_edit = QLineEdit(parent=self)
new_data_edit.textEdited.connect( self._update_display )
full_url_label = QLabel(parent=self)
full_url_label.setSizePolicy( QSizePolicy.Preferred, QSizePolicy.Maximum )
text_flags = full_url_label.textInteractionFlags()
full_url_label.setTextInteractionFlags( text_flags | Qt.TextSelectableByMouse )
new_data_layout = QVBoxLayout()
new_data_layout.addWidget( new_data_edit )
new_data_groupbox = QGroupBox("New Data Volume", parent=self)
new_data_groupbox.setLayout( new_data_layout )
new_data_groupbox.setSizePolicy( QSizePolicy.Preferred, QSizePolicy.Maximum )
buttonbox = QDialogButtonBox( Qt.Horizontal, parent=self )
buttonbox.setStandardButtons( QDialogButtonBox.Ok | QDialogButtonBox.Cancel )
buttonbox.accepted.connect( self.accept )
buttonbox.rejected.connect( self.reject )
buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
layout = QVBoxLayout()
layout.addWidget( hostname_groupbox )
layout.addWidget( data_groupbox )
layout.addWidget( node_groupbox )
if self._mode == "specify_new":
layout.addWidget( new_data_groupbox )
else:
new_data_groupbox.hide()
layout.addWidget( full_url_label )
layout.addWidget( buttonbox )
# Stretch factors
layout.setStretchFactor(data_groupbox, 3)
layout.setStretchFactor(node_groupbox, 1)
self.setLayout(layout)
self.setWindowTitle( "Select DVID Volume" )
# Initially disabled
data_groupbox.setEnabled(False)
node_groupbox.setEnabled(False)
new_data_groupbox.setEnabled(False)
# Save instance members
self._data_groupbox = data_groupbox
self._node_groupbox = node_groupbox
self._new_data_groupbox = new_data_groupbox
self._data_treewidget = data_treewidget
self._node_listwidget = node_listwidget
self._new_data_edit = new_data_edit
self._full_url_label = full_url_label
self._buttonbox = buttonbox
def sizeHint(self):
return QSize(700, 500)
def eventFilter(self, watched, event):
if watched == self._hostname_combobox \
and event.type() == QEvent.KeyPress \
and ( event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter):
self._connect_button.click()
return True
return False
def showEvent(self, event):
"""
Raise the window when it is shown.
For some reason, that doesn't happen automatically if this widget is also the main window.
"""
super(ContentsBrowser, self).showEvent(event)
self.raise_()
def _handle_new_hostname(self):
new_hostname = str( self._hostname_combobox.currentText() )
if '://' in new_hostname:
new_hostname = new_hostname.split('://')[1]
error_msg = None
self._repos_info = None
self._current_dset = None
self._hostname = None
try:
# Query the server
connection = httplib.HTTPConnection( new_hostname )
self._repos_info = pydvid.general.get_repos_info( connection )
self._hostname = new_hostname
self._connection = connection
except socket.error as ex:
error_msg = "Socket Error: {} (Error {})".format( ex.args[1], ex.args[0] )
except httplib.HTTPException as ex:
error_msg = "HTTP Error: {}".format( ex.args[0] )
if error_msg:
QMessageBox.critical(self, "Connection Error", error_msg)
self._populate_datasets_tree()
self._popu
|
GoogleCloudPlatform/repo-automation-playground
|
xunit-autolabeler-v2/ast_parser/core/test_data/cli/additions/additions.py
|
Python
|
apache-2.0
| 926
| 0
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [END main_method]
# [START not_main]
def not_main():
return 'not main'
# [END not_main]
# [START also_not_main]
def also_not_main():
return 'also_not main'
# [END also_not_main]
# [START untested_method]
def untested_method():
return 'untested!'
# [END untested_method]
|
alexbruy/QGIS
|
python/plugins/processing/algs/qgis/Intersection.py
|
Python
|
gpl-2.0
| 5,707
| 0.002628
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Intersection.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import Qgis, QgsFeatureRequest, QgsFeature, QgsGeometry, QgsWkbTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
wkbTypeGroups = {
    'Point': (QgsWkbTypes.Point, QgsWkbTypes.MultiPoint, QgsWkbTypes.Point25D, QgsWkbTypes.MultiPoint25D,),
'LineString': (QgsWkbTypes.LineString, QgsWkbTypes.MultiLineString, QgsWkbTypes.LineString25D, QgsWkbTypes.MultiLineString25D,),
'Polygon': (QgsWkbTypes.Polygon, QgsWkbTypes.MultiPolygon, QgsWkbTypes.Polygon25D, QgsWkbTypes.MultiPolygon25D,),
}
for key, value in wkbTypeGroups.items():
for const in value:
        wkbTypeGroups[const] = key
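# After the loop above, wkbTypeGroups maps in both directions: each group name
# points at its tuple of WKB constants, and each constant points back at its
# group name. processAlgorithm below relies on this double mapping when it
# evaluates wkbTypeGroups[wkbTypeGroups[int_geom.wkbType()]] to obtain all
# constants in the same geometry family as the intersection result.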
class Intersection(GeoAlgorithm):
INPUT = 'INPUT'
INPUT2 = 'INPUT2'
OUTPUT = 'OUTPUT'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'intersect.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Intersection')
self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterVector(self.INPUT2,
self.tr('Intersect layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Intersection')))
def processAlgorithm(self, progress):
vlayerA = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
vlayerB = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT2))
geomType = vlayerA.wkbType()
fields = vector.combineVectorFields(vlayerA, vlayerB)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
geomType, vlayerA.crs())
outFeat = QgsFeature()
index = vector.spatialindex(vlayerB)
selectionA = vector.features(vlayerA)
total = 100.0 / len(selectionA)
for current, inFeatA in enumerate(selectionA):
progress.setPercentage(int(current * total))
geom = inFeatA.geometry()
atMapA = inFeatA.attributes()
intersects = index.intersects(geom.boundingBox())
for i in intersects:
request = QgsFeatureRequest().setFilterFid(i)
inFeatB = vlayerB.getFeatures(request).next()
tmpGeom = inFeatB.geometry()
if geom.intersects(tmpGeom):
atMapB = inFeatB.attributes()
int_geom = QgsGeometry(geom.intersection(tmpGeom))
if int_geom.wkbType() == QgsWkbTypes.Unknown or QgsWkbTypes.flatType(int_geom.geometry().wkbType()) == QgsWkbTypes.GeometryCollection:
int_com = geom.combine(tmpGeom)
int_sym = geom.symDifference(tmpGeom)
int_geom = QgsGeometry(int_com.difference(int_sym))
if int_geom.isGeosEmpty() or not int_geom.isGeosValid():
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
self.tr('GEOS geoprocessing error: One or '
'more input features have invalid '
'geometry.'))
try:
if int_geom.wkbType() in wkbTypeGroups[wkbTypeGroups[int_geom.wkbType()]]:
outFeat.setGeometry(int_geom)
attrs = []
attrs.extend(atMapA)
attrs.extend(atMapB)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
continue
del writer
|
dfurtado/generator-djangospa
|
generators/app/templates/root/main/permissions.py
|
Python
|
mit
| 438
| 0.006849
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
    def has_object_permission(self, request, view, obj):
        # Read permissions are allowed to any request,
        # so we'll always allow GET, HEAD, or OPTIONS requests.
        return (request.method in permissions.SAFE_METHODS) or (obj.owner == request.user)
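# A minimal, hypothetical sketch of attaching the permission above to a DRF
# view (the view, serializer and model names are invented for illustration):
#
#     from rest_framework import generics
#
#     class PostDetail(generics.RetrieveUpdateDestroyAPIView):
#         queryset = Post.objects.all()
#         serializer_class = PostSerializer
#         permission_classes = (IsOwnerOrReadOnly,)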
| |
ec2ainun/skripsiTF
|
DeepLforServer(GPU)/jupyter_notebook_config.py
|
Python
|
mit
| 290
| 0.003448
|
import os
from IPython.lib import passwd
c.NotebookApp.ip = '*'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python3'
c.NotebookApp.password = u'sha1:035a13e895a5:8a3398f1576a32cf938f9236db03f5e8668356c5'
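# The hash assigned above is the kind of value produced by the passwd helper
# imported at the top of this file; a hypothetical interactive session to
# regenerate it might look like:
#
#     >>> passwd()
#     Enter password:
#     Verify password:
#     'sha1:<salt>:<hash>'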
|
dims/neutron
|
neutron/tests/unit/db/test_api.py
|
Python
|
apache-2.0
| 1,611
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_db import exception as db_exc
import testtools
from neutron.db import api as db_api
from neutron.tests import base
class TestExceptionToRetryContextManager(base.BaseTestCase):
def test_translates_single_exception(self):
with testtools.ExpectedException(db_exc.RetryRequest):
with db_api.exc_to_retry(ValueError):
raise ValueError()
def test_translates_multiple_exception_types(self):
with testtools.ExpectedException(db_exc.RetryRequest):
with db_api.exc_to_retry((ValueError, TypeError)):
raise TypeError()
def test_passes_other_exceptions(self):
with testtools.ExpectedException(ValueError):
with db_api.exc_to_retry(TypeError):
raise ValueError()
def test_inner_exception_preserved_in_retryrequest(self):
try:
exc = ValueError('test')
with db_api.exc_to_retry(ValueError):
raise exc
except db_exc.RetryRequest as e:
self.assertEqual(exc, e.inner_exc)
|
setsulla/owanimo
|
lib/picture/bin/patternmatch.py
|
Python
|
mit
| 1,439
| 0.007644
|
import sys
import os
import time
import numpy
import cv2
import cv2.cv as cv
from PIL import Image
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from picture.util import define
from picture.util.system import POINT
from picture.util.log import LOG as L
THRESHOLD = 0.96
class PatternMatch(object):
def __init__(self):
pass
@classmethod
    def __patternmatch(self, reference, target):
L.info("reference : %s" % reference)
img_rgb = cv2.imread(reference)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(target, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
loc = numpy.where( res >= THRESHOLD)
result = None
for pt in zip(*loc[::-1]):
result = POINT(pt[0], pt[1], w, h)
return result
@classmethod
def bool(self, reference, target):
result = PatternMatch.__patternmatch(reference, target)
if result is None:
return False
else:
return True
@classmethod
def coordinate(self, reference, target):
return PatternMatch.__patternmatch(reference, target)
if __name__ == "__main__":
pmc = PatternMatch()
print pmc.bool(os.path.join(define.APP_TMP,"screen.png"),
os.path.join(define.APP_TMP,"login.png"))
|
arkatebi/SwissProt-stats
|
Ontology/IO/GoaIO.py
|
Python
|
gpl-3.0
| 8,540
| 0.012529
|
# Copyright 2013 by Kamil Koziara. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
I/O operations for gene annotation files.
"""
from __future__ import print_function
import sys
import csv
import collections
from Ontology.Data import GeneAnnotation, TermAssociation
from .Interfaces import OntoIterator, OntoReader
class TsvIterator(OntoIterator):
"""
Parses TSV files
"""
def __init__(self, file_handle):
self._reader = csv.reader(file_handle, delimiter='\t')
def __iter__(self):
return self._reader
def __next__(self):
return next(self._reader)
def next(self):
return next(self._reader)
# GAF version 2.0
GAF20FIELDS = ['DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID']
# GAF version 1.0
GAF10FIELDS = ['DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By']
GAF_VERSION = { "1.0" : GAF10FIELDS,
"2.0" : GAF20FIELDS}
def _split_multi(value):
if len(value) > 0:
return value.split('|')
else:
return []
def _to_goa(obj_rows, version):
row = obj_rows[0]
obj_id = row[1]
obj_attrs = {GAF20FIELDS[0] : row[0],
GAF20FIELDS[2] : row[2],
GAF20FIELDS[9] : row[9],
GAF20FIELDS[10] : _split_multi(row[10]),
GAF20FIELDS[11] : row[11],
GAF20FIELDS[12]: _split_multi(row[12])}
if version == "1.0":
row_len = 15
else:
row_len = 17
obj_attrs[GAF20FIELDS[15]] = _split_multi(row[15])
obj_attrs[GAF20FIELDS[16]] = row[16]
assocs = []
for row in obj_rows:
if len(row) == row_len:
assocs.append(TermAssociation(row[4],
{GAF20FIELDS[3] : _split_multi(row[3]),
GAF20FIELDS[5] : _split_multi(row[5]),
GAF20FIELDS[6] : row[6],
GAF20FIELDS[7] :_split_multi(row[7]),
GAF20FIELDS[8] : row[8],
GAF20FIELDS[13] : row[13],
GAF20FIELDS[14] : row[14]}
))
else:
raise ValueError("Invalid gaf file: Incorrect row length.")
return GeneAnnotation(obj_id, assocs, obj_attrs)
class GafReader(OntoReader):
"""
Reads GAF files into list of GeneAnnotation.
GAF file is list of tab separated values in the following order:
'DB', 'DB Object ID', 'DB Object Symbol', 'Qualifier', 'GO ID',
'DB:Reference', 'Evidence Code', 'With (or) From', 'Aspect',
'DB Object Name', 'DB Object Synonym', 'DB Object Type',
'Taxon', 'Date', 'Assigned By', 'Annotation Extension',
'Gene Product Form ID'
"""
_ID_IDX = 1
def __init__(self, file_handle, assoc_format = "dict"):
"""
Parameters:
----------
- assoc_format - states format of returned association:
o "dict" - as a dictionary (faster)
o "in_mem_sql" - as dict-like object with underlying in-memory database
(more memory efficient)
"""
self.handle = file_handle
self.assoc_format = assoc_format
def read(self):
first = self.handle.readline()
if first and first.startswith('!gaf-version:'):
version = first[(first.find(':') + 1):].strip()
else:
raise ValueError("Invalid gaf file: No version specified.")
if version not in GAF_VERSION:
raise ValueError("Incorrect version.")
tsv_iter = TsvIterator(self.handle)
if self.assoc_format == "dict":
raw_records = collections.defaultdict(list)
for row in tsv_iter:
first = row[0]
if not first.startswith('!'):
raw_records[row[self._ID_IDX]].append(row)
            return dict([(k, _to_goa(v, version)) for k, v in raw_records.items()]) # Possible py2 slow down
elif self.assoc_format == "in_mem_sql":
try:
sqla = InSqlAssoc(GAF_VERSION[version], [1,4], lambda x: _to_goa(x, version))
except ImportError:
print("Error: To use in_mem_sql association you need to have sqlite3 bindings installed.", file=sys.stderr)
else:
for row in tsv_iter:
if not row[0].startswith('!'):
sqla.add_row(row)
return sqla
else:
raise ValueError("Incorrect assoc_format parameter.")
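# A minimal, hypothetical usage sketch for GafReader above (the file name is
# invented; a real GAF file must begin with a '!gaf-version:' header line, as
# read() checks before parsing):
#
#     with open('goa_example.gaf') as handle:
#         annotations = GafReader(handle, assoc_format='dict').read()
#     # 'dict' mode returns a plain dict keyed by DB_Object_ID:
#     for obj_id, gene_annotation in annotations.items():
#         print(obj_id, gene_annotation)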
class InSqlAssoc(object):
"""
Immutable dictionary-like structure for storing annotations.
It provides slower access but is more memory efficient thus more suitable
for big annotations files.
"""
def __init__(self, fields, index, selection_to_obj_fun, db_path = ":memory:"):
"""
Parameters:
----------
- fields - name of the columns in db representation
- index - pair of fields indexing associations: (gene_id, ontology_term_id)
- selection_to_obj_fun - function transforming list of rows into
GeneAssociation
- db_path - path to database file, special value ":memory:" creates
database in memory
"""
import sqlite3
self.fields = fields
self.fun = selection_to_obj_fun
self.con = sqlite3.connect(db_path)
self.index = index
cur = self.con.cursor()
query = 'CREATE TABLE assocs ("' + self.fields[0] + '" '
for field in fields[1:]:
query += ', "' + field + '" '
query += ');'
cur.execute(query)
cur.execute('CREATE INDEX obj_idx ON assocs ({0});'.format(self.fields[index[0]]))
self.con.commit()
def add_row(self, row):
if len(row) != len(self.fields):
raise TypeError("Incorrect number of fields in a row.")
else:
cur = self.con.cursor()
cur.execute("INSERT INTO assocs VALUES (?" + (",?" * (len(self.fields) - 1)) + ");", row)
self.con.commit()
def __len__(self):
cur = self.con.cursor()
cur.execute('SELECT COUNT(DISTINCT "' + self.fields[self.index[0]] + '") FROM assocs;')
return cur.fetchone()[0]
def __contains__(self, key):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
+ '" = ?;', [key])
return len(list(cur)) > 0 #TODO sth prettier
def __getitem__(self, key):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
+ '" = ?;', [key])
return self.fun(list(cur))
def __iter__(self):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs ORDER BY "{0}"'.format(self.fields[self.index[0]]))
cur_id = None
row_list = []
for row in cur:
if cur_id and cur_id != row[self.index[0]]:
obj = self.fun(row_list)
row_list = [row]
cur_id = row[self.index[0]]
yield (cur_id, obj)
else:
cur_id = row[self.in
|
sidnarayanan/RedPanda
|
T3/inputs/pf_tmpl.py
|
Python
|
mit
| 6,716
| 0.018463
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import system,getenv,path
from time import clock,time
import json
which = int(argv[1])
submit_id = int(argv[2])
sname = argv[0]
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.job_management as cb
import RedPanda.Cluster.convert_arrays as ca
Load('PFAnalyzer')
data_dir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/'
stopwatch = clock()
def print_time(label):
global stopwatch
now_ = clock()
PDebug(sname+'.print_time:'+str(time()),
'%.3f s elapsed performing "%s"'%((now_-stopwatch),label))
stopwatch = now_
def copy_local(long_name):
replacements = {
r'\${EOS}':'root://eoscms.cern.ch//store/user/snarayan',
r'\${EOS2}':'root://eoscms.cern.ch//store/group/phys_exotica',
r'\${CERNBOX}':'root://eosuser//eos/user/s/snarayan',
r'\${CERNBOXB}':'root://eosuser//eos/user/b/bmaier',
}
full_path = long_name
for k,v in replacements.iteritems():
full_path = sub(k,v,full_path)
PInfo(sname,full_path)
panda_id = long_name.split('/')[-1].split('_')[-1].replace('.root','')
input_name = 'input_%s.root'%panda_id
# if the file is cached locally, why not use it?
local_path = full_path.replace('root://xrootd.cmsaf.mit.edu/','/mnt/hadoop/cms')
if path.isfile(local_path):
# apparently SmartCached files can be corrupted...
ftest = root.TFile(local_path)
if ftest and not(ftest.IsZombie()):
full_path = local_path
# xrdcp if remote, copy if local
if 'root://' in full_path:
cmd = 'xrdcp %s %s'%(full_path,input_name)
else:
cmd = 'cp %s %s'%(full_path,input_name)
'''
# rely on pxrdcp for local and remote copies
cmd = "pxrdcp %s %s"%(full_path,input_name)
PInfo(sname+'.copy_local',cmd)
'''
system(cmd)
if path.isfile(input_name):
PInfo(sname+'.copy_local','Successfully copied to %s'%(input_name))
return input_name
else:
PError(sname+'.copy_local','Failed to copy %s'%input_name)
return None
def fn(input_name,isData,full_path):
PInfo(sname+'.fn','Starting to process '+input_name)
# now we instantiate and configure the analyzer
analyzer = root.redpanda.PFAnalyzer()
# read the inputs
try:
fin = root.TFile.Open(input_name)
        tree = fin.FindObjectAny("events")
except:
PError(sname+'.fn','Could not read %s'%input_name)
        return False # file open error => xrootd?
if not tree:
PError(sname+'.fn','Could not recover tree in %s'%input_name)
return False
output_name = input_name.replace('input','output')
analyzer.SetOutputFile(output_name)
analyzer.Init(tree)
# run and save output
analyzer.Run()
analyzer.Terminate()
ret = path.isfile(output_name)
if ret:
PInfo(sname+'.fn','Successfully created %s'%(output_name))
return True
else:
PError(sname+'.fn','Failed in creating %s!'%(output_name))
return False
def cleanup(fname):
ret = system('rm -f %s'%(fname))
if ret:
PError(sname+'.cleanup','Removal of %s exited with code %i'%(fname,ret))
else:
PInfo(sname+'.cleanup','Removed '+fname)
return ret
def hadd(good_inputs):
good_outputs = ' '.join([x.replace('input','output') for x in good_inputs])
cmd = 'hadd -f output.root ' + good_outputs
ret = system(cmd)
if not ret:
PInfo(sname+'.hadd','Merging exited with code %i'%ret)
else:
PError(sname+'.hadd','Merging exited with code %i'%ret)
def stageout(infilename,outdir,outfilename):
if path.isdir(outdir): # assume it's a local copy
mvargs = 'mv $PWD/%s %s/%s'%(infilename,outdir,outfilename)
lsargs = 'ls %s/%s'%(outdir,outfilename)
else:
if system('which gfal-copy')==0:
mvargs = 'gfal-copy '
lsargs = 'gfal-ls '
elif system('which lcg-cp')==0:
mvargs = 'lcg-cp -v -D srmv2 -b '
lsargs = 'lcg-ls -v -D srmv2 -b '
else:
PError(sname+'.stageout','Could not find a stageout protocol!')
return -1
mvargs += 'file://$PWD/%s srm://t3serv006.mit.edu:8443/srm/v2/server?SFN=%s/%s'%(infilename,outdir,outfilename)
lsargs += 'srm://t3serv006.mit.edu:8443/srm/v2/server?SFN=%s/%s'%(outdir,outfilename)
PInfo(sname,mvargs)
ret = system(mvargs)
system('rm *.root')
if not ret:
PInfo(sname+'.stageout','Move exited with code %i'%ret)
else:
PError(sname+'.stageout','Move exited with code %i'%ret)
return ret
PInfo(sname,lsargs)
ret = system(lsargs)
if ret:
PError(sname+'.stageout','Output file is missing!')
return ret
return 0
def write_lock(outdir,outfilename,processed):
lockname = outfilename.replace('.root','.lock')
flock = open(lockname,'w')
for k,v in processed.iteritems():
flock.write(v+'\n')
PInfo(sname+'.write_lock','This job successfully processed %i inputs!'%len(processed))
flock.close()
return stageout(lockname,outdir+'/locks/',lockname)
if __name__ == "__main__":
sample_list = cb.read_sample_config('local.cfg',as_dict=False)
to_run = None #sample_list[which]
for s in sample_list:
if which==s.get_id():
to_run = s
break
if not to_run:
PError(sname,'Could not find a job for PROCID=%i'%(which))
exit(3)
outdir = 'XXXX' # will be replaced when building the job
outfilename = to_run.name+'_%i.root'%(submit_id)
processed = {}
print_time('loading')
for f in to_run.files:
input_name = copy_local(f)
print_time('copy %s'%input_name)
if input_name:
success = fn(input_name,(to_run.dtype!='MC'),f)
print_time('analyze %s'%input_name)
if success:
processed[input_name] = f
cleanup(input_name)
print_time('remove %s'%input_name)
if len(processed)==0:
exit(1)
hadd(list(processed))
print_time('hadd')
ca.process_file('output.root')
print_time('conversion')
ret1 = stageout('output.root',outdir,outfilename)
ret2 = stageout('output.npy',outdir,outfilename.replace('.root','.npy'))
print_time('stageout')
system('rm -f *root *npy')
if not ret1 and not ret2:
write_lock(outdir,outfilename,processed)
print_time('create lock')
else:
exit(-1*max(ret1, ret2))
exit(0)
|
shiminasai/mapafinca
|
clima/migrations/0001_initial.py
|
Python
|
mit
| 4,223
| 0.002842
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('lugar', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DiasEfectivoLLuvia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('dias_lluvia', models.FloatField()),
('comunidad', models.ForeignKey(to='lugar.Comunidad')),
('departamento', models.ForeignKey(to='lugar.Departamento')),
('municipio', models.ForeignKey(to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
'verbose_name': 'Dias Efectivo de LLuvia',
'verbose_name_plural': 'Dias Efectivo de LLuvia',
},
),
migrations.CreateModel(
name='Precipitacion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('precipitacion', models.FloatField()),
('total_precipitacion', models.FloatField(editable=False)),
('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
                'verbose_name': 'Precipitaci\xf3n',
'verbose_name_plural': 'Precipitaci\xf3n',
},
),
        migrations.CreateModel(
name='Temperatura',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('temperatura', models.FloatField()),
('total_temperatura', models.FloatField(editable=False)),
('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
'verbose_name': 'Temperatura',
'verbose_name_plural': 'Temperatura',
},
),
]
|
marlengit/BitcoinUnlimited
|
qa/rpc-tests/p2p-versionbits-warning.py
|
Python
|
mit
| 6,299
| 0.002223
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2016 The Bitcoin Unlimited developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
Test version bits' warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
'''
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
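# Editor's note (added for illustration, not in the original test): the version
# used below to signal the unknown soft-fork is simply VB_TOP_BITS with bit 27
# set, i.e. 0x20000000 | (1 << 27) == 0x28000000.
assert VB_TOP_BITS | (1 << VB_UNKNOWN_BIT) == 0x28000000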
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
def on_inv(self, conn, message):
pass
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class VersionBitsWarningTest(BitcoinTestFramework):
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
# Open and close to create zero-length file
with open(self.alert_filename, 'w') as f:
pass
self.node_options = ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]
self.nodes.append(start_node(0, self.options.tmpdir, self.node_options))
import re
self.vb_pattern = re.compile("^Warning.*versionbit")
# Send numblocks blocks via peer with nVersionToUse set.
def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount()
block_time = self.nodes[0].getblockheader(tip)["time"]+1
tip = int(tip, 16)
for i in range(numblocks):
block = create_block(tip, create_coinbase(height+1), block_time)
            block.nVersion = nVersionToUse
block.solve()
peer.send_message(msg_block(block))
block_time += 1
height += 1
tip = block.sha256
peer.sync_with_ping()
def test_versionbits_in_alert_file(self):
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
assert(self.vb_pattern.match(alert_text))
def run_test(self):
# Setup the p2p connection and start up the network thread.
test_node = TestNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
test_node.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
# 1. Have the node mine one period worth of blocks
self.nodes[0].generate(VB_PERIOD)
# 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
# blocks signaling some unknown bit.
nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion)
# Fill rest of period with regular version blocks
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
# Check that we're not getting any versionbit-related errors in
# getinfo()
assert(not self.vb_pattern.match(self.nodes[0].getinfo()["errors"]))
# 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
# some unknown bit
self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion)
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
# Might not get a versionbits-related alert yet, as we should
# have gotten a different alert due to more than 51/100 blocks
# being of unexpected version.
# Check that getinfo() shows some kind of error.
assert(len(self.nodes[0].getinfo()["errors"]) != 0)
# Mine a period worth of expected blocks so the generic block-version warning
# is cleared, and restart the node. This should move the versionbit state
# to ACTIVE.
self.nodes[0].generate(VB_PERIOD)
stop_node(self.nodes[0], 0)
wait_bitcoinds()
# Empty out the alert file
with open(self.alert_filename, 'w') as f:
pass
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
# Connecting one block should be enough to generate an error.
self.nodes[0].generate(1)
assert(len(self.nodes[0].getinfo()["errors"]) != 0)
stop_node(self.nodes[0], 0)
wait_bitcoinds()
self.test_versionbits_in_alert_file()
# Test framework expects the node to still be running...
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
if __name__ == '__main__':
VersionBitsWarningTest().main()
|
JShadowMan/package
|
python/zdl/error_logger/error_logger/adapter/postgresql.py
|
Python
|
mit
| 412
| 0.002427
|
#!/usr/bin/env python
#
# Copyright (C) 2017 DL
#
import psycopg2
from error_logger.adapter import base_adapter
class PostgresqlAdapter(base_adapter.BaseAdapter):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
super(PostgresqlAdapter, self).__init__()
def create_connection(self):
        return psycopg2.connect(*self._args, **self._kwargs)
|
ibc/MediaSoup
|
worker/deps/gyp/test/win/gyptest-system-include.py
|
Python
|
isc
| 476
| 0.010504
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that msvs_system_include_dirs works.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'system-include'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
kinect110/RPSOM
|
src/examples/RPSOM_animal.py
|
Python
|
mit
| 941
| 0.025505
|
#!/usr/local/bin python
# -*- coding: utf-8 -*-
from RPSOM import Model
from RPSOM.transition_graph import output_graph
if __name__=='__main__':
# learning rate alpha setup
alpha_max = [0.1, 0.5, 0.7]
alpha_min = [0.01, 0.1, 0.2]
# neighborhood radius sigma setup
sigma_max = [5, 7, 10]
sigma_min = [1, 2, 3]
epochs = 10
# RPSOM model setup
    rpsom=Model.RPSOM(epochs, 15, 20, input_file="animal.csv", alpha_max=alpha_max, alpha_min=alpha_min, sigma_max=sigma_max, sigma_min=sigma_min, log_file="test.log")
#cb = [som.write_BMU for som in rpsom.som]
cb = None
# RPSOM train
rpsom.fit (trainX=rpsom.input_x, epochs=rpsom.epochs, verbose=0, callbacks=cb)
# Output Map
# Output thickness map
rpsom.map_output2wrl_squ(grad=100, filename="test")
# Output grayscale 2D map
filename="example_animal"
rpsom.map_output2wrl_gray_squ(filename)
    # Output transition graph
output_graph(rpsom)
rpsom.weight_output_csv ("rpsom_weight")
|
stharrold/demo
|
tests/test_app_template/test_app_template__init__.py
|
Python
|
mit
| 591
| 0.003384
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Pytests for demo/app_template/__init__.py
"""
# Import standard packages.
import os
import sys
# Import installed packages.
# Import local packages.
sys.path.insert(0, os.path.curdir)
import demo
def test__all__(
ref_all=[
'main',
'template']
) -> None:
r"""Pytest for __all__
Notes:
* Check that expected modules are exported.
"""
test_all = demo.app_template.__all__
assert ref_all == test_all
for attr in ref_all:
        assert hasattr(demo.app_template, attr)
return None
|
g3rd/django-rszio
|
rszio/__init__.py
|
Python
|
mit
| 18
| 0
|
version = '1.1.0'
| |
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2d/colorbar/tickformatstop/_templateitemname.py
|
Python
|
mit
| 494
| 0
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="histogram2d.colorbar.tickformatstop",
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
dekatzenel/team-k
|
mds/core/models/__init__.py
|
Python
|
bsd-3-clause
| 948
| 0.007384
|
"""
Data models for the core Sana data engine. These should be extended as
required.
:Authors: Sana dev team
:Version: 2.0
"""
from .concept import Concept, Relationship, RelationshipCategory
from .device import Device
from .encounter import Encounter
from .events import Event
from .instruction import Instruction
from .location import Location
from .notification import Notification
from .observation import Observation
from .observer import Observer, Surgeon, SurgicalAdvocate
from .procedure import Procedure
from .subject import Subject, SurgicalSubject
__all__ = ['Concept', 'Relationship','RelationshipCategory',
'Device',
'Encounter',
'Event',
'Instruction',
'Location',
'Notification',
'Observation',
'Observer',
'Procedure',
'Subject',
'SurgicalSubject',
'Surgeon',
'SurgicalAdvocate']
|
qwattash/mpm
|
mpm/cli/mpm.py
|
Python
|
gpl-3.0
| 1,756
| 0.001139
|
# -*- coding: utf-8 -*-
import argparse
import six
parser = argparse.ArgumentParser(description="Minecraft Package Manager")
sub = parser.add_subparsers(help="command help")
# package commands
sync_parser = sub.add_parser("sync",
description="Synchronize local mod archive.",
help="sync --help")
show_parser = sub.add_parser("show",
description="Show mod informations.",
help="show --help")
search_parser = sub.add_parser("search",
description="Search mod archive.",
help="search --help")
update_parser = sub.add_parser("update",
description="Update mods.",
help="update --help")
install_parser = sub.add_parser("install",
description="Install mods.",
help="install --help")
remove_parser = sub.add_parser("remove",
description="Remove mods.",
help="remove --help")
# repo commands
repo_add_parser = sub.add_parser("addrepo",
description="Add mod repository.",
                                 help="addrepo --help")
repo_del_parser = sub.add_parser("rmrepo",
description="Remove mod repository.",
help="rmrepo --help")
repo_show_parser = sub.add_parser("lsrepo",
description="Show mod repository informations.",
help="lsrepo --help")
if __name__ == "__main__":
cmd = parser.parse_args()
six.print_("Done")
|
adarshlx/twitter_nlp
|
hbc/python/tweets2entityDocs.py
|
Python
|
gpl-3.0
| 2,087
| 0.0115
|
#!/usr/bin/python
import sys
sys.path.append('/homes/gws/aritter/twitter_nlp/python')
from twokenize import tokenize
from LdaFeatures import LdaFeatures
from Vocab import Vocab
from Dictionaries import Dictionaries
entityDocs = {}
prevText = None
for line in sys.stdin:
line = line.rstrip('\n')
fields = line.split('\t')
sid = fields[0]
text = fields[6]
words = tokenize(text)
confidence = 1.0 / float(fields[-1])
eType = fields[-2]
entity = fields[-3]
neTags = fields[-4].split(' ')
pos = fields[-5].split(' ')
words = fields[-6].split(' ')
    #Just skip duplicate texts (will come from tweets with more than one entity)
if prevText and prevText == text:
continue
prevText = text
features = LdaFeatures(words, neTags)
for i in range(len(features.entities)):
entity = ' '.join(features.words[features.entities[i][0]:features.entities[i][1]])
entityDocs[entity] = entityDocs.get(entity,[])
entityDocs[entity].append(features.features[i])
dictionaries = Dictionaries('/homes/gws/aritter/twitter_nlp/data/LabeledLDA_dictionaries')
vocab = Vocab()
keys = entityDocs.keys()
keys.sort(cmp=lambda a,b: cmp(len(entityDocs[b]),len(entityDocs[a])))
eOut = open('entities', 'w')
lOut = open('labels', 'w')
dOut = open('dictionaries', 'w')
for e in keys:
labels = dictionaries.GetDictVector(e)
###############################################################################
#NOTE: For now, only include entities which appear in one or more dictionary
# we could modify this to give them membership in all, or no dictionaries
# (in LabeledLDA, don't impose any constraints)
    ###############################################################################
if sum(labels) > 0:
lOut.write(' '.join([str(x) for x in labels]) + "\n")
eOut.write("%s\n" % e)
print '\t'.join([' '.join([str(vocab.GetID(x)) for x in f]) for f in entityDocs[e]])
vocab.SaveVocab('vocab')
for d in dictionaries.dictionaries:
    dOut.write(d + "\n")
|
jiajiechen/mxnet
|
python/mxnet/gluon/model_zoo/vision/densenet.py
|
Python
|
apache-2.0
| 7,888
| 0.002789
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""DenseNet, implemented in Gluon."""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ...contrib.nn import HybridConcurrent, Identity
# Helpers
def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index):
out = nn.HybridSequential(prefix='stage%d_'%stage_index)
with out.name_scope():
for _ in range(num_layers):
out.add(_make_dense_layer(growth_rate, bn_size, dropout))
return out
def _make_dense_layer(growth_rate, bn_size, dropout):
new_features = nn.HybridSequential(prefix='')
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
if dropout:
new_features.add(nn.Dropout(dropout))
out = HybridConcurrent(axis=1, prefix='')
out.add(Identity())
out.add(new_features)
return out
def _make_transition(num_output_features):
out = nn.HybridSequential(prefix='')
out.add(nn.BatchNorm())
out.add(nn.Activation('relu'))
out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
out.add(nn.AvgPool2D(pool_size=2, strides=2))
return out
# Net
class DenseNet(HybridBlock):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_init_features : int
Number of filters to learn in the first convolution layer.
growth_rate : int
Number of filters to add each layer (`k` in the paper).
block_config : list of int
List of integers for numbers of layers in each pooling block.
bn_size : int, default 4
Multiplicative factor for number of bottle neck layers.
(i.e. bn_size * k features in the bottleneck layer)
dropout : float, default 0
Rate of dropout after each dense layer.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self, num_init_features, growth_rate, block_config,
bn_size=4, dropout=0, classes=1000, **kwargs):
super(DenseNet, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
strides=2, padding=3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
# Add dense blocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
self.features.add(_make_transition(num_features // 2))
num_features = num_features // 2
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.AvgPool2D(pool_size=7))
self.features.add(nn.Flatten())
self.output = nn.Dense(classes)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32])}
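# Editor's sanity check (added, not in the upstream file): the variant name counts
# every weighted layer: two convs per dense layer, one conv per transition
# (len(blocks) - 1 transitions), plus the stem conv and the final Dense layer.
assert all(2 * sum(blocks) + len(blocks) + 1 == num_layers
           for num_layers, (_, _, blocks) in densenet_spec.items())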
# Constructor
def get_densenet(num_layers, pretrained=False, ctx=cpu(),
root=os.path.join('~', '.mxnet', 'models'), **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_params(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
def densenet121(**kwargs):
r"""Densenet-BC 121-layer model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(121, **kwargs)
def densenet161(**kwargs):
r"""Densenet-BC 161-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(161, **kwargs)
def densenet169(**kwargs):
r"""Densenet-BC 169-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(169, **kwargs)
def densenet201(**kwargs):
r"""Densenet-BC 201-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(201, **kwargs)
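# Editor's hedged usage sketch (added; not part of the upstream file). The
# constructors above return a Gluon HybridBlock, so the usual initialize /
# hybridize / forward flow applies; the input shape and class count below are
# arbitrary example values.
def _densenet_example():
    from mxnet import init, nd
    net = densenet121(classes=10)               # fresh (non-pretrained) weights
    net.initialize(init.Xavier())
    net.hybridize()
    x = nd.random.uniform(shape=(1, 3, 224, 224))
    return net(x)                               # expected output shape: (1, 10)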
|
Frechdachs/python-mpv
|
mpv.py
|
Python
|
agpl-3.0
| 42,232
| 0.006796
|
from ctypes import *
import ctypes.util
import threading
import os
import sys
from warnings import warn
from functools import partial
import collections
import re
import traceback
# vim: ts=4 sw=4 et
if os.name == 'nt':
backend = CDLL('mpv-1.dll')
fs_enc = 'utf-8'
else:
import locale
lc, enc = locale.getlocale(locale.LC_NUMERIC)
# libmpv requires LC_NUMERIC to be set to "C". Since messing with global variables everyone else relies upon is
# still better than segfaulting, we are setting LC_NUMERIC to "C".
locale.setlocale(locale.LC_NUMERIC, 'C')
sofile = ctypes.util.find_library('mpv')
if sofile is None:
raise OSError("Cannot find libmpv in the usual places. Depending on your distro, you may try installing an "
"mpv-devel or mpv-libs package. If you have libmpv around but this script can't find it, maybe consult "
"the documentation for ctypes.util.find_library which this script uses to look up the library "
"filename.")
backend = CDLL(sofile)
fs_enc = sys.getfilesystemencoding()
class MpvHandle(c_void_p):
pass
class MpvOpenGLCbContext(c_void_p):
pass
class PropertyUnavailableError(AttributeError):
pass
class ErrorCode(object):
""" For documentation on these, see mpv's libmpv/client.h """
SUCCESS = 0
EVENT_QUEUE_FULL = -1
NOMEM = -2
UNINITIALIZED = -3
INVALID_PARAMETER = -4
OPTION_NOT_FOUND = -5
OPTION_FORMAT = -6
OPTION_ERROR = -7
PROPERTY_NOT_FOUND = -8
PROPERTY_FORMAT = -9
PROPERTY_UNAVAILABLE = -10
PROPERTY_ERROR = -11
COMMAND = -12
EXCEPTION_DICT = {
0: None,
-1: lambda *a: MemoryError('mpv event queue full', *a),
-2: lambda *a: MemoryError('mpv cannot allocate memory', *a),
-3: lambda *a: ValueError('Uninitialized mpv handle used', *a),
-4: lambda *a: ValueError('Invalid value for mpv parameter', *a),
-5: lambda *a: AttributeError('mpv option does not exist', *a),
-6: lambda *a: TypeError('Tried to set mpv option using wrong format', *a),
-7: lambda *a: ValueError('Invalid value for mpv option', *a),
-8: lambda *a: AttributeError('mpv property does not exist', *a),
# Currently (mpv 0.18.1) there is a bug causing a PROPERTY_FORMAT error to be returned instead of
# INVALID_PARAMETER when setting a property-mapped option to an invalid value.
-9: lambda *a: TypeError('Tried to get/set mpv property using wrong format, or passed invalid value', *a),
-10: lambda *a: PropertyUnavailableError('mpv property is not available', *a),
-11: lambda *a: RuntimeError('Generic error getting or setting mpv property', *a),
-12: lambda *a: SystemError('Error running mpv command', *a) }
@staticmethod
def default_error_handler(ec, *args):
return ValueError(_mpv_error_string(ec).decode('utf-8'), ec, *args)
@classmethod
def raise_for_ec(kls, ec, func, *args):
ec = 0 if ec > 0 else ec
ex = kls.EXCEPTION_DICT.get(ec , kls.default_error_handler)
if ex:
raise ex(ec, *args)
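# Editor's illustrative sketch (added): how raise_for_ec maps libmpv error codes
# onto the exceptions above. Non-negative codes are treated as success; known
# negative codes raise their mapped exception type. The arguments are made up.
def _errorcode_example():
    ErrorCode.raise_for_ec(5, None)                  # clamped to 0: no exception
    try:
        ErrorCode.raise_for_ec(-10, None, 'volume')  # -10 -> PropertyUnavailableError
    except PropertyUnavailableError:
        pass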
class MpvFormat(c_int):
NONE = 0
STRING = 1
OSD_STRING = 2
FLAG = 3
INT64 = 4
DOUBLE = 5
NODE = 6
NODE_ARRAY = 7
NODE_MAP = 8
BYTE_ARRAY = 9
def __eq__(self, other):
return self is other or self.value == other or self.value == int(other)
def __repr__(self):
return ['NONE', 'STRING', 'OSD_STRING', 'FLAG', 'INT64', 'DOUBLE', 'NODE', 'NODE_ARRAY', 'NODE_MAP',
'BYTE_ARRAY'][self.value]
class MpvEventID(c_int):
NONE = 0
SHUTDOWN = 1
LOG_MESSAGE = 2
GET_PROPERTY_REPLY = 3
SET_PROPERTY_REPLY = 4
COMMAND_REPLY = 5
START_FILE = 6
END_FILE = 7
FILE_LOADED = 8
TRACKS_CHANGED = 9
TRACK_SWITCHED = 10
IDLE = 11
PAUSE = 12
UNPAUSE = 13
TICK = 14
SCRIPT_INPUT_DISPATCH = 15
CLIENT_MESSAGE = 16
VIDEO_RECONFIG = 17
AUDIO_RECONFIG = 18
METADATA_UPDATE = 19
SEEK = 20
PLAYBACK_RESTART = 21
PROPERTY_CHANGE = 22
CHAPTER_CHANGE = 23
ANY = ( SHUTDOWN, LOG_MESSAGE, GET_PROPERTY_REPLY, SET_PROPERTY_REPLY, COMMAND_REPLY, START_FILE, END_FILE,
FILE_LOADED, TRACKS_CHANGED, TRACK_SWITCHED, IDLE, PAUSE, UNPAUSE, TICK, SCRIPT_INPUT_DISPATCH,
            CLIENT_MESSAGE, VIDEO_RECONFIG, AUDIO_RECONFIG, METADATA_UPDATE, SEEK, PLAYBACK_RESTART, PROPERTY_CHANGE,
CHAPTER_CHANGE )
def __repr__(self):
        return ['NONE', 'SHUTDOWN', 'LOG_MESSAGE', 'GET_PROPERTY_REPLY', 'SET_PROPERTY_REPLY', 'COMMAND_REPLY',
'START_FILE', 'END_FILE', 'FILE_LOADED', 'TRACKS_CHANGED', 'TRACK_SWITCHED', 'IDLE', 'PAUSE', 'UNPAUSE',
'TICK', 'SCRIPT_INPUT_DISPATCH', 'CLIENT_MESSAGE', 'VIDEO_RECONFIG', 'AUDIO_RECONFIG',
'METADATA_UPDATE', 'SEEK', 'PLAYBACK_RESTART', 'PROPERTY_CHANGE', 'CHAPTER_CHANGE'][self.value]
class MpvNodeList(Structure):
def array_value(self, decode_str=False):
return [ self.values[i].node_value(decode_str) for i in range(self.num) ]
def dict_value(self, decode_str=False):
return { self.keys[i].decode('utf-8'): self.values[i].node_value(decode_str) for i in range(self.num) }
class MpvNode(Structure):
_fields_ = [('val', c_longlong),
('format', MpvFormat)]
def node_value(self, decode_str=False):
return MpvNode.node_cast_value(byref(c_void_p(self.val)), self.format.value, decode_str)
@staticmethod
def node_cast_value(v, fmt, decode_str=False):
dwrap = lambda s: s.decode('utf-8') if decode_str else s
return {
MpvFormat.NONE: lambda v: None,
MpvFormat.STRING: lambda v: dwrap(cast(v, POINTER(c_char_p)).contents.value),
MpvFormat.OSD_STRING: lambda v: cast(v, POINTER(c_char_p)).contents.value.decode('utf-8'),
MpvFormat.FLAG: lambda v: bool(cast(v, POINTER(c_int)).contents.value),
MpvFormat.INT64: lambda v: cast(v, POINTER(c_longlong)).contents.value,
MpvFormat.DOUBLE: lambda v: cast(v, POINTER(c_double)).contents.value,
MpvFormat.NODE: lambda v: cast(v, POINTER(MpvNode)).contents.node_value(decode_str),
MpvFormat.NODE_ARRAY: lambda v: cast(v, POINTER(POINTER(MpvNodeList))).contents.contents.array_value(decode_str),
MpvFormat.NODE_MAP: lambda v: cast(v, POINTER(POINTER(MpvNodeList))).contents.contents.dict_value(decode_str),
MpvFormat.BYTE_ARRAY: lambda v: cast(v, POINTER(c_char_p)).contents.value,
}[fmt](v)
MpvNodeList._fields_ = [('num', c_int),
('values', POINTER(MpvNode)),
('keys', POINTER(c_char_p))]
class MpvSubApi(c_int):
MPV_SUB_API_OPENGL_CB = 1
class MpvEvent(Structure):
_fields_ = [('event_id', MpvEventID),
('error', c_int),
('reply_userdata', c_ulonglong),
('data', c_void_p)]
def as_dict(self):
dtype = {MpvEventID.END_FILE: MpvEventEndFile,
MpvEventID.PROPERTY_CHANGE: MpvEventProperty,
MpvEventID.GET_PROPERTY_REPLY: MpvEventProperty,
MpvEventID.LOG_MESSAGE: MpvEventLogMessage,
MpvEventID.SCRIPT_INPUT_DISPATCH: MpvEventScriptInputDispatch,
MpvEventID.CLIENT_MESSAGE: MpvEventClientMessage
}.get(self.event_id.value, None)
r
|
aurule/npc
|
tests/commands/listing/templates/sections/test_simple_section_md.py
|
Python
|
mit
| 991
| 0.005045
|
import npc
from mako.template import Template
def template_output(sectioner):
template_path = str(npc.settings.InternalSettings().get('listing.templates.markdown.sections.simple'))
section_template = Template(filename=template_path)
return section_template.render(sectioner=sectioner)
def test_generates_hashes_for_header_level(prefs):
sectioner = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
sectioner.current_text = 'test text'
    output = template_output(sectioner)
assert '###' in output
def test_includes_current_text(prefs):
sectioner = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
sectioner.current_text = 'test text'
output = template_output(sectioner)
assert 'test text' in output
def test_formatted_output(prefs):
sectioner = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
sectioner.current_text = 'test text'
output = template_output(sectioner)
assert output == '### test text\n\n'
|
ArchiveTeam/qwiki-discovery
|
pipeline.py
|
Python
|
unlicense
| 7,004
| 0.001428
|
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import shutil
import socket
import sys
import time
import random
import seesaw
from seesaw.config import NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"):
raise Exception("This pipeline needs seesaw version 0.1.5 or higher.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20140927.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'qwikidisco'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
dirname = "/".join((item["data_dir"], item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix,
item_name.replace(':', '_'),
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
os.rename("%(item_dir)s/%(warc_file_base)s.txt.gz" % item,
"%(data_dir)s/%(warc_file_base)s.txt.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
class CustomProcessArgs(object):
def realize(self, item):
item_type, item_value = item['item_name'].split(':', 1)
if item_type == 'page':
# Expect something like page:0-99999 or page:100000-199999
start_num, end_num = item_value.split('-', 1)
return ['python', 'discover.py', start_num, end_num,
"%(item_dir)s/%(warc_file_base)s.txt.gz" % item]
else:
raise ValueError('unhandled item type: {0}'.format(item_type))
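# Editor's illustrative sketch (added): the item-name convention realize() expects.
# 'page:<start>-<end>' is split into the two discover.py arguments; any other
# prefix raises ValueError. The numbers are made-up examples.
def _item_name_example():
    item_type, item_value = 'page:100000-199999'.split(':', 1)
    start_num, end_num = item_value.split('-', 1)
    assert (item_type, start_num, end_num) == ('page', '100000', '199999')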
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
SCRIPT_SHA1 = get_hash(os.path.join(CWD, 'discover.py'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'python_version': sys.version,
'script_hash': SCRIPT_SHA1,
}
return d
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="Qwiki Discovery",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/1/10/Qwiki_Logo_June_2012.png" height="50px" title=""/>
<h2>Qwiki Phase 1.
<span class="links">
<a href="http://www.qwiki.com/">Website</a> ·
<a href="http://tracker.archiveteam.org/qwikidisco/">Leaderboard</a>
        <a href="http://archiveteam.org/index.php?title=Qwiki">Wiki</a> ·
</span>
</h2>
<p>Qwiki shuts down. This is phase 1: content discovery.</p>
""",
utc_deadline=datetime.datetime(2014, 11, 1, 23, 59, 0)
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="qwikidisco"),
    ExternalProcess('Scraper', CustomProcessArgs(),
max_tries=2,
accept_on_exit_code=[0],
env={
"item_dir": ItemValue("item_dir")
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.txt.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.txt.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp"
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/virtual_network_peering.py
|
Python
|
mit
| 4,140
| 0.001691
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network.
:type remote_virtual_network: :class:`SubResource
<azure.mgmt.network.v2017_03_01.models.SubResource>`
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or :class:`VirtualNetworkPeeringState
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkPeeringState>`
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, allow_virtual_network_access=None, allow_forwarded_traffic=None, allow_gateway_transit=None, use_remote_gateways=None, remote_virtual_network=None, peering_state=None, provisioning_state=None, name=None, etag=None):
super(VirtualNetworkPeering, self).__init__(id=id)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
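# Editor's hedged usage sketch (added; not part of the generated SDK file):
# constructing a peering model purely from the keyword arguments documented
# above. The remote network resource id is a made-up example value.
def _peering_example():
    remote = SubResource(
        id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
           '/providers/Microsoft.Network/virtualNetworks/remote-vnet')
    return VirtualNetworkPeering(
        allow_virtual_network_access=True,
        allow_forwarded_traffic=False,
        use_remote_gateways=False,
        remote_virtual_network=remote,
        name='peer-to-remote-vnet')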
|
rishita/mxnet
|
example/rcnn/rcnn/processing/roidb.py
|
Python
|
apache-2.0
| 3,603
| 0.00111
|
"""
roidb
basic format [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
extended ['image', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
from __future__ import print_function
import cv2
import numpy as np
from bbox_regression import compute_bbox_regression_targets
from rcnn.config import config
def prepare_roidb(imdb, roidb):
"""
add image path, max_classes, max_overlaps to roidb
:param imdb: image database, provide path
:param roidb: roidb
:return: None
"""
print('prepare roidb')
for i in range(len(roidb)): # image_index
roidb[i]['image'] = imdb.image_path_from_index(imdb.image_set_index[i])
if config.TRAIN.ASPECT_GROUPING:
size = cv2.imread(roidb[i]['image']).shape
roidb[i]['height'] = size[0]
roidb[i]['width'] = size[1]
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_overlaps'] = max_overlaps
roidb[i]['max_classes'] = max_classes
# background roi => background class
zero_indexes = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_indexes] == 0)
# foreground roi => foreground class
nonzero_indexes = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_indexes] != 0)
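# Editor's illustrative sketch (added): what the max/argmax pair above computes
# for one toy image. Each row of gt_overlaps is a proposal, each column a class;
# a background proposal overlaps nothing, so its argmax stays at class 0.
def _overlaps_example():
    gt_overlaps = np.array([[0.0, 0.9, 0.0],   # proposal 0 -> class 1 (foreground)
                            [0.0, 0.0, 0.6],   # proposal 1 -> class 2 (foreground)
                            [0.0, 0.0, 0.0]])  # proposal 2 -> background
    max_overlaps = gt_overlaps.max(axis=1)
    max_classes = gt_overlaps.argmax(axis=1)
    assert list(max_classes) == [1, 2, 0]
    assert all(max_classes[np.where(max_overlaps == 0)[0]] == 0)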
def add_bbox_regression_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
print('add bounding box regression targets')
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in range(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
# use fixed / precomputed means and stds instead of empirical values
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
else:
# compute mean, std values
class_counts = np.zeros((num_classes, 1)) + config.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
if cls_indexes.size > 0:
class_counts[cls] += cls_indexes.size
sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
                    squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)
means = sums / class_counts
# var(x) = E(x^2) - E(x)^2
stds = np.sqrt(squared_sums / class_counts - means ** 2)
# normalized targets
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
return means.ravel(), stds.ravel()
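# Editor's illustrative check (added): the shortcut used above for the standard
# deviation, var(x) = E(x^2) - E(x)^2, agrees with numpy's own population std.
# The sample values are arbitrary.
def _std_identity_example():
    x = np.array([1.0, 2.0, 4.0, 7.0])
    mean = x.sum() / x.size
    std = np.sqrt((x ** 2).sum() / x.size - mean ** 2)
    assert np.isclose(std, x.std())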
|
drzoidberg33/plexpy
|
plexpy/pmsconnect.py
|
Python
|
gpl-3.0
| 155,901
| 0.005215
|
# This file is part of Tautulli.
#
# Tautulli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tautulli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import time
import urllib
import plexpy
import activity_processor
import common
import helpers
import http_handler
import logger
import plextv
import session
import users
def get_server_friendly_name():
logger.info(u"Tautulli Pmsconnect :: Requesting name from server...")
server_name = PmsConnect().get_server_pref(pref='FriendlyName')
# If friendly name is blank
if not server_name:
servers_info = PmsConnect().get_servers_info()
for server in servers_info:
if server['machine_identifier'] == plexpy.CONFIG.PMS_IDENTIFIER:
server_name = server['name']
break
if server_name and server_name != plexpy.CONFIG.PMS_NAME:
plexpy.CONFIG.__setattr__('PMS_NAME', server_name)
plexpy.CONFIG.write()
logger.info(u"Tautulli Pmsconnect :: Server name retrieved.")
return server_name
class PmsConnect(object):
"""
Retrieve data from Plex Server
"""
def __init__(self, url=None, token=None):
self.url = url
self.token = token
if not self.url and plexpy.CONFIG.PMS_URL:
self.url = plexpy.CONFIG.PMS_URL
elif not self.url:
self.url = 'http://{hostname}:{port}'.format(hostname=plexpy.CONFIG.PMS_IP,
port=plexpy.CONFIG.PMS_PORT)
self.timeout = plexpy.CONFIG.PMS_TIMEOUT
if not self.token:
# Check if we should use the admin token, or the guest server token
if session.get_session_user_id():
user_data = users.Users()
user_tokens = user_data.get_tokens(user_id=session.get_session_user_id())
self.token = user_tokens['server_token']
else:
self.token = plexpy.CONFIG.PMS_TOKEN
self.request_handler = http_handler.HTTPHandler(urls=self.url,
token=self.token,
timeout=self.timeout)
def get_sessions(self, output_format=''):
"""
Return current sessions.
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/status/sessions'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_sessions_terminate(self, session_id='', reason='', output_format=''):
"""
        Terminate the session matching session_id, with the given reason.
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/status/sessions/terminate?sessionId=%s&reason=%s' % (session_id, reason)
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_metadata(self, rating_key='', output_format=''):
"""
Return metadata for request item.
Parameters required: rating_key { Plex ratingKey }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/metadata/' + rating_key
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_metadata_children(self, rating_key='', output_format=''):
"""
Return metadata for children of the request item.
Parameters required: rating_key { Plex ratingKey }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/metadata/' + rating_key + '/children'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_metadata_grandchildren(self, rating_key='', output_format=''):
"""
        Return metadata for grandchildren of the request item.
Parameters required: rating_key { Plex ratingKey }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/metadata/' + rating_key + '/grandchildren'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_recently_added(self, start='0', count='0', output_format=''):
"""
Return list of recently added items.
Parameters required: count { number of results to return }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/recentlyAdded?X-Plex-Container-Start=%s&X-Plex-Container-Size=%s' % (start, count)
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_library_recently_added(self, section_id='', start='0', count='0', output_format=''):
"""
Return list of recently added items.
Parameters required: count { number of results to return }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/sections/%s/recentlyAdded?X-Plex-Container-Start=%s&X-Plex-Container-Size=%s' % (section_id, start, count)
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_children_list_related(self, rating_key='', output_format=''):
"""
Return list of related children in requested collection item.
Parameters required: rating_key { ratingKey of parent }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/hubs/metadata/' + rating_key + '/related'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_childrens_list(self, rating_key='', output_format=''):
"""
Return list of children in requested library item.
        Parameters required: rating_key { ratingKey of parent }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/library/metadata/' + rating_key + '/allLeaves'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_server_list(self, output_format=''):
"""
Return list of local server
|
ets-labs/python-dependency-injector
|
examples/containers/override.py
|
Python
|
bsd-3-clause
| 578
| 0
|
"""Container overriding example."""
from dependency_injector import containers, providers
class Service:
...
class ServiceStub:
...
class Container(containers.DeclarativeContainer):
service = providers.Factory(Service)
class OverridingContainer(containers.DeclarativeContainer):
    service = providers.Factory(ServiceStub)
if __name__ == "__main__":
container = Container()
overriding_container = OverridingContainer()
container.override(overriding_container)
service = container.service()
assert isinstance(service, ServiceStub)
|
khapota/messages-terminal
|
test.py
|
Python
|
mit
| 1,064
| 0.003759
|
from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
|
JGiola/swift
|
utils/swift_build_support/swift_build_support/products/llvm.py
|
Python
|
apache-2.0
| 2,666
| 0
|
# swift_build_support/products/llvm.py --------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from . import cmark
from . import product
from ..cmake import CMakeOptions
class LLVM(product.Product):
def __init__(self, args, toolchain, source_dir, build_dir):
product.Product.__init__(self, args, toolchain, source_dir,
build_dir)
# Add the cmake option for enabling or disabling assertions.
self.cmake_options.define(
'LLVM_ENABLE_ASSERTIONS:BOOL', args.llvm_assertions)
# Add the cmake option for LLVM_TARGETS_TO_BUILD.
self.cmake_options.define(
'LLVM_TARGETS_TO_BUILD', args.llvm_targets_to_build)
        # Add the cmake options for vendors
self.cmake_options.extend(self._compiler_vendor_flags)
# Add the cmake options for compiler version information.
self.cmake_options.extend(self._version_flags)
@classmethod
def is_build_script_impl_product(cls):
"""is_build_script_impl_product -> boo
|
l
Whether this product is produced by build-script-impl.
"""
return True
@classmethod
def is_before_build_script_impl_product(cls):
"""is_before_build_script_impl_product -> bool
Whether this product is build before any build-script-impl products.
"""
return False
@property
def _compiler_vendor_flags(self):
if self.args.compiler_vendor == "none":
return []
if self.args.compiler_vendor != "apple":
raise RuntimeError("Unknown compiler vendor?!")
return [
('CLANG_VENDOR', 'Apple'),
('CLANG_VENDOR_UTI', 'com.apple.compilers.llvm.clang'),
# This is safe since we always provide a default.
('PACKAGE_VERSION', str(self.args.clang_user_visible_version))
]
@property
def _version_flags(self):
result = CMakeOptions()
if self.args.clang_compiler_version is not None:
result.define(
'CLANG_REPOSITORY_STRING',
"clang-{}".format(self.args.clang_compiler_version))
return result
@classmethod
def get_dependencies(cls):
return [cmark.CMark]
|
Snesha/azure-linux-extensions
|
AzureEnhancedMonitor/ext/test/test_aem.py
|
Python
|
apache-2.0
| 15,614
| 0.00237
|
#!/usr/bin/env python
#
#CustomScript extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+
#
import sys
import unittest
import env
import os
import json
import datetime
from Utils.WAAgentUtil import waagent
import aem
import handler
TestPublicConfig = """\
{
"cfg": [{
"key": "vmsize",
"value": "Small (A1)"
},{
"key": "vm.roleinstance",
"value": "osupdate"
},{
"key": "vm.role",
"value": "IaaS"
},{
"key": "vm.deploymentid",
"value": "cd98461b43364478a908d03d0c3135a7"
},{
"key": "vm.memory.isovercommitted",
"value": 0
},{
"key": "vm.cpu.isovercommitted",
"value": 0
},{
"key": "script.version",
"value": "1.2.0.0"
},{
"key": "verbose",
"value": "0"
},{
"key": "osdisk.connminute",
"value": "asdf.minute"
},{
"key": "osdisk.connhour",
"value": "asdf.hour"
},{
"key": "osdisk.name",
"value": "osupdate-osupdate-2015-02-12.vhd"
},{
"key": "asdf.hour.uri",
"value": "https://asdf.table.core.windows.net/$metricshourprimarytransactionsblob"
},{
"key": "asdf.minute.uri",
"value": "https://asdf.table.core.windows.net/$metricsminuteprimarytransactionsblob"
},{
"key": "asdf.hour.name",
"value": "asdf"
},{
"key": "asdf.minute.name",
"value": "asdf"
},{
"key": "wad.name",
"value": "asdf"
},{
"key": "wad.isenabled",
"value": "1"
},{
"key": "wad.uri",
"value": "https://asdf.table.core.windows.net/wadperformancecounterstable"
}]
}
"""
TestPrivateConfig = """\
{
"cfg" : [{
"key" : "asdf.minute.key",
"value" : "qwer"
},{
"key" : "wad.key",
"value" : "qwer"
}]
}
"""
class TestAEM(unittest.TestCase):
def setUp(self):
waagent.LoggerInit("/dev/null", "/dev/stdout")
def test_config(self):
publicConfig = json.loads(TestPublicConfig)
privateConfig = json.loads(TestPrivateConfig)
config = aem.EnhancedMonitorConfig(publicConfig, privateConfig)
self.assertNotEquals(None, config)
self.assertEquals(".table.core.windows.net",
config.getStorageHostBase('asdf'))
self.assertEquals(".table.core.windows.net",
config.getLADHostBase())
return config
def test_static_datasource(self):
config = self.test_config()
dataSource = aem.StaticDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
name = "Cloud Provider"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("Microsoft Azure", counter.value)
name = "Virtualization Solution Version"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
name = "Vir
|
tualization Solution"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
name = "Instance Type"
        counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("Small (A1)", counter.value)
name = "Data Sources"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("wad", counter.value)
name = "Data Provider Version"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("1.0.0", counter.value)
name = "Memory Over-Provisioning"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("no", counter.value)
name = "CPU Over-Provisioning"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("no", counter.value)
def test_cpuinfo(self):
cpuinfo = aem.CPUInfo.getCPUInfo()
self.assertNotEquals(None, cpuinfo)
self.assertNotEquals(0, cpuinfo.getNumOfCoresPerCPU())
self.assertNotEquals(0, cpuinfo.getNumOfCores())
self.assertNotEquals(None, cpuinfo.getProcessorType())
self.assertEquals(float, type(cpuinfo.getFrequency()))
self.assertEquals(bool, type(cpuinfo.isHyperThreadingOn()))
percent = cpuinfo.getCPUPercent()
self.assertEquals(float, type(percent))
self.assertTrue(percent >= 0 and percent <= 100)
def test_meminfo(self):
meminfo = aem.MemoryInfo()
self.assertNotEquals(None, meminfo.getMemSize())
self.assertEquals(long, type(meminfo.getMemSize()))
percent = meminfo.getMemPercent()
self.assertEquals(float, type(percent))
self.assertTrue(percent >= 0 and percent <= 100)
def test_networkinfo(self):
netinfo = aem.NetworkInfo()
adapterIds = netinfo.getAdapterIds()
self.assertNotEquals(None, adapterIds)
self.assertNotEquals(0, len(adapterIds))
adapterId = adapterIds[0]
self.assertNotEquals(None, aem.getMacAddress(adapterId))
self.assertNotEquals(None, netinfo.getNetworkReadBytes())
self.assertNotEquals(None, netinfo.getNetworkWriteBytes())
self.assertNotEquals(None, netinfo.getNetworkPacketRetransmitted())
def test_hwchangeinfo(self):
netinfo = aem.NetworkInfo()
testHwInfoFile = "/tmp/HwInfo"
aem.HwInfoFile = testHwInfoFile
if os.path.isfile(testHwInfoFile):
os.remove(testHwInfoFile)
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
self.assertTrue(os.path.isfile, aem.HwInfoFile)
#No hardware change
lastChange = hwChangeInfo.getLastHardwareChange()
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertEquals(lastChange, hwChangeInfo.getLastHardwareChange())
#Create mock hardware
waagent.SetFileContents(testHwInfoFile, ("0\nma-ca-sa-ds-02"))
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
def test_linux_metric(self):
config = self.test_config()
metric = aem.LinuxMetric(config)
self.validate_cnm_metric(metric)
#Metric for CPU, network and memory
def validate_cnm_metric(self, metric):
self.assertNotEquals(None, metric.getCurrHwFrequency())
self.assertNotEquals(None, metric.getMaxHwFrequency())
self.assertNotEquals(None, metric.getCurrVMProcessingPower())
self.assertNotEquals(None, metric.getGuaranteedMemAssigned())
self.assertNotEquals(None, metric.getMaxVMProcessingPower())
self.assertNotEquals(None, metric.getNumOfCoresPerCPU())
self.assertNotEquals(None, metric.getNumOfThreadsPerCore())
self.assertNotEquals(None, metric.getPhysProcessingPowerPerVCPU())
self.assertNotEquals(None, metric.getProcessorType())
self.assertNotEquals(None, metric.getReferenceComputeUnit())
self.assertNotEquals(None, metric.getVCPUM
|
TeppieC/M-ords
|
mords_backend/mords_api/migrations/0022_auto_20161212_0008.py
|
Python
|
mit
| 441
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-12 07:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mords_api', '0021_learningword'),
]
operations = [
migrations.RenameField(
model_name='learningword',
old_name='updated_date',
new_name='update_date',
),
]
|
bgarrels/sky
|
sky/dbpedia.py
|
Python
|
bsd-3-clause
| 1,490
| 0
|
import os
try:
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))
except ImportError:
print("Cannot use dbpedia without 'pip install nltk'")
try:
import ujson as json
except ImportError:
import json
def generate_testables(words, stopword_set, n_grams=4):
grams = set()
n = len(words)
for i in range(len(words)):
for j in range(n_grams):
if n - j > i:
ws = words[i: i + j + 1]
if any(['NN' not in x[1] for x in ws]):
continue
word_list = [x[0].lower() for x in ws]
if any([len(x) < 3 for x in word_list]):
continue
                if set(word_list) & stopword_set:
continue
grams.add((" ".join([x[1] for x in ws]), " ".join(word_list)))
return grams
def get_dbpedia_from_words(pos_tags, db_dict, ok_entities=None):
if ok_entities is None:
        ok_entities = ['Person', 'Organisation']
ws = generate_testables(pos_tags, stopset)
classes = []
for x in ws:
if x[1] in db_dict:
for y in db_dict[x[1]]:
if y in ok_entities:
classes.append(('db_' + y + '_' + x[0], x))
break
return classes
def load_dbpedia():
# looks for 'dbpedia.json'; sky/sky/dbpedia.json
with open(os.path.join(os.path.dirname(__file__), 'dbpedia.json')) as f:
return json.load(f)
|
stefanwebb/tensorflow-models
|
tensorflow_models/trainers/svb_concrete_np.py
|
Python
|
mit
| 3,783
| 0.019826
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import tensorflow as tf
import tensorflow_models as tf_models
from tensorflow_models.trainers import BaseTrainer
class Trainer(BaseTrainer):
def finalize_hook(self):
		print('Done training for {} epochs'.format(self.epoch()))
def learning_hooks(self):
train_op = tf_models.get_inference('elbo')
#train_loss_op = tf_models.get_loss('train/elbo_discrete')
#test_loss_op = tf_models.get_loss('test/elbo_discrete')
train_loss_op = tf_models.get_loss('train/elbo')
test_loss_op = tf_models.get_loss('test/elbo')
x_train = tf_models.train_placeholder()
x_test = tf_models.test_placeholder()
next_train_batch = self._train_batch
next_test_batch = self._test_batch
def train(count_steps):
total_elbo = 0.
for idx in range(count_steps):
X_mb = next_train_batch()
_, this_elbo = self.sess.run([train_op, train_loss_op], feed_dict={x_train: X_mb})
total_elbo += this_elbo
return total_elbo / count_steps
def test():
total_loss = 0.
for idx in range(self.test_batches):
X_mb = next_test_batch()
this_loss = self.sess.run(test_loss_op, feed_dict={x_test: X_mb})
total_loss += this_loss
return total_loss / self.test_batches
return train, test
def initialize_hook(self):
# See where the test loss starts
if self._settings['resume_from'] is None:
# Do a test evaluation before any training happens
test_loss = self.test()
self.results['costs_test'] += [test_loss]
else:
test_loss = self.results['costs_test'][-1]
print('epoch {:.3f}, test loss = {:.2f}'.format(self.epoch(), test_loss))
def step_hook(self):
with tf_models.timer.Timer() as train_timer:
train_loss = self.train(self._batches_per_step)
test_loss = self.test()
self.results['times_train'] += [train_timer.interval]
self.results['costs_train'] += [train_loss]
self.results['costs_test'] += [test_loss]
def before_step_hook(self):
pass
def after_step_hook(self):
train_time = self.results['times_train'][-1]
train_loss = self.results['costs_train'][-1]
test_loss = self.results['costs_test'][-1]
examples_per_sec = self._settings['batch_size'] * self._batches_per_step / train_time
sec_per_batch = train_time / self._batches_per_step
print('epoch {:.3f}, train loss = {:.2f}, test loss = {:.2f} ({:.1f} examples/sec)'.format(self.epoch(), train_loss, test_loss, examples_per_sec))
def initialize_results_hook(self):
results = {}
results['costs_train'] = []
results['times_train'] = []
results['costs_test'] = []
return results
|
bncc/pycore
|
pycore/conf_handler.py
|
Python
|
gpl-3.0
| 660
| 0.037879
|
import json_handler
class conf_handler:
__m_conf_path = None
__m_conf = None
def __init__( self, conf_base = "../conf/", conf_name = "configuration.conf" ):
        self.__m_conf_path = conf_base + conf_name
self.__m_conf = json_handler.json_handler(self.__m_conf_path)
    def read_conf( self, field_name ):
if(self.__m_conf == None):
return None
try:
conf_data = self.__m_conf.object_search(field_name)
except:
return None
return conf_data
|
TouK/vumi
|
vumi/transports/truteq/__init__.py
|
Python
|
bsd-3-clause
| 114
| 0
|
"""TruT
|
eq transport
|
."""
from vumi.transports.truteq.truteq import TruteqTransport
__all__ = ['TruteqTransport']
|
HBClab/xnat_BIDS
|
xnat_BIDS/xnat_BIDS.py
|
Python
|
mit
| 16,386
| 0.014219
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import requests
import os
import sys
"""
Purpose:
Download dicoms from xnat and place them into
a BIDs "like" directory structure.
using the xnat rest API to download dicoms.
see here for xnat REST API documentation: (https://wiki.xnat.org/display/XNAT16/Using+the+XNAT+REST+API)
TODO:
1) better error checking
2) add a log to write events to
3) handle conditionals better
4) find a better way to call the script instead of main()
5) add json descriptors according to BIDs format. (not available in API)
    6) revise some ugly formatting
7) don't copy if already completed? (or just have user be cognizant?)
8) parallelize the processing stream (e.g. get all the data first, then download)
9) Make main more modular (add more methods/possibly classes)
    10) Fix error where if a subject has an alpha character in their name I can't filter the subject.
11) Add conversion script?
    12) make the session re-ordering better (ordering based on string instead of date/number)
"""
import requests
import os
import sys
__all__ = ['xnat_init_session','xnat_query_subjects','xnat_query_sessions','xnat_query_scans','xnat_query_dicoms','subject_variables_dictionary']
class xnat_init_session(object):
"""starts the xnat session and allows user to login to a particular project page"""
def __init__(self,username,password,project):
self.url_base = 'https://rpacs.iibi.uiowa.edu/xnat/REST/projects/%s/' % project
self.username = username
self.password = password
self.project = project
def login(self):
login_query = requests.get(self.url_base,auth=(self.username,self.password))
if login_query.ok:
cookie_info = login_query.cookies.get('JSESSIONID')
self.cookie = {'JSESSIONID' : cookie_info}
else:
print('error')
return 1
#def logout(self):
# logout_query = requests.delete(self.url_base,self.cookie)
# if logout_query.ok:
# print('logout successful')
# else:
# print('logout unsuccessful')
# return 1
class xnat_query_subjects(object):
"""get the subject ids from xnat"""
def __init__(self,cookie,url_base,project):
self.cookie=cookie
self.url_base=url_base
self.project=project
def get_subjects(self):
subject_query = requests.get(self.url_base+'subjects', cookies=self.cookie)
if subject_query.ok:
subject_json = subject_query.json()
subject_list_dict = subject_json['ResultSet']['Result']
self.subject_ids = { x['label']:0 for x in subject_list_dict }
def filter_subjects(self,subjects):
import re
#catch and remove subjects with characters in the name
if subjects != "ALL": #if the subject list specifies who to download
missing_xnat_subjects = list(set(subjects) - set([int(x) for x in self.subject_ids.keys()]))
if missing_xnat_subjects:
self.filt_subject_ids = dict.fromkeys(list(set(subjects) - set(missing_xnat_subjects)))
print('xnat does not have data for these subjects: %s' % str(missing_xnat_subjects))
else:
self.filt_subject_ids = dict.fromkeys(subjects)
else:
self.filt_subject_ids = dict.fromkeys([int(x) for x in self.subject_ids.keys()]) #use all the subjects otherwise
class xnat_query_sessions(object):
"""get the sessions from a particular subject"""
def __init__(self,cookie,url_base,project,subject):
self.cookie=cookie
self.url_base=url_base
self.subject=subject
self.project=project
def get_sessions(self,session_labels=None):
import re
session_query = requests.get(self.url_base+'subjects/%s/experiments' % (self.subject), cookies=self.cookie)
if session_query.ok:
session_json = session_query.json()
session_list_dict = session_json['ResultSet']['Result']
#sort the session list (fix issues where they are uploaded in the wrong order)
session_list = [session['label'] for session in session_list_dict]
date_list = [session['date'] for session in session_list_dict]
session_list_comp = [re.sub('_[0-9]', '', session) for session in session_list]
date_list_comp = [session.replace('-','') for session in date_list]
print(str(session_list_comp))
print(str(date_list_comp))
if session_list_comp == date_list_comp:
print('date check passed')
else:
print('mismatch between label and date, exiting')
self.session_ids = False
return 1
session_list.sort()
if session_labels is not None:
num_sessions = int(session_json['ResultSet']['totalRecords'])
num_labels = len(session_labels)
if num_sessions != num_labels:
print('%s has the wrong number of sessions, expected: %s, found: %s' % (self.subject,str(num_labels),str(num_sessions)))
print('getting session info for available sessions (assuming they are in the correct order)')
self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels[0:num_sessions],session_list) }
else:
self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels,session_list) }
else:
#not supported in this script
self.session_ids = { x['label']: 0 for x in session_list_dict }
def filter_sessions(self,sessions):
#updates the session_ids dictionary
if sessions != "ALL":
#find all session that are not a part of the list
pop_list=list(set(self.session_ids.keys()) - set(sessions))
for key in pop_list:
self.session_ids.pop(key) #remove session from analysis
class xnat_query_scans(object):
"""get the scans from a particular session"""
def __init__(self,cookie,url_base,project,subject,session):
self.cookie=cookie
self.url_base=url_base
self.subject=subject
self.session=session
self.project=project
def get_scans(self):
scan_query = requests.get(self.url_base+'subjects/%s/experiments/%s/scans/' % (self.subject,self.session), cookies=self.cookie)
if scan_query.ok:
scan_json = scan_query.json()
scan_list_dict = scan_json['ResultSet']['Result']
self.scan_ids = { x['ID']:[{str(x['series_description']) },x['quality']] for x in scan_list_dict }
#ID is a number like 1,3,300
#type is a name like fMRI FLANKER, PU:Sag CUBE FLAIR, represented as a set?
#^use series_description instead of type to differentiate multiple
#scans as the same type (e.g. DTI 64 dir versus DTI extra B0)
#quality specifies if the scan is usable
class xnat_query_dicoms(object):
"""get the dicoms from a particular scan"""
def __init__(self,cookie,url_base,project,subject,session,scan):
self.cookie=cookie
self.url_base=url_base
self.subject=subject
self.session=session
self.scan=scan
def get_dicoms(self,out_dir):
#http://stackoverflow.com/questions/4917284/extract-files-from-zip-without-keeping-the-structure-using-python-zipfile
import zipfile
from io import BytesIO
import shutil
        dicom_query = requests.get(self.url_base+'subjects/%s/experiments/%s/scans/%s/resources/DICOM/files?format=zip' % (self.subject,self.session,self.scan), cookies=self.cookie)
if dicom_query.ok:
dicom_zip = zipfile.ZipFile(BytesIO(dicom_query.content))
for member in dicom_zip.namelist():
                filename = os.path.basename(member)
if not filename:
|
LamCiuLoeng/fd
|
rpac/widgets/ordering.py
|
Python
|
mit
| 1,705
| 0.017009
|
# -*- coding: utf-8 -*-
'''
Created on 2014-2-17
@author: CL.lam
'''
from sqlalchemy.sql.expression import and_
from rpac.widgets.components import RPACForm, RPACText, RPACCalendarPicker, \
RPACSelect
from rpac.model import ORDER_NEW, ORDER_INPROCESS, ORDER_COMPLETE, qry, \
PrintShop, ORDER_CANCEL
from rpac.model.ordering import ORDER_MANUAL
__all__ = ['order_search_form', ]
def getPrintShop():
return [("", ""), ] + [(unicode(p.id), unicode(p)) for p in qry(PrintShop).filter(and_(PrintShop.active == 0)).order_by(PrintShop.name).all()]
class OrderSearchForm(RPACForm):
fields = [
RPACText("no", label_text = "Job No"),
RPACText("customerpo", label_text = "Family Dollar PO#"),
RPACText("vendorpo", label_text = "Vendor PO"),
RPACCalendarPicker("create_time_from", label_text = "Create Date(from)"),
RPACCalendarPicker("create_time_t
|
o", label_text = "Create Date(to)"),
RPACSelect("status", label_text = "Status", options = [("", ""), (str(ORDER_NEW), "New"),
(str(ORDER_INPROCESS), "In Process"),
(str(ORDER_COMPLETE), "Completed"),
                                                               (str(ORDER_CANCEL), "Cancelled"),
(str(ORDER_MANUAL), "Manual"),
]),
RPACSelect("printShopId", label_text = "Print Shop", options = getPrintShop),
]
order_search_form = OrderSearchForm()
|
eMerzh/Diamond-1
|
src/diamond/handler/g_metric.py
|
Python
|
mit
| 2,760
| 0
|
# coding=utf-8
"""
Emulate a gmetric client for usage with
[Ganglia Monitoring System](http://ganglia.sourceforge.net/)
"""
from Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
"""
Implements the abstract Handler class, sending data the same way that
gmetric does.
"""
    def __init__(self, config=None):
"""
Create a new instance of the GmetricHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if gmetric is None:
logging.error("Failed to load gmetric module")
return
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.port = int(self.config['port'])
        self.protocol = self.config['protocol']
if not self.protocol:
self.protocol = 'udp'
# Initialize
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GmetricHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'protocol': 'udp or tcp',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GmetricHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8651,
'protocol': 'udp',
})
return config
def __del__(self):
"""
Destroy instance of the GmetricHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to a gmond instance
"""
# Just send the data as a string
self._send(metric)
def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = "60"
dmax = "0"
slope = "both"
# FIXME: Badness, shouldn't *assume* double type
metric_type = "double"
units = ""
group = ""
self.gmetric.send(metric_name,
metric.value,
metric_type,
units,
slope,
tmax,
dmax,
group)
def _close(self):
"""
Close the connection
"""
self.gmetric = None
|
danbradham/shadeset
|
shadeset/ui/widgets.py
|
Python
|
mit
| 15,611
| 0
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# Standard library imports
import os
from fnmatch import fnmatch
# Local imports
from . import res
from .. import api, lib, utils
# Third party imports
from .Qt import QtCore, QtGui, QtWidgets
class WindowHeader(QtWidgets.QWidget):
def __init__(self, img, parent=None):
super(WindowHeader, self).__init__(parent)
self.setObjectName("WindowHeader")
self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
self.image = QtWidgets.QLabel()
self.image.setPixmap(QtGui.QPixmap(img))
self.label = QtWidgets.QLabel('ShadeSets')
self.label.setObjectName('h1')
self.layout = QtWidgets.QHBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.image)
self.layout.addWidget(self.label)
self.layout.setStretch(1, 1)
self.setLayout(self.layout)
class ExportForm(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ExportForm, self).__init__(parent=parent)
self.asset = QtWidgets.QListWidget()
self.asset.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding
)
self.suffix = QtWidgets.QLineEdit()
self.preview = QtWidgets.QLabel()
self.selection = QtWidgets.QCheckBox('Selected &Hierarchies')
self.selection.setChecked(True)
self.render_layers = QtWidgets.QCheckBox('&Render Layers')
self.export_button = QtWidgets.QPushButton('&Export Shadeset')
self.attr_prefixes_label = QtWidgets.QLabel('Attribute Prefixes')
self.attr_prefixes_label.setToolTip(
'Space separated list of attribute prefixes to include in export.'
)
self.attr_prefixes = QtWidgets.QLineEdit()
self.attr_prefixes.setText(' '.join(lib.get_export_attr_prefixes()))
self.attrs_label = QtWidgets.QLabel('Attributes')
self.attrs_label.setToolTip(
'Space separated list of attributes to include in export.'
)
self.attrs = QtWidgets.QLineEdit()
self.attrs.setText(' '.join(lib.get_export_attrs()))
options = QtWidgets.QGroupBox()
options_layout = QtWidgets.QVBoxLayout()
options_layout.addWidget(self.selection)
options_layout.addWidget(self.render_layers)
options_layout.addWidget(self.attr_prefixes_label)
options_layout.addWidget(self.attr_prefixes)
options_layout.addWidget(self.attrs_label)
options_layout.addWidget(self.attrs)
options.setLayout(options_layout)
self.layout = QtWidgets.QVBoxLayout()
self.layout.setContentsMargins(20, 20, 20, 20)
self.layout.addWidget(QtWidgets.QLabel('Asset'))
self.layout.addWidget(self.asset)
self.layout.addWidget(QtWidgets.QLabel('Suffix'))
self.layout.addWidget(self.suffix)
self.layout.addWidget(self.preview)
self.layout.addWidget(options)
self.layout.addWidget(self.export_button)
self.setLayout(self.layout)
self.export_button.clicked.connect(self.export)
self.asset.currentItemChanged.connect(self.update_preview)
self.suffix.textChanged.connect(self.update_preview)
self.update_form()
self.update_preview()
def state(self):
asset_item = self.asset.currentItem()
asset = None
if asset_item:
asset = asset_item.asset
return dict(
asset=asset,
suffix=self.suffix.text(),
selection=self.selection.isChecked(),
render_layers=self.render_layers.isChecked(),
attr_prefixes=self.attr_prefixes.text().split(),
attrs=self.attrs.text().split()
)
def add_asset(self, asset):
item = QtWidgets.QListWidgetItem()
item.setText(asset['asset'])
item.asset = asset
self.asset.addItem(item)
def update_form(self):
self.asset.clear()
assets = lib.get_assets(lib.session['project'])
for _, asset in sorted(assets.items()):
self.add_asset(asset)
def update_preview(self):
state = self.state()
if not state['asset']:
self.preview.setText('Select an asset...')
return
name = state['asset']['asset']
if state['suffix']:
name += '_' + state['suffix']
next_publish = lib.get_next_publish(state['asset'], name)
self.preview.setText(next_publish['basename'])
def export(self):
# TODO: move to controller
state = self.state()
# Update export attribute settings
# These are used by CustomAttributesSet
lib.set_export_attrs(state['attrs'])
lib.set_export_attr_prefixes(state['attr_prefixes'])
if not state['asset']:
self.preview.setText('Select an asset...')
return
if state['selection']:
ss = api.gather_hierarchy(render_layers=state['render_layers'])
else:
ss = api.gather(
selection=state['selection'],
render_layers=state['render_layers'],
)
name = state['asset']['asset']
if state['suffix']:
name += '_' + state['suffix']
next_publish = lib.get_next_publish(state['asset'], name)
ss.export(
outdir=next_publish['dirname'],
name=next_publish['basename'].rsplit('.', 1)[0],
)
class ImportForm(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ImportForm, self).__init__(parent=parent)
self.project = QtWidgets.QComboBox()
self.project.setSizeAdjustPolicy(
self.project.AdjustToMinimumContentsLengthWithIcon
)
self.asset = QtWidgets.QListWidget()
self.shadeset = QtWidgets.QListWidget()
self.selection = QtWidgets.QCheckBox('Selected &Hierarchies')
self.selection.setChecked(True)
self.render_layers = QtWidgets.QCheckBox('&Render Layers')
self.apply_button = QtWidgets.QPushButton('&Apply Shadeset')
options = QtWidgets.QGroupBox()
options_layout = QtWidgets.QVBoxLayout()
options_layout.addWidget(self.selection)
options_layout.addWidget(self.render_layers)
options.setLayout(options_layout)
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(20, 20, 20, 20)
self.layout.setHorizontalSpacing(20)
self.layout.setRowStretch(3, 1)
self.layout.addWidget(QtWidgets.QLabel('Project'), 0, 0)
self.layout.addWidget(self.project, 1, 0)
self.layout.addWidget(QtWidgets.QLabel('Asset'), 2, 0)
self.layout.addWidget(self.asset, 3, 0)
self.layout.addWidget(QtWidgets.QLabel('ShadeSet'), 2, 1)
self.layout.addWidget(self.shadeset, 3, 1)
self.layout.addWidget(options, 4, 1)
self.layout.addWidget(self.apply_button, 5, 1)
self.setLayout(self.layout)
self.apply_button.clicked.connect(self.apply)
self.project.activated.connect(self.on_project_changed)
|
self.asset.currentItemChanged.connect(self.on_asset_changed)
self._projects = None
self.update_form()
def state(self):
# Get selected project
project = self.project.currentText()
# Get selected asset
asset_item = self.asset.currentItem()
asset = None
if asset_item:
            asset = asset_item.asset
# Get selected shadeset
shadeset_item = self.shadeset.currentItem()
shadeset = None
if shadeset_item:
shadeset = shadeset_item.publish
return dict(
project=project,
asset=asset,
shadeset=shadeset,
selection=self.selection.isChecked(),
render_layers=self.render_layers.isChecked(),
)
def update_form(self):
self.project.blockSignals(True)
self.project.clear()
for project in sorted(lib.get_projects()):
self.project.addItem(project)
|
zkmake520/ProbabilisticModel
|
NGram/NGram.py
|
Python
|
mit
| 7,676
| 0.046769
|
#here we can use a wrapper to accelerate the whole process; since many texts may be the same, we can save the intermediate results
import operator
from math import log10
import re
import string
import random
import heapq
"""First Part:
Word Segmentation"""
def memory(f):
    #memoize function f
table = {}
def fmemo(*args):
if args not in table:
table[args] = f(*args)
return table[args]
return fmemo
#this memoization step is really important: it reduces the time from 2^n to n^2*L
@memory
def Segment(text):
    #return a list of words that is the best segmentation of the text
#recursive implementation
if not text: return []
candidates = ([first]+Segment(remind) for (first,remind) in Split(text))
    #TODO: actually we can store the probability of each best Segment; there is no need to compute it again
    return max(candidates,key=Pwords) #key specifies a one-argument ordering function
#L parameter is 20 by default; this method returns a list of all possible (first, rem) pairs, len(first) <= L
def Split(text,L=20):
return [(text[:i+1],text[i+1:]) for i in range(min(len(text),L))]
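# Illustrative example (not in the original file): Split('cat') yields
# [('c', 'at'), ('ca', 't'), ('cat', '')], and Segment picks the candidate
# segmentation with the highest Pwords score, e.g. Segment('choosespain')
# typically returns ['choose', 'spain'] with the Google unigram counts.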
# The Naive Bayes probabilities of a sequence of words
def Pwords(words):
return Product([_P(word) for word in words])
def Product(nums):
return reduce(operator.mul,nums)
#P(word) == count(word)/N where N is the Google corpus size; note that the most common ~1/3 million words cover 98% of all tokens
#so we can use only this part of words, and we can eliminate those numbers and punctuations.
def constantWord(word,N):
return 1./N
def avoidLongWord(word,N):
return 10./(N*10**len(word))
class Pdict(dict):
#probability distribution of words estimated from the counts in datafile
def __init__(self,data,N=None,missing=constantWord):
for key,count in data:
self[key] = self.get(key,0) + int(count)
self.N = float(N or sum(self.itervalues()))
self.missing = missing
def __call__(self,key):
if key in self: return float(self[key]/self.N)
else: return self.missing(key,self.N)
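# Illustrative doctest-style example (values assume the constantWord fallback):
#   >>> p = Pdict([('the', 80), ('of', 40)], N=200)
#   >>> p('the')
#   0.4
#   >>> p('unseen')    # falls back to missing(key, N) == 1/N
#   0.005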
def Datafile(name,sep='\t'):
for line in file(name):
yield line.split(sep)
_N = 1024908267229 #Number of tokens
_P = Pdict(Datafile('vocab.txt'),_N,avoidLongWord)
###biagram
##model P(W1:n) = TTk=1:nP(Wk|Wk-1)
def Pwords2(word,pre):
words = pre+' '+word
if words not in _P2:
return _P(word)
else: return _P2(pre+' '+word)/float(_P(pre))
_P2 = Pdict(Datafile('count_2w.txt'),_N)
@memory
def Segment2(text,pre="<S>"):
#return (log(P(words)),words) where words is the best segment
if not text: return (0.0,[])
candidates= [combine(log10(Pwords2(first,pre)),first,Segment2(remind,first)) for first,remind in Split(text)]
return max(candidates)
def combine(Pfirst, first, (Prem,rem)):
return (Pfirst+Prem,[first]+rem)
"""Second Part:
Secret Code"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def Encode(msg,key):
#encode string with the substitution key
return msg.translate(string.maketrans(ul(alphabet),ul(key)))
def ul(text): return text.upper()+text.lower()
def Encode_Shift(msg,n=10):
#encode string with a shift(caesar) cipher
return Encode(msg,alphabet[n:]+alphabet[:n])
#we can use the technique as above use a logPwords to decode without knowing the key
def logPwords(words):
if isinstance(words,str): words=getAllWords(words)
return sum(log10(_P(word)) for word in words)
def getAllWords(words):
#return a list of words in string lowercase,use pattern compare
return re.findall("[a-z]+",words.lower())
def Decode_Shift(msg):
candidates = [Encode_Shift(msg,n) for n in range(len(alphabet))]
return max(candidates,key=logPwords)
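# Illustrative example (not in the original file): Encode_Shift('hello', 2) maps each
# letter two places forward, giving 'jgnnq'; Decode_Shift then tries all 26 shifts and
# keeps the candidate whose words score highest under logPwords, recovering 'hello'.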
#note that above way is too easy
#here we want to substitute using a general cipher, in which any letter can be substituted for any other letter
"""
step1: given a encoded msg,split them into lowercase only words, and combine these words(remove those numbers and punctuations)
step2: from a random x, use local search to get to the local minimum, design the cost function
step3: repeat step2
"""
#use letter n-grams model
P3l = Pdict(Datafile("count_3l.txt"))
P2l = Pdict(Datafile("count_2l.txt"))
def localsearch(x,f,neighbors,steps=10000):
#local search to get a x that maximizes function cost f
fx = f(x)
neighborhood = iter(neighbors(x))
for i in range(steps):
#print i,fx
x2 = neighborhood.next()
fx2 = f(x2)
if fx2 > fx:
x,fx = x2,fx2
neighborhood = iter(neighbors(x))
print x
return x
_cat ="".join
def Shuffle(text):
text = list(text)
random.shuffle(text)
return text
def DecodeGeneral(msg,step=4000,restarts=20):
#decode a general cipher string by using local search
msg = cat(getAllWords(msg)) #just keep words of alphabet,lowercase
print msg
candidates= [localsearch(Encode(msg,key=cat(Shuffle(alphabet))),logP3letters,getNeighbors,step) for i in range(restarts)]
(p,words) = max(Segment2(text) for text in candidates)
return ' '.join(words)
def getNeighbors(msg):
#generate nearby strings
def swap(a,b):
return msg.translate(string.maketrans(a+b,b+a))
    for bigram in heapq.nsmallest(20,set(ngrams(msg,2)),P2l):
print bigram
b1,b2=bigram
for c in alphabet:
if b1 == b2:
                if P2l(c+c) > P2l(bigram): yield swap(c,b1)
else:
if P2l(c+b2) > P2l(bigram): yield swap(c,b1)
if P2l(b1+c) > P2l(bigram): yield swap(c,b2)
while True:
yield swap(random.choice(alphabet), random.choice(alphabet))
cat = ''.join
"""
Spelling Correction:
Find argmax_c P(c|w): given the typed word w, pick the candidate correction c with the highest probability
use Bayes' rule: P(c|w) is proportional to P(w|c) * P(c)
P(c) is straightforward
P(w|c) is called error model,we need more data in http://www.dcs.bbk.ac.uk/~ROGER/corpora.html.
the data is not large enough, so we cannot hope to just look up P(w=thaw|c=thew); chances are slim
we use a trick: ignore the letters that are the same, which leaves P(w=a|c=e), the probability that 'a' was typed
when the correct letter is 'e'
"""
def AllCorrections(text):
#spelling correction for all words in text
return re.sub('[a-zA-Z]+',lambda match:getCorrect(match.group(0)),text)
def getCorrect(word):
#return word that is most likely to be the correct spelling of word
candidates = getEdits(word).items()
c,edit = max(candidates, key=lambda (c,e):Pedit(e)*_P(c))
return c
_Pe=Pdict(Datafile('count_1edit.txt'))
_PSpellError = 1./20
def Pedit(edit):
#the probability of an edit,can be "" or 'a|b' or 'a|b + c|d'
if edit == "": return (1.- _PSpellError)
return _PSpellError*Product(_Pe(e) for e in edit.split("+"))
_Prefix = set(w[:i] for w in _P for i in range(len(w)+1))
#we can optimize it ,we don't need to consider all the edits, since merely of them are in vacabulary,thus we can precomputing all the
#possible prefixes,and split the word into two parts, head and tail, thus the head should be in the prefix
def getEdits(w,dis=2):
#return a dict of {correct:edit} pairs within dis edits of words
res = {}
def editsR(head,tail,d,edits):
def edit(L,R): return edits+[R+'|'+L]
C = head+tail
if C in _P:
e = '+'.join(edits)
if C not in res: res[C] = e
else: res[C] = max(res[C],e,key=Pedit)
if d<=0: return
extensions = [head+c for c in alphabet if head+c in _Prefix] ##given a head, all possible heads
pre = (head[-1] if head else '<') ## previous character
#########Insertion
for h in extensions:
editsR(h,tail,d-1,edit(pre+h[-1],pre))
if not tail: return
########Deletion
editsR(head,tail[1:],d-1,edit(pre,pre+tail[0]))
for h in extensions:
if h[-1] == tail[0]: ##match
editsR(h,tail[1:],d,edits)
else: ##relacement
editsR(h,tail[1:],d-1,edit(h[-1],tail[0]))
##transpose
editsR('',w,dis,[])
return res
|
mozman/ezdxf
|
tests/test_00_dxf_low_level_structs/test_054_dxfattr.py
|
Python
|
mit
| 541
| 0
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.lldxf.attributes import DXFAttr, RETURN_DEFAULT
def test_return_default():
    attr = DXFAttr(
code=62,
default=12,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr.fixer(7) == 12
attr2 = DXFAttr(
code=63,
default=13,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr2.fixer(7) == 13
if __name__ == "__main__":
pytest.main([__file__])
|
Ichaelus/Github-Classifier
|
Application/Models/ClassificationModules/descriptionreponamelstm.py
|
Python
|
mit
| 3,641
| 0.005774
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
from keras.optimizers import Adam, SGD
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class descriptionreponamelstm(ClassificationModule):
"""A basic lstm neural network"""
def __init__(self, num_hidden_layers=3):
ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character")
hidden_size = 300
self.maxlen = 300
# Set output_size
self.output_size = 7 # Hardcoded for 7 classes
model = Sequential()
# Maximum of self.maxlen charcters allowed, each in one-hot-encoded array
model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))
for _ in range(num_hidden_layers):
model.add(Dense(hidden_size))
model.add(Dense(self.output_size))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(),
metrics=['accuracy'])
self.model = model
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
resetWeights(self.model)
def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True):
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras
self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)
def train(self, samples, nb_epoch=200, shuffle=True, verbose=True):
"""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)"""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(oneHot(getLabelIndex(sample)))
train_lables = np.asarray(train_lables)
        train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights())
self.isTrained = True
return train_result
def predictLabel(self, sample):
"""Gibt zurück, wie der Klassifikat
|
or ein gegebenes Sample klassifizieren würde"""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return np.argmax(self.model.predict(sample))
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.model.predict(sample)[0]
return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned
def formatInputData(self, sample):
"""Extract description and transform to vector"""
sd = getDescription(sample)
sd += getName(sample)
# Returns numpy array which contains 1 array with features
return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
|
niranjan94/open-event-orga-server
|
app/models/page.py
|
Python
|
gpl-3.0
| 1,304
| 0.000767
|
from app.models import db
class Page(db.Model):
"""Page model class"""
__tablename__ = 'pages'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
title = db.Column(db.String)
url = db.Column(db.String, nullable=False)
description = db.Column(db.String)
place = db.Column(db.String)
language = db.Column(db.String)
index = db.Column(db.Integer, default=0)
def __init__(self, name=None, title=None, description=None, url=None, place=None, index=None, language=None):
self.name = name
self.description = description
self.title = title
self.url = url
self.place = place
self.language = language
self.index = index
def __repr__(self):
|
return '<Page %r>' % self.name
    def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.name
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'id': self.id,
'name': self.name,
'description': self.description,
'title': self.title,
'url': self.url,
'place': self.place,
'language': self.language
}
|
adrn/gala
|
gala/integrate/tests/test_pyintegrators.py
|
Python
|
mit
| 3,454
| 0.000579
|
"""
Test the integrators.
"""
import os
# Third-party
import pytest
import numpy as np
# Project
from .. import (
LeapfrogIntegrator,
RK5Integrator,
DOPRI853Integrator,
Ruth4Integrator,
)
from gala.tests.optional_deps import HAS_TQDM
# Integrators to test
integrator_list = [
RK5Integrator,
DOPRI853Integrator,
LeapfrogIntegrator,
Ruth4Integrator,
]
# Gradient functions:
def sho_F(t, w, T): # noqa
"""Simple harmonic oscillator"""
q, p = w
wdot = np.zeros_like(w)
wdot[0] = p
wdot[1] = -((2 * np.pi / T) ** 2) * q
return wdot
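# Illustrative note (not part of the original tests): sho_F encodes q' = p,
# p' = -(2*pi/T)**2 * q. In test_sho_forward_backward below T=1.0 and
# dt * n_steps = 1e-4 * 10_000 = 1.0, i.e. exactly one period, so both the forward
# and the backward integration should land back at the initial state [0, 1].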
def forced_sho_F(t, w, A, omega_d):
q, p = w
wdot = np.zeros_like(w)
wdot[0] = p
wdot[1] = -np.sin(q) + A * np.cos(omega_d * t)
return wdot
def lorenz_F(t, w, sigma, rho, beta):
x, y, z, *_ = w
wdot = np.zeros_like(w)
wdot[0] = sigma * (y - x)
wdot[1] = x * (rho - z) - y
wdot[2] = x * y - beta * z
return wdot
def ptmass_F(t, w):
x, y, px, py = w
a = -1.0 / (x * x + y * y) ** 1.5
wdot = np.zeros_like(w)
wdot[0] = px
wdot[1] = py
wdot[2] = x * a
wdot[3] = y * a
return wdot
@pytest.mark.parametrize("Integrator", integrator_list)
def test_sho_forward_backward(Integrator):
integrator = Integrator(sho_F, func_args=(1.0,))
dt = 1e-4
n_steps = 10_000
forw = integrator.run([0.0, 1.0], dt=dt, n_steps=n_steps)
back = integrator.run([0.0, 1.0], dt=-dt, n_steps=n_steps)
    assert np.allclose(forw.w()[:, -1], back.w()[:, -1], atol=1e-6)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass(Integrator):
q0 = np.array([1.0, 0.0])
p0 = np.array([0.0, 1.0])
integrator = Integrator(ptmass_F)
orbit = integrator.run(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e4)
assert np.allclose(orbit.w()[:, 0], orbit.w()[:, -1], atol=1e-6)
@pytest.mark.skipif(not HAS_TQDM, reason="requires tqdm to run this test")
@pytest.mark.parametrize("Integrator", integrator_list)
def test_progress(Integrator):
q0 = np.array([1.0, 0.0])
p0 = np.array([0.0, 1.0])
integrator = Integrator(ptmass_F, progress=True)
_ = integrator.run(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e2)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass_multiple(Integrator):
w0 = np.array(
[[1.0, 0.0, 0.0, 1.0], [0.8, 0.0, 0.0, 1.1], [2.0, 1.0, -1.0, 1.1]]
).T
integrator = Integrator(ptmass_F)
_ = integrator.run(w0, dt=1e-3, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_driven_pendulum(Integrator):
integrator = Integrator(forced_sho_F, func_args=(0.07, 0.75))
_ = integrator.run([3.0, 0.0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_lorenz(Integrator):
sigma, rho, beta = 10.0, 28.0, 8 / 3.0
integrator = Integrator(lorenz_F, func_args=(sigma, rho, beta))
_ = integrator.run([0.5, 0.5, 0.5, 0, 0, 0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_memmap(tmpdir, Integrator):
dt = 0.1
n_steps = 1000
nw0 = 10000
filename = os.path.join(str(tmpdir), "test_memmap.npy")
mmap = np.memmap(filename, mode="w+", shape=(2, n_steps + 1, nw0))
w0 = np.random.uniform(-1, 1, size=(2, nw0))
integrator = Integrator(sho_F, func_args=(1.0,))
_ = integrator.run(w0, dt=dt, n_steps=n_steps, mmap=mmap)
|
ngalin/Illumimateys
|
InterviewPrepScripts/videoCapture.py
|
Python
|
mit
| 760
| 0.021053
|
import numpy as np
import cv2
import time
start = time.time()
end = start + 3 #show video for three seconds - I do this to make sure your stream doesn't get stuffed up by a bad exit. Remove in future.
cap = cv2.VideoCapture(0)
#while time.time() < end: #
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if frame == None:
continue
b,g,r = cv2.split(frame)
b_new = cv2.resize(b,(10,10))
g_new = cv2.resize(g,(10,10))
    r_new = cv2.resize(r,(10,10))
out = cv2.merge((b_new,g_new,r_new))
cv2.imshow('frame',out)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
gzxultra/IM_programming
|
class_ClientMessage.py
|
Python
|
gpl-2.0
| 6,373
| 0.007857
|
# _*_ coding:utf-8 _*_
# Filename:ClientUI.py
# Python online chat client
from socket import *
from ftplib import FTP
import ftplib
|
import socket
import thread
import time
import sys
import codecs
import os
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ClientMessage():
    # set username and password
def setUsrANDPwd(self,usr,pwd):
self.usr=usr
self.pwd=pwd
    # set target user
def setToUsr(self,toUsr):
self.toUsr=toUsr
self.ChatFormTitle=toUsr
    # set IP address and port number
def setLocalANDPort(self,local,port):
self.local = local
self.port = port
def check_info(self):
self.buffer = 1024
self.ADDR=(self.local,self.port)
self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
s=self.serverMsg.split('##')
if s[0]=='Y':
return True
elif s[0]== 'N':
return False
    # receive messages
def receiveMessage(self):
self.buffer = 1024
self.ADDR=(self.local,self.port)
self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
while True:
            # connection established, receive messages from the server
self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
s=self.serverMsg.split('##')
if s[0]=='Y':
#self.chatText.insert(Tkinter.END,'客户端已经与服务器端建立连接......')
return True
elif s[0]== 'N':
#self.chatText.insert(Tkinter.END,'客户端与服务器端建立连接失败......')
return False
elif s[0]=='CLOSE':
i=5
while i>0:
self.chatText.insert(Tkinter.END,'你的账号在另一端登录,该客户端'+str(i)+'秒后退出......')
time.sleep(1)
i=i-1
self.chatText.delete(Tkinter.END)
os._exit(0)
            # friend list
elif s[0]=='F':
for eachFriend in s[1:len(s)]:
print eachFriend
            # a friend came online
elif s[0]=='0':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.chatText.insert(Tkinter.END, theTime+' ' +'你的好友' + s[1]+'上线了')
            # a friend went offline
elif s[0]=='1':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.chatText.insert(Tkinter.END, theTime+' ' +'你的好友' + s[1]+'下线了')
            # a friend sent a message
elif s[0]=='2':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.chatText.insert(Tkinter.END, theTime +' '+s[1] +' 说:\n')
self.chatText.insert(Tkinter.END, ' ' + s[3])
            # a friend sent a file
elif s[0]=='3':
filename=s[2]
f=FTP('192.168.1.105')
f.login('Coder', 'xianjian')
f.cwd(self.usr)
filenameD=filename[:-1].encode("cp936")
try:
f.retrbinary('RETR '+filenameD,open('..\\'+self.usr+'\\'+filenameD,'wb').write)
except ftplib.error_perm:
print 'ERROR:cannot read file "%s"' %file
self.chatText.insert(Tkinter.END,filename[:-1]+' 传输完成')
elif s[0]=='4':
agreement=raw_input(s[1]+'请求加你为好友,验证消息:'+s[3]+'你愿意加'+s[1]+'为好友吗(Y/N)')
if agreement=='Y':
self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##Y',self.ADDR)
elif agreement=='N':
self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##N',self.ADDR)
elif s[0]=='5':
if s[3]=='Y':
print s[2]+'接受了你的好友请求'
elif s[3]=='N':
print s[2]+'拒绝了你的好友请求'
    # send a message
def sendMessage(self):
        # get the message the user entered in the Text widget
message = self.inputText.get('1.0',Tkinter.END)
        # format the current time
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.chatText.insert(Tkinter.END, theTime +' 我 说:\n')
self.chatText.insert(Tkinter.END,' ' + message + '\n')
self.udpCliSock.sendto('2##'+self.usr+'##'+self.toUsr+'##'+message,self.ADDR);
        # clear the message the user entered in the Text widget
self.inputText.delete(0.0,message.__len__()-1.0)
    # send a file
def sendFile(self):
filename = self.inputText.get('1.0',Tkinter.END)
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.chatText.insert(Tkinter.END, theTime +'我' + ' 传文件:\n')
self.chatText.insert(Tkinter.END,' ' + filename[:-1] + '\n')
f=FTP('192.168.1.105')
f.login('Coder', 'xianjian')
f.cwd(self.toUsr)
filenameU=filename[:-1].encode("cp936")
try:
#f.retrbinary('RETR '+filename,open(filename,'wb').write)
            # upload the file to the other user's folder on the server
f.storbinary('STOR ' + filenameU, open('..\\'+self.usr+'\\'+filenameU, 'rb'))
except ftplib.error_perm:
print 'ERROR:cannot read file "%s"' %file
self.udpCliSock.sendto('3##'+self.usr+'##'+self.toUsr+'##'+filename,self.ADDR);
    # add a friend
def addFriends(self):
message= self.inputText.get('1.0',Tkinter.END)
s=message.split('##')
self.udpCliSock.sendto('4##'+self.usr+'##'+s[0]+'##'+s[1],self.ADDR);
    # close the message window and exit
def close(self):
self.udpCliSock.sendto('1##'+self.usr,self.ADDR);
sys.exit()
    # start a thread to receive messages from the server
def startNewThread(self):
thread.start_new_thread(self.receiveMessage,())
def main():
client = ClientMessage()
client.setLocalANDPort('192.168.1.105', 8808)
client.setUsrANDPwd('12073127', '12073127')
client.setToUsr('12073128')
client.startNewThread()
if __name__=='__main__':
main()
|
pisskidney/leetcode
|
medium/89.py
|
Python
|
mit
| 385
| 0
|
"""
89. Gray Code
https://leetcode.com/problems/gray-code/
"""
from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
res = [0]
|
for i in range(n):
res += [x + 2**i for x in reversed(res)]
return res
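# Worked trace of the reflect-and-prefix loop above (illustrative note):
#   n = 2:  [0] -> append reversed + 2**0 -> [0, 1] -> append reversed + 2**1 -> [0, 1, 3, 2]
# Consecutive entries differ in exactly one bit, which is the Gray-code property.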
def main():
s = Solution()
print(s.grayCode(3))
if __name__ == '__main__':
raise(SystemExit(main()))
|
2uller/LotF
|
App/Lib/distutils/util.py
|
Python
|
gpl-2.0
| 19,215
| 0.002082
|
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id$"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = string.find(sys.version, prefix)
if i == -1:
|
return sys.platform
j = string.find(sys.version, ")", i)
look = sys.version[i+len(prefix):j].lower()
if look=='amd64':
return 'win-amd64'
if look=='itanium':
return 'win-ia64'
return sys.platform
# Set for cross builds explicitly
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because a
# bootstrap problem. We use a dict to get an error
# if some suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxint]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
import _osx_support, distutils.sysconfig
osname, release, machine = _osx_support.get_platform_osx(
distutils.sysconfig.get_config_vars(),
osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
# convert_path ()
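# Illustrative usage (not part of the original module): on a POSIX system
# convert_path('foo/bar/baz') returns the string unchanged, while on Windows
# it would return 'foo\\bar\\baz', because the components are rejoined with
# os.path.join and the native separator.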
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
if os.name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os.name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
elif os.name == 'os2':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
else:
raise DistutilsPlatformError, \
"nothing known about platform '%s'" % os.name
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and 'HOME' not in os.environ:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and the
variable is substituted with the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if var_name in local_vars:
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError, var:
raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
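# Illustrative usage (not part of the original module):
#   subst_vars('$name-1.0.tar.gz', {'name': 'pkg'})  ->  'pkg-1.0.tar.gz'
#   subst_vars('$PLAT-build', {})  expands $PLAT from os.environ, which
#   check_environ() guarantees to be populated; unknown names raise ValueError.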
def grok_environment_error (exc, prefix="error: "):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
|
netjunki/trac-Pygit2
|
trac/prefs/api.py
|
Python
|
bsd-3-clause
| 1,093
| 0.00183
|
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
class IPreferencePanelProvider(Interface):
def get_preference_panels(req):
"""Return a list of available preference panels.
The items returned by this function must be tuples of the form
`(panel, label)`.
"""
def render_preference_panel(req, panel):
"""Process a request for a preference pa
|
nel.
This function should return a tuple of the form `(template, data)`,
where `template` is the name of the template to use and `data` is the
data to be passed to the template.
"""
|
quxiaolong1504/cloudmusic
|
cmmedia/views.py
|
Python
|
mpl-2.0
| 1,809
| 0.009913
|
# encoding=utf-8
from django.utils.translation import ugettext_lazy as _
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.viewsets import ModelViewSet
from cmmedia.models import Image, Artist, Album, Music
from cmmedia.serializers import ImageSerializer, ArtistSerializer, AlbumSerializer, MusicSerializer
class ResourceURLView(APIView):
allowed_methods = ['GET']
def get(self,request,*args,**kwargs):
return Response(self.get_url_dispach())
def get_url_dispach(self,format=None):
return {
_(u"images_url").strip(): reverse('resource_image', request=self.request, format=format,),
_(u"artists_url").strip(): reverse('artists-list', request=self.request, format=format),
_(u"albums_url").strip(): reverse('albums-list', request=self.request, format=format),
_(u"musics_url").strip(): reverse('musics-list', request=self.request, format=format),
}
class ImageView(generics.CreateAPIView):
"""
Create and retrieve Image resources.
"""
queryset = Image.objects.all()
serializer_class = ImageSerializer
class ArtistViewSet(ModelViewSet):
"""
Create, delete, update, and retrieve artists.
"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
lookup_field = u'pk'
class AlbumViewSet(ModelViewSet):
"""
Create, delete, update, and retrieve albums.
"""
queryset = Album.objects.all()
serializer_class = AlbumSerializer
lookup_field = u'pk'
class MusicViewSet(ModelViewSet):
"""
Create, delete, update, and retrieve music tracks.
"""
queryset = Music.objects.all()
serializer_class = MusicSerializer
lookup_field = u'pk'
|
joachimmetz/plaso
|
plaso/analysis/tagging.py
|
Python
|
apache-2.0
| 2,057
| 0.006806
|
# -*- coding: utf-8 -*-
"""Analysis plugin that labels events according to rules in a tagging file."""
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.engine import tagging_file
class TaggingAnalysisPlugin(interface.AnalysisPlugin):
"""Analysis plugin that labels events according to rules in a tagging file."""
NAME = 'tagging'
def __init__(self):
"""Initializes a tagging analysis plugin."""
super(TaggingAnalysisPlugin, self).__init__()
self._tagging_rules = None
def ExamineEvent(
self, analysis_mediator, event, event_data, event_data_stream):
"""Labels events according to the rules in a tagging file.
Args:
analysis_mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfVFS.
event (EventObject): event to examine.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
matched_label_names = []
for label_name, filter_objects in self._tagging_rules.items():
for filter_object in filter_objects:
# Note that tagging events based on existing labels is currently
# not supported.
if filter_object.Match(event, event_data, event_data_stream, None):
matched_label_names.append(label_name)
break
if matched_label_names:
event_tag = self._CreateEventTag(event, matched_label_names)
analysis_mediator.ProduceEventTag(event_tag)
for label_name in matched_label_names:
self._analysis_counter[label_name] += 1
self._analysis_counter['event_tags'] += 1
def SetAndLoadTagFile(self, tagging_file_path):
"""Sets the tagging file to be used by the plugin.
Args:
tagging_file_path (str): path of the tagging file.
"""
tagging_file_object = tagging_file.TaggingFile(tagging_file_path)
self._tagging_rules = tagging_file_object.GetEventTaggingRules()
manager.AnalysisPluginManager.RegisterPlugin(TaggingAnalysisPlugin)
|
chrislyon/dj_ds1
|
datatable/admin.py
|
Python
|
gpl-2.0
| 375
| 0.016
|
from django.contrib import admin
# Register your models here.
from datatable.models import Serveur
# Register your models here.
class ServeurAdmin(admin.ModelAdmin):
list_display = ('In_Type', 'In_Nom', 'In_IP', 'statut')
list_filter = ('In_Type', 'In_Nom', 'In_IP', 'statut')
search_fields = ['In_Type', 'In_Nom', 'In_IP' ]
admin.site.register(Serveur, ServeurAdmin)
|
pombredanne/libming
|
test/Font/test03.py
|
Python
|
lgpl-2.1
| 384
| 0.020833
|
#!/usr/bin/python
from ming import *
import sys
srcdir=sys.argv[1]
m = SWFMovie();
font = SWFFont(srcdir + "/../Media/test.ttf")
text = SWFText(1)
w = font.getStringWidth("The quick brown fox jumps over the lazy dog. 1234567890")
text.setFont(font)
text.setColor(0,0,0,255)
text.setHeight(20)
text.moveTo(w,0)
text.addString("|")
m.add(text)
m.nextFrame()
m.save("test03.swf")
|
zhengwsh/InplusTrader_Linux
|
InplusTrader/backtestEngine/model/tick.py
|
Python
|
mit
| 2,130
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Tick(object):
def __init__(self, order_book_id, dt, snapshot):
self._order_book_id = order_book_id
self._dt = dt
self._snapshot = snapshot
@property
def order_book_id(self):
return self._order_book_id
@property
def datetime(self):
return self._dt
@property
def open(self):
return self._snapshot['open']
@property
def last(self):
return self._snapshot['last']
@property
def high(self):
return self._snapshot['high']
@property
def low(self):
return self._snapshot['low']
@property
def prev_close(self):
return self._snapshot['prev_close']
@property
def volume(self):
return self._snapshot['volume']
@property
def total_turnover(self):
return self._snapshot['total_turnover']
@property
def open_interest(self):
return self._snapshot['open_interest']
@property
def prev_settlement(self):
return self._snapshot['prev_settlement']
@property
def bid(self):
return self._snapshot['bid']
@property
def bid_volume(self):
return self._snapshot['bid_volume']
@property
def ask(self):
return self._snapshot['ask']
@property
def ask_volume(self):
return self._snapshot['ask_volume']
@property
def limit_up(self):
return self._snapshot['limit_up']
@property
def limit_down(self):
return self._snapshot['limit_down']
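# Illustrative usage (not from the original project): Tick is a thin read-only
# view over a snapshot dict, e.g.
#   tick = Tick('SOME_ID', some_datetime, {'last': 3180.0, 'volume': 120})
#   tick.last   -> 3180.0
#   tick.volume -> 120
# Reading a field that is missing from the snapshot raises KeyError.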
|
Jecvay/PyGDB
|
PyGdbDb.py
|
Python
|
mit
| 10,962
| 0.003274
|
# coding=utf-8
import pymysql
import PyGdbUtil
class PyGdbDb:
# Initialization: connect to the database
def __init__(self, host, port, dbname, user, passwd):
self.project = None
self.table_prefix = None
try:
self.connection = pymysql.connect(
host=host, port=int(port), user=user, password=passwd, db=dbname, charset="utf8mb4")
self.cursor = self.connection.cursor()
except Exception as e_con:
print '数据库连接错误, 程序中止'
print e_con
exit(-1)
def test(self):
print '正在测试数据库连接'
print '数据库连接: ' + str(self.connection.get_host_info()) if self.connection else '数据库连接异常'
print '数据库游标: ' + str(self.cursor) if self.cursor else '数据库游标异常'
print '数据库连接测试完毕'
print '检查表 aabb 是否存在'
if self.exist_table('aabb'):
print '存在'
else:
print '不存在'
print '初始化项目 example'
self.init_project('example', 'example_')
self.new_project()
PyGdbUtil.log(0, '初始化完毕')
# Initialize the project
def init_project(self, project_name, table_prefix):
self.project = project_name
self.table_prefix = table_prefix
# Check whether the project exists: if not, create it and return True; if it already exists, return False
def new_project(self):
if not self.table_prefix:
PyGdbUtil.log(2, '未指定数据库前缀')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
# Create the database tables
if not exist_project:
self.create_table(self.table_prefix + "BreakPoint(bid INT AUTO_INCREMENT primary key, pid INT, lineNumber INT, funcName TEXT, funcList TEXT)")
self.create_table(self.table_prefix + "PStackSize(pid INT, tid INT, stackSize INT, pass TINYINT)")
self.create_table(self.table_prefix + "FStackSize(pid INT, tid INT, fid INT, stackSize INT)")
self.create_table(self.table_prefix + "FrameVariable(bid INT, varName CHAR, varValue TEXT, varSize INT)")
self.create_table(self.table_prefix + "FuncAdjacencyList(pid INT, tid INT, parFid INT, fid INT, cnt INT)")
self.create_table(self.table_prefix + "Function(fid INT, funcName CHAR(30))")
self.create_table(self.table_prefix + "TestCase(tid INT AUTO_INCREMENT primary key, testStr TEXT)")
self.commit()
return True
else:
return False
def clear_project(self):
if not self.table_prefix:
PyGdbUtil.log(2, '未指定数据库前缀')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
if exist_project:
self.drop_table(self.table_prefix + "BreakPoint")
self.drop_table(self.table_prefix + "PStackSize")
self.drop_table(self.table_prefix + "FStackSize")
self.drop_table(self.table_prefix + "FrameVariable")
self.drop_table(self.table_prefix + "FuncAdjacencyList")
self.drop_table(self.table_prefix + "Function")
self.drop_table(self.table_prefix + "TestCase")
self.commit()
return True
else:
return False
# Insert a test case
def insert_test_case(self, test_str):
self.execute("insert into " + self.table_prefix + "TestCase(testStr) VALUES('%s')" % test_str)
# Insert a program breakpoint
def insert_breakpoint(self, pid, line_number, func_name):
# return  # for testing
PyGdbUtil.log(0, str(pid) + " " + str(line_number) + " " + str(func_name))
self.execute("insert into " + self.table_prefix +
"BreakPoint(pid, lineNumber, funcName) VALUES (%s, %s, '%s')" % (pid, line_number, func_name))
# Insert a function
def inset_function(self, fid, func_name):
self.execute('insert into ' + self.table_prefix +
'Function(fid, funcName) VALUES (%s, "%s")' % (fid, func_name))
# Insert one stack-frame variable record
def insert_frame_var(self, bid, var_name, var_value, var_size):
self.execute('insert into ' + self.table_prefix +
'FrameVariable(bid, varName, varValue, varSize) ' +
'VALUES (%s, "%s", "%s", %s)' % (bid, var_name, var_value, var_size))
# Insert a stack-frame size
def insert_frame_stack_size(self, pid, tid, fid, size):
self.execute('insert into ' + self.table_prefix +
'FStackSize(pid, tid, fid, stackSize) VALUES (%s, %s, %s, %s)' %
(pid, tid, fid, size))
# Insert the maximum stack size
def insert_max_stack_size(self, pid, tid, size):
self.execute('insert into ' + self.table_prefix +
'PStackSize(pid, tid, stackSize) VALUES (%s, %s, %s)' %(pid, tid, size))
# Get fid by function name
def get_function_fid_by_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % str(func_name))
fetch_one = self.cursor.fetchone()
print "Fetched function id: " + str(fetch_one[0])
return fetch_one[0]
# Get fid by bid
def get_fid_by_bid(self, bid):
self.execute('select funcName from ' + self.table_prefix + 'BreakPoint where bid=' + str(bid))
fetch_one = self.cursor.fetchone()
fid = self.get_fid_by_func_name(str(fetch_one[0]))
return fid
# Get fid by function name
def get_fid_by_func_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % (str(func_name)))
return self.cursor.fetchone()[0]
# Insert breakpoints into the database
def info_breakpoint_handler(self, pid, gdb_info_breakpoint):
ss = gdb_info_breakpoint.split("\n")
for s in ss:
if 0 < s.find("breakpoint keep y"):
s2 = s.split()
s3 = s2[8].split(":")
self.insert_breakpoint(pid, s3[1], s2[6])
# Add a directed edge a --> b
def insert_edge(self, pid, tid, func_name_a, func_name_b):
fid_a = self.get_fid_by_func_name(func_name_a)
fid_b = self.get_fid_by_func_name(func_name_b)
try:
self.execute('select cnt from ' + self.table_prefix +
'FuncAdjacencyList where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, fid_a, fid_b))
cnt = int(self.cursor.fetchone()[0]) + 1
self.execute('update ' + self.table_prefix +
'FuncAdjacencyList set cnt=%s where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, cnt, fid_a, fid_b))
except Exception:
cnt = 1
self.execute('insert into ' + self.table_prefix +
'FuncAdjacencyList(pid, tid, parFid, fid, cnt) VALUES (%s, %s, %s, %s, %s)' %
(pid, tid, fid_a, fid_b, cnt))
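# Descriptive note (not in the original source): the try/except above acts as
# an upsert; if an edge (pid, tid, parFid, fid) already has a count it is
# incremented, otherwise a new row with cnt = 1 is inserted.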
# Build the function list from the output of gdb "info b"
def get_function_list(self, break_info):
func_list = []
string_list = break_info.split('\n')[1:]
for line in string_list:
word = line.split()
if len(word) >= 6:
func_list.append(word[6])
return func_list
# Insert the given function list into the database
def insert_function_list(self, func_list):
fid = 0
func_list = list(set(func_list))  # remove duplicates
for func in func_list:
fid += 1
self.inset_function(fid, func)
# Check whether a table exists
def exist_table(self, table_name):
try:
self.execute('select * from ' + table_name)
return True
except Exception:
return False
# Create a table
def create_table(self, table_name):
try:
PyGdbUtil.log(0, "创建表" + table_name)
self.execute("create table if not exists " + table_name)
except Exception as e:
# print e
PyGdbUtil.log(2, "创建表" + table_name + "失败! 请检查数据表前缀是否有非法字符.")
# Drop a table
def drop_table(self, table_name):
try:
PyGdbUtil.log(0, "删除表" + table_name)
self.execute('drop table if exists ' + table_name)
except Exception as e:
print e
PyGdbUtil.log(2, '删除表失败!')
# Get a test case
def get_test_case_by_tid(self, tid):
self.execute("SELECT testStr FROM " + self.table_prefix + "TestCase WHERE tid='%s'" % tid)
return self.cursor.fetchone()[0]
# Get a test case
|
rkryan/seniordesign
|
pi/utils/startup_ip.py
|
Python
|
gpl-2.0
| 865
| 0.006936
|
import subprocess
import smtplib
import socket
from email.mime.text import MIMEText
import datetime
# Change to your own account information
to = 'rk.ryan.king@gmail.com'
gmail_user = 'rk.ryan.king@gmail.com'
gmail_password = 'nzwaahcmdzjchxsz'
smtpserver = smtplib.SMTP('smtp.gmail.com', 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
smtpserver.login(gmail_user, gmail_password)
today = datetime.date.today()
# Very Linux Specific
arg='ip route list'
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src')+1]
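# Illustrative note (not in the original script): 'ip route list' typically
# prints lines such as
#   192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.42
# so the token after 'src' is the host's local IP address; the exact output
# format can vary between distributions.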
my_ip = 'Your ip is %s' % ipaddr
msg = MIMEText(my_ip)
msg['Subject'] = 'IP For RaspberryPi on %s' % today.strftime('%b %d %Y')
msg['From'] = gmail_user
msg['To'] = to
smtpserver.sendmail(gmail_user, [to], msg.as_string())
smtpserver.quit()
|
popazerty/beyonwiz-4.1
|
lib/python/Components/Converter/EGExtraInfo.py
|
Python
|
gpl-2.0
| 10,356
| 0.039784
|
# Based on PliExtraInfo
# Recoded for Black Pole by meo.
# Recoded for EGAMI
from enigma import iServiceInformation
from Components.Converter.Converter import Converter
from Components.Element import cached
from Poll import Poll
class EGExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
self.caid_data = (
("0x1700", "0x17ff", "Beta", "B" ),
( "0x600", "0x6ff", "Irdeto", "I" ),
( "0x100", "0x1ff", "Seca", "S" ),
( "0x500", "0x5ff", "Via", "V" ),
("0x1800", "0x18ff", "Nagra", "N" ),
("0x4ae0", "0x4ae1", "Dre", "D" ),
( "0xd00", "0xdff", "CryptoW", "CW"),
( "0x900", "0x9ff", "NDS", "ND"),
( "0xb00", "0xbff", "Conax", "CO"),
("0x2600", "0x2600", "Biss", "BI")
)
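# Descriptive note (not in the original source): each caid_data entry is
# (lowest caid, highest caid, CA system name, short label); getText() compares
# the active caid against these ranges and renders the short label in the
# CryptoBar string.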
def GetEcmInfo(self):
data = {}
try:
f = open('/tmp/ecm.info', 'rb')
ecm = f.readlines()
f.close()
info = {}
for line in ecm:
d = line.split(':', 1)
if len(d) > 1:
info[d[0].strip()] = d[1].strip()
# 1st values
data['caid'] = '0x00'
data['pid'] = ''
data['provider'] = ''
data['using'] = ''
data['decode'] = ''
data['source'] = ''
data['reader'] = ''
data['address'] = ''
data['address_from'] = ''
data['hops'] = '0'
data['ecm_time'] = '0'
data['caid'] = info.get('caid', '0')
data['provider'] = info.get('provider', '')
if data['provider'] == '':
data['provider'] = info.get('prov', ' ')
data['using'] = info.get('using', '')
data['reader'] = info.get('reader', '')
## CCcam
if data['using']:
data['using'] = info.get('using', '')
data['decode'] = info.get('decode', '')
data['source'] = info.get('source', '')
data['reader'] = info.get('reader', '')
data['address'] = info.get('address', 'Unknown')
data['address_from'] = info.get('from', 'Unknown')
data['hops'] = info.get('hops', '0')
data['ecm_time'] = info.get('ecm time', '?')
elif data['reader']:
data['caid'] = info.get('caid', '')
data['pid'] = info.get('pid', '')
data['provider'] = info.get('prov', '')
data['reader'] = info.get('reader', '')
data['address'] = info.get('from', 'Unknown')
data['hops'] = info.get('hops', '0')
data['ecm_time'] = info.get('ecm time', '?')
else:
data['decode'] = info.get('decode', '')
if data['decode']:
# gbox (untested)
if data['decode'] == 'Network':
cardid = 'id:' + info.get('prov', '')
try:
f = open('/tmp/share.info', 'rb')
share = f.readlines()
f.close()
for line in share:
if cardid in line:
data['address'] = line.strip()
break
else:
data['address'] = cardid
except:
data['address'] = data['decode']
else:
# address = slot or emu
data['address'] = data['decode']
if ecm[1].startswith('SysID'):
data['provider'] = ecm[1].strip()[6:]
if 'CaID 0x' in ecm[0] and 'pid 0x' in ecm[0]:
data['ecm_time'] = info.get('response', '?')
data['caid'] = ecm[0][ecm[0].find('CaID 0x')+7:ecm[0].find(',')]
data['pid'] = ecm[0][ecm[0].find('pid 0x')+6:ecm[0].find(' =')]
data['provider'] = info.get('prov', '0')[:4]
else:
source = info.get('source', None)
if source:
print "Found Source"
#wicardd
if 'CaID 0x' in ecm[0] and 'pid 0x' in ecm[0]:
data['caid'] = ecm[0][ecm[0].find('CaID 0x')+7:ecm[0].find(',')]
data['pid'] = ecm[0][ecm[0].find('pid 0x')+6:ecm[0].find(' =')]
data['provider'] = info.get('prov', '0')[2:]
# MGcam
else:
data['caid'] = info['caid'][2:]
data['pid'] = info['pid'][2:]
data['provider'] = info['prov'][2:]
time = " ?"
for line in ecm:
if line.find('msec') != -1:
line = line.split(' ')
if line[0]:
time = " (%ss)" % (float(line[0])/1000)
continue
data['address'] = source
data['ecm_time'] = time
else:
reader = info.get('reader', '')
if reader:
hops = info.get('hops', None)
if hops and hops != '0':
hops = ' @' + hops
else:
hops = ''
data['hops'] = hops
data['ecm_time'] = info.get('ecm time', '?')
data['address'] = reader
else:
data['hops'] = ""
data['ecm_time'] = ""
data['address'] = ""
except:
data['caid'] = '0x00'
data['provider'] = ''
data['pid'] = ''
data['using'] = ''
data['decode'] = ''
data['source'] = ''
data['reader'] = ''
data['address'] = ''
data['address_from'] = ''
data['hops'] = '0'
data['ecm_time'] = '0'
return data
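# Descriptive note (not in the original source): GetEcmInfo() parses
# /tmp/ecm.info as "key: value" lines and then branches on whichever softcam
# wrote the file (CCcam-style 'using', reader-based entries, 'decode', or the
# source/reader fallbacks); on any error it falls back to the empty defaults
# instead of raising.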
def get_caName(self):
try:
f = open("/etc/egami/.emuname",'r')
name = f.readline().strip()
f.close()
except:
name = "Common Interface"
return name
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
is_crypted = info.getInfo(iServiceInformation.sIsCrypted)
if self.type == "CamName":
return self.get_caName()
elif self.type == "NetInfo":
if is_crypted != 1:
return ''
data = self.GetEcmInfo()
if data['using']:
return "Address: %s Hops: %s Ecm time: %ss" % (data['address'], data['hops'], data['ecm_time'])
elif data['reader']:
return "Address: %s Hops: %s Ecm time: %ss" % (data['address'], data['hops'], data['ecm_time'])
elif data['decode'] == "slot-1" or data['decode'] == "slot-2" or data['decode'] == "Network":
return "Decode: %s Ecm time: %s Pid: %s" % (data['address'], data['ecm_time'], data['pid'])
elif data['address']:
return "Address: %s Ecm time: %s Pid: %s" % (data['address'], data['ecm_time'], data['pid'])
elif self.type == "EcmInfo":
if is_crypted != 1:
return ''
data = self.GetEcmInfo()
return "CaId: %s Provider: %s" % (data['caid'], data['provider'])
elif self.type == "E-C-N":
if is_crypted != 1:
return 'Fta'
data = self.GetEcmInfo()
if data['using']:
if data['using'] == "fta":
return 'Fta'
elif data['using'] == 'emu':
return "Emulator"
elif data['using'] == 'sci':
return "Card"
else:
return "Network"
elif data['reader']:
pos = data['address_from'].find('.')
if pos > 1:
return "Network"
else:
return "Card"
elif data['decode']:
if data['decode'] == 'Network':
return 'Network'
elif data['decode'] == 'slot-1' or data['decode'] == 'slot-2':
return 'Card'
elif data['address']:
if data['address'][:3] == "net":
return 'Network'
elif data['address'][:3] == "emu":
return 'Emulator'
else:
return 'Fta'
return ""
elif self.type == "CryptoBar":
data = self.GetEcmInfo()
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if int(data['caid'], 16) >= int(caid_entry[0], 16) and int(data['caid'], 16) <= int(caid_entry[1], 16):
color="\c0000??00"
else:
color = "\c007?7?7?"
try:
for caid in available_caids:
if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
color="\c00????00"
except:
pass
if res: res += " "
res += color + caid_entry[3]
res += "\c00??????"
return res
return ""
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
if self.type == "CryptoCaidSecaAvailable":
request_caid = "S"
request_selected = False
elif self.type == "CryptoCaidViaAvailable":
request_caid = "V"
request_selected = False
elif self.type == "CryptoCaidIrdetoAvailable":
request_caid = "I"
request_selected = False
elif self.type == "CryptoCaidNDSAvailable":
request_caid = "ND"
request_selected = False
elif self.type == "CryptoCaidConaxAvailable":
request_caid = "CO"
request_selected = False
el
|
kenshay/ImageScript
|
Script_Runner/PYTHON/Lib/lib2to3/tests/test_refactor.py
|
Python
|
gpl-3.0
| 12,405
| 0.000564
|
"""
Unit tests for refactor.py.
"""
import sys
import os
import codecs
import io
import re
import tempfile
import shutil
import unittest
from lib2to3 import refactor, pygram, fixer_base
from lib2to3.pgen2 import token
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
sys.path.append(FIXER_DIR)
try:
_DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
finally:
sys.path.pop()
_2TO3_FIXERS = refactor.get_fixers_from_package("lib2to3.fixes")
class TestRefactoringTool(unittest.TestCase):
def setUp(self):
sys.path.append(FIXER_DIR)
def tearDown(self):
sys.path.pop()
def check_instances(self, instances, classes):
for inst, cls in zip(instances, classes):
if not isinstance(inst, cls):
self.fail("%s are not instances of %s" % (instances, classes))
def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
return refactor.RefactoringTool(fixers, options, explicit)
def test_print_function_option(self):
rt = self.rt({"print_function" : True})
self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
self.assertIs(rt.driver.grammar,
pygram.python_grammar_no_print_statement)
def test_write_unchanged_files_option(self):
rt = self.rt()
self.assertFalse(rt.write_unchanged_files)
rt = self.rt({"write_unchanged_files" : True})
self.assertTrue(rt.write_unchanged_files)
def test_fixer_loading_helpers(self):
contents = ["explicit", "first", "last", "parrot", "preorder"]
non_prefixed = refactor.get_all_fix_names("myfixes")
prefixed = refactor.get_all_fix_names("myfixes", False)
full_names = refactor.get_fixers_from_package("myfixes")
self.assertEqual(prefixed, ["fix
|
_" + name for name in contents])
|
self.assertEqual(non_prefixed, contents)
self.assertEqual(full_names,
["myfixes.fix_" + name for name in contents])
def test_detect_future_features(self):
run = refactor._detect_future_features
fs = frozenset
empty = fs()
self.assertEqual(run(""), empty)
self.assertEqual(run("from __future__ import print_function"),
fs(("print_function",)))
self.assertEqual(run("from __future__ import generators"),
fs(("generators",)))
self.assertEqual(run("from __future__ import generators, feature"),
fs(("generators", "feature")))
inp = "from __future__ import generators, print_function"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp ="from __future__ import print_function, generators"
self.assertEqual(run(inp), fs(("print_function", "generators")))
inp = "from __future__ import (print_function,)"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function"""
self.assertEqual(run(inp), fs(("generators", "print_function")))
invalid = ("from",
"from 4",
"from x",
"from x 5",
"from x im",
"from x import",
"from x import 4",
)
for inp in invalid:
self.assertEqual(run(inp), empty)
inp = "'docstring'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
inp = "# comment\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
def test_get_headnode_dict(self):
class NoneFix(fixer_base.BaseFix):
pass
class FileInputFix(fixer_base.BaseFix):
PATTERN = "file_input< any * >"
class SimpleFix(fixer_base.BaseFix):
PATTERN = "'name'"
no_head = NoneFix({}, [])
with_head = FileInputFix({}, [])
simple = SimpleFix({}, [])
d = refactor._get_headnode_dict([no_head, with_head, simple])
top_fixes = d.pop(pygram.python_symbols.file_input)
self.assertEqual(top_fixes, [with_head, no_head])
name_fixes = d.pop(token.NAME)
self.assertEqual(name_fixes, [simple, no_head])
for fixes in d.values():
self.assertEqual(fixes, [no_head])
def test_fixer_loading(self):
from myfixes.fix_first import FixFirst
from myfixes.fix_last import FixLast
from myfixes.fix_parrot import FixParrot
from myfixes.fix_preorder import FixPreorder
rt = self.rt()
pre, post = rt.get_fixers()
self.check_instances(pre, [FixPreorder])
self.check_instances(post, [FixFirst, FixParrot, FixLast])
def test_naughty_fixers(self):
self.assertRaises(ImportError, self.rt, fixers=["not_here"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
def test_refactor_string(self):
rt = self.rt()
input = "def parrot(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertNotEqual(str(tree), input)
input = "def f(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertEqual(str(tree), input)
def test_refactor_stdin(self):
class MyRT(refactor.RefactoringTool):
def print_output(self, old_text, new_text, filename, equal):
results.extend([old_text, new_text, filename, equal])
results = []
rt = MyRT(_DEFAULT_FIXERS)
save = sys.stdin
sys.stdin = io.StringIO("def parrot(): pass\n\n")
try:
rt.refactor_stdin()
finally:
sys.stdin = save
expected = ["def parrot(): pass\n\n",
"def cheese(): pass\n\n",
"<stdin>", False]
self.assertEqual(results, expected)
def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
options=None, mock_log_debug=None,
actually_write=True):
test_file = self.init_test_file(test_file)
old_contents = self.read_file(test_file)
rt = self.rt(fixers=fixers, options=options)
if mock_log_debug:
rt.log_debug = mock_log_debug
rt.refactor_file(test_file)
self.assertEqual(old_contents, self.read_file(test_file))
if not actually_write:
return
rt.refactor_file(test_file, True)
new_contents = self.read_file(test_file)
self.assertNotEqual(old_contents, new_contents)
return new_contents
def init_test_file(self, test_file):
tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
self.addCleanup(shutil.rmtree, tmpdir)
shutil.copy(test_file, tmpdir)
test_file = os.path.join(tmpdir, os.path.basename(test_file))
os.chmod(test_file, 0o644)
return test_file
def read_file(self, test_file):
with open(test_file, "rb") as fp:
return fp.read()
def refactor_file(self, test_file, fixers=_2TO3_FIXERS):
test_file = self.init_test_file(test_file)
old_contents = self.read_file(test_file)
rt = self.rt(fixers=fixers)
rt.refactor_file(test_file, True)
ne
|
bowen0701/algorithms_data_structures
|
lc0124_binary_tree_maximum_path_sum.py
|
Python
|
bsd-2-clause
| 2,485
| 0.002012
|
"""Leetcode 124. Binary Tree Maximum Path Sum
Hard
URL: https://leetcode.com/problems/binary-tree-maximum-path-sum/
Given a non-empty binary tree, find the maximum path sum.
For this problem, a path is defined as any sequence of nodes from some starting
node to any node in the tree along the parent-child connections. The path must
contain at least one node and does not need to go through the root.
Example 1:
Input: [1,2,3]
1
/ \
2 3
Output: 6
Example 2:
Input: [-10,9,20,null,null,15,7]
-10
/ \
9 20
/ \
15 7
Output: 42
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class SolutionLeftRightMaxPathDownSumRecur(object):
def _maxPathDownSum(self, root):
# Edge case.
if not root:
return 0
# Collect max path sum from root value, down paths from left/right nodes.
# If one branch sum is less than 0, do not connect that branch by max(0, .).
left_max_down_sum = max(0, self._maxPathDownSum(root.left))
right_max_down_sum = max(0, self._maxPathDownSum(root.right))
self.max_path_sum = max(
left_max_down_sum + root.val + right_max_down_sum,
self.max_path_sum)
# Return max path down sum from left or right, including root values.
return root.val + max(left_max_down_sum, right_max_down_sum)
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
Time complexity: O(n).
Space complexity: O(logn) for balanced tree, O(n) for singly linked list.
"""
# Track the running maximum path sum as an instance attribute.
self.max_path_sum = -float('inf')
# Collect max path down sum from left or right and update global max sum.
self._maxPathDownSum(root)
return self.max_path_sum
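# Worked example (illustrative, not part of the original file): for the tree
# [-10, 9, 20, null, null, 15, 7] the best path is 15 -> 20 -> 7, giving
# 15 + 20 + 7 = 42; routing the path through the -10 root instead would score
# 9 - 10 + 35 = 34, so the root is excluded from the answer.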
def main():
# Output: 6
# 1
# / \
# 2 3
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
print SolutionLeftRightMaxPathDownSumRecur().maxPathSum(root)
# Output: 42
# -10
# / \
# 9 20
# / \
# 15 7
root = TreeNode(-10)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
print SolutionLeftRightMaxPathDownSumRecur().maxPathSum(root)
if __name__ == '__main__':
main()
|
rwalk333/straw
|
src/frontend/app/views.py
|
Python
|
mit
| 3,744
| 0.00641
|
#!/usr/bin/python
'''
Define the views for the straw web app
'''
from flask import render_template, session, request, render_template, jsonify, Flask, make_response
from time import sleep
from kafka.common import FailedPayloadsError, NotLeaderForPartitionError, KafkaUnavailableError
import md5, redis
import json, uuid
MAX_RESULTS = 100
EXPIRATION = 1
def attach_views(app):
@app.route('/_fetch_messages')
def fetch_messages():
# get a redis connection
redis_connection = redis.Redis(connection_pool=app.pool)
# update the query list in the view
if session.get('sid') is not None:
matches = redis_connection.lrange(session.get('sid'), 0, MAX_RESULTS)
return jsonify(result=matches)
@app.route('/', methods=['GET'])
def index():
if session.get('sid') is None:
session['sid'] = uuid.uuid4().hex
try:
query_list = session['queries']
except KeyError:
query_list = []
return render_template('index.html', query_list=query_list)
@app.route('/', methods=['POST'])
def search_box_control():
'''add to or clear the list of queries.'''
# we need a session
if session.get('sid') is None:
raise RuntimeError("No session.")
sid = session.get('sid')
# get a redis connection
redis_connection = redis.Redis(connection_pool=app.pool)
# if clear button pressed:
if 'clear' in request.form:
app.clear_user(session.get('sid'))
if session.has_key('queries'):
del session['queries']
return render_template("index.html", query_list=[], session=session)
# create a new query
text = request.form['text'].lower().split(" ")
# generate a unique query id
msg = {"type":"terms-query","terms":text,"minimum-match":len(text)}
data = json.dumps(msg)
qid = md5.new(data).hexdigest()
query_string = " ".join(text)
# add the qid and value to the query lookup store
try:
session['queries'].append(query_string)
except KeyError:
# sanity: clear any queries stored for this user but not in the session.
redis_connection.delete(sid+"-queries")
session['queries'] = [query_string]
# try three times to do the post to kafka.
post_success = False
for i in range(3):
try:
app.producer.send_messages("queries", data)
except (FailedPayloadsError, NotLeaderForPartitionError, KafkaUnavailableError) as e:
# wait a bit and try again
print("Failed to post query {0} to kafka. Try #{1}".format(data, i))
sleep(0.25)
continue
post_success=True
break
if post_success==True:
# subscribe the user to the query
try:
app.user_channels[qid].add(sid)
except KeyError:
app.user_channels[qid] = set([sid])
app.subscriber.add_query(qid)
# link the id to the query text
redis_connection.set(qid, " ".join(text))
# add query to the list of things the user has subscribed to
redis_connection.lpush(sid +"-queries", qid)
# update the query list in the view
query_list = session["queries"]
return render_template("index.html", query_list=query_list)
@app.route('/about')
def about():
return render_template('%s.html' % 'about')
@app.route('/straw.pdf')
def pdf():
return app.send_static_file('assets/straw.pdf')
|
andymckay/zamboni
|
mkt/users/models.py
|
Python
|
bsd-3-clause
| 9,476
| 0.000106
|
from contextlib import contextmanager
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.core import validators
from django.db import models
from django.utils import translation
from django.utils.encoding import smart_unicode
from django.utils.functional import lazy
import commonware.log
import tower
from cache_nuggets.lib import memoize
from tower import ugettext as _
import amo
import amo.models
from amo.urlresolvers import reverse
from mkt.translations.fields import NoLinksField, save_signal
from mkt.translations.query import order_by_translation
log = commonware.log.getLogger('z.users')
class UserForeignKey(models.ForeignKey):
"""
A replacement for models.ForeignKey('users.UserProfile').
This field uses UserEmailField to make form fields key off the user's email
instead of the primary key id. We also hook up autocomplete automatically.
"""
def __init__(self, *args, **kw):
super(UserForeignKey, self).__init__(UserProfile, *args, **kw)
def value_from_object(self, obj):
return getattr(obj, self.name).email
def formfield(self, **kw):
defaults = {'form_class': UserEmailField}
defaults.update(kw)
return models.Field.formfield(self, **defaults)
class UserEmailField(forms.EmailField):
def clean(self, value):
if value in validators.EMPTY_VALUES:
raise forms.ValidationError(self.error_messages['required'])
try:
return UserProfile.objects.get(email=value)
except UserProfile.DoesNotExist:
raise forms.ValidationError(_('No user with that email.'))
def widget_attrs(self, widget):
lazy_reverse = lazy(reverse, str)
return {'class': 'email-autocomplete',
'data-src': lazy_reverse('users.ajax')}
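# Descriptive note (not in the original source): UserEmailField.clean() looks
# the address up and returns the matching UserProfile instance itself, so a
# form using UserForeignKey stores the profile that owns the typed email and
# raises a validation error when no such user exists.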
AbstractBaseUser._meta.get_field('password').max_length = 255
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase,
AbstractBaseUser):
USERNAME_FIELD = 'username'
username = models.CharField(max_length=255, default='', unique=True)
display_name = models.CharField(max_length=255, default='', null=True,
blank=True)
email = models.EmailField(unique=True, null=True)
averagerating = models.CharField(max_length=255, blank=True, null=True)
bio = NoLinksField(short=False)
confirmationcode = models.CharField(max_length=255, default='',
blank=True)
deleted = models.BooleanField(default=False)
display_collections = models.BooleanField(default=False)
display_collections_fav = models.BooleanField(default=False)
emailhidden = models.BooleanField(default=True)
homepage = models.URLField(max_length=255, blank=True, default='')
location = models.CharField(max_length=255, blank=True, default='')
notes = models.TextField(blank=True, null=True)
notifycompat = models.BooleanField(default=True)
notifyevents = models.BooleanField(default=True)
occupation = models.CharField(max_length=255, default='', blank=True)
# This is essentially a "has_picture" flag right now
picture_type = models.CharField(max_length=75, default='', blank=True)
resetcode = models.CharField(max_length=255, default='', blank=True)
resetcode_expires = models.DateTimeField(default=datetime.now, null=True,
blank=True)
read_dev_agreement = models.DateTimeField(null=True, blank=True)
last_login_ip = models.CharField(default='', max_length=45, editable=False)
last_login_attempt = models.DateTimeField(null=True, editable=False)
last_login_attempt_ip = models.CharField(default='', max_length=45,
editable=False)
failed_login_attempts = models.PositiveIntegerField(default=0,
editable=False)
source = models.PositiveIntegerField(default=amo.LOGIN_SOURCE_UNKNOWN,
editable=False, db_index=True)
is_verified = models.BooleanField(default=True)
region = models.CharField(max_length=11, null=True, blank=True,
editable=False)
lang = models.CharField(max_length=5, null=True, blank=True,
editable=False)
class Meta:
db_table = 'users'
def __init__(self, *args, **kw):
super(UserProfile, self).__init__(*args, **kw)
if self.username:
self.username = smart_unicode(self.username)
def __unicode__(self):
return u'%s: %s' % (self.id, self.display_name or self.username)
def save(self, force_insert=False, force_update=False, using=None, **kwargs):
# we have to fix stupid things that we defined poorly in remora
if not self.resetcode_expires:
self.resetcode_expires = datetime.now()
super(UserProfile, self).save(force_insert, force_update, using,
**kwargs)
@property
def is_superuser(self):
return self.groups.filter(rules='*:*').exists()
@property
def is_staff(self):
from mkt.access import acl
return acl.action_allowed_user(self, 'Admin', '%')
def has_perm(self, perm, obj=None):
return self.is_superuser
def has_module_perms(self, app_label):
return self.is_superuser
def get_backend(self):
return 'django_browserid.auth.BrowserIDBackend'
def set_backend(self, val):
pass
backend = property(get_backend, set_backend)
def is_anonymous(self):
return False
def get_url_path(self, src=None):
# See: bug 880767.
return '#'
def my_apps(self, n=8):
"""Returns n apps"""
qs = self.addons.filter(type=amo.ADDON_WEBAPP)
qs = order_by_translation(qs, 'name')
return qs[:n]
@amo.cached_property
def is_developer(self):
return self.addonuser_set.exists()
@property
def name(self):
return smart_unicode(self.display_name or self.username)
@amo.cached_property
def reviews(self):
"""All reviews that are not dev replies."""
qs = self._reviews_all.filter(reply_to=None)
# Force the query to occur immediately. Several
# reviews-related tests hang if this isn't done.
return qs
def anonymize(self):
log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
self.email = None
self.password = "sha512$Anonymous$Password"
self.username = "Anonymous-%s" % self.id # Can't be null
self.display_name = None
self.homepage = ""
self.deleted = True
self.picture_type = ""
self.save()
def check_password(self, raw_password):
# BrowserID does not store a password.
return True
def log_login_attempt(self, successful):
"""Log a user's login attempt"""
self.last_login_attempt = datetime.now()
self.last_login_attempt_ip = commonware.log.get_remote_addr()
if successful:
log.debug(u"User (%s) logged in successfully" % self)
self.failed_login_attempts = 0
self.last_login_ip = commonware.log.get_remote_addr()
else:
log.debug(u"User (%s) failed to log in" % self)
if self.failed_login_attempts < 16777216:
self.failed_login_attempts += 1
self.save()
def purchase_ids(self):
"""
I'm special casing this because we use purchase_ids a lot in the site
and we are not caching empty querysets in cache-machine.
That means that when the site is first launched we are having a
lot of empty queries hit.
We can probably do this in smarter fashion by making cache-machine
cache empty queries on an as need basis.
"""
# Circular import
from mkt.prices.models import AddonPurchase
@memoize(prefix='users:purchase-ids')
def ids(pk):
|
montgok/Python_Class
|
python_test2.py
|
Python
|
apache-2.0
| 31
| 0.096774
|
for i in range (10):
print i
|
vault/bugit
|
user_manage/views.py
|
Python
|
mit
| 4,105
| 0.004629
|
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.forms.models import model_to_dict
from django.forms.util import ErrorList
from django.template import RequestContext
from django.db import IntegrityError
from django.core.exceptions import ObjectDoesNotExist
from common.models import PublicKey, UserProfile
from common.util import get_context
from user_manage.forms import UserForm, PublicKeyForm, ProfileForm
def user_settings(request):
user = request.user
new_pk = PublicKeyForm()
p_form = ProfileForm()
pubkeys = user.publickey_set.all()
profile = UserProfile.objects.get_or_create(user=user)[0]
if not user.is_authenticated():
return HttpResponse("Not authorized", status=401)
if request.method == 'GET':
user_form = UserForm(model_to_dict(user))
p_form = ProfileForm(model_to_dict(profile))
elif request.method == 'POST':
user_form = UserForm(request.POST, instance=user)
if user_form.is_valid():
user = user_form.save()
return redirect('user_settings')
context = get_context(request, { 'user_form' : user_form, 'pk_form': new_pk,
'keys' : pubkeys, 'profile_form': p_form })
return render_to_response('user_manage/user_settings.html', context, context_instance=RequestContext(request))
def user_profile(request):
user = request.user
if not user.is_authenticated():
return HttpResponse("Not authorized", status=401)
if request.method != "POST":
return HttpResponse("Method not allowed", 405)
else:
profile = user.get_profile()
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
profile = form.save()
return redirect('user_settings')
def pubkey_add(request):
user = request.user
if not user.is_authenticated():
return HttpResponse("You should be authenticated....", status=401)
if request.method == 'GET':
form = PublicKeyForm()
elif request.method == 'POST':
form = PublicKeyForm(request.POST)
if form.is_valid():
key = form.save(commit=False)
key.owner = user
try:
key.save()
return redirect('user_settings')
except IntegrityError:
form._errors["description"] = ErrorList(["You have a public key with that name already"])
context = get_context(request, {'form' : form})
return render_to_response('user_manage/key_edit.html', context, context_instance=RequestContext(request))
def pubkey_delete(request, key_id):
pk = get_object_or_404(PublicKey, pk=key_id)
if pk.owner == request.user and request.method == 'POST':
pk.delete()
return redirect('user_settings')
def pubkey_edit(request, key_id=None):
if key_id is not None:
pk = get_object_or_404(PublicKey, pk=key_id)
else:
pk = None
user = request.user
if not user.is_authenticated() or pk.owner != user:
return HttpResponse("Not allowed", status=401)
if request.method == 'POST':
if key_id is not None:
form = PublicKeyForm(request.POST, instance=pk)
else:
form = PublicKeyForm(request.POST)
if form.is_valid():
try:
pk = form.save()
return redirect('user_settings')
except IntegrityError:
form._errors["description"] = ErrorList(["You have a public key with that name already"])
context = get_context(request, {'form' : form, 'pk': pk})
return render_to_response('user_manage/key_edit.html', context, context_instance=RequestContext(request))
elif request.method == 'GET':
form = PublicKeyForm(model_to_dict(pk))
else:
return HttpResponse("Not implemented", status=405)
context = get_context(request, {'form' : form, 'pk' : pk})
return render_to_response('user_manage/key_edit.html', context, context_instance= RequestContext(request))
|
ivanhorvath/openshift-tools
|
openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_yaml_editor/build/src/yedit.py
|
Python
|
apache-2.0
| 6,339
| 0.001735
|
# pylint: skip-file
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
def __init__(self, filename=None, content=None, content_type='yaml'):
self.content = content
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
if self.filename and not self.content:
self.load(content_type=self.content_type)
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def remove_entry(data, key):
''' remove data at location key '''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
curr_data = data
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key):
data = data[dict_key]
continue
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for add
# expected list entry
if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return curr_data
@staticmethod
def get_entry(data, key):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
with open(self.filename, 'w') as yfd:
yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
def read(self):
''' read the file contents '''
# check if it exists
if not self.exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents:
return None
# check if it is yaml
try:
if content_type == 'yaml':
self.yaml_dict = yaml.load(contents)
elif content_type == 'json':
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as _:
# Error loading yaml or json
return None
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
return entry
def delete(self, key):
''' remove key from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if not entry:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, key)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def put(self, key, value):
''' put key, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
result = Yedit.add_entry(self.yaml_dict, key, value)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def create(self, key, value):
''' create a yaml file '''
if not self.exists():
self.yaml_dict = {key: value}
return (True, self.yaml_dict)
return (False, self.yaml_dict)
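# Descriptive note (not in the original source): get/delete/put/create all
# operate on the in-memory yaml_dict; delete, put and create return a
# (changed, yaml_dict) tuple and never touch the file themselves, so write()
# must be called explicitly to persist changes.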
|
google-research/google-research
|
smu/geometry/topology_from_geom_test.py
|
Python
|
apache-2.0
| 8,417
| 0.00202
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tester for topology_from_geometry
from absl.testing import absltest
import numpy as np
import pandas as pd
from google.protobuf import text_format
from smu import dataset_pb2
from smu.geometry import bond_length_distribution
from smu.geometry import smu_molecule
from smu.geometry import topology_from_geom
from smu.parser import smu_utils_lib
# Only needed so we can alter the default bond matching
# For the empirical bond length distributions, the resolution used.
# Which is not necessarily the same as what is used in the production system.
RESOLUTION = 1000
def triangular_distribution(min_dist, dist_max_value, max_dist):
"""Generate a triangular distribution.
Args:
min_dist: minimum X value
dist_max_value: X value of the triangle peak
max_dist: maximum X value
Returns:
Tuple of the X and Y coordinates that represent the distribution.
"""
population = np.zeros(RESOLUTION, dtype=np.float32)
x_extent = max_dist - min_dist
peak_index = int(round((dist_max_value - min_dist) / x_extent * RESOLUTION))
dy = 1.0 / peak_index
for i in range(0, peak_index):
population[i] = (i + 1) * dy
dy = 1.0 / (RESOLUTION - peak_index)
for i in range(peak_index, RESOLUTION):
population[i] = 1.0 - (i - peak_index) * dy
dx = x_extent / RESOLUTION
distances = np.arange(min_dist, max_dist, dx, dtype=np.float32)
return distances, population
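# Illustrative note (not in the original test): triangular_distribution(1.0,
# 1.4, 2.0) returns RESOLUTION x/y samples of a piecewise-linear density that
# rises from near zero at x = 1.0 to 1.0 at the peak x = 1.4 and falls back
# towards zero at x = 2.0; the tests below wrap these samples in an
# EmpiricalLengthDistribution.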
class TestTopoFromGeom(absltest.TestCase):
def test_scores(self):
carbon = dataset_pb2.BondTopology.ATOM_C
single_bond = dataset_pb2.BondTopology.BondType.BOND_SINGLE
double_bond = dataset_pb2.BondTopology.BondType.BOND_DOUBLE
# For testing, turn off the need for complete matching.
smu_molecule.default_must_match_all_bonds = False
all_distributions = bond_length_distribution.AllAtomPairLengthDistributions(
)
x, y = triangular_distribution(1.0, 1.4, 2.0)
df = pd.DataFrame({"length": x, "count": y})
bldc1c = bond_length_distribution.EmpiricalLengthDistribution(df, 0.0)
all_distributions.add(carbon, carbon, single_bond, bldc1c)
x, y = triangular_distribution(1.0, 1.5, 2.0)
df = pd.DataFrame({"length": x, "count": y})
bldc2c = bond_length_distribution.EmpiricalLengthDistribution(df, 0.0)
all_distributions.add(carbon, carbon, double_bond, bldc2c)
bond_topology = text_format.Parse(
"""
atoms: ATOM_C
atoms: ATOM_C
bonds: {
atom_a: 0
atom_b: 1
bond_type: BOND_SINGLE
}
""", dataset_pb2.BondTopology())
geometry = text_format.Parse(
"""
atom_positions {
x: 0.0
y: 0.0
z: 0.0
},
atom_positions {
x: 0.0
y: 0.0
z: 0.0
}
""", dataset_pb2.Geome
|
try())
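    # Move the second atom to 1.4 Angstroms from the first (converted to
    # Bohr), which is the peak of the single-bond length distribution built
    # above.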
geometry.atom_positions[1].x = 1.4 / smu_utils_lib.BOHR_TO_ANGSTROMS
matching_parameters = smu_molecule.MatchingParameters()
matching_parameters.must_match_all_bonds = False
fate = dataset_pb2.Conformer.FATE_SUCCESS
conformer_id = 1001
result = topology_from_geom.bond_topologies_from_geom(
all_distributions, conformer_id, fate, bond_topology, geometry,
        matching_parameters)
self.assertIsNotNone(result)
self.assertLen(result.bond_topology, 2)
self.assertLen(result.bond_topology[0].bonds, 1)
self.assertLen(result.bond_topology[1].bonds, 1)
self.assertEqual(result.bond_topology[0].bonds[0].bond_type, single_bond)
self.assertEqual(result.bond_topology[1].bonds[0].bond_type, double_bond)
self.assertGreater(result.bond_topology[0].topology_score,
result.bond_topology[1].topology_score)
self.assertAlmostEqual(
np.sum(np.exp([bt.topology_score for bt in result.bond_topology])), 1.0)
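    # Topology scores behave as log-probabilities over the candidate
    # topologies, so their exponentials are expected to sum to 1.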
self.assertAlmostEqual(result.bond_topology[0].geometry_score,
np.log(bldc1c.pdf(1.4)))
self.assertAlmostEqual(result.bond_topology[1].geometry_score,
np.log(bldc2c.pdf(1.4)))
def test_multi_topology_detection(self):
"""Tests that we can find multiple versions of the same topology."""
single = dataset_pb2.BondTopology.BondType.BOND_SINGLE
double = dataset_pb2.BondTopology.BondType.BOND_DOUBLE
all_dist = bond_length_distribution.AllAtomPairLengthDistributions()
for bond_type in [single, double]:
all_dist.add(
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_N,
bond_type,
bond_length_distribution.FixedWindowLengthDistribution(
1.0, 2.0, None))
# This conformer is a flat aromatic square of nitrogens. The single and
# double bonds can be rotated such that it's the same topology but
# individual bonds have switched single/double.
conformer = dataset_pb2.Conformer()
conformer.bond_topologies.add(bond_topology_id=123, smiles="N1=NN=N1")
conformer.bond_topologies[0].atoms.extend([
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
])
conformer.bond_topologies[0].bonds.extend([
dataset_pb2.BondTopology.Bond(atom_a=0, atom_b=1, bond_type=single),
dataset_pb2.BondTopology.Bond(atom_a=1, atom_b=2, bond_type=double),
dataset_pb2.BondTopology.Bond(atom_a=2, atom_b=3, bond_type=single),
dataset_pb2.BondTopology.Bond(atom_a=3, atom_b=0, bond_type=double),
])
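    # Place the four nitrogens at the corners of a square with sides of
    # 1.5 Angstroms (converted to Bohr), so every bonded N-N distance sits
    # inside the 1.0-2.0 window accepted above.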
dist15a = 1.5 / smu_utils_lib.BOHR_TO_ANGSTROMS
conformer.optimized_geometry.atom_positions.extend([
dataset_pb2.Geometry.AtomPos(x=0, y=0, z=0),
dataset_pb2.Geometry.AtomPos(x=0, y=dist15a, z=0),
dataset_pb2.Geometry.AtomPos(x=dist15a, y=dist15a, z=0),
dataset_pb2.Geometry.AtomPos(x=dist15a, y=0, z=0),
])
matching_parameters = smu_molecule.MatchingParameters()
result = topology_from_geom.bond_topologies_from_geom(
bond_lengths=all_dist,
conformer_id=123,
fate=dataset_pb2.Conformer.FATE_SUCCESS,
bond_topology=conformer.bond_topologies[0],
geometry=conformer.optimized_geometry,
matching_parameters=matching_parameters)
self.assertLen(result.bond_topology, 2)
    # The returned order is arbitrary, so we figure out which one is marked
# as the starting topology.
starting_idx = min([
        i for i, bt in enumerate(result.bond_topology)
if bt.is_starting_topology
])
other_idx = (starting_idx + 1) % 2
starting = result.bond_topology[starting_idx]
self.assertTrue(starting.is_starting_topology)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 0, 1), single)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 1, 2), double)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 2, 3), single)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 3, 0), double)
other = result.bond_topology[other_idx]
self.assertFalse(other.is_starting_topology)
self.assertEqual(smu_utils_lib.get_bond_type(other, 0, 1), double)
self.assertEqual(sm
|
GeoscienceAustralia/sifra
|
__main__.py
|
Python
|
apache-2.0
| 11,130
| 0.001977
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
"""
title : __main__.py
description : entry point for core sira component
usage : python sira [OPTIONS]
-h Display this usage message
-d [input_directory] Specify the directory with the required
config and model files
-s Run simulation
    -f                  Conduct model fitting. Must be done
                        after a complete run with `-s` flag
-l Conduct loss analysis. Must be done
after a complete run with `-s` flag
-v [LEVEL] Choose `verbose` mode, or choose logging
level DEBUG, INFO, WARNING, ERROR, CRITICAL
python_version : 3.7
"""
from __future__ import print_function
import sys
import numpy as np
np.seterr(divide='print', invalid='raise')
import time
import re
from colorama import init, Fore, Back, Style
init()
import os
import argparse
from sira.logger import configure_logger
import logging
import logging.config
from sira.configuration import Configuration
from sira.scenario import Scenario
from sira.modelling.hazard import HazardsContainer
from sira.model_ingest import ingest_model
from sira.simulation import calculate_response
from sira.modelling.system_topology import SystemTopology
from sira.infrastructure_response import (
write_system_response,
plot_mean_econ_loss,
pe_by_component_class
)
from sira.fit_model import fit_prob_exceed_model
from sira.loss_analysis import run_scenario_loss_analysis
def main():
# define arg parser
parser = argparse.ArgumentParser(
prog='sira', description="run sira", add_help=True)
# [Either] Supply config file and model file directly:
parser.add_argument("-c", "--config_file", type=str)
parser.add_argument("-m", "--model_file", type=str)
# [Or] Supply only the directory where the input files reside
parser.add_argument("-d", "--input_directory", type=str)
# Tell the code what tasks to do
parser.add_argument(
"-s", "--simulation", action='store_true', default=False)
parser.add_argument(
"-f", "--fit", action='store_true', default=False)
parser.add_argument(
"-l", "--loss_analysis", action='store_true', default=False)
parser.add_argument(
"-v", "--verbose", dest="loglevel", type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default="INFO",
help="Choose option for logging level from: \n"+
"DEBUG, INFO, WARNING, ERROR, CRITICAL.")
args = parser.parse_args()
# error handling
if args.input_directory and (args.config_file or args.model_file):
parser.error("--input_directory and [--config_file and --model_file]"
" are mutually exclusive ...")
sys.exit(2)
# error handling
if not any([args.simulation, args.fit, args.loss_analysis]):
parser.error(
"\nAt least one of these three flags is required:\n"
" --simulation (-s) or --fit (-f) or --loss_analysis (-s).\n"
" The options for fit or loss_analysis requires the -s flag, "
" or a previous completed run with the -s flag.")
sys.exit(2)
proj_root_dir = args.input_directory
if not os.path.isdir(proj_root_dir):
print("Invalid path supplied:\n {}".format(proj_root_dir))
sys.exit(1)
proj_input_dir = os.path.join(proj_root_dir, "input")
config_file_name = None
model_file_name = None
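    # Locate the config and model files: a name must start with "config" or
    # "model" (case-insensitive) and end in ".json"; if several files match,
    # the last one in directory-listing order wins. Illustrative names:
    # "config_system_X.json" and "model_system_X.json".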
for fname in os.listdir(proj_input_dir):
confmatch = re.search(r"(?i)^config.*\.json$", fname)
if confmatch is not None:
config_file_name = confmatch.string
modelmatch = re.search(r"(?i)^model.*\.json$", fname)
if modelmatch is not None:
model_file_name = modelmatch.string
if config_file_name is None:
parser.error(
"Config file not found. "
"A valid config file name must begin with the term `config`, "
"and must be a JSON file.\n")
sys.exit(2)
if model_file_name is None:
parser.error(
"Model file not found. "
"A valid model file name must begin the term `model`, "
"and must be a JSON file.\n")
sys.exit(2)
args.config_file = os.path.join(proj_input_dir, config_file_name)
args.model_file = os.path.join(proj_input_dir, model_file_name)
args.output = os.path.join(args.input_directory, "output")
if not os.path.isfile(args.config_file):
parser.error(
"Unable to locate config file "+str(args.config_file)+" ...")
sys.exit(2)
if not os.path.isfile(args.model_file):
parser.error(
"Unable to locate model file "+str(args.model_file)+" ...")
sys.exit(2)
args.output = os.path.join(
os.path.dirname(os.path.dirname(args.config_file)), "output")
try:
if not os.path.exists(args.output):
os.makedirs(args.output)
except Exception:
parser.error(
"Unable to create output folder " + str(args.output) + " ...")
sys.exit(2)
# ---------------------------------------------------------------------
# Set up logging
# ---------------------------------------------------------------------
timestamp = time.strftime('%Y.%m.%d %H:%M:%S')
log_path = os.path.join(args.output, "log.txt")
configure_logger(log_path, args.loglevel)
rootLogger = logging.getLogger(__name__)
print("\n")
rootLogger.info(Fore.GREEN +
'Simulation initiated at: {}\n'.format(timestamp) +
Fore.RESET)
# ---------------------------------------------------------------------
# Configure simulation model.
# Read data and control parameters and construct objects.
# ---------------------------------------------------------------------
config = Configuration(args.config_file, args.model_file, args.output)
scenario = Scenario(config)
hazards = HazardsContainer(config)
infrastructure = ingest_model(config)
# ---------------------------------------------------------------------
# SIMULATION
# Get the results of running a simulation
# ---------------------------------------------------------------------
# response_list = [
# {}, # [0] hazard level vs component damage state index
# {}, # [1] hazard level vs infrastructure output
# {}, # [2] hazard level vs component response
# {}, # [3] hazard level vs component type response
# [], # [4] array of infrastructure output per sample
# [], # [5] array of infrastructure econ loss per sample
# {}, # [6] hazard level vs component class dmg level pct
# {}] # [7] hazard level vs component class expected damage index
if args.simulation:
response_list = calculate_response(hazards, scenario, infrastructure)
# ---------------------------------------------------------------------
# Post simulation processing.
# After the simulation has run the results are aggregated, saved
# and the system fragility is calculated.
# ---------------------------------------------------------------------
write_system_response(response_list, infrastructure, scenario, hazards)
economic_loss_array = response_list[5]
plot_mean_econ_loss(scenario, economic_loss_array, hazards)
if config.HAZARD_INPUT_METHOD == "hazard_array":
pe_by_component_class(
response_list, infrastructure, scenario, hazards)
# ---------------------------------------------------------------------
# Visualizations
# Construct visualization for system topology
# ---------------------------------------------------------------------
sys_topology_view = SystemTopology(infrastructure, scenario)
sys_topolo
|