| repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
3v1n0/pywws | src/pywws/__init__.py | Python | gpl-2.0 | 67 | 0 | __version__ = '19.10.0.cwop'
_release = '1665'
_commit = 'd22c19c'
|
HybridF5/jacket | jacket/tests/compute/unit/virt/disk/mount/test_api.py | Python | apache-2.0 | 7,718 | 0.001166 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.compute import test
from jacket.compute.virt.disk.mount import api
from jacket.compute.virt.disk.mount import block
from jacket.compute.virt.disk.mount import loop
from jacket.compute.virt.disk.mount import nbd
from jacket.compute.virt.image import model as imgmodel
PARTITION = 77
ORIG_DEVICE = "/dev/null"
AUTOMAP_PARTITION = "/dev/nullp77"
MAP_PARTITION = "/dev/mapper/nullp77"
class MountTestCase(test.NoDBTestCase):
def setUp(self):
super(MountTestCase, self).setUp()
def _test_map_dev(self, partition):
mount = api.Mount(mock.sentinel.image, mock.sentinel.mount_dir)
mount.device = ORIG_DEVICE
mount.partition = partition
mount.map_dev()
return mount
@mock.patch('compute.utils.trycmd')
def _test_map_dev_with_trycmd(self, partition, trycmd):
trycmd.return_value = [None, None]
mount = self._test_map_dev(partition)
self.assertEqual(1, trycmd.call_count) # don't care about args
return mount
def _exists_effect(self, data):
def exists_effect(filename):
try:
v = data[filename]
if isinstance(v, list):
if len(v) > 0:
return v.pop(0)
self.fail("Out of items for: %s" % filename)
return v
except KeyError:
self.fail("Unexpected call with: %s" % filename)
return exists_effect
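    # Illustrative (added): callers hand _exists_effect a dict mapping each
    # path os.path.exists() may be asked about to either a fixed bool or a
    # list of bools popped one per call, e.g. {MAP_PARTITION: [False, True]}
    # makes the mapping "appear" only on the second check, as in
    # test_map_dev_good below.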
def _check_calls(self, exists, filenames):
self.assertEqual([mock.call(x) for x in filenames],
exists.call_args_list)
@mock.patch('os.path.exists')
def test_map_dev_partition_search(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True})
mount = self._test_map_dev(-1)
self._check_calls(exists, [ORIG_DEVICE])
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_good(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: [False, True]})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION])
self.assertEqual("", mount.error)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_error(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: False})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION])
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_automap(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, AUTOMAP_PARTITION])
self.assertEqual(AUTOMAP_PARTITION, mount.mapped_device)
self.assertTrue(mount.automapped)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_else(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(None)
self._check_calls(exists, [ORIG_DEVICE])
self.assertEqual(ORIG_DEVICE, mount.mapped_device)
self.assertFalse(mount.automapped)
self.assertTrue(mount.mapped)
def test_instance_for_format_raw(self):
image = imgmodel.LocalFileImage("/some/file.raw",
imgmodel.FORMAT_RAW)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_format_qcow2(self):
image = imgmodel.LocalFileImage("/some/file.qcows",
imgmodel.FORMAT_QCOW2)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_format_block(self):
image = imgmodel.LocalBlockImage(
"/dev/mapper/instances--instance-0000001_disk",)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, block.BlockMount)
def test_instance_for_device_loop(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/loop0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_loop_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/loop0p1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_nbd(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/nbd0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
        self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_nbd_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/nbd0p1'
        inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_block(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/mapper/instances--instance-0000001_disk'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
    def test_instance_for_device_block_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/instances--instance-0000001_diskp1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
|
patsissons/Flexget | flexget/plugins/metainfo/imdb_url.py | Python | mit | 1,526 | 0.002621 | from __future__ import unicode_literals, division, absolute_import
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.imdb import extract_id, make_url
log = logging.getLogger('metainfo_imdb_url')
class MetainfoImdbUrl(object):
"""
Scan entry information for imdb url.
"""
schema = {'type': 'boolean'}
def on_task_metainfo(self, task, config):
# check if disabled (value set to false)
if 'scan_imdb' in task.config:
            if not task.config['scan_imdb']:
return
for entry in task.entries:
# Don't override already populated imdb_ids
if entry.get('imdb_id', eval_lazy=False):
continue
            if 'description' not in entry:
continue
urls = re.findall(r'\bimdb.com/title/tt\d+\b', entry['description'])
# Find unique imdb ids
            imdb_ids = list(filter(None, set(extract_id(url) for url in urls)))
if not imdb_ids:
continue
if len(imdb_ids) > 1:
log.debug('Found multiple imdb ids; not using any of: %s' % ' '.join(imdb_ids))
continue
entry['imdb_id'] = imdb_ids[0]
entry['imdb_url'] = make_url(entry['imdb_id'])
log.debug('Found imdb url in description %s' % entry['imdb_url'])
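# Illustrative example (added, not part of the plugin): for a description
# containing "http://www.imdb.com/title/tt0114814/", the findall above yields
# ['imdb.com/title/tt0114814'] and extract_id() reduces that to the bare id
# ('tt0114814' here), which then populates entry['imdb_id'] and
# entry['imdb_url'].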
@event('plugin.register')
def register_plugin():
plugin.register(MetainfoImdbUrl, 'scan_imdb', builtin=True, api_ver=2)
|
gscarella/pyOptFEM | pyOptFEM/common.py | Python | gpl-3.0 | 5,804 | 0.037216 | from scipy import sparse
import matplotlib.pyplot as plt
import os, errno, ctypes
from numpy import log
def NormInf(A):
"""This function returns the norm Inf of a *Scipy* sparse Matrix
:param A: A *Scipy* sparse matrix
:returns: norm Inf of A given by :math:`\| A\|_\infty=\max_{i,j}(|A_{i,j}|)`.
"""
if (A.data.shape[0]==0):
return 0
else:
return max(abs(A.data))
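# Example (added for illustration), using the sparse import above:
#   A = sparse.csr_matrix([[1.0, -4.0], [0.0, 2.0]])
#   NormInf(A)  # -> 4.0, the largest entry in absolute value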
def showSparsity(M):
    # matplotlib.pyplot is imported as plt at module level
plt.spy(M, precision=1e-8, marker='.', markersize=3)
plt.show()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def BasesChoice(Num):
if Num==0:
ext='BaBa'
cbase='global alternate numbering with local alternate numbering'
elif Num==1:
ext='BaBb'
cbase='global block numbering with local alternate numbering'
elif Num==2:
ext='BbBa'
cbase='global alternate numbering with local block numbering'
elif Num==3:
ext='BbBb'
cbase='global block numbering with local block numbering'
return ext,cbase
def PrintResultsSphinx(versions,LN,Lnq,Lndof,T):
nV=len(versions)
nN=len(LN)
Sep1='+{:-^8}'.format("")*3 + '+{:-^14}'.format("")*nV+'+'
Sep2='+{:=^8}'.format("")*3 + '+{:=^14}'.format("")*nV+'+'
Sep3='|{:^8}'.format("")*3 + '+{:-^14}'.format("")*nV+'+'
Tit='|{:^8}'.format('N')+'|{:^8}'.format('nq')+'|{:^8}'.format('ndof')
for i in range(0,nV):
Tit+='|{:^14}'.format(versions[i])
Tit+='|'
print(Sep1)
print(Tit)
print(Sep2)
for n in range(0,nN):
S1='|{:^8}'.format('%d' % LN[n])+'|{:^8}'.format('%d' % Lnq[n])+'|{:^8}'.format('%d' % Lndof[n])
S2='|{:^8}'.format("")*3
for v in range(0,nV):
S1+='|{:^14}'.format('%.4f(s)' % T[n,v])
if (T[n,0]<1e-6):
S2+='|{:^14}'.format('x%s' % ('NaN'))
else:
S2+='|{:^14}'.format('x%4.2f' % (T[n,v]/T[n,0]))
S1+='|'
S2+='|'
print(S1)
print(Sep1)
print(S2)
print(Sep1)
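# Shape of the emitted Sphinx grid table, for illustration (values invented),
# with a single version column: one timings row and one speedup row per size.
#
#   +--------+--------+--------+--------------+
#   |   N    |   nq   |  ndof  |  version_1   |
#   +========+========+========+==============+
#   |   10   |  121   |  242   |  0.0123(s)   |
#   +--------+--------+--------+--------------+
#   |        |        |        |    x1.00     |
#   +--------+--------+--------+--------------+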
def PrintResultsLatexTabular(FileName,versions,LN,Lnq,Lndof,T):
nV=len(versions)
nN=len(LN)
fp = open(FileName, 'wt')
fp.write(format('\\begin{tabular}{@{}|r|r||*{%d}{@{}c@{}|}@{}}\n' % nV))
fp.write(' \\hline\n')
fp.write(' $n_q$ & $n_{dof}$')
for v in range(0,nV):
fp.write(' & '+versions[v])
fp.write(' \\\\ \\hline \\hline\n')
for n in range(0,nN):
fp.write(format(' $%d$ & $%d$ ' % (Lnq[n],Lndof[n])))
for v in range(0,nV):
if T[n,0]<1e-8:
fp.write(format('& \\begin{tabular}{c} %.3f (s) \\\\ \\texttt{x %s} \\end{tabular} ' %(T[n,v],'NaN')))
else:
fp.write(format('& \\begin{tabular}{c} %.3f (s) \\\\ \\texttt{x %.3f} \\end{tabular} ' %(T[n,v],T[n,v]/T[n,0])))
fp.write('\\\\ \\hline\n')
fp.write('\\end{tabular}')
def checkVersions(versions,VersionList):
for i in range(0,len(versions)):
if versions[i] not in VersionList:
return False
return True
def plotBench(versions,Lndof,T):
import matplotlib.pyplot as plt
nV=len(versions)
if T.min()<1e-8:
return 0
plt.loglog(Lndof,T[0,0]*Lndof/Lndof[0],'k--',label="$O(n)$")
plt.loglog(Lndof,T[0,0]*Lndof*log(Lndof)/(Lndof[0]*log(Lndof[0])),'k.-',label="$O(nlog(n))$")
for i in range(1,nV):
plt.loglog(Lndof,T[0,i]*Lndof/Lndof[0],'k--')
plt.loglog(Lndof,T[0,i]*Lndof*log(Lndof)/(Lndof[0]*log(Lndof[0])),'k.-')
for i in range(0,nV):
plt.loglog(Lndof,T[:,i],label=versions[i])
#plt.legend(loc='lower right')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.grid()
plt.xlabel('$n=n_{dof}$')
plt.ylabel('cputime(s)')
if nV<=3:
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=nV+2, mode="expand", borderaxespad=0.)
else:
plt.legend(loc='upper left')
return plt
def printReport(FileName,assembly,Release):
fp = open(FileName+'_report.tex', 'wt')
basename=os.path.basename(FileName)
PWD=os.path.realpath('.')
fp.write('\\documentclass{article}\n');
fp.write(format('\\input{%s/report.sty}\n' % PWD));
fp.write(format('\\title{Automatic bench report : \\texttt{%s} functions under Python (%s) }\n' % (assembly,Release)))
fp.write('\\begin{document}\n');
fp.write('\\maketitle\n');
    fp.write(format('\\inputtabular{%s}\n{cputimes and speedup}\n\n' % (basename+'.tex')))
    fp.write(format('\\imageps{%s}{0.5}\n' % (basename+'.eps')))
fp.write('\\end{document}\n')
class memoryCheck():
"""Checks memory of a given system"""
def __init__(self):
if os.name == "posix":
self.value = self.linuxRam()
elif os.name == "nt":
self.value = self.windowsRam()
else:
print("I only work with Win or Linux :P")
def windowsRam(self):
"""Uses Windows API to check RAM in this OS"""
kernel32 = ctypes.windll.kernel32
        c_ulong = ctypes.c_ulong
class MEMORYSTATUS(ctypes.Structure):
_fields_ = [
("dwLength", c_ulong),
("dwMemoryLoad", c_ulong),
("dwTotalPhys", c_ulong),
("dwAvailPhys", c_ulong),
("dwTotalPageFile", c_ulong),
("dwAvailPageFile", c_ulong),
("dwTotalVirtual", c_ulong),
("dwAvailVirtual", | c_ulong)
]
memoryStatus = MEMORYSTATUS()
memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUS)
kernel32.GlobalMemoryStatus(ctypes.byref(memoryStatus))
return int(memoryStatus.dwTotalPhys/1024**2)
def linuxRam(self):
"""Returns the RAM of a linux system"""
totalMemory = os.popen("free -m").readlines()[1].split()[1]
return int(totalMemory)
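    # Illustrative usage (added):
    #   mem = memoryCheck()
    #   print("detected %d MB of RAM" % mem.value)
    # Both branches above store the total physical memory in megabytes.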
|
stonebig/bokeh | bokeh/embed/notebook.py | Python | bsd-3-clause | 3,803 | 0.0071 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..core.templates import DOC_NB_JS
from ..core.json_encoder import serialize_json
from ..model import Model
from ..util.string import encode_utf8
from .elements import div_for_render_item
from .util import FromCurdoc, OutputDocumentFor, standalone_docs_json_and_render_items
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
    'notebook_content',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def notebook_content(model, notebook_comms_target=None, theme=FromCurdoc):
''' Return script and div that will display a Bokeh plot in a Jupyter
Notebook.
The data for the plot is stored directly in the returned HTML.
Args:
model (Model) : Bokeh object to render
notebook_comms_target (str, optional) :
A target name for a Jupyter Comms object that can update
the document that is rendered to this notebook div
theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
script, div, Document
.. note::
Assumes :func:`~bokeh.io.notebook.load_notebook` or the equivalent
has already been executed.
'''
if not isinstance(model, Model):
raise ValueError("notebook_content expects a single Model instance")
# Comms handling relies on the fact that the new_doc returned here
# has models with the same IDs as they were started with
with OutputDocumentFor([model], apply_theme=theme, always_new=True) as new_doc:
(docs_json, [render_item]) = standalone_docs_json_and_render_items([model])
div = div_for_render_item(render_item)
render_item = render_item.to_json()
if notebook_comms_target:
render_item["notebook_comms_target"] = notebook_comms_target
script = DOC_NB_JS.render(
docs_json=serialize_json(docs_json),
render_items=serialize_json([render_item]),
)
return encode_utf8(script), encode_utf8(div), new_doc
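# Illustrative usage (added; not part of Bokeh itself): inside a notebook
# cell, assuming `p` is a figure and load_notebook() has already run:
#
#   script, div, doc = notebook_content(p)
#
# `div` is the placeholder markup and `script` is the JS that renders the
# document into it; how they are displayed is up to the caller.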
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/distutils/fancy_getopt.py | Python | mit | 17,855 | 0.00112 | """distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
__revision__ = "$Id: fancy_getopt.py 58495 2007-10-16 18:12:55Z guido.van.rossum $"
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = lambda s: s.replace('-', '_')
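# (Added example: longopt_xlate('dry-run') yields 'dry_run'; get_attr_name()
# below relies on this to turn long option names into attribute names.)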
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__(self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have 3 or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
def _build_index(self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table(self, option_table):
self.option_table = option_table
self._build_index()
def add_option(self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError(
"option conflict: already an option '%s'" % long_option)
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option(self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name(self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return longopt_xlate(long_option)
def _check_alias_dict(self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias))
if opt not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt))
def set_aliases(self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases(self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table(self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError("invalid option tuple: %r" % (option,))
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError(("invalid long option '%s': "
"must be a string of length >= 2") % long)
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError("invalid short option '%s': "
"must a single character or None" % short)
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is option is a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid negative alias '%s': "
| "aliased option '%s' takes a value"
% (long, alias_to))
self.long_opts[-1] = long # XXX redundant?!
                self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
|
the-new-sky/BlogInPy | markdown/extensions/smartLegend.py | Python | gpl-3.0 | 10,615 | 0.009232 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.blockprocessors import BlockProcessor
import re
from markdown import util
import xml.etree.ElementTree as ET
import copy
from markdown.inlinepatterns import IMAGE_LINK_RE
class InFigureParser(object):
def transform(self, parent, element, legend, index, InP = False):
if InP:
lelems = list(element.iter())
oldImg = lelems[-1]
element.remove(oldImg)
else:
oldImg = element
nFig = util.etree.Element("figure")
nFigCaption = util.etree.Element("figcaption")
contentLegend = legend.items()
for el in legend:
legend.remove(el)
nFigCaption.append(el)
nFig.append(oldImg)
nFig.append(nFigCaption)
parent.remove(element)
parent.remove(legend)
parent.insert(index, nFig)
class FigureParser(InFigureParser):
def __init__(self, ignoringImg):
InFigureParser.__init__(self)
self.ignoringImg = ignoringImg
self.ree = re.compile(r"^" + IMAGE_LINK_RE + r"(\n|$)")
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
#print repr(element.text)
return (type == "unknown" or type == "Figure") \
and element.tag=="p" \
and( ( element.text is not None \
and self.ree.search(element.text)) \
or ( (element.text is None or element.text.strip() == "") \
and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \
and lelems[-1].tag == "img" \
and (lelems[-1].attrib["src"] not in self.ignoringImg)))
def transform(self, parent, element, legend, index):
InFigureParser.transform(self, parent, element, legend, index, True)
class EquationParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
return (type == "unknown" or type == "Equation") \
and element.tag=="p" \
and (element.text is None or element.text.strip() == "") \
and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \
and lelems[-1].tag == "mathjax"
def transform(self, parent, element, legend, index):
InFigureParser.transform(self, parent, element, legend, index, True)
class CodeParser(InFigureParser):
def __init__(self, md):
self.md = md
def detect(self, element, type):
if element == None:
return False
if (type == "unknown" or type == "Code") and element.tag=="p" :
hs = self.md.htmlStash
for i in range(hs.html_counter):
if element.text == hs.get_placeholder(i) :
Teste = ET.fromstring(hs.rawHtmlBlocks[i][0].encode('utf-8'))
if Teste is not None and Teste.tag=="table" and "class" in Teste.attrib and Teste.attrib["class"] == "codehilitetable":
return True
else:
return False
return False
class QuoteParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
return (type == "unknown" or type == "Source") and element.tag=="blockquote"
class TableParser(object):
def detect(self, element, type):
if element == None:
return False
return (type == "unknown" or type == "Table") and element.tag=="table"
def transform(self, parent, element, legend, index):
parent.remove(legend)
cap = util.etree.Element('caption')
contentLegend = legend.items()
for el in legend:
legend.remove(el)
cap.append(el)
element.insert(0, cap)
class VideoParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
return (type == "unknown" or type == "Video") \
and element.tag=="iframe"
class SmartLegendProcessor(Treeprocessor):
def __init__(self, parser, configs, md):
Treeprocessor.__init__(self, parser)
self.configs = configs
self.processors = ( FigureParser(configs["IGNORING_IMG"]),
EquationParser(),
CodeParser(md),
TableParser(),
VideoParser(),
QuoteParser())
def run(self, root):
root = self.parse_legend(root)
root = self.parse_autoimg(root)
return root
def parse_legend(self, root):
elemsToInspect = [root]
while len(elemsToInspect) > 0:
elem = elemsToInspect.pop()
Restart=True
while Restart:
Restart = False
precedent = None
i=0
                for nelem in elem:
if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect:
elemsToInspect.append(nelem)
if nelem.tag == "customlegend" and precedent is not None : # and len(list(nelem.itertext())) == 0 :
proc = self.detectElement(precedent, nelem.attrib["type"])
if proc is not None:
                            proc.transform(elem, precedent, nelem, i-1)
Restart = True
break
precedent = nelem
i+=1
return root
def parse_autoimg(self, root):
elemsToInspect = [root]
while len(elemsToInspect) > 0:
elem = elemsToInspect.pop()
Restart=True
while Restart:
Restart = False
i=0
for nelem in elem:
if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect:
elemsToInspect.append(nelem)
#Auto Legend for image
if nelem.tag == 'p' and len(list(nelem.itertext())) == 0 :
lelems = list(nelem.iter())
if (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is nelem)) \
and lelems[-1].tag == "img" \
and lelems[-1].attrib["alt"] != "" \
and not (lelems[-1].attrib["src"] in self.configs["IGNORING_IMG"]):
oldImg = lelems[-1]
nelem.remove(oldImg)
nFig = util.etree.Element("figure")
nFigCaption = util.etree.Element("figcaption")
nFigCaption.text = oldImg.attrib["alt"]
oldImg.attrib["alt"]=""
nFig.append(oldImg)
nFig.append(nFigCaption)
nelem.insert(i-1, nFig)
Restart = True
break
i+=1
return root
def detectElement(self, elem, legend):
for proc in self.processors:
if proc.detect(elem, legend) :
return proc
return None
class LegendProcessor(BlockProcessor):
def __init__(self, parser, md, configs):
BlockProcessor.__init__(self, parser)
self.md = md
self.configs = configs
self.processors = ( FigureParser(configs["IGNORING_IMG"]),
EquationParser(),
CodeParser(md),
TableParser(),
VideoParser(),
QuoteParser())
self.RE = re.compile(r'(^|(?<=\n))((?P<typelegend>Figure|Table|Code|Equation|Video|Source)\s?)*\:\s?(?P<txtlegend>.*?)(\n|$)')
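        # (Added note: this pattern matches legend lines such as
        # "Figure: a caption", "Table: yearly totals", or a bare ": caption";
        # the optional leading word is captured as 'typelegend' and the text
        # as 'txtlegend'.)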
def dete |
iamantony/PythonNotes | src/algorithms/search/__init__.py | Python | mit | 124 | 0 | __author__ = 'Antony Cherepanov'
from inversionscount import InversionsCounter
from closestpoints import ClosestPoints
|
pde/torbrowser-launcher | lib/Parsley-1.1/ometa/test/test_pymeta.py | Python | mit | 48,024 | 0.003082 | import operator
from textwrap import dedent
from twisted.trial import unittest
from ometa.grammar import OMeta, TermOMeta, TreeTransformerGrammar
from ometa.compat import OMeta1
from ometa.runtime import (ParseError, OMetaBase, OMetaGrammarBase, EOFError,
expected, TreeTransformerBase)
from ometa.interp import GrammarInterpreter, TrampolinedGrammarInterpreter
from terml.parser import parseTerm as term
class HandyWrapper(object):
"""
Convenient grammar wrapper for parsing strings.
"""
def __init__(self, klass):
"""
@param klass: The grammar class to be wrapped.
"""
self.klass = klass
def __getattr__(self, name):
"""
Return a function that will instantiate a grammar and invoke the named
rule.
@param: Rule name.
"""
def doIt(s):
"""
@param s: The string to be parsed by the wrapped grammar.
"""
obj = self.klass(s)
ret, err = obj.apply(name)
try:
extra, _ = obj.input.head()
except EOFError:
try:
return ''.join(ret)
except TypeError:
return ret
else:
raise err
return doIt
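# (Added note: with g = HandyWrapper(SomeGrammarClass), an access such as
# g.digit("1") instantiates the grammar on "1", applies the rule 'digit',
# and either returns the joined result or raises the stored parse error if
# any input remains unconsumed.)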
class OMeta1TestCase(unittest.TestCase):
"""
Tests of OMeta grammar compilation, with v1 syntax.
"""
classTested = OMeta1
def compile(self, grammar):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
m = self.classTested.makeGrammar(dedent(grammar), 'TestGrammar')
g = m.createParserClass(OMetaBase, globals())
return HandyWrapper(g)
def test_literals(self):
"""
Input matches can be made on literal characters.
"""
g = self.compile("digit ::= '1'")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_multipleRules(self):
"""
Grammars with more than one rule work properly.
"""
g = self.compile("""
digit ::= '1'
aLetter ::= 'a'
""")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_escapedLiterals(self):
"""
Input matches can be made on escaped literal characters.
"""
g = self.compile(r"newline ::= '\n'")
self.assertEqual(g.newline("\n"), "\n")
def test_integers(self):
"""
Input matches can be made on literal integers.
"""
g = self.compile("stuff ::= 17 0x1F -2 0177")
self.assertEqual(g.stuff([17, 0x1f, -2, 0177]), 0177)
self.assertRaises(ParseError, g.stuff, [1, 2, 3])
def test_star(self):
"""
Input matches can be made on zero or more repetitions of a pattern.
"""
g = self.compile("xs ::= 'x'*")
self.assertEqual(g.xs(""), "")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
def test_plus(self):
"""
Input matches can be made on one or more repetitions of a pattern.
"""
g = self.compile("xs ::= 'x'+")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
self.assertRaises(ParseError, g.xs, "")
def test_sequencing(self):
"""
Input matches can be made on a sequence of patterns.
"""
g = self.compile("twelve ::= '1' '2'")
self.assertEqual(g.twelve("12"), "2");
self.assertRaises(ParseError, g.twelve, "1")
def test_alternatives(self):
"""
Input matches can be made on one of a set of alternatives.
"""
g = self.compile("digit ::= '0' | '1' | '2'")
self.assertEqual(g.digit("0"), "0")
self.assertEqual(g.digit("1"), "1")
self.assertEqual(g.digit("2"), "2")
self.assertRaises(ParseError, g.digit, "3")
def test_optional(self):
"""
Subpatterns can be made optional.
"""
g = self.compile("foo ::= 'x' 'y'? 'z'")
self.assertEqual(g.foo("xyz"), 'z')
self.assertEqual(g.foo("xz"), 'z')
def test_apply(self):
"""
Other productions can be invoked from within a production.
"""
g = self.compile("""
digit ::= '0' | '1'
            bits ::= <digit>+
""")
self.assertEqual(g.bits('0110110'), '0110110')
def test_negate(self):
"""
        Input can be matched based on its failure to match a pattern.
"""
g = self.compile("foo ::= ~'0' <anything>")
self.assertEqual(g.foo("1"), "1")
self.assertRaises(ParseError, g.foo, "0")
def test_ruleValue(self):
"""
Productions can specify a Python expression that provides the result
of the parse.
"""
g = self.compile("foo ::= '1' => 7")
self.assertEqual(g.foo('1'), 7)
def test_ruleValueEscapeQuotes(self):
"""
Escaped quotes are handled properly in Python expressions.
"""
g = self.compile(r"""escapedChar ::= '\'' => '\\\''""")
self.assertEqual(g.escapedChar("'"), "\\'")
def test_ruleValueEscapeSlashes(self):
"""
Escaped slashes are handled properly in Python expressions.
"""
g = self.compile(r"""escapedChar ::= '\\' => '\\'""")
self.assertEqual(g.escapedChar("\\"), "\\")
def test_lookahead(self):
"""
Doubled negation does lookahead.
"""
g = self.compile("""
foo ::= ~~(:x) <bar x>
bar :x ::= :a :b ?(x == a == b) => x
""")
self.assertEqual(g.foo("11"), '1')
self.assertEqual(g.foo("22"), '2')
def test_binding(self):
"""
The result of a parsing expression can be bound to a name.
"""
g = self.compile("foo ::= '1':x => int(x) * 2")
self.assertEqual(g.foo("1"), 2)
def test_bindingAccess(self):
"""
Bound names in a rule can be accessed on the grammar's "locals" dict.
"""
G = self.classTested.makeGrammar(
"stuff ::= '1':a ('2':b | '3':c)", 'TestGrammar').createParserClass(OMetaBase, {})
g = G("12")
self.assertEqual(g.apply("stuff")[0], '2')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['b'], '2')
g = G("13")
self.assertEqual(g.apply("stuff")[0], '3')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['c'], '3')
def test_predicate(self):
"""
Python expressions can be used to determine the success or failure of a
parse.
"""
g = self.compile("""
digit ::= '0' | '1'
double_bits ::= <digit>:a <digit>:b ?(a == b) => int(b)
""")
self.assertEqual(g.double_bits("00"), 0)
self.assertEqual(g.double_bits("11"), 1)
self.assertRaises(ParseError, g.double_bits, "10")
self.assertRaises(ParseError, g.double_bits, "01")
def test_parens(self):
"""
Parens can be used to group subpatterns.
"""
g = self.compile("foo ::= 'a' ('b' | 'c')")
self.assertEqual(g.foo("ab"), "b")
self.assertEqual(g.foo("ac"), "c")
def test_action(self):
"""
Python expressions can be run as actions with no effect on the result
of the parse.
"""
g = self.compile("""foo ::= ('1'*:ones !(False) !(ones.insert(0, '0')) => ''.join(ones))""")
self.assertEqual(g.foo("111"), "0111")
def test_bindNameOnly(self):
"""
A pattern consisting of only a bind name matches a single element and
        binds it to that name.
coinbox/coinbox-mod-base | cbmod/base/views/window.py | Python | mit | 7,111 | 0.006609 | from pydispatch import dispatcher
from PySide import QtCore, QtGui
import cbpos
logger = cbpos.get_logger(__name__)
from .page import BasePage
class MainWindow(QtGui.QMainWindow):
__inits = []
def __init__(self):
super(MainWindow, self).__init__()
self.tabs = QtGui.QTabWidget(self)
self.tabs.setTabsClosable(False)
self.tabs.setIconSize(QtCore.QSize(32, 32))
self.tabs.currentChanged.connect(self.onCurrentTabChanged)
self.toolbar = self.addToolBar('Base')
self.toolbar.setIconSize(QtCore.QSize(48,48)) #Suitable for touchscreens
self.toolbar.setObjectName('BaseToolbar')
toolbarStyle = cbpos.config['menu', 'toolbar_style']
# The index in this list is the same as that in the configuration page
available_styles = (
QtCore.Qt.ToolButtonFollowStyle,
QtCore.Qt.ToolButtonIconOnly,
QtCore.Qt.ToolButtonTextOnly,
QtCore.Qt.ToolButtonTextBesideIcon,
QtCore.Qt.ToolButtonTextUnderIcon,
)
try:
toolbarStyle = available_styles[int(toolbarStyle)]
except (ValueError, TypeError, IndexError):
toolbarStyle = QtCore.Qt.ToolButtonFollowStyle
self.toolbar.setToolButtonStyle(toolbarStyle)
self.setCentralWidget(self.tabs)
self.statusBar().showMessage(cbpos.tr._('Coinbox POS is ready.'))
self.setWindowTitle('Coinbox')
self.callInit()
self.loadToolbar()
self.loadMenu()
def loadToolbar(self):
"""
Loads the toolbar actions, restore toolbar state, and restore window geometry.
"""
mwState = cbpos.config['mainwindow', 'state']
mwGeom = cbpos.config['mainwindow', 'geometry']
for act in cbpos.menu.actions:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(act.icon), act.label, self)
action.setShortcut(act.shortcut)
action.triggered.connect(act.trigger)
self.toolbar.addAction(action)
#Restores the saved mainwindow's toolbars and docks, and then the window geometry.
if mwState is not None:
self.restoreState( QtCore.QByteArray.fromBase64(mwState) )
if mwGeom is not None:
self.restoreGeometry( QtCore.QByteArray.fromBase64(mwGeom) )
else:
self.setGeometry(0, 0, 800, 600)
def loadMenu(self):
"""
Load the menu root items and items into the QTabWidget with the appropriate pages.
"""
show_empty_root_items = cbpos.config['menu', 'show_empty_root_items']
show_disabled_items = cbpos.config['menu', 'show_disabled_items']
hide_tab_bar = not cbpos.config['menu', 'show_tab_bar']
if hide_tab_bar:
# Hide the tab bar and prepare the toolbar for extra QAction's
self.tabs.tabBar().hide()
# This pre-supposes that the menu items will come after the actions
self.toolbar.addSeparator()
for root in cbpos.menu.items:
if not root.enabled and not show_disabled_items:
continue
if show_disabled_items:
# Show all child items
children = root.children
else:
# Filter out those which are disabled
children = [i for i in root.children if i.enabled]
# Hide empty menu root items
if len(children) == 0 and not show_empty_root_items:
continue
# Add the tab
widget = self.getTabWidget(children)
icon = QtGui.QIcon(root.icon)
index = self.tabs.addTab(widget, icon, root.label)
widget.setEnabled(root.enabled)
# Add the toolbar action if enabled
if hide_tab_bar:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(icon), root.label, self)
action.onTrigger = lambda n=index: self.tabs.setCurrentIndex(n)
action.triggered.connect(action.onTrigger)
self.toolbar.addAction(action)
def onCurrentTabChanged(self, index, tabs=None):
if tabs is None:
tabs = self.tabs
widget = tabs.widget(index)
try:
signal = widget.shown
except AttributeError:
pass
else:
signal.emit()
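    # (Added note: pages may optionally expose a Qt signal named `shown`;
    # emitting it whenever the tab becomes current lets a page refresh its
    # contents lazily. Pages without the attribute are silently skipped.)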
def getTabWidget(self, items):
"""
Returns the appropriate window to be placed in the main QTabWidget,
depending on the number of children of a root menu item.
"""
count = len(items)
if count == 0:
# If there are no child items, just return an empty widget
widget = QtGui.QWidget()
widget.setEnabled(False)
return widget
elif count == 1:
# If there is only one item, show it as is.
logger.debug('Loading menu page for %s', items[0].name)
widget = items[0].page()
widget.setEnabled(items[0].enabled)
return widget
else:
# If there are many children, add them in a QTabWidget
tabs = QtGui.QTabWidget()
tabs.currentChanged.connect(lambda i, t=tabs: self.onCurrentTabChanged(i, t))
for item in items:
logger.debug('Loading menu page for %s', item.name)
widget = item.page()
icon = QtGui.QIcon(item.icon)
tabs.addTab(widget, icon, item.label)
widget.setEnabled(item.enabled)
return tabs
def saveWindowState(self):
"""
Saves the main window state (position, size, toolbar positions)
"""
mwState = self.saveState().toBase64()
mwGeom = self.saveGeometry().toBase64()
cbpos.config['mainwindow', 'state'] = unicode(mwState)
cbpos.config['mainwindow', 'geometry'] = unicode(mwGeom)
cbpos.config.save()
    def closeEvent(self, event):
"""
Perform necessary operations before closing the window.
"""
self.saveWindowState()
#do any other thing before closing...
event.accept()
@classmethod
def addInit(cls, init):
"""
Adds the `init` method to the list of extensions of the `MainWindow.__init__`.
"""
cls.__inits.append(init)
    def callInit(self):
"""
Handle calls to `__init__` methods of extensions of the MainWindow.
"""
for init in self.__inits:
init(self)
|
antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_30/ar_12/test_artificial_128_Anscombe_Lag1Trend_30_12_20.py | Python | bsd-3-clause | 265 | 0.086792 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
dhuang/incubator-airflow | airflow/providers/sftp/hooks/sftp.py | Python | apache-2.0 | 11,761 | 0.001786 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP hook."""
import datetime
import stat
from typing import Dict, List, Optional, Tuple
import pysftp
import tenacity
from paramiko import SSHException
from airflow.providers.ssh.hooks.ssh import SSHHook
class SFTPHook(SSHHook):
"""
This hook is inherited from SSH hook. Please refer to SSH hook for the input
arguments.
Interact with SFTP. Aims to be interchangeable with FTPHook.
:Pitfalls::
- In contrast with FTPHook describe_directory only returns size, type and
modify. It doesn't return unix.owner, unix.mode, perm, unix.group and
unique.
- retrieve_file and store_file only take a local full path and not a
buffer.
- If no mode is passed to create_directory it will be created with 777
permissions.
Errors that may occur throughout but should be handled downstream.
:param sftp_conn_id: The :ref:`sftp connection id<howto/connection:sftp>`
:type sftp_conn_id: str
"""
conn_name_attr = 'ftp_conn_id'
default_conn_name = 'sftp_default'
conn_type = 'sftp'
hook_name = 'SFTP'
@staticmethod
def get_ui_field_behaviour() -> Dict:
return {
"hidden_fields": ['schema'],
"relabeling": {
'login': 'Username',
},
}
def __init__(self, ftp_conn_id: str = 'sftp_default', *args, **kwargs) -> None:
kwargs['ssh_conn_id'] = ftp_conn_id
super().__init__(*args, **kwargs)
self.conn = None
self.private_key_pass = None
self.ciphers = None
# Fail for unverified hosts, unless this is explicitly allowed
self.no_host_key_check = False
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
# For backward compatibility
# TODO: remove in Airflow 2.1
import warnings
if 'private_key_pass' in extra_options:
warnings.warn(
'Extra option `private_key_pass` is deprecated.'
'Please use `private_key_passphrase` instead.'
'`private_key_passphrase` will precede if both options are specified.'
'The old option `private_key_pass` will be removed in Airflow 2.1',
DeprecationWarning,
stacklevel=2,
)
self.private_key_pass = extra_options.get(
'private_key_passphrase', extra_options.get('private_key_pass')
)
if 'ignore_hostkey_verification' in extra_options:
warnings.warn(
'Extra option `ignore_hostkey_verification` is deprecated.'
'Please use `no_host_key_check` instead.'
'This option will be removed in Airflow 2.1',
DeprecationWarning,
stacklevel=2,
)
                    self.no_host_key_check = (
                        str(extra_options['ignore_hostkey_verification']).lower() == 'true'
)
if 'no_host_key_check' in extra_options:
self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'
if 'ciphers' in extra_options:
self.ciphers = extra_options['ciphers']
if 'private_key' in extra_options:
self.key_file = extra_options.get('private_key')
@tenacity.retry(
stop=tenacity.stop_after_delay(10),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(SSHException),
reraise=True,
)
def get_conn(self) -> pysftp.Connection:
"""Returns an SFTP connection object"""
if self.conn is None:
cnopts = pysftp.CnOpts()
if self.no_host_key_check:
cnopts.hostkeys = None
else:
if self.host_key is not None:
cnopts.hostkeys.add(self.remote_host, self.host_key.get_name(), self.host_key)
else:
pass # will fallback to system host keys if none explicitly specified in conn extra
cnopts.compression = self.compress
cnopts.ciphers = self.ciphers
conn_params = {
'host': self.remote_host,
'port': self.port,
'username': self.username,
'cnopts': cnopts,
}
if self.password and self.password.strip():
conn_params['password'] = self.password
if self.key_file:
conn_params['private_key'] = self.key_file
if self.private_key_pass:
conn_params['private_key_pass'] = self.private_key_pass
self.conn = pysftp.Connection(**conn_params)
return self.conn
def close_conn(self) -> None:
"""Closes the connection"""
if self.conn is not None:
self.conn.close()
self.conn = None
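    # Illustrative usage (added; not part of the provider itself), assuming
    # an 'sftp_default' connection is configured in Airflow:
    #   hook = SFTPHook(ftp_conn_id='sftp_default')
    #   print(hook.list_directory('/upload'))
    #   hook.close_conn()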
def describe_directory(self, path: str) -> Dict[str, Dict[str, str]]:
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
"""
conn = self.get_conn()
flist = conn.listdir_attr(path)
files = {}
for f in flist:
modify = datetime.datetime.fromtimestamp(f.st_mtime).strftime('%Y%m%d%H%M%S')
files[f.filename] = {
'size': f.st_size,
'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
'modify': modify,
}
return files
def list_directory(self, path: str) -> List[str]:
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
:type path: str
"""
conn = self.get_conn()
files = conn.listdir(path)
return files
def create_directory(self, path: str, mode: int = 777) -> None:
"""
Creates a directory on the remote system.
:param path: full path to the remote directory to create
:type path: str
:param mode: int representation of octal mode for directory
"""
conn = self.get_conn()
conn.makedirs(path, mode)
def delete_directory(self, path: str) -> None:
"""
Deletes a directory on the remote system.
:param path: full path to the remote directory to delete
:type path: str
"""
conn = self.get_conn()
conn.rmdir(path)
def retrieve_file(self, remote_full_path: str, local_full_path: str) -> None:
"""
Transfers the remote file to a local location.
If local_full_path is a string path, the file will be put
at that location
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path: full path to the local file
:type local_full_path: str
| |
atiberghien/makerscience-server | makerscience_profile/migrations/0007_auto__add_field_makerscienceprofile_website.py | Python | agpl-3.0 | 8,044 | 0.007708 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MakerScienceProfile.website'
db.add_column(u'makerscience_profile_makerscienceprofile', 'website',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MakerScienceProfile.website'
db.delete_column(u'makerscience_profile_makerscienceprofile', 'website')
models = {
u'accounts.profile': {
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'makerscience_profile.makerscienceprofile': {
'Meta': {'object_name': 'MakerScienceProfile'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bio': ('django.db.models.fields.TextField', [], {}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'linkedin': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.PostalAddress']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Profile']"}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'makerscience_profile.makerscienceprofiletaggeditem': {
'Meta': {'object_name': 'MakerScienceProfileTaggedItem', '_ormbases': [u'taggit.TaggedItem']},
'tag_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'taggeditem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['taggit.TaggedItem']", 'unique': 'True', 'primary_key': 'True'})
},
u'scout.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['makerscience_profile'] |
pandas-dev/pandas | pandas/tests/indexes/timedeltas/test_timedelta.py | Python | bsd-3-clause | 4,517 | 0.00155 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
NaT,
Series,
Timedelta,
TimedeltaIndex,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexes.api import Int64Index
from pandas.tests.indexes.datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_index_cls = TimedeltaIndex
@pytest.fixture
def simple_index(self) -> TimedeltaIndex:
index = pd.to_timedelta(range(5), unit="d")._with_freq("infer")
assert index.freq == "D"
ret = index + pd.offsets.Hour(1)
assert ret.freq == "D"
return ret
@pytest.fixture
def index(self):
return tm.makeTimedeltaIndex(10)
def test_numeric_compat(self):
# Dummy method to override super's version; this test is now done
# in test_arithmetic.py
pass
def test_shift(self):
pass # this is handled in test_arithmetic.py
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range("1 day", periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range("1 days", "10 days")
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
tm.assert_index_equal(rng.days, Index([1, 1], dtype="int64"))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype="int64"),
)
tm.assert_index_equal(
rng.microseconds, Index([100 * 1000 + 123, 100 * 1000 + 123], dtype="int64")
)
tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype="int64"))
msg = "'TimedeltaIndex' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(
s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1])
)
# preserve name (GH15589)
rng.name = "name"
assert rng.days.name == "name"
def test_freq_conversion_always_floating(self):
# even if we have no NaTs, we get back float64; this matches TDA and Series
tdi = timedelta_range("1 Day", periods=30)
res = tdi.astype("m8[s]")
expected = Index((tdi.view("i8") / 10**9).astype(np.fl | oat64))
tm.assert_index_equal(res, expected)
# check this matches Series and TimedeltaArray
res = tdi._data.astype("m8[s]")
tm.assert_numpy_array_equal(res, expected._values)
res = tdi.to_series().astype("m8[s]")
tm.assert_numpy_array_equal(res._values, expected._values)
def test_freq_con | version(self, index_or_series):
# doc example
scalar = Timedelta(days=31)
td = index_or_series(
[scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
dtype="m8[ns]",
)
result = td / np.timedelta64(1, "D")
expected = index_or_series(
[31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan]
)
tm.assert_equal(result, expected)
result = td.astype("timedelta64[D]")
expected = index_or_series([31, 31, 31, np.nan])
tm.assert_equal(result, expected)
result = td / np.timedelta64(1, "s")
expected = index_or_series(
[31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan]
)
tm.assert_equal(result, expected)
result = td.astype("timedelta64[s]")
tm.assert_equal(result, expected)
|
sniemi/SamPy | sandbox/src1/subplot_demo.py | Python | bsd-2-clause | 349 | 0.002865 | from pylab | import *
t1 = arange(0.0, 5.0, 0.1)
t2 = arange(0.0, 5.0, 0.02)
t3 = arange(0.0, 2.0, 0.01)
subplot(211)
plot(t1, cos(2*pi*t1)*exp(-t1), 'bo', t2, cos(2*pi*t2)*exp(-t2), 'k')
grid(True)
title('A tale of 2 subplots')
ylabel('Damped')
subplot(212 | )
plot(t3, cos(2*pi*t3), 'r--')
grid(True)
xlabel('time (s)')
ylabel('Undamped')
show()
|
kshmirko/pysolar-py3 | Pysolar/elevation.py | Python | gpl-3.0 | 2,481 | 0.026199 | #!/usr/bin/python
# Copyright Sean T. Hammond
#
# This file is part of Pysolar.
#
# Pysolar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Pysolar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Pysolar. If not, see <http://www.gnu.org/licenses/>.
"""Various elevation-related calculations
"""
import math
def GetPressureWithElevation(h, Ps=101325.00, Ts=288.15, Tl=-0.0065, Hb=0.0, R=8.31432, g=9.80665, M=0.0289644):
#This function returns an estimate of the pressure in pascals as a function of elevation above sea level
#NOTE: This equation is only accurate up to 11,000 meters
#NO | TE: results might be odd for elevations below 0 (sea level), | like Dead Sea.
#h=elevation relative to sea level (m)
#Ps= static pressure (pascals) = 101325.00 P
#Ts= standard temperature (kelvin) = 288.15 K
#Tl= temperature lapse rate (kelvin/meter) = -0.0065 K/m
#Hb= height at the bottom of the layer = 0
	#R= universal gas constant = 8.31432 J/(mol*K)
#g= gravitational acceleration for earth = 9.80665 m/s^2
#M= Molar mass of Earth's atmosphere = 0.0289644 kg/mol
	#P=Ps*(Ts/(Ts+Tl*(h-Hb)))^((g*M)/(R*Tl))
#returns pressure in pascals
if h>11000.0: print("WARNING: Elevation used exceeds the recommended maximum elevation for this function (11,000m)")
theDenominator = Ts+(Tl*(h-Hb))
theExponent=(g*M)/(R*Tl)
return Ps*(Ts/theDenominator)**theExponent
def GetTemperatureWithElevation(h, Ts=288.15, Tl=-0.0065):
	#This function returns an estimate of temperature as a function of elevation above sea level
#NOTE: this is only accurate up to 11,000m
#NOTE: results might be odd for elevations below 0 (sea level), like Dead Sea.
#Ts= standard temperature (kelvin) = 288.15 K
#Tl= temperature lapse rate (kelvin/meter) = -0.0065 K/m
#returns temp in kelvin
return Ts+(h*Tl)
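# Worked example (added; matches the U.S. Standard Atmosphere at 1 km):
# GetPressureWithElevation(1000.0)
#   = 101325 * (288.15 / 281.65) ** ((9.80665 * 0.0289644) / (8.31432 * -0.0065))
#   ~= 89875 Pa, and GetTemperatureWithElevation(1000.0) = 281.65 K.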
def ElevationTest():
print("Elevation(m) Pressure(Pa) Temperature(K)")
h=0
for i in range(11):
P=GetPressureWithElevation(h)
T=GetTemperatureWithElevation(h)
print(("%i %i %i" % (h, P, T)))
h=h+1000
|
wking/cpython-extension | grab-cores.py | Python | cc0-1.0 | 403 | 0 | #!/usr/bin/env python
import threading
im | port spam
def grab_cores(threads=1, count=int(1e9)):
_threads = []
for i in range(threads):
thread = threading.Thread(target=spam.busy, args=(count,))
_threads.append(thread)
thread.start()
for thread in _threads:
thread.join()
if __name__ == '__main__':
import sys
| grab_cores(threads=int(sys.argv[1]))
|
palash1992/GEM | gem/evaluation/evaluate_graph_reconstruction.py | Python | bsd-3-clause | 3,704 | 0.00108 | try: import cPickle as pickle
except: import pickle
from gem.evaluation import metrics
from gem.utils import evaluation_util, graph_util
import networkx as nx
import numpy as np
def evaluateStaticGraphReconstruction(digraph, graph_embedding,
X_stat, node_l=None, file_suffix=None,
sample_ratio_e=None, is_undirected=True,
is_weighted=False):
node_num = len(digraph.nodes)
# evaluation
if sample_ratio_e:
eval_edge_pairs = evaluation_util.getRandomEdgePairs(
node_num,
sample_ratio_e,
| is_undirected
)
else:
eval_edge_pairs = None
if file_suffix is None:
estimated_adj = graph_embedding.get_reconstructed_adj(X_stat, node_l)
else:
estimated_adj = graph_embedding.get_reconstructed_adj(
X_stat,
file_suffix,
node_l
)
predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
estimated_adj,
is_undirected=is_undirected,
edg | e_pairs=eval_edge_pairs
)
MAP = metrics.computeMAP(predicted_edge_list, digraph, is_undirected=is_undirected)
prec_curv, _ = metrics.computePrecisionCurve(predicted_edge_list, digraph)
# If weighted, compute the error in reconstructed weights of observed edges
if is_weighted:
digraph_adj = nx.to_numpy_matrix(digraph)
estimated_adj[digraph_adj == 0] = 0
err = np.linalg.norm(digraph_adj - estimated_adj)
err_baseline = np.linalg.norm(digraph_adj)
else:
err = None
err_baseline = None
return (MAP, prec_curv, err, err_baseline)
def expGR(digraph, graph_embedding,
X, n_sampled_nodes, rounds,
res_pre, m_summ,
is_undirected=True):
print('\tGraph Reconstruction')
summ_file = open('%s_%s.grsumm' % (res_pre, m_summ), 'w')
summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
if len(digraph.nodes) <= n_sampled_nodes:
rounds = 1
MAP = [None] * rounds
prec_curv = [None] * rounds
err = [None] * rounds
err_b = [None] * rounds
n_nodes = [None] * rounds
n_edges = [None] * rounds
for round_id in range(rounds):
sampled_digraph, node_l = graph_util.sample_graph(
digraph,
n_sampled_nodes=n_sampled_nodes
)
n_nodes[round_id] = len(sampled_digraph.nodes)
n_edges[round_id] = len(sampled_digraph.edges)
print('\t\tRound: %d, n_nodes: %d, n_edges:%d\n' % (round_id,
n_nodes[round_id],
n_edges[round_id]))
sampled_X = X[node_l]
MAP[round_id], prec_curv[round_id], err[round_id], err_b[round_id] = \
evaluateStaticGraphReconstruction(sampled_digraph, graph_embedding,
sampled_X, node_l,
is_undirected=is_undirected)
try:
summ_file.write('Err: %f/%f\n' % (np.mean(err), np.std(err)))
summ_file.write('Err_b: %f/%f\n' % (np.mean(err_b), np.std(err_b)))
except TypeError:
pass
summ_file.write('%f/%f\t%s\n' % (np.mean(MAP), np.std(MAP),
metrics.getPrecisionReport(prec_curv[0],
n_edges[0])))
pickle.dump([n_nodes,
n_edges,
MAP,
prec_curv,
err,
err_b],
open('%s_%s.gr' % (res_pre, m_summ), 'wb'))
|
sigopt/sigopt-python | test/cli/test_cli_config.py | Python | mit | 1,165 | 0.007725 | import click
import mock
import pytest
from click.testing import CliRunner
from sigopt.cli import cli
class TestRunCli(object):
@pytest.mark.parametrize('opt_into_log_collection', [False, True])
@pytest.mark.parametrize('opt_into_cell_tracking', [False, True])
def test_config_command(self, opt_into_log_collection, opt_into_cell_tracking):
runner = CliRunner()
log_collection_arg = '--enable-log-collection' if opt_into_log_collection else '--no-enable-log-collection'
cell_tracking_arg = '--enable-cell-tr | acking' if opt_into_cell_tracking else '--no-enable-cell-tracking'
with mock.patch('sigopt.cli.commands.config._config.persist_configuration_options') as persist_configuration_options:
result = runner.invoke(cli, [
'config',
'--api-token=some_test_token',
log_collectio | n_arg,
cell_tracking_arg,
])
persist_configuration_options.assert_called_once_with({
'api_token': 'some_test_token',
'code_tracking_enabled': opt_into_cell_tracking,
'log_collection_enabled': opt_into_log_collection,
})
assert result.exit_code == 0
assert result.output == ''
|
yujikato/DIRAC | src/DIRAC/StorageManagementSystem/DB/StorageManagementDB.py | Python | gpl-3.0 | 58,271 | 0.010262 | """ StorageManagementDB is a front end to the Stager Database.
 There are four tables in the StorageManagementDB: Tasks, CacheReplicas, TaskReplicas and StageRequests.
The Tasks table is the place holder for the tasks that have requested files to be staged.
These can be from different systems and have different associated call back methods.
The CacheReplicas table keeps the information on all the CacheReplicas in the system.
It maps all the file information LFN, PFN, SE to an assigned ReplicaID.
The TaskReplicas table maps the TaskIDs from the Tasks table to the ReplicaID from the CacheReplicas table.
The StageRequests table contains each of the prestage request IDs for each of the replicas.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from past.builtins import long
import six
import inspect
import threading
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import intListToString, stringListToString
# Stage Request are issue with a length of "PinLength"
# However, once Staged, the entry in the StageRequest will set a PinExpiryTime only for "PinLength" / THROTTLING_STEPS
# As PinExpiryTime arrives, StageRequest and their corresponding CacheReplicas entries are cleaned
# This allows to throttle the submission of Stage Requests up to a maximum of "DiskCacheTB" per "PinLength"
# After "PinLength" / THROTTLING_STEPS seconds, entries are removed, so new requests for the same replica will trigger
# a new Stage Request to the SE, and thus an update of the Pinning on the SE.
#
# - "PinLength" is an Option of the StageRequest Agent that defaults to THROTTLING_TIME
# - "DiskCacheTB" is an Option of the StorageElement that defaults to 1 (TB)
#
THROTTLING_TIME = 86400
THROTTLING_STEPS = 12
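# Worked example (illustrative): with the defaults, "PinLength" equals
# THROTTLING_TIME = 86400 s, so the PinExpiryTime is set only
# 86400 / 12 = 7200 s (2 hours) ahead; after that the StageRequests and
# CacheReplicas entries are cleaned and the replica can be pinned again.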
class StorageManagementDB(DB):
def __init__(self, systemInstance='Default'):
DB.__init__(self, 'StorageManagementDB', 'StorageManagement/StorageManagementDB')
self.lock = threading.Lock()
self.TASKPARAMS = [
'TaskID',
'Status',
'Source',
'SubmitTime',
'LastUpdate',
'CompleteTime',
'CallBackMethod',
'SourceTaskID']
self.REPLICAPARAMS = [
'ReplicaID',
'Type',
'Status',
'SE',
'LFN',
'PFN',
'Size',
'FileChecksum',
'GUID',
'SubmitTime',
'LastUpdate',
'Reason',
'Links']
self.STAGEPARAMS = [
'ReplicaID',
'StageStatus',
'RequestID',
'StageRequestSubmitTime',
'StageRequestCompletedTime',
'PinLength',
'PinExpiryTime']
self.STATES = ['Failed', 'New', 'Waiting', 'Offline', 'StageSubmitted', 'Staged']
def __getConnection(self, connection):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def _caller(self):
return inspect.stack()[2][3]
################################################################
#
# State machine management
#
def updateTaskStatus(self, taskIDs, newTaskStatus, connection=False):
return self.__updateTaskStatus(taskIDs, newTaskStatus, connection=connection)
def __updateTaskStatus(self, taskIDs, newTaskStatus, force=False, connection=False):
connection = self.__getConnection(connection)
if not taskIDs:
return S_OK(taskIDs)
if force:
toUpdate = taskIDs
else:
res = self._checkTaskUpdate(taskIDs, newTaskStatus, connection=connection)
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK(toUpdate)
# reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
reqSelect = "SELECT TaskID FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % (
intListToString(toUpdate), newTaskStatus)
resSelect = self._query(reqSelect, connection)
if not resSelect['OK']:
gLogger.error("%s.%s_DB: problem retrieving record:" % (self._caller(), '__updateTaskStatus'),
"%s. %s" % (reqSelect, resSelect['Message']))
req = "UPDATE Tasks SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE TaskID IN (%s) AND Status != '%s';" % (
newTaskStatus, intListToString(toUpdate), newTaskStatus)
res = self._update(req, connection)
if not res['OK']:
return res
taskIDs = []
for record in resSelect['Value']:
taskIDs.append(record[0])
gLogger.verbose(
"%s.%s_DB: to_update Tasks = %s" %
(self._caller(), '__updateTaskStatus', record))
if len(taskIDs) > 0:
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString(taskIDs)
resSelect1 = self._query(reqSelect1, connection)
if not resSelect1["OK"]:
gLogger.warn("%s.%s_DB: problem retrieving records: %s. %s" %
(self._caller(), '__updateTaskStatus', reqSelect1, resSelect1['Message']))
else:
for record in resSelect1['Value']:
gLogger.verbose(
"%s.%s_DB: updated Tasks = %s" %
(self._caller(), '__updateTaskStatus', record))
return S_OK(toUpdate)
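  # State-machine summary (derived from the checks in _checkTaskUpdate below):
  #   New -> Waiting -> Offline -> StageSubmitted -> StageCompleting -> Done,
  #   Waiting -> StageSubmitted is also allowed, and any state may -> Failed.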
def _checkTaskUpdate(self, taskIDs, newTaskState, connection=False):
connection = self.__getCon | nection(connection)
if not taskIDs:
return S_OK(taskIDs)
# * -> Failed
if newTaskState == 'Failed':
oldTaskState = []
# StageCompleting -> Done
elif newTaskState == 'Done':
oldTaskState = ['StageC | ompleting']
# StageSubmitted -> StageCompleting
elif newTaskState == 'StageCompleting':
oldTaskState = ['StageSubmitted']
# Waiting -> StageSubmitted
elif newTaskState == 'StageSubmitted':
oldTaskState = ['Waiting', 'Offline']
# New -> Waiting
elif newTaskState == 'Waiting':
oldTaskState = ['New']
elif newTaskState == 'Offline':
oldTaskState = ['Waiting']
else:
return S_ERROR("Task status not recognized")
if not oldTaskState:
toUpdate = taskIDs
else:
req = "SELECT TaskID FROM Tasks WHERE Status in (%s) AND TaskID IN (%s)" % (
stringListToString(oldTaskState), intListToString(taskIDs))
res = self._query(req, connection)
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK(toUpdate)
def updateReplicaStatus(self, replicaIDs, newReplicaStatus, connection=False):
connection = self.__getConnection(connection)
if not replicaIDs:
return S_OK(replicaIDs)
res = self._checkReplicaUpdate(replicaIDs, newReplicaStatus)
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK(toUpdate)
# reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % (
intListToString(toUpdate), newReplicaStatus)
resSelect = self._query(reqSelect, connection)
if not resSelect['OK']:
gLogger.error(
"%s.%s_DB: problem retrieving record:" %
(self._caller(), 'updateReplicaStatus'), "%s. %s" %
(reqSelect, resSelect['Message']))
req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % (
newReplicaStatus, intListToString(toUpdate), newReplicaStatus)
res = self._update(req, connection)
if not res['OK']:
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append(record[0])
gLogger.verbose("%s.%s_DB: to_update CacheReplicas = %s" %
(self._caller(), 'updateReplicaStatus', record))
if len(replicaIDs) > 0:
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString(
replicaIDs)
resSelect1 = se |
VRaviTheja/SDN-policy | testing/testing_detection.py | Python | apache-2.0 | 3,341 | 0.055373 | #!/usr/bin/python
import pytricia
import reading_file_to_dict
import sys
import pprint
import csv
import p_trie
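# The conflict-type tags below are referenced but never defined in this file;
# plain strings are assumed here so the names resolve. subset() is likewise
# assumed to be provided by the imported p_trie module.
redundancy = "redundancy"
correlation = "correlation"
shadowing = "shadowing"
overlap = "overlap"
imbrication = "imbrication"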
def patricia(device_values):
pyt_src = pytricia.PyTricia()
pyt_dst = pytricia.PyTricia()
return pyt_src,pyt_dst
def check_tcp_udp(flow_rule):
if(flow_rule["nw_proto"]=="6"):
return True
else :
return False
def add_rule_to_patricia(pyt_src,pyt_dst,flow_rule):
src_ip=flow_rule["src_ip"]
dst_ip=flow_rule["dst_ip"]
aas=flow_rule["aasno"]
pyt_src.insert(src_ip,aas)
pyt_dst.insert(dst_ip,aas)
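# e.g. (illustrative values) pyt_src.insert("10.0.0.0/8", "65001") keys the
# source trie by CIDR prefix, enabling longest-prefix matches between rules.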
def add_rule_to_newft(flow_rule):
print >>f_new, flow_rule
def finding_patricia_empty(pyt):
if(len(pyt)==0):
return True
else :
| return False
def detection_algorithm(r,gamma):
if(check_tcp_udp(r)==check_tcp_udp(gamma)):
add_rule_to_newft(r)
return
if(subset(pyt_src,pyt_dst,r,gamma)=="equal"): #do subset here
if(r["action "]==gamma["action "]):
conflict_resolver(gamma,r,redundancy)
print "Conflict is Redundancy : Sent to resolving"
else:
if(r["priority"]==gamma["priority"]):
| conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
else:
print "Conflict is Generalization : Sent to resolving"
if(subset(pyt_src,pyt_dst,r,gamma)=="reverse"): #do subset here
if(r["action "]==gamma["action "]):
print "Conflict is Redundancy : Sent to resolving"
conflict_resolver(r,gamma,redundancy)
elif(r["priority"]==gamma["priority"]):
conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
else:
conflict_resolver(r,gamma,shadowing)
print "Conflict is Shadowing : Sent to resolving"
if(subset(pyt_src,pyt_dst,r,gamma)=="intersection"):
if(r["action "]==gamma["action "]):
print "Conflict is Overlap : Sent to resolving"
conflict_resolver(r,gamma,overlap)
else :
conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
def detect_imbrication(r,device_values):
for gamma in device_values:
if(r["nw_proto"]==gamma["nw_proto"]):
if(subset(pyt_src,pyt_dst,r,gamma)=="intersection"):
print "Conflict is Imbrication : Sent to resolving"
conflict_resolver(r,gamma,imbrication)
def creating_dict():
# Calls the csv_dict_list function, passing the named csv
device_values = reading_file_to_dict.csv_dict_list(sys.argv[1])
# Prints the results nice and pretty like
#pprint.pprint(device_values)
return device_values
def conflict_resolver(r,gamma,conflict_type):
if(conflict_type==shadowing or conflict_type==redundancy):
add_rule_to_newft(r)
if(conflict_type==overlap):
print "Do union here" #union operation
if(conflict_type==imbrication):
a=input('Cross layer conflict. Choose one flow rule : ')
if(a==r):
add_rule_to_newft(r)
else :
add_rule_to_newft(gamma)
if __name__ == "__main__" :
device_values = creating_dict()
pyt_src,pyt_dst = patricia(device_values)
finding_patricia_empty(pyt_src)
r=device_values[0]
gamma=device_values[1]
f_new=open("new_flow_table","w+")
#print r["action "]
#add_rule_to_newft(r)
#add_rule_to_newft(gamma)
detection_algorithm(gamma,r)
#print r["nw_proto"]
#add_rule_to_patricia(pyt_src,pyt_dst,r)
#check_tcp_udp(r)
#finding_patricia_empty(pyt_src)
|
antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_7/ar_12/test_artificial_32_Anscombe_LinearTrend_7_12_0.py | Python | bsd-3-clause | 264 | 0.087121 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trend | type = | "LinearTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 12); |
arsenypoga/ImageboardDownloader | DownloaderConstants.py | Python | mit | 278 | 0 | # -*- coding: UTF-8 -*-
DOWNLOADER_VERSION = "0.0.1"
DOWNLOADER_LOG_FILE = "downloader.l | og"
DOWNLOADER_LOG_SIZE = 10485760
DOWNLOADER_LOG_COUNT = 10
DOWNLOADER_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname) | s - %(message)s"
DOWNLOADER_REQUIREMENTS_PATH = "requirements.txt"
|
csirtgadgets/bearded-avenger-sdk-py | cifsdk/msg.py | Python | mpl-2.0 | 3,924 | 0.000255 | import ujson as json
from pprint import pprint
import msgpack
import logging
from cifsdk.constants import PYVERSION
import os
TRACE = os.environ.get('CIFSDK_CLIENT_MSG_TRACE')
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
if TRACE:
logger.setLevel(logging.DEBUG)
MAP = {
1: 'ping',
2: 'ping_write',
3: 'indicators_create',
4: 'indicators_search',
5: 'indicators_delete',
6: 'tokens_search',
7: 'tokens_create',
8: 'tokens_delete',
9: 'tokens_edit',
}
class Msg(object):
PING = 1
PING_WRITE = 2
INDICATORS_CREATE = 3
INDICATORS_SEARCH = 4
INDICATORS_DELETE = 5
TOKENS_SEARCH = 6
TOKENS_CREATE = 7
TOKENS_DELETE = 8
TOKENS_EDIT = 9
def __init__(self, *args, **kwargs):
for k in kwargs:
if isinstance(kwargs[k], str):
try:
kwargs[k] = kwargs[k].encode('utf-8')
except UnicodeDecodeError:
pass
self.id = kwargs.get('id')
self.client_id = kwargs.get('client_id')
self.mtype = kwargs.get('mtype')
self.token = kwargs.get('token')
self.data = kwargs.get('data')
self.null = ''.encode('utf-8')
# from str to int
def mtype_to_int(self, mtype):
for m in MAP:
if MAP[m] == mtype:
return m
def __repr__(self):
m = {
'id': self.id,
'mtype': self.mtype,
'token': self.token,
'data': self.data,
}
return json.dumps(m)
def recv(self, s):
m = s.recv_multipart()
if len(m) == 6:
id, client_id, null, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, client_id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 5:
id, null, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 4:
id, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 3:
id, mtype, data = m
try:
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
except msgpack.exceptions.ExtraData:
pass
return id, mtype, data.decode('utf-8')
else:
mtype, data = m
return mtype, data.decode("utf-8")
def to_list(self):
m = []
if self.id:
m.append(self.id)
if self.client_id:
m.append(self.client_id)
if len(m) > 0:
m.append(self.null)
if self.token:
if isinstance(self.token, str):
| self.token = self.token.encode('utf-8')
if PYVERSION == 2:
if isinstance(self.token, unicode):
self.token = self.token.encode('utf-8')
m.append(self.token)
if self.mtype:
if isinstance(self.mtype, bytes):
self.mtype = self.mtype_to_int(self.mtype.decode('utf-8'))
m.append(msgpack.packb(self.mtype))
if isinstance(self.data, dict):
| self.data = [self.data]
if isinstance(self.data, list):
self.data = json.dumps(self.data)
if isinstance(self.data, str):
self.data = self.data.encode('utf-8')
if PYVERSION == 2:
if isinstance(self.data, unicode):
self.data = self.data.encode('utf-8')
m.append(self.data)
return m
def send(self, s):
m = self.to_list()
logger.debug('sending...')
s.send_multipart(m)
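# Usage sketch (illustrative; the socket and values below are assumptions):
#   m = Msg(token='TOKEN', mtype=Msg.PING, data={'indicator': 'example.com'})
#   m.send(zmq_socket)  # frames sent: [token, msgpack(mtype), utf-8 JSON(data)]
#   reply = Msg().recv(zmq_socket)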
|
drphilmarshall/StatisticalMethods | lessons/graphics/notparallel_chain_fit.py | Python | gpl-2.0 | 3,306 | 0.013914 | # Copied from LMC documentation
# Modified to use MPI (but not enable parallelization), to increase the parameter degeneracy, and to disperse the start points
# Here is a simple example. As shown it will run in non-parallel mode; comments indicate what to do for parallelization.
from lmc import *
## for MPI
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
from numpy.random import rand
### Define some parameters.
startx = [-10.0, -10.0, 10.0, 10.0]
starty = [-10.0, 10.0, -10.0, 10.0]
x = Parameter(name='x', value=startx[mpi_rank], width=0.1)
y = Parameter(name='y', value=starty[mpi_rank], width=0.1)
### This is the object that will be passed to the likelihood function.
### In this simple case, it just holds the parameter objects, but in general it could be anything.
### E.g., usually it would also contain or point to the data being used to constrain the model. A good idea is to write the state of any updaters to a file after each adaptation (using the on_adapt functionality), in which case keeping pointers to the updaters here is convenient. Also commonly useful: a DerivedParameter which holds the value of the posterior log-density for each sample.
class Thing:
def __init__(self, x, y):
self.x = x
self.y = y
thing = Thing(x, y)
### The log-posterior function. Here we just assume a bivariate Gaussian posterior with marginal standard deviations s(x)=2 and s(y)=3, correlation coefficient 0.75, and means <x>=-1, <y>=1.
def post(thing):
r = 0.99
sx = 2.0
sy = 3.0
mx = -1.0
my = 1.0
return -0.5/(1.0-r**2)*( (thing.x()-mx)**2/sx**2 + (thing.y()-my)**2/sy**2 - 2.0*r*(thing.x()-mx)/sx*(thing.y()-my)/sy )
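### Note (added): with r = 0.99 the posterior is a long, thin ellipse; this is
### the "increased parameter degeneracy" mentioned in the header comment and a
### deliberately hard target for an unadapted Metropolis proposal.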
### Create a parameter space consisting of x and y, and associate the log-posterior function with it.
space = ParameterSpace([thing.x, thing.y], post)
### If we'd bothered to define a DerivedParameter in Thing which would hold the posterior density, we might want to define a larger ParameterSpace and pass it to the Engine later on to be saved in the Backends (instead of space).
#trace = ParameterSpace([thing.x, thing.y, thing.logP])
### Use Metropolis sampling. Adapt the proposal distribution every 100 iterations starting with the 100th.
step = Metropolis()
parallel = None
## for MPI parallelization
#parallel = MPI.COMM_WORLD
## for parallelization via the filesystem, this would have to be set to a different value for each concurrently running instance
#parallel = 1
updater = MultiDimSequentialUpdater(space, step, 100, 100, parallel=parallel)
### Create an Engine and tell it to drive this Updater and to store the values of the free parameters.
engine = Engine([updater], space)
### Store | the chain in a text file.
#chainfile = open("chain.txt", 'w')
## For filesystem parallelization, each instance should write to a different file.
## For MPI, the same is true, e.g.
chainfile = open( | "notparallel" + str(MPI.COMM_WORLD.Get_rank()) + ".txt", 'w')
backends = [ textBackend(chainfile) ]
### Print the chain to the terminal as well
#backends.append( stdoutBackend() )
### Run the chain for 10000 iterations
engine(10000, thing, backends)
### Close the text file to clean up.
chainfile.close()
## If this was a parallel run, print the convergence criterion for each parameter.
# print updater.R
|
Trebek/pydealer | docs/conf.py | Python | gpl-3.0 | 8,642 | 0.005439 | # -*- coding: utf-8 -*-
#
# PyDealer documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 22 21:57:58 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyDealer: Playing Card Package'
copyright = u'2015, Alex Crawford'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4'
# The full version, including alpha/beta/rc tags.
release = '1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["_themes"]
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme_path = ["_themes"]
# html_theme = 'sphinx_rtd_theme'
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "./images/pd.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyDealerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyDealer.tex', u'PyDealer Documentation',
u'Alex Crawford', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as | an appendix to all manuals.
#latex_appendices = []
# If false, no | module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pydealer', u'PyDealer Documentation',
[u'Alex Crawford'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyDealer', u'Py |
gegenschall/cython-ldns | tools/stripcomments.py | Python | bsd-3-clause | 244 | 0.004098 | import fileinput |
for line in fileinput.input():
_line = line.strip()
# super dumb...
if _line.startswith( | '//') or _line.startswith('/*') or _line.startswith('*') or _line.startswith('*/'):
continue
print line.rstrip()
|
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/tests/01_tests_dynamic_rate/01_test_dynamic_rate_oneagent_onesensor_ExampleCell.py | Python | mit | 852 | 0.051643 | #/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
#/###################/#
# Build the model
#
#Simulation time
BrianingDebugVariable=25.
#A - transition matrix
JacobianTimeFloat = 30. #(ms)
A = (-1./float(JacobianTimeFloat)
)*SYS.numpy.array([[1.]])
#Define
MyPredicter=SYS.PredicterClass(
).predict(
_DynamicBool=True,
_JacobianVariable=A,
_CommandVariable="#custom:#clock:25*ms:0.5*mV*(int(t==25*ms)+int(t==50*ms))",
_DecoderVariable = [2.],
_InteractionStr="Rate"
).simulate(
100.
)
#/###################/#
# | View
#
MyPredicter.mapSet(
{
'PyplotingFigureVariable':{
'figsize':(10,8)
},
'PyplotingGridVariable':(30,30)
}
).view(
).pyplot(
).show(
)
#/###################/#
# Print
#
#Definition the AttestedStr
| print('MyPredicter is ')
SYS._print(MyPredicter)
|
raghakot/keras-vis | vis/grad_modifiers.py | Python | mit | 1,247 | 0 | from __future__ import absolute_import
import numpy as np
from keras import backend as K
from .utils import utils
def negate(grads):
| """Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Ret | urns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
grads[grads < 0.] = 0.
return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The modified gradients that highlight small values.
"""
return absolute(invert(grads))
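# Example (illustrative): small_values(np.array([-2.0, 0.001])) is roughly
# [0.5, 1000.0]; |1 / (g + eps)| grows as |g| shrinks, so weak gradients
# stand out.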
def get(identifier):
return utils.get_identifier(identifier, globals(), __name__)
|
rapydo/do | controller/commands/volatile.py | Python | mit | 802 | 0.001247 | """
[DEPRECATED] Run a single container in debug mode
"" | "
import typer
from controller import print_and_exit
from controller.app import Application
# Deprecated since 2.1
@Application.app.command(help="Replaced by run --debug command")
def volatile(
service: str = typer.Argument(
...,
help="Service name",
shell_complete=Application.autocomplete_allservic | e,
),
command: str = typer.Argument(
"bash", help="UNIX command to be executed on selected running service"
),
user: str = typer.Option(
None,
"--user",
"-u",
help="User existing in selected service",
show_default=False,
),
) -> None:
# Deprecated since 2.1
print_and_exit("Volatile command is replaced by rapydo run --debug {}", service)
|
acopar/crow | crow/crow/transfer/mocktransfer.py | Python | gpl-3.0 | 150 | 0.013333 | def | empty_reduce(rank, device_list, output, source=0):
pass
def empty_sync_matrix(rank, device_list, output, source=0, collect= | False):
pass
|
DmitryTsybin/Study | Coursera/Algorithmic_Thinking/Project_1/Project_1_Degree_distributions_for_graphs.py | Python | mit | 2,805 | 0.002496 | import random
"""Define const graphs"""
EX_GRAPH0 = {0: set([1, 2]), 1: set([]), 2: set([])}
EX_GRAPH1 = {
0: set([1, 4, 5]),
1: set([2, 6]),
2: set([3]),
3: set([0]),
4: set([1]),
5: set([2]),
6: set([])
}
EX_GRAPH2 = {
0: set([1, 4, 5]),
1: set([2, 6]),
2: set([3, 7]),
3: set([7]),
4: set([1]),
5: set([2]),
6: set([]),
7: set([3]),
8: set([1, 2]),
9: set([0, 3, 4, 5, 6, 7])
}
def make_complete_graph(num_nodes):
"""Takes the number of nodes num_nodes and returns a dictionary
corresponding to a complete directed graph with the specified number
of nodes."""
graph = {}
for node in range(0, num_nodes):
values = range(0, num_nodes)
values.remove(node)
graph[node] = set(values)
return graph
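# e.g. make_complete_graph(3) returns
# {0: set([1, 2]), 1: set([0, 2]), 2: set([0, 1])}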
def make_probability_directional_graph(num_nodes, probability):
graph = {}
for node in range (0, num_nodes):
values = set()
possible_values = range(0, num_nodes)
possible_values.remove(node)
for value in possible_values:
if random.random() <= probability:
values.add(value)
graph[node] = set(values)
return graph
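# e.g. make_probability_directional_graph(10, 0.5) keeps each of the
# 10 * 9 = 90 possible directed edges independently with probability 0.5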
def compute_in_degrees(digraph):
"""Takes a directed graph digraph (represented as a dictionary) and
computes the in-degrees for the nodes in the graph."""
degrees = {}
for looked_node in digraph:
degree = 0
for node in digraph:
if node != looked_node:
if looked_node in digraph[node]:
degree += 1
degrees[looked_node] = degree
return degrees
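# e.g. compute_in_degrees(EX_GRAPH0) == {0: 0, 1: 1, 2: 1}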
def in_degree_distribution(digraph):
"""Takes a directed graph digraph (represented as a dictionary) and
computes the unnormalized distribution of the in-degrees of the graph."""
distribution = {}
degrees = compute_in_degrees(digraph)
for degree in range(0, len(digraph)):
count = 0
for node in degrees:
if degrees[node] == degree:
count += 1
if count != 0:
distribution[degree] = count
return distribution
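# e.g. in_degree_distribution(EX_GRAPH0) == {0: 1, 1: 2}
# (one node with in-degree 0, two nodes with in-degree 1)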
def normalized_in_degree_distribution(digraph):
distribution = in_degree_distribution(digraph)
normalized_distribution = {}
divisor = 0
for value in distri | bution:
divisor += distribution[value]
for value in distribution:
| normalized_distribution[value] = distribution[value] / float(divisor)
return normalized_distribution
#print in_degree_distribution(EX_GRAPH0)
#print normalized_in_degree_distribution(EX_GRAPH0)
#print in_degree_distribution(EX_GRAPH1)
#print normalized_in_degree_distribution(EX_GRAPH1)
#print in_degree_distribution(EX_GRAPH2)
#print normalized_in_degree_distribution(EX_GRAPH2)
|
jemandez/creaturas-magicas | Configuraciones básicas/scripts/addons/blendertools-1.0.0/maketarget/export_mh_obj.py | Python | gpl-3.0 | 8,237 | 0.004856 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** http://code.google.com/p/makehuman/
**Authors:** Thomas Larsson
**Copyright(c):** MakeHuman Team 2001-2014
**Licensing:** AGPL3 (see also http://www.makehuman.org/node/318)
**Coding Standards:** See http://www.makehuman.org/node/165
Abstract
--------
Custom obj exporter for MakeHuman Maketarget
"""
import bpy, os, mathutils
import math
from mathutils import *
from . import maketarget
#
# GroupZOrderSuffix
# Determines the Z-order of face groups.
# Faces with lower Z-order are exported first
# Only looks at the last part of the group name
# Groups not listed have Z-order 0.
# Faces within a given Z-order are exported with lower face number first.
# Preferably add new groups with higher Z-order, to not ruin numbering
# of existing faces. It is not a disaster to change face numbers, but
# it does require some changes in MHX export.
#
GroupZOrderSuffix = {
1: ["lash","eyebrown","cornea"],
2: ["tights","skirt"]
}
#
# When materials represent face groups, we must figure out the real materials
# in some other way. Use this dict.
# If the group name contains the key string, assign it to the value material.
# If not, material is "skin".
#
# Ignored when object has real materials.
#
GroupMaterials = {
"nail" : "nail",
"eye-ball" : "eye",
"teeth" : "teeth",
"cornea" : "cornea",
"joint" : "joint",
"skirt" : "joint",
"tights" : "joint",
}
# Minimal distance for merging tex verts
Epsilon = 1e-4
#
# exportObjFile(path, groupsAsMaterials, context):
#
def exportObjFile(path, groupsAsMaterials, context):
global BMeshAware
ob = context.object
me = ob.data
if (not me) or (len(me.materials) < 2):
raise MHError("Mesh must have materials")
try:
faces = me.polygons
BMeshAware = True
print("Using BMesh")
except:
faces = me.faces
BMeshAware = False
print("Not using BMesh")
orderedFaces = zOrderFaces(me, faces)
(name,ext) = os.path.splitext(path)
if ext.lower() != ".obj":
path = path + ".obj"
fp = open(path, "w", encoding="utf-8", newline="\n")
scn = context.scene
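    # Blender is Z-up while Wavefront OBJ is Y-up: positions and normals are
    # written as (x, z, -y) below to convert between the two conventions.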
for v in me.vertices:
fp.write("v %.4f %.4f %.4f\n" % (v.co[0], v.co[2], -v.co[1]))
for v in me.vertices:
fp.write("vn %.4f %.4f %.4f\n" % (v.normal[0], v.normal[2], -v.normal[1]))
info = (-2, None)
if me.uv_textures:
(uvFaceVerts, texVerts, nTexVerts) = setupTexVerts(me, faces)
for vtn in range(nTexVerts):
vt = texVerts[vtn]
fp.write("vt %.4f %.4f\n" % (vt[0], vt[1]))
for f in orderedFaces:
info = writeNewGroup(fp, f,info, me, ob, groupsAsMaterials)
uvVerts = uvFaceVerts[f.index]
faceLine = []
for n,v in enumerate(f.vertices):
(vt, uv) = uvVerts[n]
faceLine.append("%d/%d" % (v+1, vt+1))
fp.write("f "+ (" ".join(faceLine)) +"\n")
else:
for f in orderedFaces:
info = writeNewGroup(fp, f, info, me, ob, groupsAsMaterials)
fp.write("f ")
faceLine = []
for vn in f.vertices:
faceLine.append("%d" % (vn+1))
fp.write("f "+ (" ".join(faceLine)) +"\n")
fp.close()
print("%s written" % path)
return
def writeNewGroup(fp, f, info, me, ob, groupsAsMaterials):
(gnum, mname) = info
if groupsAsMaterials:
if f.material_index != gnum:
gnum = f.material_index
gname = me.materials[gnum].name
mname1 = "skin"
for key in GroupMaterials.keys():
if key in gname:
mname1 = GroupMaterials[key]
break
if mname != mname1:
mname = mname1
fp.write("usemtl %s\n" % mname)
fp.write("g %s\n" % gname)
info = (gnum, mname)
else:
nhits = {}
for vn in f.vertices:
v = me.vertices[vn]
for grp in v.groups:
try:
nhits[grp.group] += 1
except:
nhits[grp.group] = 1
gn = -1
nverts = len(f.vertices)
for (gn1,n) in nhits.items():
if n == nverts:
gn = gn1
break
if gn != gnum:
mat = me.materials[f.material_index]
if mname != mat.name:
mname = mat.name
#fp.write("usemtl %s\n" % mname)
gnum = gn
if gnum < 0:
fp.write("g body\n")
else:
for vgrp in ob.vertex_groups:
if vgrp.index == gnum:
fp.write("g %s\n" % vgrp.name)
break
info = (gnum, mname)
return info
#
# zOrderFaces(me, faces):
#
def zOrderFaces(me, faces):
zGroupFaces = {}
zGroupFaces[0] = []
for n in GroupZOrderSuffix.keys():
zGroupFaces[n] = []
for f in faces:
group = me.materials[f.material_index].name
suffix = group.split("-")[-1]
zgroup = zGroupFaces[0]
for (prio,suffices) in GroupZOrderSuffix.items():
if suffix in suffices:
zgroup = zGroupFaces[prio]
break
zgroup.append(f)
zlist = list(zGroupFaces.items())
zlist.sort()
zfaces = []
for (key, zflist) in zlist:
zfaces += zflist
return zfaces
#
# setupTexVerts(me, faces):
#
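# Builds vertex/edge/face adjacency maps, then assigns every face corner a
# texture-vertex index, merging UV coordinates that fall within Epsilon of a
# neighboring face's corner; returns (uvFaceVerts, texVerts, tex vert count).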
def setupTexVerts(me, faces):
global BMeshAware
vertEdges = {}
vertFaces = {}
for v in me.vertices:
vertEdges[v.index] = []
vertFaces[v.index] = []
for e in me.edges:
for vn in e.vertices:
vertEdges[vn].append(e)
for f in faces:
for vn in f.vertices:
vertFaces[vn].append(f)
edgeFaces = {}
for e in me.edges:
edgeFaces[e.index] = []
faceEdges = {}
for f in faces:
faceEdges[f.index] = []
for f in faces:
for vn in f.vertices:
for e in vertEdges[vn]:
v0 = e.vertices[0]
v1 = e.vertices[1]
if (v0 in f.vertices) and (v1 in f.vertices):
if f not in edgeFaces[e.index]:
edgeFaces[e.index].append(f)
if e not in faceEdges[f.index]:
faceEdges[f.index].append(e)
faceNeighbors = {}
uvFaceVerts = {}
| for f in faces:
faceNeighbors[f.index] = []
uvFaceVerts[f.index] = []
for f in faces:
for e i | n faceEdges[f.index]:
for f1 in edgeFaces[e.index]:
if f1 != f:
faceNeighbors[f.index].append((e,f1))
vtn = 0
texVerts = {}
if BMeshAware:
uvloop = me.uv_layers[0]
n = 0
for f in faces:
for vn in f.vertices:
vtn = findTexVert(uvloop.data[n].uv, vtn, f, faceNeighbors, uvFaceVerts, texVerts)
n += 1
else:
uvtex = me.uv_textures[0]
for f in faces:
uvf = uvtex.data[f.index]
vtn = findTexVert(uvf.uv1, vtn, f, faceNeighbors, uvFaceVerts, texVerts)
vtn = findTexVert(uvf.uv2, vtn, f, faceNeighbors, uvFaceVerts, texVerts)
vtn = findTexVert(uvf.uv3, vtn, f, faceNeighbors, uvFaceVerts, texVerts)
if len(f.vertices) > 3:
vtn = findTexVert(uvf.uv4, vtn, f, faceNeighbors, uvFaceVerts, texVerts)
return (uvFaceVerts, texVerts, vtn)
#
# findTexVert(uv, vtn, f, faceNeighbors, uvFaceVerts, texVerts):
#
def findTexVert(uv, vtn, f, faceNeighbors, uvFaceVerts, texVerts):
for (e,f1) in faceNeighbors[f.index]:
for (vtn1,uv1) in uvFaceVerts[f1.index]:
vec = uv - uv1
if vec.length < Epsilon:
uvFaceVerts[f.index].append((vtn1,uv))
return vtn
uvFaceVerts[f.index].app |
hpcleuven/easybuild-easyblocks | easybuild/easyblocks/o/openfoam.py | Python | gpl-2.0 | 18,326 | 0.003929 | ##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
@author: Ward Poelmans (Ghent University)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import glob
import os
import re
import shutil
import stat
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_OpenFOAM(EasyBlock):
"""Support for building and installing OpenFOAM."""
def __init__(self, *args, **kwargs):
"""Specify that OpenFOAM should be built in install dir."""
super(EB_OpenFOAM, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wm_compiler = None
self.wm_mplib = None
self.openfoamdir = None
self.thrdpartydir = None
if 'extend' in self.name.lower():
if LooseVersion(self.version) >= LooseVersion('3.0'):
self.openfoamdir = 'foam-extend-%s' % self.version
else:
self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
else:
self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
self.log.debug("openfoamdir: %s" % self.openfoamdir)
def extract_step(self):
"""Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
super(EB_OpenFOAM, self).extract_step()
# make sure that the expected subdir is really there after extracting
# if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
if not os.path.exists(openfoam_installdir):
self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
try:
contents_installdir = os.listdir(self.installdir)
# it's one directory but has a wrong name
if len(contents_installdir) == 1 and os.path.isdir(os.path.join(self.installdir, contents_installdir[0])):
source = os.path.join(self.installdir, contents_installdir[0])
target = os.path.join(self.installdir, self.openfoamdir)
self.log.debug("Renaming %s to %s", source, target)
os.rename(source, target)
else:
mkdir(openfoam_installdir)
for fil in contents_installdir:
if fil != self.openfoamdir:
source = os.path.join(self.installdir, fil)
target = os.path.join(openfoam_installdir, fil)
self.log.debug("Moving %s to %s", source, target)
shutil.move(source, target)
os.chdir(openfoam_installdir)
except OSError, err:
raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)
def patch_step(self):
"""Adjust start directory and start path for patching to correct directory."""
self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir)
super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir'])
def configure_step(self):
"""Configure OpenFOAM build by setting appropriate environment variables."""
# compiler & compiler flags
comp_fam = self.toolchain.comp_family()
extra_flags = ''
if comp_fam == toolchain.GCC: # @UndefinedVariable
self.wm_compiler = 'Gcc'
if get_software_version('GCC') >= LooseVersion('4.8'):
# make sure non-gold version of ld is used, since OpenFOAM requires it
# see http://www.openfoam.org/mantisbt/view.php?id=685
extra_flags = '-fuse-ld=bfd'
# older versions of OpenFOAM-Extend require -fpermissive
if 'extend' in self.name.lower() and LooseVersion(self.version) < LooseVersion('2.0'):
extra_flags += ' -fpermissive'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
self.wm_compiler = 'Icc'
# make sure -no-prec-div is used with Intel compilers
extra_flags = '-no-prec-div'
else:
raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")
for env_var in ['CFLAGS', 'CXXFLAGS']:
env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))
# patch out hardcoding of WM_* environment variables
# for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
self.log.debug("Patching out hardcoded $WM_* env va | rs in %s", script)
# disable any third party stuff, we use EB controlled builds
regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
WM_env_var = ['WM_COMPILER', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
WM_env_var.append('WM_LABEL_SIZE') |
for env_var in WM_env_var:
regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
r": ${\g<var>:=\g<val>}; export \g<var>"))
apply_regex_substitutions(script, regex_subs)
# inject compiler variables into wmake/rules files
ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
langs = ['c', 'c++']
suffixes = ['', 'Opt']
wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]
mpicc = os.environ['MPICC']
mpicxx = os.environ['MPICXX']
cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])
if self.toolchain.mpi_family() == toolchain.OPENMPI:
# no -cc/-cxx flags supported in OpenMPI compiler wrappers
c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
else:
# -cc/-cxx should work for all MPICH-based MPIs (inclu |
texuf/myantname | main.py | Python | mit | 869 | 0.004603 | from app import app
import argparse
import os
import routes
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run the MightySpring backend server.')
parser.add_argument('--debug',
'-d',
default=True)
parser.add_argument('--port',
'-p',
nargs='?',
default=int(os.environ. | get('PORT', 5000)),
type=int)
parser.add_argument('--bind-address',
'-b',
nargs='?',
default=u'0.0.0.0',
type=unicode)
args = parser.parse_args()
debug = args.debug
por | t = args.port
bind_address = args.bind_address
app.run(host=bind_address, port=port, debug=debug) |
geelweb/geelweb-django-contactform | tests/views.py | Python | mit | 202 | 0.009901 | from djan | go.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse('Page content')
def custom(request):
retu | rn render(request, 'custom.html', {})
|
wulczer/ansible | v2/ansible/plugins/lookup/inventory_hostnames.py | Python | gpl-3.0 | 1,756 | 0.003417 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gn | u.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
| ret.extend(term)
else:
ret.append(term)
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if 'runner' in kwargs:
self.host_list = kwargs['runner'].inventory.host_list
else:
raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_inventory_hostnames expects a list")
return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
|
anish/buildbot | master/buildbot/util/sautils.py | Python | gpl-2.0 | 2,925 | 0.000342 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from contextlib import contextmanager
import sqlalchemy as sa
from sqlalchemy.ext import compiler
from sqlalchemy.sql.expression import ClauseElement
from sqlalchemy.sql.expression import Executable
# from http:
# | //www.sqlalchemy.org/docs/core/compiler.html#compiling-sub-elements-of-a-custom-expression-construct # noqa pylint: | disable=line-too-long
# _execution_options per
# http://docs.sqlalchemy.org/en/rel_0_7/core/compiler.html#enabling-compiled-autocommit
# (UpdateBase requires sqlalchemy 0.7.0)
class InsertFromSelect(Executable, ClauseElement):
_execution_options = \
Executable._execution_options.union({'autocommit': True})
def __init__(self, table, select):
self.table = table
self.select = select
@compiler.compiles(InsertFromSelect)
def _visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO {} {}".format(compiler.process(element.table, asfrom=True),
compiler.process(element.select))
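# Usage sketch (table objects invented for illustration): the construct
# compiles to a single INSERT ... SELECT statement, e.g.
#   stmt = InsertFromSelect(new_tbl, sa.select([old_tbl.c.id, old_tbl.c.name]))
#   conn.execute(stmt)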
def sa_version():
if hasattr(sa, '__version__'):
def tryint(s):
try:
return int(s)
except (ValueError, TypeError):
return -1
return tuple(map(tryint, sa.__version__.split('.')))
return (0, 0, 0) # "it's old"
def Table(*args, **kwargs):
"""Wrap table creation to add any necessary dialect-specific options"""
# work around the case where a database was created for us with
# a non-utf8 character set (mysql's default)
kwargs['mysql_character_set'] = 'utf8'
return sa.Table(*args, **kwargs)
@contextmanager
def withoutSqliteForeignKeys(engine, connection=None):
conn = connection
if engine.dialect.name == 'sqlite':
if conn is None:
conn = engine.connect()
# This context is not re-entrant. Ensure it.
assert not getattr(engine, 'fk_disabled', False)
engine.fk_disabled = True
conn.execute('pragma foreign_keys=OFF')
try:
yield
finally:
if engine.dialect.name == 'sqlite':
engine.fk_disabled = False
conn.execute('pragma foreign_keys=ON')
if connection is None:
conn.close()
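# Usage sketch (engine and DDL invented for illustration): wrap schema
# surgery that would otherwise trip SQLite's foreign-key enforcement:
#   with withoutSqliteForeignKeys(engine):
#       old_table.drop(engine)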
|
kgadek/evogil | statistic/stats_bootstrap.py | Python | gpl-3.0 | 4,906 | 0.003474 | import random
from math import sqrt
import numpy
def validate_cost(result, boot_size, delta=500):
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result, boot_size, delta):
return result
prev_budget = budget
return None
def average(xs):
if len(xs) == 0:
return -float("inf")
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[random.randint(0, n)] for _ in range(k)]
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(popula | tion, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
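# Usage sketch (sample data invented for illustration): a ~95% bootstrap
# confidence interval for the mean of 200 noisy observations:
#   data = [random.gauss(100.0, 15.0) for _ in range(200)]
#   ci = bootstrap(data, average, 1000, len(data), 0.025)
#   print("%.0f%% CI: %.2f .. %.2f (point estimate %.2f)"
#         % (ci["confidence"], ci["from"], ci["to"], ci["metrics"]))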
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
| upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
# noinspection PyRedeclaration
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn |
mbolivar/zephyr | scripts/parse_syscalls.py | Python | apache-2.0 | 4,506 | 0.000444 | #!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import sys
import re
import argparse
import os
import json
api_regex = re.compile(r'''
__syscall\s+ # __syscall attribute, must be first
([^(]+) # type and name of system call (split later)
[(] # Function opening parenthesis
([^)]*) # Arg list (split later)
[)] # Closing parenthesis
''', re.MULTILINE | re.VERBOSE)
typename_regex = re.compile(r'(.*?)([A-Za-z0-9_]+)$')
class SyscallParseException(Exception):
pass
def typename_split(item):
if "[" in item:
raise SyscallParseException(
"Please pass arrays to syscalls as pointers, unable to process '%s'" %
item)
if "(" in item:
raise SyscallParseException(
"Please use typedefs for function pointers")
mo = typename_regex.match(item)
if not mo:
raise SyscallParseException("Malformed system call invocation")
m = mo.groups()
return (m[0].strip(), m[1])
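# Illustrative examples (not in the original script) of how the regex
# splits a C declarator into (type, name):
#   typename_split("struct k_sem *sem")  ->  ('struct k_sem *', 'sem')
#   typename_split("u32_t timeout")      ->  ('u32_t', 'timeout')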
def analyze_fn(match_group, fn):
func, args = match_group
try:
if args == "void":
args = []
else:
args = [typename_split(a.strip()) for a in args.split(",")]
func_type, func_name = typename_split(func)
except SyscallParseException:
sys.stderr.write("In declaration of %s\n" % func)
raise
sys_id = "K_SYSCALL_" + func_name.upper()
if func_type == "void":
suffix = "_VOID"
is_void = True
else:
is_void = False
if func_type in ["s64_t", "u64_t"]:
suffix = "_RET64"
else:
suffix = ""
is_void = (func_type == "void")
# Get the proper system call macro invocation, which depends on the
# number of arguments, the return type, and whether the implementation
# is an inline function
macro = "K_SYSCALL_DECLARE%d%s" % (len(args), suffix)
# Flatten the argument lists and generate a comma separated list
# of t0, p0, t1, p1, ... tN, pN as expected by the macros
flat_args = [i for sublist in args for i in sublist]
if not is_void:
flat_args = [func_type] + fl | at_args
flat_args = [sys_id, func_name] + flat_args
argslist = ", ".join(flat_args)
invocation = "%s(%s);" % (macro, argslist)
handler = "_handler_" + func_name
# Entry in _k_syscall_table
table_entry = "[%s] = %s" % (sys_id, handler)
return (fn, handler, invocation, sys_id, table_entry)
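# Illustrative example (input invented, output derived from the logic
# above): a two-argument int syscall such as
#   analyze_fn(('int k_sem_take', 'struct k_sem *sem, s32_t timeout'), 'kernel.h')
# produces the invocation
#   'K_SYSCALL_DECLARE2(K_SYSCALL_K_SEM_TAKE, k_sem_take, int, struct k_sem *, sem, s32_t, timeout);'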
def analyze_headers(base_path):
ret = []
for root, dirs, files in os.walk(base_path):
for fn in files:
# toolchain/common.h has the definition of __syscal | l which we
# don't want to trip over
path = os.path.join(root, fn)
if not fn.endswith(".h") or path.endswith(os.path.join(os.sep, 'toolchain', 'common.h')):
continue
with open(path, "r", encoding="utf-8") as fp:
try:
result = [analyze_fn(mo.groups(), fn)
for mo in api_regex.finditer(fp.read())]
except Exception:
sys.stderr.write("While parsing %s\n" % fn)
raise
ret.extend(result)
return ret
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--include", required=True,
help="Base include directory")
parser.add_argument(
"-j", "--json-file", required=True,
help="Write system call prototype information as json to file")
args = parser.parse_args()
def main():
parse_args()
syscalls = analyze_headers(args.include)
syscalls_in_json = json.dumps(
syscalls,
indent=4,
sort_keys=True
)
# Check if the file already exists, and if there are no changes,
# don't touch it since that will force an incremental rebuild
path = args.json_file
new = syscalls_in_json
if os.path.exists(path):
with open(path, 'r') as fp:
old = fp.read()
if new != old:
with open(path, 'w') as fp:
fp.write(new)
else:
with open(path, 'w') as fp:
fp.write(new)
if __name__ == "__main__":
main()
|
ffsdmad/af-web | cgi-bin/plugins2/doc_view_list.py | Python | gpl-3.0 | 444 | 0.010336 | # -*- coding: utf8 -*-
SQL = """select SQL_CALC_FOUND_ROWS * FROM doc_view order by `name` asc limit %(offset)d,%(limit)d ;"""
FOUND_ROWS = True
ROOT = "doc_view_list"
ROOT_PREFIX = "<doc_view_edit />"
ROOT_POSTFIX= None
XSL_TEMPLATE = "data/af-w | eb.xsl"
EVENT = None
WHERE = ()
PARAM = None
TITLE="Список видов документов"
MESSAGE="ошибка получен | ия списка видов документов"
ORDER = None
|
hammerlab/immuno | test/test_mhc_formats.py | Python | apache-2.0 | 2,503 | 0.002397 | from immuno.mhc_formats import parse_netmhc_stdout
from immuno.peptide_binding_measure import (
IC50_FIELD_NAME,
PERCENTILE_RANK_FIELD_NAME,
)
def test_mhc_stdout():
s = """
# Affinity Threshold for Strong binding peptides 50.000',
# Affinity Threshold for Weak binding peptides 500.000',
# Rank Threshold for Strong binding peptides 0.500',
# Rank Threshold for Weak binding peptides 2.000',
----------------------------------------------------------------------------
pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
----------------------------------------------------------------------------
0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00
1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00
2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00
3 HLA-A*02:03 QQYFPEITH id0 0.041 | 32176.84 50.00
4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00
5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00
| 6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00
7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00
8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00
9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00
10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00
11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB
"""
class MutationEntry(object):
pass
mutation_entry = MutationEntry()
mutation_entry.SourceSequence = "QQQQQYFPEITHIIASSSL"
mutation_entry.MutationStart = 2
mutation_entry.MutationEnd = 3
mutation_entry.GeneInfo = "TP53 missense"
mutation_entry.Gene = "TP53"
mutation_entry.GeneMutationInfo = "g.2 some mutation info"
mutation_entry.PeptideMutationInfo = "p.2 T>Q"
mutation_entry.TranscriptId = "TID0"
mutation_entry.chr = 'X'
mutation_entry.pos = 39393
mutation_entry.ref = 'A'
mutation_entry.alt = 'T'
peptide_entries = {"id0": mutation_entry}
rows = parse_netmhc_stdout(s, peptide_entries)
assert len(rows) == 12
for i in xrange(len(rows)):
assert rows[i]['EpitopeStart'] == i
assert rows[i]['Allele'] == 'HLA-A*02:03'
assert rows[0][IC50_FIELD_NAME] == 38534.25
assert rows[0][PERCENTILE_RANK_FIELD_NAME] == 50.00 |
ResearchSoftwareInstitute/MyHPOM | myhpom/migrations/0017_ad_thumbnail.py | Python | bsd-3-clause | 527 | 0.001898 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db imp | ort migrations, models
class Migration(migrati | ons.Migration):
dependencies = [
('myhpom', '0015_userdetails_updated'),
]
operations = [
migrations.AddField(
model_name='advancedirective',
name='thumbnail',
field=models.FileField(help_text=b"The first-page thumbnail image of the user's Advance Directive.", null=True, upload_to=b'myhpom/advance_directives'),
),
]
|
CeltonMcGrath/TACTIC | src/tactic/ui/widget/scrollbar_wdg.py | Python | epl-1.0 | 4,809 | 0.004367 | ###########################################################
#
# Copyright (c) 2014, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclos | ed in any way without written permission.
#
#
#
__all__ = ['ScrollbarWdg', 'TestScrollbarWdg']
from tactic.ui.common import BaseRefreshWdg
from pyasm.web import DivWdg
class TestScrollbarWdg(BaseRefreshWdg):
|
def get_display(my):
top = my.top
top.add_style("width: 600px")
top.add_style("height: 400px")
return top
class ScrollbarWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
top.add_class("spt_scrollbar_top")
content = my.kwargs.get("content")
content_class = my.kwargs.get("content_class")
if not content_class:
content_class = "spt_content"
width = 8
top.add_style("width: %s" % width)
top.add_style("position: absolute")
top.add_style("top: 0px")
top.add_style("right: 0px")
top.add_color("background", "background")
top.add_style("margin: 3px 5px")
top.add_style("opacity: 0.0")
top.add_behavior( {
'type': 'load',
'cbjs_action': my.get_onload_js()
} )
top.add_behavior( {
'type': 'load',
'content_class': content_class,
'cbjs_action': '''
var parent = bvr.src_el.getParent("." + bvr.content_class);
var size = parent.getSize();
bvr.src_el.setStyle("height", size.y);
var scrollbar = parent.getElement(".spt_scrollbar_top");
parent.addEvent("mouseenter", function() {
new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 1.0);
} );
parent.addEvent("mouseleave", function() {
new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 0.0);
} );
parent.addEvent("keypress", function(evt) {
new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 0.0);
console.log(evt);
} );
parent.addEvent("mousewheel", function(evt) {
evt.stopPropagation();
spt.scrollbar.content = parent;
if (evt.wheel == 1) {
spt.scrollbar.scroll(15)
}
else {
spt.scrollbar.scroll(-15)
}
} );
'''
} )
bar = DivWdg()
bar.add_class("spt_scrollbar")
bar.add_class("hand")
top.add(bar)
bar.add_style("width: %s" % width)
bar.add_style("height: 30px")
bar.add_style("border: solid 1px black")
bar.add_color("background", "background3")
#bar.add_border()
bar.add_style("border-radius: 5")
bar.add_style("position: absolute")
bar.add_style("top: 0px")
top.add_behavior( {
'type': 'smart_drag',
'bvr_match_class': 'spt_scrollbar',
'ignore_default_motion' : True,
"cbjs_setup": 'spt.scrollbar.drag_setup( evt, bvr, mouse_411 );',
"cbjs_motion": 'spt.scrollbar.drag_motion( evt, bvr, mouse_411 );'
} )
return top
def get_onload_js(my):
return r'''
spt.scrollbar = {};
spt.scrollbar.mouse_start_y = null;
spt.scrollbar.el_start_y = null;
spt.scrollbar.top = null;
spt.scrollbar.content = null;
spt.scrollbar.drag_setup = function(evt, bvr, mouse_411) {
spt.scrollbar.mouse_start_y = mouse_411.curr_y;
var src_el = spt.behavior.get_bvr_src( bvr );
var pos_y = parseInt(src_el.getStyle("top").replace("px", ""));
spt.scrollbar.el_start_y = pos_y;
spt.scrollbar.content = $("spt_SCROLL");
spt.scrollbar.top = src_el.getParent(".spt_scrollbar_top")
}
spt.scrollbar.drag_motion = function(evt, bvr, mouse_411) {
var src_el = spt.behavior.get_bvr_src( bvr );
var dy = mouse_411.curr_y - spt.scrollbar.mouse_start_y;
var pos_y = spt.scrollbar.el_start_y + dy;
if (pos_y < 0) {
return;
}
var content = spt.scrollbar.content;
var content_size = spt.scrollbar.content.getSize();
var top_size = spt.scrollbar.top.getSize();
var bar_size = src_el.getSize();
if (pos_y > top_size.y - bar_size.y - 5) {
return;
}
bvr.src_el.setStyle("top", pos_y);
//var content = bvr.src_el.getParent(".spt_content");
content.setStyle("margin-top", -dy);
}
spt.scrollbar.scroll = function(dy) {
spt.scrollbar.content = $("spt_SCROLL");
var content = spt.scrollbar.content;
var pos_y = parseInt(content.getStyle("margin-top").replace("px", ""));
content.setStyle("margin-top", pos_y + dy);
}
'''
|
qk4l/Flexget | flexget/plugins/parsers/parser_internal.py | Python | mit | 1,607 | 0.001867 | fr | om __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import time
| from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
from flexget.utils.titles.movie import MovieParser
from flexget.utils.titles.series import SeriesParser
from .parser_common import ParseWarning
log = logging.getLogger('parser_internal')
class ParserInternal(object):
# movie_parser API
@plugin.priority(1)
def parse_movie(self, data, **kwargs):
log.debug('Parsing movie: `%s` kwargs: %s', data, kwargs)
start = time.clock()
parser = MovieParser()
try:
parser.parse(data)
except ParseWarning as pw:
log_once(pw.value, logger=log)
end = time.clock()
log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
return parser
# series_parser API
@plugin.priority(1)
def parse_series(self, data, **kwargs):
log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
start = time.clock()
parser = SeriesParser(**kwargs)
try:
parser.parse(data)
except ParseWarning as pw:
log_once(pw.value, logger=log)
end = time.clock()
log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
return parser
@event('plugin.register')
def register_plugin():
plugin.register(ParserInternal, 'parser_internal', interfaces=['movie_parser', 'series_parser'], api_ver=2)
|
morenopc/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | Python | agpl-3.0 | 10,461 | 0.002868 | """
Methods for exporting course data to XML
"""
import logging
import lxml.etree
from xblock.fields import Scope
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import Location
from xmodule.modulestore.inheritance import own_metadata
from fs.osfs import OSFS
from json import dumps
import json
import datetime
import os
from path import path
import shutil
DRAFT_DIR = "drafts"
PUBLISHED_DIR = "published"
EXPORT_VERSION_FILE = "format.json"
EXPORT_VERSION_KEY = "export_format"
DEFAULT_CONTENT_FIELDS = ['metadata', 'data']
class EdxJSONEncoder(json.JSONEncoder):
"""
Custom JSONEncoder that handles `Location` and `datetime.datetime` objects.
`Location`s are encoded as their url string form, and `datetime`s as
ISO date strings
"""
def default(self, obj):
if isinstance(obj, Location):
return obj.to_deprecated_string()
elif isinstance(obj, datetime.datetime):
if obj.tzinfo is not None:
if obj.utcoffset() is None:
return obj.isoformat() + 'Z'
else:
return obj.isoformat()
else:
return obj.isoformat()
else:
return super(EdxJSONEncoder, self).default(obj)
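# Illustrative example (not in the original module):
#   json.dumps({'start': datetime.datetime(2014, 1, 1)}, cls=EdxJSONEncoder)
# returns '{"start": "2014-01-01T00:00:00"}'; naive datetimes take the
# plain isoformat() branch above.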
def export_to_xml(modulestore, contentstore, course_key, root_dir, course_dir, draft_modulestore=None):
"""
Export all modules from `modulestore` and content from `contentstore` as xml to `root_dir`.
`modulestore`: A `ModuleStore` object that is the source of the modules to export
`contentstore`: A `ContentStore` object that is the source of the content to export, can be None
`course_key`: The `CourseKey` of the `CourseModuleDescriptor` to export
`root_dir`: The directory to write the exported xml to
`course_dir`: The name of the directory inside `root_dir` to write the course content to
`draft_modulestore`: An optional `DraftModuleStore` that contains draft content, which will be exported
alongside the public content in the course.
"""
course = modulestore.get_course(course_key)
fsm = OSFS(root_dir)
export_fs = course.runtime.export_fs = fsm.makeopendir(course_dir)
root = lxml.etree.Element('unknown')
course.add_xml_to_node(root)
with export_fs.open('course.xml', 'w') as course_xml:
lxml.etree.ElementTree(root).write(course_xml)
# export the static assets
policies_dir = export_fs.makeopendir('policies')
if contentstore:
contentstore.export_all_for_course(
course_key,
root_dir + '/' + course_dir + '/static/',
root_dir + '/' + course_dir + '/policies/assets.json',
)
# If we are using the default course image, export it to the
# legacy location to support backwards compatibility.
if course.course_image == course.fields['course_image'].default:
try:
course_image = contentstore.find(
StaticContent.compute_location(
course.id,
course.course_image
),
)
except NotFoundError:
pass
else:
output_dir = root_dir + '/' + course_dir + '/static/images/'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with OSFS(output_dir).open('course_image.jpg', 'wb') as course_image_file:
course_image_file.write(course_image.data)
# export the static tabs
export_extra_content(export_fs, modulestore, course_key, 'static_tab', 'tabs', '.html')
# export the custom tags
export_extra_content(export_fs, modulestore, course_key, 'custom_tag_template', 'custom_tags')
# export the course updates
export_extra_content(export_fs, modulestore, course_key, 'course_info', 'info', '.html')
# export the 'about' data (e.g. overview, etc.)
export_extra_content(export_fs, modulestore, course_key, 'about', 'about', '.html')
# export the grading policy
course_run_policy_dir = policies_dir.makeopendir(course.location.name)
with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
grading_policy.write(dumps(course.grading_policy, cls=EdxJSONEncoder))
# export all of the course metadata in policy.json
with course_run_policy_dir.open('policy.json', 'w') as course_policy:
policy = {'course/' + course.location.name: own_metadata(course)}
course_policy.write(dumps(policy, cls=EdxJSONEncoder))
# export draft content
# NOTE: this code assumes that verticals are the top most draftable container
# should we change the application, then this assumption will no longer
# be valid
if draft_modulestore is not None:
draft_verticals = draft_modulestore.get_items(course_key, category='vertical', revision='draft')
if len(draft_verticals) > 0:
draft_course_dir = export_fs.makeopendir(DRAFT_DIR)
for draft_vertical in draft_verticals:
parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location)
# Don't try to export orphaned items.
if len(parent_locs) > 0:
logging.debug('parent_locs = {0}'.format(parent_locs))
draft_vertical.xml_attributes['parent_sequential_url'] = parent_locs[0].to_deprecated_string()
sequential = modulestore.get_item(parent_locs[0])
index = sequential.children.index(draft_vertical.location)
draft_vertical.xml_attributes['index_in_children_list'] = str(index)
draft_vertical.runtime.export_fs = draft_course_dir
node = lxml.etree.Element('unknown')
draft_vertical.add_xml_to_node(node)
def _export_field_content(xblock_item, item_dir):
"""
Export all fields related to 'xblock_item' other than 'metadata' and 'data' to json file in provided directory
"""
module_data = xblock_item.get_explicitly_set_fields_by_scope(Scope.content)
if isinstance(module_data, dict):
for field_name in module_data:
if field_name not in DEFAULT_CONTENT_FIELDS:
# filename format: {dirname}.{field_name}.json
with item_dir.open('{0}.{1}.{2}'.format(xblock_item.location.name, field_name, 'json'),
'w') as field_content_file:
field_content_file.write(dumps(module_data.get(field_name, {}), cls=EdxJSONEncoder))
def export_extra_content(export_fs, modulestore, course_key, category_type, dirname, file_suffix=''):
items = modulestore.get_items(course_key, category=category_type)
if len(items) > 0:
item_dir = export_fs.makeopendir(dirname)
for item in items:
with item_dir.open(item.location.name + file_suffix, 'w') as item_file:
item_file.write(item.data.encode('utf8'))
# export content fields other then metadata and data in json format in current directory
_export_field_content(item, item_dir)
def convert_between_versions(s | ource_dir, target_dir):
"""
Converts a version 0 export format to version 1, and vice versa.
@param source_dir: the directory structure with the course export that should be converted.
The contents of source_dir will not be altered.
@param target_dir: the directory where | the converted export should be written.
@return: the version number of the converted export.
"""
def convert_to_version_1():
""" Convert a version 0 archive to version 0 """
os.mkdir(copy_root)
with open(copy_root / EXPORT_VERSION_FILE, 'w') as f:
f.write('{{"{export_key}": 1}}\n'.format(export_key=EXPORT_VERSION_KEY))
# If a drafts folder exists, copy it over.
copy_drafts()
# Now copy everything into the published directory
published_dir = copy_root / PUBLISHED_DIR
shutil.copytree(path(sour |
neerajvashistha/pa-dude | lib/python2.7/site-packages/pptx/action.py | Python | mit | 7,551 | 0 | # encoding: utf-8
"""
Objects related to mouse click and hover actions on a shape or text.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from .enum.action import PP_ACTION
from .opc.constants import RELATIONSHIP_TYPE as RT
from .shapes import Subshape
from .util import lazyproperty
class ActionSetting(Subshape):
"""
Properties that specify how a shape or text run reacts to mouse actions
during a slide show.
"""
# Subshape superclass provides access to the Slide Part, which is needed
# to access relationships.
def __init__(self, xPr, parent, hover=False):
super(ActionSetting, self).__init__(parent)
# xPr is either a cNvPr or rPr element
self._element = xPr
# _hover determines use of `a:hlinkClick` or `a:hlinkHover`
self._hover = hover
@property
def action(self):
"""
A member of the :ref:`PpActionType` enumeration, such as
`PP_ACTION.HYPERLINK`, indicating the type of action that will result
when the specified shape or text is clicked or the mouse pointer is
positioned over the shape during a slide show.
"""
hlink = self._hlink
if hlink is None:
return PP_ACTION.NONE
action_verb = hlink.action_verb
if action_verb == 'hlinkshowjump':
relative_target = hlink.action_fields['jump']
return {
'firstslide': PP_ACTION.FIRST_SLIDE,
'lastslide': PP_ACTION.LAST_SLIDE,
'lastslideviewed': PP_ACTION.LAST_SLIDE_VIEWED,
'nextslide': PP_ACTION.NEXT_SLIDE,
'previousslide': PP_ACTION.PREVIOUS_SLIDE,
'endshow': PP_ACTION.END_SHOW,
}[relative_target]
return {
None: PP_ACTION.HYPERLINK,
'hlinksldjump': PP_ACTION.NAMED_SLIDE,
'hlinkpres': PP_ACTION.PLAY,
'hlinkfile': PP_ACTION.OPEN_FILE,
'customshow': PP_ACTION.NAMED_SLIDE_SHOW,
'ole': PP_ACTION.OLE_VERB,
'macro': PP_ACTION.RUN_MACRO,
'program': PP_ACTION.RUN_PROGRAM,
}[action_verb]
@lazyproperty
def hyperlink(self):
"""
A |Hyperlink| object representing the hyperlink action defined on
this click or hover mouse event. A |Hyperlink| object is always
returned, even if no hyperlink or other click action is defined.
"""
return Hyperlink(self._element, self._parent, self._hover)
@property
def target_slide(self):
"""
A reference to the slide in this presentation that is the target of
the slide jump action in this shape. Slide jump actions include
`PP_ACTION.FIRST_SLIDE`, `LAST_SLIDE`, `NEXT_SLIDE`,
`PREVIOUS_SLIDE`, and `NAMED_SLIDE`. Returns |None| for all other
actions. In particular, the `LAST_SLIDE_VIEWED` action and the `PLAY`
(start other presentation) actions are not supported.
"""
slide_jump_actions = (
PP_ACTION.FIRST_SLIDE,
PP_ACTION.LAST_SLIDE,
PP_ACTION.NEXT_SLIDE,
PP_ACTION.PREVIOUS_SLIDE,
PP_ACTION.NAMED_SLIDE,
)
if self.action not in slide_jump_actions:
return None
if self.action == PP_ACTION.FIRST_SLIDE:
return self._slides[0]
elif self.action == PP_ACTION.LAST_SLIDE:
return self._slides[-1]
elif self.action == PP_ACTION.NEXT_SLIDE:
next_slide_idx = self._slide_index + 1
if next_slide_idx >= len(self._slides):
| raise ValueError('no next slide')
return self._slides[next_slide_idx]
eli | f self.action == PP_ACTION.PREVIOUS_SLIDE:
prev_slide_idx = self._slide_index - 1
if prev_slide_idx < 0:
raise ValueError('no previous slide')
return self._slides[prev_slide_idx]
elif self.action == PP_ACTION.NAMED_SLIDE:
rId = self._hlink.rId
return self._slide.rels.related_parts[rId]
@property
def _hlink(self):
"""
Reference to the `a:hlinkClick` or `h:hlinkHover` element for this
click action. Returns |None| if the element is not present.
"""
if self._hover:
return self._element.hlinkHover
return self._element.hlinkClick
@lazyproperty
def _slide(self):
"""
Reference to the slide containing the shape having this click action.
"""
return self.part
@lazyproperty
def _slide_index(self):
"""
Position in the slide collection of the slide containing the shape
having this click action.
"""
return self._slides.index(self._slide)
@lazyproperty
def _slides(self):
"""
Reference to the slide collection for this presentation.
"""
return self.part.package.presentation.slides
class Hyperlink(Subshape):
"""
Represents a hyperlink action on a shape or text run.
"""
def __init__(self, xPr, parent, hover=False):
super(Hyperlink, self).__init__(parent)
# xPr is either a cNvPr or rPr element
self._element = xPr
# _hover determines use of `a:hlinkClick` or `a:hlinkHover`
self._hover = hover
@property
def address(self):
"""
Read/write. The URL of the hyperlink. URL can be on http, https,
mailto, or file scheme; others may work. Returns |None| if no
hyperlink is defined, including when another action such as
`RUN_MACRO` is defined on the object. Assigning |None| removes any
action defined on the object, whether it is a hyperlink action or
not.
"""
hlink = self._hlink
# there's no URL if there's no click action
if hlink is None:
return None
# a click action without a relationship has no URL
rId = hlink.rId
if not rId:
return None
return self.part.target_ref(rId)
@address.setter
def address(self, url):
# implements all three of add, change, and remove hyperlink
self._remove_hlink()
if url:
rId = self.part.relate_to(url, RT.HYPERLINK, is_external=True)
hlink = self._get_or_add_hlink()
hlink.rId = rId
def _get_or_add_hlink(self):
"""
Get the `a:hlinkClick` or `a:hlinkHover` element for the Hyperlink
object, depending on the value of `self._hover`. Create one if not
present.
"""
if self._hover:
return self._element.get_or_add_hlinkHover()
return self._element.get_or_add_hlinkClick()
@property
def _hlink(self):
"""
Reference to the `a:hlinkClick` or `h:hlinkHover` element for this
click action. Returns |None| if the element is not present.
"""
if self._hover:
return self._element.hlinkHover
return self._element.hlinkClick
def _remove_hlink(self):
"""
Remove the a:hlinkClick or a:hlinkHover element, including dropping
any relationship it might have.
"""
hlink = self._hlink
if hlink is None:
return
rId = hlink.rId
if rId:
self.part.drop_rel(rId)
self._element.remove(hlink)
|
zhlooking/LearnPython | helloworld.py | Python | mit | 196 | 0.005102 | print "Hello Python!"
pr | int "Something change to the file"
print "Git is a distributed version control system"
print "Git has mutable index called stage"
print "Git | tracks changes again and again" |
waseem18/oh-mainline | vendor/packages/twisted/twisted/internet/test/test_time.py | Python | agpl-3.0 | 615 | 0 | # Copyright (c) Twisted Matrix Laboratories.
# | See LICENSE for details.
"" | "
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
from twisted.internet.test.reactormixins import ReactorBuilder
class TimeTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorTime}.
"""
def test_delayedCallStopsReactor(self):
"""
The reactor can be stopped by a delayed call.
"""
reactor = self.buildReactor()
reactor.callLater(0, reactor.stop)
reactor.run()
globals().update(TimeTestsBuilder.makeTestCaseClasses())
|
nirmeshk/oh-mainline | vendor/packages/sphinx/sphinx/directives/code.py | Python | agpl-3.0 | 7,800 | 0.001026 | # -*- coding: utf-8 -*-
"""
sphinx.directives.code
~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import codecs
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.util import parselinenos
from sphinx.util.nodes import set_source_info
class Highlight(Directive):
"""
Directive to set the highlighting language for code blocks, as well |
as the threshold for line numbers.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'linenothreshold': directives.unchanged,
}
def run(self):
if 'linenothreshold' in self.options:
try:
linenothreshold = int(self.options['linenothreshold'])
except Exception:
| linenothreshold = 10
else:
linenothreshold = sys.maxint
return [addnodes.highlightlang(lang=self.arguments[0].strip(),
linenothreshold=linenothreshold)]
class CodeBlock(Directive):
"""
Directive for a code block with special highlighting or line numbering
settings.
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'linenos': directives.flag,
'emphasize-lines': directives.unchanged_required,
}
def run(self):
code = u'\n'.join(self.content)
linespec = self.options.get('emphasize-lines')
if linespec:
try:
nlines = len(self.content)
hl_lines = [x+1 for x in parselinenos(linespec, nlines)]
except ValueError, err:
document = self.state.document
return [document.reporter.warning(str(err), line=self.lineno)]
else:
hl_lines = None
literal = nodes.literal_block(code, code)
literal['language'] = self.arguments[0]
literal['linenos'] = 'linenos' in self.options
if hl_lines is not None:
literal['highlight_args'] = {'hl_lines': hl_lines}
set_source_info(self, literal)
return [literal]
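# Illustrative reST input handled by the directive above (example invented):
#   .. code-block:: python
#      :linenos:
#      :emphasize-lines: 2
#
#      def f():
#          return 1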
class LiteralInclude(Directive):
"""
Like ``.. include:: :literal:``, but only warns if the include file is
not found, and does not raise errors. Also has several options for
selecting what to include.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'linenos': directives.flag,
'tab-width': int,
'language': directives.unchanged_required,
'encoding': directives.encoding,
'pyobject': directives.unchanged_required,
'lines': directives.unchanged_required,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
'prepend': directives.unchanged_required,
'append': directives.unchanged_required,
'emphasize-lines': directives.unchanged_required,
}
def run(self):
document = self.state.document
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
rel_filename, filename = env.relfn2path(self.arguments[0])
if 'pyobject' in self.options and 'lines' in self.options:
return [document.reporter.warning(
'Cannot use both "pyobject" and "lines" options',
line=self.lineno)]
encoding = self.options.get('encoding', env.config.source_encoding)
codec_info = codecs.lookup(encoding)
f = None
try:
f = codecs.StreamReaderWriter(open(filename, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
except (IOError, OSError):
return [document.reporter.warning(
'Include file %r not found or reading it failed' % filename,
line=self.lineno)]
except UnicodeError:
return [document.reporter.warning(
'Encoding %r used for reading included file %r seems to '
'be wrong, try giving an :encoding: option' %
(encoding, filename))]
finally:
if f is not None:
f.close()
objectname = self.options.get('pyobject')
if objectname is not None:
from sphinx.pycode import ModuleAnalyzer
analyzer = ModuleAnalyzer.for_file(filename, '')
tags = analyzer.find_tags()
if objectname not in tags:
return [document.reporter.warning(
'Object named %r not found in include file %r' %
(objectname, filename), line=self.lineno)]
else:
lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]
linespec = self.options.get('lines')
if linespec is not None:
try:
linelist = parselinenos(linespec, len(lines))
except ValueError, err:
return [document.reporter.warning(str(err), line=self.lineno)]
# just ignore nonexisting lines
nlines = len(lines)
lines = [lines[i] for i in linelist if i < nlines]
if not lines:
return [document.reporter.warning(
'Line spec %r: no lines pulled from include file %r' %
(linespec, filename), line=self.lineno)]
linespec = self.options.get('emphasize-lines')
if linespec:
try:
hl_lines = [x+1 for x in parselinenos(linespec, len(lines))]
except ValueError, err:
return [document.reporter.warning(str(err), line=self.lineno)]
else:
hl_lines = None
startafter = self.options.get('start-after')
endbefore = self.options.get('end-before')
prepend = self.options.get('prepend')
append = self.options.get('append')
if startafter is not None or endbefore is not None:
use = not startafter
res = []
for line in lines:
if not use and startafter and startafter in line:
use = True
elif use and endbefore and endbefore in line:
use = False
break
elif use:
res.append(line)
lines = res
if prepend:
lines.insert(0, prepend + '\n')
if append:
lines.append(append + '\n')
text = ''.join(lines)
if self.options.get('tab-width'):
text = text.expandtabs(self.options['tab-width'])
retnode = nodes.literal_block(text, text, source=filename)
set_source_info(self, retnode)
if self.options.get('language', ''):
retnode['language'] = self.options['language']
if 'linenos' in self.options:
retnode['linenos'] = True
if hl_lines is not None:
retnode['highlight_args'] = {'hl_lines': hl_lines}
env.note_dependency(rel_filename)
return [retnode]
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', Highlight) # old
directives.register_directive('code-block', CodeBlock)
directives.register_directive('sourcecode', CodeBlock)
directives.register_directive('literalinclude', LiteralInclude)
|
wolverineav/neutron | neutron/tests/common/config_fixtures.py | Python | apache-2.0 | 2,517 | 0 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import fixtures
import six
from neutron.tests import base
class ConfigDict(base.AttributeDict):
def update(self, other):
self.convert_to_attr_dict(other)
super(ConfigDict, self).update(other)
def convert_to_attr_dict(self, other):
"""Convert nested dicts to AttributeDict.
:param other: dictionary to be directly modified.
"""
for key, value in six.iteritems(other):
if isinstance(value, dict):
if not isinstance(value, base.AttributeDict):
other[key] = base.AttributeDict(value)
self.convert_to_attr_dict(value)
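# Illustrative example (not in the original module): update() recursively
# wraps nested dicts, so sections allow attribute access (assuming
# base.AttributeDict provides it):
#   cfg = ConfigDict()
#   cfg.update({'agent': {'root_helper': 'sudo'}})
#   cfg.agent.root_helper  ->  'sudo'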
class ConfigFileFixture(fixtures.Fixture):
"""A fixture that knows how to translate configurations to files.
:param base_filename: the filename to use on disk.
:param config: a ConfigDict instance.
:param temp_dir: an existing temporary directory to use for storage.
"""
def __init__(self, base_filename, config, temp_dir):
super(ConfigFileFixture, self).__init__()
self.base_filename = base_filename
self.config = config
self.temp_dir = temp_dir
def _setUp(self):
config_parser = self.dict_to_config_parser(self.config)
# Need to randomly generate a unique folder to put the file in
self.filename = os.path.join(self.temp_dir, self.base_filename)
with open(self.filename, 'w') as f:
config_parser.write(f)
| f.flush()
def dict_to_config_parser(self, config_dict):
config_parser = six.moves.configparser.SafeConfigParser()
for section, section_dict in six.iteritems(config_dict):
if section != 'DEFAULT':
config_parser.add_section(section)
for option, value in six.iteritems(sectio | n_dict):
config_parser.set(section, option, value)
return config_parser
|
testvidya11/ejrf | questionnaire/migrations/0043_auto__del_field_questionnaire_published__del_field_questionnaire_is_op.py | Python | bsd-3-clause | 20,582 | 0.007239 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Questionnaire.published'
db.delete_column(u'questionnaire_questionnaire', 'published')
# Deleting field 'Questionnaire.is_open'
db.delete_column(u'questionnaire_questionnaire', 'is_open')
# Deleting field 'Questionnaire.finalized'
db.delete_column(u'questionnaire_questionnaire', 'finalized')
# Adding field 'Questionnaire.status'
db.add_column(u'questionnaire_questionnaire', 'status',
self.gf('model_utils.fields.StatusField')(default='finalized', max_length=100, no_check_for_status=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Questionnaire.published'
db.add_column(u'questionnaire_questionnaire', 'published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Questionnaire.is_open'
db.add_column(u'questionnaire_questionnaire', 'is_open',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Questionnaire.finalized'
db.add_column(u'questionnaire_questionnaire', 'finalized',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'Questionnaire.status'
db.delete_column(u'questionnaire_questionnaire', 'status')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {' | to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField' | , [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answergroup'", 'null': 'True', 'to': "orm['questionnaire.Answer']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_groups'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null |
Yelp/git-code-debt | tests/server/servlets/commit_test.py | Python | mit | 741 | 0 | import flask
from testing.asserti | ons.response import assert_no_response_errors
def test_it_loads(server_with_data):
resp = server_with_data.server.client.get(
flask.url_for(
'commit.show',
sha=server_with_data.cloneable_with_commits.commits[3].sha,
),
)
assert_no_response_errors(resp)
import_row = resp.pq.find('th:contains("PythonImportCount")').parent()
assert import_row.find('td').text() == '2'
def test_it_loads_for_firs | t_commit(server_with_data):
resp = server_with_data.server.client.get(
flask.url_for(
'commit.show',
sha=server_with_data.cloneable_with_commits.commits[0].sha,
),
)
assert_no_response_errors(resp)
|
endlessm/chromium-browser | testing/scripts/run_performance_tests.py | Python | bsd-3-clause | 27,620 | 0.008653 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs telemetry benchmarks and gtest perf tests.
This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
argument:
--isolated-script-test-output=[FILENAME]
json is written to that file in the format detailed here:
https://www.chromium.org/developers/the-json-test-results-format
Optional argument:
--isolated-script-test-filter=[TEST_NAMES]
is a double-colon-separated ("::") list of test names, to run just that subset
of tests. This list is forwarded to the run_telemetry_benchmark_as_googletest
script.
This script is intended to be the base command invoked by the isolate,
followed by a subsequent Python script. It could be generalized to
invoke an arbitrary executable.
It currently runs several benchmarks. The benchmarks it will execute are
based on the shard it is running on and the sharding_map_path.
If this is executed with a gtest perf test, the flag --non-telemetry
has to be passed in to the script so the script knows it is running
an executable and not the run_benchmark command.
This script obeys the --isolated-script-test-output flag and merges test results
from all the benchmarks into the one output.json file. The test results and perf
results are also put in separate directories per
benchmark. Two files will be present in each directory; perf_results.json, which
is the perf specific results (with unenforced format, could be histogram or
graph json), and test_results.json, which is a JSON test results
format file
https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
TESTING:
To test changes to this script, please run
cd tools/perf
./run_tests ScriptsSmokeTest.testRunPerformanceTests
"""
import argparse
import json
import os
import shutil
import sys
import time
import tempfile
import traceback
import common
CHROMIUM_SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
PERF_DIR = os.path.join(CHROMIUM_SRC_DIR, 'tools', 'perf')
sys.path.append(PERF_DIR)
import generate_legacy_perf_dashboard_json
from core import path_util
PERF_CORE_DIR = os.path.join(PERF_DIR, 'core')
sys.path.append(PERF_CORE_DIR)
import results_merger
# Add src/testing/ into sys.path for importing xvfb and test_env.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import xvfb
import test_env
# Unfortunately we need to copy these variables from ../test_env.py.
# Importing it and using its get_sandbox_env breaks test runs on Linux
# (it seems to unset DISPLAY).
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chrom | ium/chrome_sandbox'
SHARD_MAPS_DIRECTORY = os.path.join(
os.path.dirname(__file__), | '..', '..', 'tools', 'perf', 'core',
'shard_maps')
# See https://crbug.com/923564.
# We want to switch over to using histograms for everything, but converting from
# the format output by gtest perf tests to histograms has introduced several
# problems. So, only perform the conversion on tests that are whitelisted and
# are okay with potentially encountering issues.
GTEST_CONVERSION_WHITELIST = [
'angle_perftests',
'base_perftests',
'blink_heap_unittests',
'blink_platform_perftests',
'cc_perftests',
'components_perftests',
'command_buffer_perftests',
'dawn_perf_tests',
'gpu_perftests',
'load_library_perf_tests',
'media_perftests',
'net_perftests',
'browser_tests',
'services_perftests',
'sync_performance_tests',
'tracing_perftests',
'views_perftests',
'viz_perftests',
'wayland_client_perftests',
'xr.vr.common_perftests',
]
class OutputFilePaths(object):
"""Provide paths to where results outputs should be written.
The process_perf_results.py merge script later will pull all of these
together, so that's why they aren't in the standard locations. Also,
note that because of the OBBS (One Build Bot Step), Telemetry
has multiple tests running on a single shard, so we need to prefix
these locations with a directory named by the benchmark name.
"""
def __init__(self, isolated_out_dir, perf_test_name):
self.name = perf_test_name
self.benchmark_path = os.path.join(isolated_out_dir, perf_test_name)
def SetUp(self):
os.makedirs(self.benchmark_path)
return self
@property
def perf_results(self):
return os.path.join(self.benchmark_path, 'perf_results.json')
@property
def test_results(self):
return os.path.join(self.benchmark_path, 'test_results.json')
@property
def logs(self):
return os.path.join(self.benchmark_path, 'benchmark_log.txt')
@property
def csv_perf_results(self):
"""Path for csv perf results.
Note that the chrome.perf waterfall uses the json histogram perf results
exclusively. csv_perf_results are implemented here in case a user script
passes --output-format=csv.
"""
return os.path.join(self.benchmark_path, 'perf_results.csv')
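# Usage sketch (paths invented for illustration):
#   paths = OutputFilePaths('/tmp/isolated_out', 'speedometer2').SetUp()
#   paths.perf_results  ->  '/tmp/isolated_out/speedometer2/perf_results.json'
#   paths.logs          ->  '/tmp/isolated_out/speedometer2/benchmark_log.txt'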
def print_duration(step, start):
print 'Duration of %s: %d seconds' % (step, time.time() - start)
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
class GtestCommandGenerator(object):
def __init__(self, options, override_executable=None, additional_flags=None,
ignore_shard_env_vars=False):
self._options = options
self._override_executable = override_executable
self._additional_flags = additional_flags or []
self._ignore_shard_env_vars = ignore_shard_env_vars
def generate(self, output_dir):
"""Generate the command to run to start the gtest perf test.
Returns:
list of strings, the executable and its arguments.
"""
return ([self._get_executable()] +
self._generate_filter_args() +
self._generate_repeat_args() +
self._generate_also_run_disabled_tests_args() +
self._generate_output_args(output_dir) +
self._generate_shard_args() +
self._get_additional_flags()
)
@property
def executable_name(self):
"""Gets the platform-independent name of the executable."""
return self._override_executable or self._options.executable
def _get_executable(self):
executable = str(self.executable_name)
if IsWindows():
return r'.\%s.exe' % executable
else:
return './%s' % executable
def _get_additional_flags(self):
return self._additional_flags
def _generate_shard_args(self):
"""Teach the gtest to ignore the environment variables.
GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS will confuse the gtest
and convince it to only run some of its tests. Instead run all
of them.
"""
if self._ignore_shard_env_vars:
return ['--test-launcher-total-shards=1', '--test-launcher-shard-index=0']
return []
def _generate_filter_args(self):
if self._options.isolated_script_test_filter:
filter_list = common.extract_filter_list(
self._options.isolated_script_test_filter)
return ['--gtest_filter=' + ':'.join(filter_list)]
return []
def _generate_repeat_args(self):
# TODO(crbug.com/920002): Support --isolated-script-test-repeat.
return []
def _generate_also_run_disabled_tests_args(self):
# TODO(crbug.com/920002): Support
# --isolated-script-test-also-run-disabled-tests.
return []
def _generate_output_args(self, output_dir):
output_args = []
if self._options.use_gtest_benchmark_script:
output_args.append('--output-dir=' + output_dir)
# These flags are to make sure that test output perf metrics in the log.
if not '--verbose' in self._get_additional_flags():
output_args.append('--verbose')
if (not '--test-launcher-print-test-stdio=always'
in self._get_additional_flags()):
output_args.append('--test-launcher-print-test-stdio=always')
return output_args
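# Illustrative result of generate() (hypothetical options; the exact flags
# depend on the parsed isolated-script arguments):
#   GtestCommandGenerator(options).generate('/tmp/out')
#   -> ['./base_perftests', '--verbose',
#       '--test-launcher-print-test-stdio=always']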
def write_simple_test_results(return_code, output_filepath, benchmark_name):
# TODO(crbug.com/920002): Fix to output
# https://chromium.googlesou |
b-e-p/bep | Bep/run.py | Python | bsd-3-clause | 24,761 | 0.010218 | #! /usr/bin/env python
#----------------------------------------------------------------
# Author: Jason Gors <jasonDOTgorsATgmail>
# Creation Date: 07-30-2013
# Purpose: this is where the program is called into action.
#----------------------------------------------------------------
import argparse
import os
from os.path import join
import sys
import copy
from collections import OrderedDict
from Bep.core import usage
from Bep.core.release_info import __version__, name
from Bep.core import utils
from Bep.cmds import install, list_packages, remove_packages, turn_off, turn_on, update_packages
usr_home_dir = os.path.expanduser('~') # specifies the user's home dir
#top_level_dir = join(options['top_level_dir'], '.{}'.format(name))
top_level_dir = join(usr_home_dir, '.{}'.format(name))
installed_pkgs_dir = join(top_level_dir, 'installed_pkgs')
install_logs_dir = join(top_level_dir, '.install_logs')
install_dirs = dict(installed_pkgs_dir=installed_pkgs_dir, install_logs_dir=install_logs_dir)
#installation_db = 'installation_db.json'
#installation_db_path = join(top_level_dir, installation_db)
packages_file = '.{}_packages'.format(name)
packages_file_path = join(usr_home_dir, packages_file)
repo_choices = ['github', 'bitbucket', 'local'] # 'remote'
other_choices = ['packages'] # 'stable'
possible_choices = repo_choices + other_choices
def main(): # needs to be done as a main func for setuptools to work correctly in creating an executable
# for the approach i am taking here using nested subparsers:
# https://mail.python.org/pipermail/python-list/2010-August/585617.html
# nargs options:
# (default): by not specifying nargs at all, you just get a string of 1 item
# = N where N is some specified number of args
# = '?' makes a string of one item, and if no args are given, then default is used.
# = '*' makes a list of all args passed after command and if no args given, then default is used.
# = '+' makes list of all args passed after command, but requires at least one arg
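    # For example (hypothetical flags, not ones defined by this parser):
    #   add_argument('-x', nargs='?', default='d')  # omitting -x entirely yields 'd'
    #   add_argument('-y', nargs='*')               # `-y a b` yields ['a', 'b']
    #   add_argument('-z', nargs='+')               # `-z` with no value is an error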
top_parser = argparse.ArgumentParser(description=name.upper(),
formatter_class=argparse.RawDescriptionHelpFormatter,
#formatter_class=argparse.RawTextHelpFormatter,
#add_help=False,
epilog=usage.epilog_use)
#################################
### this goes at the top level
top_parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
top_parser.add_argument('-l', '--language', nargs='?', default='python', help=usage.lang_use)
group = top_parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", help=usage.verbose_use)
group.add_argument("-q", "--quiet", action="store_true", help=usage.quiet_use)
#################################
def check_for_all_error(cmd_arg):
if cmd_arg in ['all', 'All', 'ALL', '--All', '--ALL']:
raise SystemExit("\nError: Did you mean to specifiy --all instead?")
# If --all is passed in:
# Skip stuff below if '--all' is specified w/ one of these accepted cmds
# (this is some seriously hacky brute force shit!)
build_up_subparsers = True
additional_args = []
cmds_that_accept_all_arg = ['update', 'remove', 'turn_off']
for cmd in cmds_that_accept_all_arg:
if cmd in sys.argv:
for i in sys.argv: # test for misspecified '--all' command
check_for_all_error(i)
if '--all' in sys.argv:
#print(sys.argv)
build_up_subparsers = False
# TODO add help page for all
top_parser.add_argument('--all', action='store_true', help=usage.all_use) #metavar="arg")
args = top_parser.parse_known_args()
args, additional_args = args
if len(additional_args) > 1: # restrict '--all' to at most one accompanying arg
error_all_arg = "--all can only be called with one of the following args:\n\t"
error_all_arg = error_all_arg + '{update, remove, turn_off}'
top_parser.error(error_all_arg)
#else:
#additional_args = additional_args[0]
# To display how to run a command:
# look at all pkgs and check that passed in package name is one that's already installed
everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
any_of_this_pkg_already_installed = lambda pkg_to_process: utils.lang_and_pkg_type_and_pkg_and_branches_tuple(
pkg_to_process, everything_already_installed)
cmds_that_can_display_how_to = cmds_that_accept_all_arg + ['turn_on']
for cmd in cmds_that_can_display_how_to: # everything except install i think
if (cmd in sys.argv) and ('--all' not in sys.argv):
if ('-h' not in sys.argv) and ('--help' not in sys.argv):
args = top_parser.parse_known_args()
args, additional_args = args
if len(additional_args) == 2:
additional_args_copy = copy.copy(additional_args)
additional_args_copy.remove(cmd) # 2 things in here, one equal to cmd, the other is what we want to see if it's already installed
potential_pkg_to_proc = additional_args_copy[0]
#print any_of_this_pkg_already_installed(potential_pkg_to_proc)
if any_of_this_pkg_already_installed(potential_pkg_to_proc):
# should i make a function call out of this instead of relying on the command to be handled below?
print(" **** This is how to {} {} ****".format(cmd, potential_pkg_to_proc))
build_up_subparsers = False
elif potential_pkg_to_proc not in possible_choices: # else if the other arg/package name passed in is not a pkg_already_installed (& not one of the next possible cmd options)
#print an error say that whatever is passed in cannot be updated/turned_on/etc
#b/c it's not currently installed.
error_msg = "cannot {} {}: not a currently installed package.\n".format(cmd, potential_pkg_to_proc)
error_msg = error_msg + "[Execute `{} list` to see installed packages.]".format(name)
top_parser.error(error_msg)
#else: | # want this instead b/c otherwise the above hides the help pages
#additional_args = [] # set back to empty to avoid the flag at the end of argparse stuff
#else:
#error_msg = "An already installed package name must be passed in with {}".format(cmd)
| #top_parser.error(error_msg)
else:
additional_args = [] # set back to empty to avoid the flag at the end of argparse stuff
if build_up_subparsers:
top_subparser = top_parser.add_subparsers(title='Commands',
description='[ These are the commands that can be passed to %(prog)s ]',
#help=usage.subparser_use)
help='[ Command specific help info ]')
### create parser for the "list" command
# maybe make it so that it can list all branches installed for a specific pkg,
parser_list = top_subparser.add_parser('list', help=usage.list_use)
parser_list.add_argument('list_arg', action="store_true", help=usage.list_sub_use) #metavar="arg")
class CheckIfCanBeInstalled(argparse.Action):
''' makes sure a repo to install has both a user_name and repo_name:
eg. ipython/ipython
or is an actual path to a repo on the local filesystem'''
def __call__(self, pars |
Phoenyx/TruemaxScriptPackage | Truemax/exportAnimFBX.py | Python | gpl-2.0 | 3,473 | 0.004319 | __author__ = 'sofiaelm'
# version 1.0
"""
"""
from pymel.all import *
import maya.cmds as cmds
# Regex for our scene name structure. Example: genericTurnLeft45A_v013_sm
SCENE_FILE_NAME_REGEX = r'[a-zA-Z]+[0-9]+[A-Z]{1}_v[0-9]{3}_[a-zA-Z]{2}'
# Regex for our top node name structure. Example: genericTurnLeft45A
SCENE_FILE_TOP_NODE_REGEX = r'([a-zA-Z]+[0-9]+[A-Z]{1})(_)'
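# Quick illustration (mirrors the examples in the comments above):
#   re.match(SCENE_FILE_NAME_REGEX, 'genericTurnLeft45A_v013_sm')  -> match
#   re.search(SCENE_FILE_TOP_NODE_REGEX, 'genericTurnLeft45A_v013_sm').groups()
#   -> ('genericTurnLeft45A', '_')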
def exportAnimFBX():
# Checks if the geometry group exists
if cmds.objExists("*:geo_grp*") == 0:
cmds.warning(">>>>> No group matches name 'geo_grp' <<<<<")
else:
# if the export folder doesn't exist in the directory then create it.
file_dir = os.path.dirname(cmds.file(q=1, sceneName=1))
export_dir = os.path.join(os.path.dirname(file_dir), "export")
if not os.path.exists(export_dir):
os.mkdir(export_dir)
# Get the full scene name
scene_file_raw = str(cmds.file(q=1, sceneName=1, shortName=1))
# See if the scene file is named correctly according to the regex
if not re.match(SCENE_FILE_NAME_REGEX, scene_file_raw):
print (">> Your scene file is named incorrectly <<")
# See if the scene file is found in the top node Regex
matches = re.search(SCENE_FILE_TOP_NODE_REGEX, scene_file_raw)
if matches is None:
print (">> Something went wrong... <<")
# The regex consists of two groups: the name and the "_"
groups = matches.groups()
# Error if "groups" is too big
if len(groups) != 2:
print (">> Something went wrong... <<")
# if the first element of "groups" is not in the scene, then error
if groups[0] not in cmds.ls():
print (">> Your top node is not named correctly <<")
# the top node in the scene should be named like the first element of the group
top_node = groups[0]
# The name of the fbx we export is defined
scene_fbx = os.path.join(export_dir, str(top_node) + ".fbx")
# Get start time and end time of animation base on the timeline
#startTime = int(cmds.playbackOptions(query=True, minTime=True))
#endTime = int(cmds.playbackOptions(query=True, maxTime=True))
#print startTime
#print endTime
# select bind joints
def selectBNDJNTS():
select("*:*_geo", "*:main_bnd*", r=1)
selectBNDJNTS()
# Make a list of the selection
selectBND = cmds.ls(sl=1)
print selectBND
# Bake anim to bind joints
#cmds.bakeResults(selectBND, t=(startTime, endTime))
# Select bind joints and geometry and export as FBX
select(cl=1)
selectBNDJNTS()
# Make a list of the selection to "Export selection" as FBX
toExport = cmds.ls(sl=True)
cmds.select(toExport)
# Copied from another script. It is not pretty b | ut exporting is not allowed in Python
# It exports the selection as FBX using a preset file. Writes message to command line when finished.
preset_file = "{0}{1}{2}".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep,
"UnityExportAnim.fbxexportpreset").replace("\\", " | /")
mel.eval('FBXLoadExportPresetFile -f "{0}";'.format(preset_file))
mel.eval('FBXExport -f "{0}" -s;'.format(scene_fbx.replace("\\", "/")))
sys.stdout.write(">>>>> FBX with Animation Exported! <<<<<")
|
hankcs/HanLP | hanlp/layers/embeddings/contextual_string_embedding_tf.py | Python | apache-2.0 | 4,988 | 0.002005 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-19 03:24
from typing import List
import tensorflow as tf
import numpy as np
from hanlp.components.rnn_language_model_tf import RNNLanguageModel
from hanlp_common.constant import PAD
from hanlp.utils.io_util import get_resource
from hanlp.utils.tf_util import copy_mask, hanlp_register, str_tensor_2d_to_list
from hanlp_common.util import infer_space_after
@hanlp_register
class ContextualStringEmbeddingTF(tf.keras.layers.Layer):
def __init__(self, forward_model_path=None, backward_model_path=None, max_word_len=10,
trainable=False, name=None, dtype=None,
dynamic=True, **kwargs):
assert dynamic, 'ContextualStringEmbedding works only in eager mode'
super().__init__(trainable, name, dtype, dynamic, **kwargs)
assert any([forward_model_path, backward_model_path]), 'At least one model is required'
self.forward_model_path = forward_model_path
self.backward_model_path = backward_model_path
self.forward_model = self._load_lm(forward_model_path) if forward_model_path else None
self.backward_model = self._load_lm(backward_model_path) if backward_model_path else None
if trainable:
self._fw = self.forward_model.model
self._bw = self.backward_model.model
for m in self._fw, self._bw:
m.trainable = True
self.supports_masking = True
self.max_word_len = max_word_len
def call(self, inputs, **kwargs):
str_inputs = str_tensor_2d_to_list(inputs)
outputs = self.embed(str_inputs)
copy_mask(inputs, outputs)
return outputs
def _load_lm(self, filepath):
filepath = get_resource(filepath)
lm = RNNLanguageModel()
lm.load(filepath)
model: tf.keras.Sequential = lm.model
for idx, layer in enumerate(model.layers):
if isinstance(layer, tf.keras.layers.LSTM):
lm.model = tf.keras.Sequential(model.layers[:idx + 1]) # discard dense layer
return lm
def embed(self, texts: List[List[str]]):
"""Embedding sentences (list of words) with contextualized string embedding
Args:
texts: List of words, not chars
texts: List[List[str]]:
Returns:
"""
fw = None
if self.forward_model:
fw = self._run_rnn(texts, model=self.forward_model)
bw = None
if self.backward_model:
bw = self._run_rnn(texts, model=self.backward_model)
if not all(x is not None for x in [fw, bw]):
return fw if fw is not None else bw
else:
return tf.concat([fw, bw], axis=-1)
def _run_rnn(self, texts, model):
embeddings = []
inputs = []
offsets = []
tokenizer = model.transform.tokenize_func()
backward = not model.config['forward']
for sent in texts:
raw, off = self._get_raw_string(sent, tokenizer)
inputs.append(raw)
offsets.append(off)
outputs = model.model_from_config.predict(model.transform.inputs_to_dataset(inputs))
if backward:
outputs = tf.reverse(outputs, axis=[1])
maxlen = len(max(texts, key=len))
for hidden, off, sent in zip(outputs, offsets, texts):
embed = []
for (start, end), word in zip(off, sent):
embed.append(hidden[end - 1, :])
if len(embed) < maxlen:
embed += [np.zeros_like(embed[-1])] * (maxlen - len(embed))
embeddings.append(np.stack(embed))
return tf.stack(embeddings)
def _get_raw_string(self, sent: List[str], tokenizer):
raw_string = []
offsets = []
whitespace_after = infer_space_after(sent)
start = 0
for word, space in zip(sent, whitespace_after):
ch | ars = tokenizer(word)
chars = chars[:self.max_word_len]
if space:
chars += [' ']
end = start + len(chars)
offsets.append((start, end))
start = end
raw_string += chars
return raw_string, offsets
def get_config(self):
config = | {
'forward_model_path': self.forward_model_path,
'backward_model_path': self.backward_model_path,
'max_word_len': self.max_word_len,
}
base_config = super(ContextualStringEmbeddingTF, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@property
def output_dim(self):
dim = 0
for model in self.forward_model, self.backward_model:
if model:
dim += model.config['rnn_units']
return dim
def compute_output_shape(self, input_shape):
return input_shape + self.output_dim
def compute_mask(self, inputs, mask=None):
return tf.not_equal(inputs, PAD)
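# Usage sketch (hypothetical model paths; the forward/backward character-level
# language models must be trained and saved beforehand):
#   embedding = ContextualStringEmbeddingTF('path/to/fw_lm', 'path/to/bw_lm')
#   vectors = embedding.embed([['HanLP', 'is', 'great']])
#   # -> tensor of shape [1, 3, embedding.output_dim]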
|
yamitzky/dotfiles | home/.ipython/profile_default/startup/01-function.py | Python | mit | 3,117 | 0 | def ulen(li):
return len(set(li))
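# e.g. ulen([1, 1, 2, 3]) == 3 -- the number of unique items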
def set_style():
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
set_japanese_font()
def set_japanese_font():
import matplotlib as __matplotlib
font_path = '/Library/Fonts/Osaka.ttf'
font_prop = __matplotlib.font_manager.FontProperties(fname=font_path)
__matplotlib.rcParams['font.family'] = font_prop.get_name()
__matplotlib.rcParams['pdf.fonttype'] = 42
__matplotlib.rcParams['savefig.dpi'] = 200
__matplotlib.rcParams['mathtext.default'] = 'regular'
def import_ds():
global pd, np, plt, sns, zimpala, zhive
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import zutil.impala as zimpala
import zutil.hive as zhive
def nanargsort(a, axis=-1, kind='quicksort', order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specifi | ed axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sor | t : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
import numpy as np
if np.any(np.isnan(a)):
raise Exception("The matrix contains NaN value")
else:
return np.argsort(a, axis, kind, order)
|
flake123p/ProjectH | Make/PY02_dump_dependent_tree/mod6/list_to_build_script.py | Python | gpl-3.0 | 1,336 | 0.023952 | #!/usr/bin/python
# Usage: list_to_make_var.py <input file (mod list)> <output file 1(build script)> <output file 2(clean script)> <OS>
# argv: argv[0] argv[1] argv[2] argv[3] argv[4]
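# Example invocation (hypothetical file names):
#   ./list_to_build_script.py mod_list.txt build_all.sh clean_all.sh LINUX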
#
# Include library
#
import os
import sys
def OpenFile(fileName, mode = 'r'): # mode : 'r', 'w', ...
try:
fp = open(fileName, mode)
except OSError as err:
print("OS error: {0}".format(err))
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(1)
return fp
mod_base_path = '../'
curr | _os = str(sys.argv[4])
if curr_os == 'WIN':
mod_build_file = 'build_mod.bat'
mod_clean_file = 'clean_mod.bat'
else:
mod_build_file = 'build_mod.sh'
mod_clean_file = 'clean_mod.sh'
#
# main
#
if len(sys.argv) != 5:
print("Arguments Number Error. It should be 5.")
sys.exit(1)
finList = OpenFile(str(sys.argv[1]))
foutBuildfile = OpenFile(str(sys.argv[2]), 'w')
foutCleanfile = OpenFile(str(sys.argv[3]), 'w')
for each_line in fin | List:
each_mod = each_line.strip()
# build files
str = mod_base_path + each_mod + '/' + mod_build_file + '\n'
foutBuildfile.write(str)
# clean files
str = mod_base_path + each_mod + '/' + mod_clean_file + '\n'
foutCleanfile.write(str)
finList.close()
foutBuildfile.close()
foutCleanfile.close() |
RGU5Android/PythonLectureNotes | HomeWork/09-27-14/toggle_bit_at_position.py | Python | gpl-2.0 | 635 | 0.007874 | #!/usr/bin/python
def toggle_bit_at_position(variable, position):
if type(variable) is long or type(variable) is int:
if type(position) is long or type(position) is int:
position = position - 1
value = variable ^ (1 << position)
return ("Variable: " + str(variable) + " : " + " Position: " + str(position + 1) + " Value after toggle: " + str(value))
else:
return "Wrong position value inserted"
else:
return "Wrong variable value inserted"
a, | b = input("Enter the value for variable and position to be toggled: ")
print (toggle_bit_at_position(a, b)) | ;
|
saeki-masaki/cinder | cinder/openstack/common/report/views/text/process.py | Python | apache-2.0 | 1,233 | 0 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of t | he License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides process view
This module provides a view for
visualizing pr | ocesses in human-readable form
"""
import cinder.openstack.common.report.views.jinja_view as jv
class ProcessView(jv.JinjaView):
"""A Process View
This view displays process models defined by
:class:`openstack.common.report.models.process.ProcessModel`
"""
VIEW_TEXT = (
"Process {{ pid }} (under {{ parent_pid }}) "
"[ run by: {{ username }} ({{ uids.real|default('unknown uid') }}),"
" state: {{ state }} ]\n"
"{% for child in children %}"
" {{ child }}"
"{% endfor %}"
)
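    # Rendered example (hypothetical model data):
    #   Process 1234 (under 1) [ run by: alice (1000), state: sleeping ]
    #     Process 1235 (under 1234) [ run by: alice (1000), state: running ]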
|
evangeline97/localwiki-backend-server | localwiki/versionutils/versioning/fields.py | Python | gpl-2.0 | 645 | 0.004651 | from django.db import models
from django.contrib.auth.models import User
class AutoSetField(object):
pass
class AutoUserField(models.ForeignKey, AutoSetField):
def __init__(self, **kws):
if 'to' in kws:
# Fixes south. We always want this to point to the User
# model.
del kws['to']
return super(AutoUserField, self).__init__(User, **kws)
class AutoIPAddressField(models.IPAddressField, AutoSetField):
pass
try:
from south.modelsinspector import add_introspecti | on_rules
add_introspection_rules([], ["^versionutils\.versioning\.fields"])
except ImportError:
pass | |
Duoxilian/home-assistant | homeassistant/components/climate/wink.py | Python | mit | 15,904 | 0 | """
Support for Wink thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.wink/
"""
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE,
ATTR_CURRENT_HUMIDITY)
from homeassistant.const impor | t (
TEMP_CELSIUS, STATE_ON,
STATE_OFF, STATE_UNKNOWN)
DEPENDENCIES = [' | wink']
STATE_AUX = 'aux'
STATE_ECO = 'eco'
STATE_FAN = 'fan'
SPEED_LOWEST = 'lowest'
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_ECO_TARGET = "eco_target"
ATTR_OCCUPIED = "occupied"
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Wink thermostat."""
import pywink
temp_unit = hass.config.units.temperature_unit
for climate in pywink.get_thermostats():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkThermostat(climate, hass, temp_unit)])
for climate in pywink.get_air_conditioners():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkAC(climate, hass, temp_unit)])
# pylint: disable=abstract-method,too-many-public-methods, too-many-branches
class WinkThermostat(WinkDevice, ClimateDevice):
"""Representation of a Wink thermostat."""
def __init__(self, wink, hass, temp_unit):
"""Initialize the Wink device."""
super().__init__(wink, hass)
self._config_temp_unit = temp_unit
@property
def temperature_unit(self):
"""Return the unit of measurement."""
# The Wink API always returns temp in Celsius
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
data = {}
target_temp_high = self.target_temperature_high
target_temp_low = self.target_temperature_low
if target_temp_high is not None:
data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
self.target_temperature_high)
if target_temp_low is not None:
data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
self.target_temperature_low)
if self.external_temperature:
data[ATTR_EXTERNAL_TEMPERATURE] = self._convert_for_display(
self.external_temperature)
if self.smart_temperature:
data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
if self.occupied:
data[ATTR_OCCUPIED] = self.occupied
if self.eco_target:
data[ATTR_ECO_TARGET] = self.eco_target
current_humidity = self.current_humidity
if current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = current_humidity
return data
@property
def current_temperature(self):
"""Return the current temperature."""
return self.wink.current_temperature()
@property
def current_humidity(self):
"""Return the current humidity."""
if self.wink.current_humidity() is not None:
# The API states humidity will be a float 0-1
# the only example API response with humidity listed show an int
# This will address both possibilities
if self.wink.current_humidity() < 1:
return self.wink.current_humidity() * 100
else:
return self.wink.current_humidity()
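    # e.g. an API humidity of 0.45 is reported as 45 (percent), while an
    # integer reading such as 45 is passed through unchanged.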
@property
def external_temperature(self):
"""Return the current external temperature."""
return self.wink.current_external_temperature()
@property
def smart_temperature(self):
"""Return the current average temp of all remote sensor."""
return self.wink.current_smart_temperature()
@property
def eco_target(self):
"""Return status of eco target (Is the termostat in eco mode)."""
return self.wink.eco_target()
@property
def occupied(self):
"""Return status of if the thermostat has detected occupancy."""
return self.wink.occupied()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if not self.wink.is_on():
current_op = STATE_OFF
elif self.wink.current_hvac_mode() == 'cool_only':
current_op = STATE_COOL
elif self.wink.current_hvac_mode() == 'heat_only':
current_op = STATE_HEAT
elif self.wink.current_hvac_mode() == 'aux':
current_op = STATE_HEAT
elif self.wink.current_hvac_mode() == 'auto':
current_op = STATE_AUTO
elif self.wink.current_hvac_mode() == 'eco':
current_op = STATE_ECO
else:
current_op = STATE_UNKNOWN
return current_op
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
target_hum = None
if self.wink.current_humidifier_mode() == 'on':
if self.wink.current_humidifier_set_point() is not None:
target_hum = self.wink.current_humidifier_set_point() * 100
elif self.wink.current_dehumidifier_mode() == 'on':
if self.wink.current_dehumidifier_set_point() is not None:
target_hum = self.wink.current_dehumidifier_set_point() * 100
else:
target_hum = None
return target_hum
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.current_operation != STATE_AUTO and not self.is_away_mode_on:
if self.current_operation == STATE_COOL:
return self.wink.current_max_set_point()
elif self.current_operation == STATE_HEAT:
return self.wink.current_min_set_point()
else:
return None
else:
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_high(self):
"""Return the higher bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.wink.current_max_set_point()
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self.wink.away()
@property
def is_aux_heat_on(self):
"""Return true if aux heater."""
if self.wink.current_hvac_mode() == 'aux' and self.wink.is_on():
return True
elif self.wink.current_hvac_mode() == 'aux' and not self.wink.is_on():
return False
else:
return None
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.current_operation == STATE_COOL:
target_temp_high = target_temp
if self.current_operation == STATE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
target_temp_low = target_temp_low
if target_temp_high is not None:
target_temp_high = target_temp_high
self.wink.set_temperature(target_temp_low, target_temp_high)
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode == STATE_HEAT:
self.wink.set_operation_mode('heat_only')
elif operation_mode == STATE_COOL:
self.wink.set_operation_mode('cool_only')
elif operation_mode == STATE_AUTO:
self |
aikikode/tomboy2evernote | tomboy2evernote/command_line.py | Python | mit | 5,742 | 0.003135 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
from datetime import timedelta, date
import glob
import os
import sys
import pyinotify
from evernote.edam.error.ttypes import EDAMUserException
from tomboy2evernote.tomboy2evernote import Evernote, convert_tomboy_to_evernote
__author__ = 'Denis Kovalev (aikikode)'
TOMBOY_DIR = os.path.join(os.environ['HOME'], ".local", "share", "tomboy")
CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.config', 't2ev')
if not os.path.isdir(CONFIG_DIR):
os.mkdir(CONFIG_DIR)
if CONFIG_DIR not in sys.path:
sys.path.append(CONFIG_DIR)
CONFIG_FILE = os.path.join(CONFIG_DIR, 'settings.py')
import logging
logger = logging.getLogger(__name__)
def get_token():
try:
from settings import DEV_TOKEN
except ImportError:
DEV_TOKEN = ''
with open(CONFIG_FILE, 'w') as config_file:
config_file.write("DEV_TOKEN = ''")
if not DEV_TOKEN:
logger.error(
'Please, get new Evernote development token from the site and put it into the\n'
'{} file. E.g.: DEV_TOKEN = "12345"'.format(CONFIG_FILE)
)
return DEV_TOKEN
def main():
parser = argparse.ArgumentParser(
description='Tomboy2Evernote notes converter. Upload Tomboy notes to your Evernote account')
parser.add_argument('-t', action='store', choices=['day', 'week', 'month', 'all'], default='day',
help='Upload only notes modified during this period. Default: day', required=False)
parser.add_argument('-d', '--daemon', action='store_true', help='Run as daemon', required=False)
args = parser.parse_args()
try:
evernote = Evernote(token=get_token())
except EDAMUserException as ex:
sys.exit(ex.errorCode)
if args.daemon:
run_as_daemon(evernote)
else:
convert_all_tomboy_notes(evernote, args.t)
def convert_all_tomboy_notes(evernote, modified_time=None):
delta = timedelta.max
if modified_time == 'day':
delta = timedelta(days=1)
elif modified_time == 'week':
delta = timedelta(weeks=1)
elif modified_time == 'month':
delta = timedelta(weeks=4)
today = date.today()
notes_files = list(filter(lambda f: delta > today - date.fromtimestamp(os.path.getmtime(f)),
glob.glob(os.path.join(TOMBOY_DIR, "*.note"))))
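    # e.g. with modified_time='week', only notes whose modification date falls
    # within the last 7 days survive the filter above.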
total_notes = len(notes_files)
failed_notes = []
notes_hash = dict()
for idx, tomboy_note in enumerate(notes_files):
print('[{}/{}]:'.format(idx + 1, total_notes), end=' ')
ev_note = convert_tomboy_to_evernote(tomboy_note)
if ev_note:
print('Converted \'{}\'. Uploading...'.format(ev_note['title']), end=' ')
try:
evernote.create_or_update_note(ev_note)
except:
failed_notes.append(ev_note['title'])
print('FAILED')
else:
print('OK')
notes_hash[tomboy_note] = ev_note['title']
else:
print('Skipped template note')
if failed_notes:
print('The following notes failed to upload:')
for idx, note_title in enumerate(failed_notes):
print('[{}]: \'{}\''.format(idx + 1, note_title))
return notes_hash
def run_as_daemon(evernote_client):
# First we need to get all current notes and their titles to correctly handle note deletion
notes = convert_all_tomboy_notes(evernote_client)
# Configure daemon
wm = pyinotify.WatchManager()
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_MODIFY | \
pyinotify.IN_MOVED_TO | pyinotify.IN_MOVED_FROM
class EventHandler(pyinotify.ProcessEvent):
def my_init(self, evernote, notes_hash):
self.evernote = evernote
self.notes_hash = notes_hash
def process_IN_CREATE(self, event):
| self.process_IN_MOVED_TO(event)
def process_IN_DELETE(self, event):
| self.process_IN_MOVED_FROM(event)
def process_IN_MODIFY(self, event):
self.process_IN_MOVED_TO(event)
def process_IN_MOVED_TO(self, event):
# New note / Modify note
tomboy_note = event.pathname
if os.path.isfile(tomboy_note) and os.path.splitext(tomboy_note)[1] == '.note':
ev_note = convert_tomboy_to_evernote(tomboy_note)
if ev_note:
try:
self.evernote.create_or_update_note(ev_note)
self.notes_hash[tomboy_note] = ev_note['title']
logger.info('Updated \'{}\''.format(ev_note['title']))
except:
logger.error('ERROR: Failed to upload \'{}\' note'.format(ev_note['title']))
def process_IN_MOVED_FROM(self, event):
# Delete note
tomboy_note = event.pathname
note_title = self.notes_hash.get(tomboy_note)
if note_title:
try:
self.evernote.remove_note(note_title)
logger.info('Deleted \'{}\''.format(note_title))
self.notes_hash.pop(tomboy_note, None)
except:
logger.error('ERROR: Failed to delete "{}" note'.format(note_title))
handler = EventHandler(evernote=evernote_client, notes_hash=notes)
notifier = pyinotify.Notifier(wm, handler)
wm.add_watch(TOMBOY_DIR, mask, rec=False)
try:
notifier.loop(daemonize=True, pid_file='/tmp/t2ev.pid', stdout='/tmp/t2ev.log')
except pyinotify.NotifierError as ex:
logger.exception('ERROR: notifier exception: {}'.format(ex))
if __name__ == "__main__":
main()
|
absalon-james/usage | usage/meter.py | Python | apache-2.0 | 4,874 | 0 | import datetime
import itertools
import query
import utils
from | exc import InvalidTimeRangeError
from exc import NoSamplesError
from log import logging
from reading import Reading
logger = logging.getLogger('usage.meter')
def _cmp_sample(a, b):
"""Compare two samples.
First compare the resource ids. Compa | re the timestamps if the
resource ids are the same.
:param a: First sample
:param b: Second sample
:return: Result of cmp function.
:rtype: Integer
"""
result = cmp(a.resource_id, b.resource_id)
if result == 0:
result = cmp(a.timestamp, b.timestamp)
return result
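# e.g. sorting a sample list with _cmp_sample groups samples by resource_id and
# orders each group chronologically, which is what the itertools.groupby call
# in _reading_generator expects.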
class Meter:
"""
Class for interacting with a ceilometer meter.
"""
def __init__(self, client, name, max_samples=15000):
"""Init the meter.
:param client: Ceilometer client
:type client: ceilometerclient.client
:param name: Name of the meter
:type name: String
:param max_samples: Max number of samples per query.
:type max_samples: Integer
"""
self.client = client
self.name = name
self.max_samples = max_samples
# Extra time is 4 hours. 4 * 60 * 60 = 14400
self._extra_time = datetime.timedelta(seconds=14400)
def last_non_deleted_sample(self, group):
"""Get last sample that is not in deleted or deleting.
Starts at the end of the group and looks backward for the first sample
that is not in a deleted or deleting status.
If a sample cannot be found, just return the last sample.
:param group: List of samples that are already sorted by timestamp.
:type group: List
:returns: Last non deleted status sample.
:rtype: sample
"""
deleted_status = set(['deleted', 'deleting'])
for i in xrange(len(group) - 1, -1, -1):
if group[i].resource_metadata.get('status') not in deleted_status:
return group[i]
return group[-1]
def _reading_generator(self, samples, start, stop):
"""Yields one reading at a time.
Samples are grouped by resource id(already sorted by resource id)
and then used to create a reading object.
:param samples: List of samples sorted by resource_id and timestamp.
:type samples: List
:param start: Reading start time
:type start: Datetime
:param stop: Reading stop time
:type stop: Datetime
:yields: Reading objects
"""
# Yield a reading for each resource/meter pair
for _, g in itertools.groupby(samples, lambda x: x.resource_id):
try:
yield Reading(list(g), start, stop)
except NoSamplesError:
continue
def read(self, start=None, stop=None, q=None):
"""Read a meter.
:param start: Start date and time.
:type start: datetime
:param stop: Stop date and time.
:type stop: datetime
:param q: List of filters excluding timestamp filters
:type q: List
:return: Value of reading
:rtype: Float
"""
# Default times to month to date
default_start, default_stop = utils.mtd_range()
if not start:
start = default_start
if not stop:
stop = default_stop
logger.info("Start: {}".format(start))
logger.info("Stop: {}".format(stop))
logger.info("Meter name: {}".format(self.name))
if start > stop:
raise InvalidTimeRangeError(start, stop)
# Add times to query. times are +- the extra time.
q = q or []
q.append(query.query(
'timestamp', 'gt', start - self._extra_time, 'datetime'
))
q.append(query.query(
'timestamp', 'le', stop + self._extra_time, 'datetime'
))
schedule = query.Scheduler(
self.client,
self.name,
start - self._extra_time,
stop + self._extra_time,
q=[],
max_samples=self.max_samples
)
for s_start, s_stop, s_query, s_count in schedule:
logger.debug("{} - {} - {}".format(s_start, s_stop, s_count))
logger.debug("Count of scheduled samples {}".format(schedule.count()))
# Get samples
samples = schedule.list()
logger.debug(
"{} samples according to sample-list.".format(len(samples))
)
# Convert timestamps from strings to datetime objects
for s in samples:
s.timestamp = utils.normalize_time(
utils.parse_datetime(s.timestamp)
)
# Sort by resource id and then timestamps in ascending order
samples.sort(cmp=_cmp_sample)
# Return generator
return self._reading_generator(samples, start, stop)
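# Usage sketch (hypothetical client and meter name; read() yields Reading
# objects, one per resource):
#   meter = Meter(ceilometer_client, 'cpu_util')
#   for reading in meter.read(start, stop):
#       print(reading)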
|
Execut3/CTF | IRAN Cert/2016/1- Easy/PPC & Web/project/project/urls.py | Python | gpl-2.0 | 567 | 0.007055 | from django.conf.urls import include, url, patt | erns
from django.contrib import admin
urlpatterns = patterns('',
#url(r'^admin/', include(admin.site.urls)),
url(r'^index$', 'app.views.index'),
url(r'^$', 'app.views.index'),
url(r'^login$', 'app.views.login'),
url(r'^logout$', 'app.views.log_out'),
url(r'^register$', 'app.views.register'),
url(r'^challenge-0', 'challenge_simple_post_10pt.views.index'),
url(r'^challenge-1', 'challenge_prime_sum_30pt.views.index'),
url(r'^cha | llenge-2', 'challenge_easy_math_50pt.views.index'),
) |
plumdog/myhome | urls.py | Python | mit | 620 | 0 | from urls_base import Urls
from slugify import slugify
from settings import ASSET_TAG
urls = Urls()
urls.add('index', '/')
urls.add('projects', '/projects/')
urls. | add('blog', '/blog/ | ')
urls.add('404.html', '/404.html')
urls.add('post', '/post/{slug}.html',
format_func=lambda **kwargs: dict(slug=slugify(kwargs['post'].title)))
urls.add('tag', '/post/tag/{slug}.html',
format_func=lambda **kwargs: dict(slug=slugify(kwargs['tag'])))
urls.add('static', '/static/{path}?_/{asset_tag}',
format_func=lambda **kwargs: dict(asset_tag=ASSET_TAG))
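# Illustrative lookups (hypothetical; exact behaviour depends on urls_base.Urls):
#   get_url('tag', tag='Some Tag')  -> '/post/tag/some-tag.html'
#   get_url('static', path='s.css') -> '/static/s.css?_/<ASSET_TAG>'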
get_url = urls.get_url
get_path = urls.get_path
|
magfest/panels | panels/site_sections/attractions_admin.py | Python | agpl-3.0 | 16,616 | 0.001143 | from uber.common import *
from panels.models.attraction import *
from panels.site_sections.attractions import _attendee_for_badge_num
@all_renderable(c.STUFF)
class Root:
@renderable_override(c.STUFF, c.PEOPLE, c.REG_AT_CON)
def index(self, session, filtered=False, message='', **params):
admin_account = session.current_admin_account()
if filtered:
attraction_filter = [Attraction.owner_id == admin_account.id]
else:
attraction_filter = []
attractions = session.query(Attraction).filter(*attraction_filter) \
.options(
subqueryload(Attraction.department),
subqueryload(Attraction.owner)
.subqueryload(AdminAccount.attendee)) \
.order_by(Attraction.name).all()
return {
'admin_account': admin_account,
'filtered': filtered,
'message': message,
'attractions': attractions
}
def form(self, session, message='', **params):
attraction_id = params.get('id')
if not attraction_id or attraction_id == 'None':
raise HTTPRedirect('index')
if cherrypy.request.method == 'POST':
if 'advance_notices' in params:
ns = listify(params.get('advance_notices', []))
params['advance_notices'] = [int(n) for n in ns if n != '']
attraction = session.attraction(
params,
bools=Attraction.all_bools,
checkgroups=Attraction.all_checkgroups)
message = check(attraction)
if no | t message:
if not attraction.department_id:
attraction.department_id = None
session.add(attraction)
raise HTTPRedirect(
'form?id={}&message={}',
attraction.id,
'{} updated successfully'.format(attraction.name))
else:
attraction = session.quer | y(Attraction) \
.filter_by(id=attraction_id) \
.options(
subqueryload(Attraction.department),
subqueryload(Attraction.features)
.subqueryload(AttractionFeature.events)
.subqueryload(AttractionEvent.attendees)) \
.order_by(Attraction.id).one()
return {
'admin_account': session.current_admin_account(),
'message': message,
'attraction': attraction
}
def new(self, session, message='', **params):
if params.get('id', 'None') != 'None':
raise HTTPRedirect('form?id={}', params['id'])
if 'advance_notices' in params:
ns = listify(params.get('advance_notices', []))
params['advance_notices'] = [int(n) for n in ns if n != '']
admin_account = session.current_admin_account()
attraction = session.attraction(
params,
bools=Attraction.all_bools,
checkgroups=Attraction.all_checkgroups)
if not attraction.department_id:
attraction.department_id = None
if cherrypy.request.method == 'POST':
message = check(attraction)
if not message:
attraction.owner = admin_account
session.add(attraction)
raise HTTPRedirect('form?id={}', attraction.id)
return {
'admin_account': admin_account,
'attraction': attraction,
'message': message,
}
@csrf_protected
def delete(self, session, id, message=''):
if cherrypy.request.method == 'POST':
attraction = session.query(Attraction).get(id)
attendee = session.admin_attendee()
if not attendee.can_admin_attraction(attraction):
raise HTTPRedirect(
'form?id={}&message={}',
id,
"You cannot delete an attraction that you don't own")
session.delete(attraction)
raise HTTPRedirect(
'index?message={}',
'The {} attraction was deleted'.format(attraction.name))
raise HTTPRedirect('form?id={}', id)
def feature(self, session, attraction_id=None, message='', **params):
if not attraction_id or attraction_id == 'None':
attraction_id = None
if not attraction_id \
and (not params.get('id') or params.get('id') == 'None'):
raise HTTPRedirect('index')
feature = session.attraction_feature(
params,
bools=AttractionFeature.all_bools,
checkgroups=AttractionFeature.all_checkgroups)
attraction_id = feature.attraction_id or attraction_id
attraction = session.query(Attraction).filter_by(id=attraction_id) \
.order_by(Attraction.id).one()
if cherrypy.request.method == 'POST':
if feature.is_new:
feature.attraction_id = attraction_id
message = check(feature)
if not message:
session.add(feature)
raise HTTPRedirect(
'form?id={}&message={}',
attraction_id,
'The {} feature was successfully {}'.format(
feature.name, 'created' if feature.is_new else 'updated'))
session.rollback()
return {
'attraction': attraction,
'feature': feature,
'message': message
}
@csrf_protected
def delete_feature(self, session, id):
feature = session.query(AttractionFeature).get(id)
attraction_id = feature.attraction_id
message = ''
if cherrypy.request.method == 'POST':
attraction = session.query(Attraction).get(attraction_id)
if not session.admin_attendee().can_admin_attraction(attraction):
message = "You cannot delete a feature from an attraction you don't own"
else:
session.delete(feature)
raise HTTPRedirect(
'form?id={}&message={}',
attraction_id,
'The {} feature was deleted'.format(feature.name))
if not message:
raise HTTPRedirect('form?id={}', attraction_id)
else:
raise HTTPRedirect('form?id={}&message={}', attraction_id, message)
@csv_file
def export_feature(self, out, session, id):
from uber.decorators import _set_response_filename
feature = session.query(AttractionFeature).get(id)
_set_response_filename('{}.csv'.format(filename_safe(feature.name)))
out.writerow(['Name', 'Signup Time', 'Checkin Time'])
for event in feature.events:
for signup in event.signups:
out.writerow([
signup.attendee.full_name,
signup.signup_time_label,
signup.checkin_time_label
])
def event(
self,
session,
attraction_id=None,
feature_id=None,
previous_id=None,
delay=0,
message='',
**params):
if not attraction_id or attraction_id == 'None':
attraction_id = None
if not feature_id or feature_id == 'None':
feature_id = None
if not previous_id or previous_id == 'None':
previous_id = None
if not attraction_id and not feature_id and not previous_id \
and (not params.get('id') or params.get('id') == 'None'):
raise HTTPRedirect('index')
event = session.attraction_event(
params,
bools=AttractionEvent.all_bools,
checkgroups=AttractionEvent.all_checkgroups)
if not event.is_new:
attraction_id = event.feature.attraction_id
previous = None
feature = None
if feature_id:
feature = session.query(AttractionFeature).get(feature_id)
attraction_id = feature |
congminghaoxue/learn_python | xml_rpc/SimpleXMLRPCServer.py | Python | apache-2.0 | 1,576 | 0.003173 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-08-15 10:11:05
# @Author : Zhou Bo (zhoub@suooter.com)
# @Link : http://onlyus.online
# @Version : $Id$
try:
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from SocketServer import ThreadingMixIn
except ImportError:
from socketserver import ThreadingMixIn
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
# from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
#Threaded XML-RPC
class TXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer): pass
# Restrict to a particular path.
# class RequestHandler(SimpleXMLRPCRequestHandler):
# rpc_paths = ('/RPC2',)
server = TXMLRPCServer(('', 8000), SimpleXMLRPCRequestHandler)
# Create server
# server = SimpleXMLRPCServer(("localhost", 8000),
# requestHandler=RequestHandler)
se | rver.register_introspection_functions()
# Register pow() function; this will use the value of
# pow.__name__ as the name, which is just 'pow'.
server.register_function(pow)
# add two values
def adder_function(x, y):
'''
add two values
'''
return x + y
server.register_function(adder_function, 'add')
| # Register an instance; all the methods of the instance are
# published as XML-RPC methods (in this case, just 'mul').
class MyFuncs:
def mul(self, x, y):
return x * y
server.register_instance(MyFuncs())
# Run the server's main loop
server.serve_forever()
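# Client-side sketch (not part of this file; Python 2 uses xmlrpclib,
# Python 3 uses xmlrpc.client):
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://localhost:8000')
#   proxy.add(2, 3)   # -> 5
#   proxy.mul(4, 5)   # -> 20
#   proxy.pow(2, 8)   # -> 256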
|
nirbheek/cerbero | cerbero/packages/wix_packager.py | Python | lgpl-2.1 | 13,420 | 0.000745 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from zipfile import ZipFile
from cerbero.errors import EmptyPackageError
from cerbero.packages import PackagerBase, PackageType
from cerbero.packages.package import Package, App
from cerbero.utils import messages as m
from cerbero.utils import shell, to_winepath, get_wix_prefix
from cerbero.tools import strip
from cerbero.packages.wix import MergeModule, VSMergeModule, MSI, WixConfig, Fragment
from cerbero.packages.wix import VSTemplatePackage
from cerbero.config import Platform
class MergeModulePackager(PackagerBase):
def __init__(self, config, package, store):
PackagerBase.__init__(self, config, package, store)
self._with_wine = config.platform != Platform.WINDOWS
self.wix_prefix = get_wix_prefix()
def pack(self, output_dir, devel=False, force=False, keep_temp=False):
PackagerBase.pack(self, output_dir, devel, force, keep_temp)
paths = []
# create runtime package
p = self.create | _merge_module(output_dir, PackageType.RUNTIME, force,
self.package.version, keep_temp)
paths.append(p)
if devel:
p = self.create_merge_module(output_dir, PackageType.DEVEL, force,
self.package.version, keep_temp)
paths.append(p)
return paths
def create_merge_module(self, output_dir, package_type, force, version,
keep_temp, keep_stri | p_temp_dir=False):
self.package.set_mode(package_type)
files_list = self.files_list(package_type, force)
if isinstance(self.package, VSTemplatePackage):
mergemodule = VSMergeModule(self.config, files_list, self.package)
else:
mergemodule = MergeModule(self.config, files_list, self.package)
tmpdir = None
# For packages that requires stripping object files, we need
# to copy all the files to a new tree and strip them there:
if self.package.strip:
tmpdir = tempfile.mkdtemp()
for f in files_list:
src = os.path.join(self.config.prefix, f)
dst = os.path.join(tmpdir, f)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
shutil.copy(src, dst)
s = strip.Strip(self.config, self.package.strip_excludes)
for p in self.package.strip_dirs:
s.strip_dir(os.path.join(tmpdir, p))
package_name = self._package_name(version)
if self.package.wix_use_fragment:
mergemodule = Fragment(self.config, files_list, self.package)
sources = [os.path.join(output_dir, "%s-fragment.wxs" % package_name)]
wixobjs = [os.path.join(output_dir, "%s-fragment.wixobj" % package_name)]
else:
mergemodule = MergeModule(self.config, files_list, self.package)
sources = [os.path.join(output_dir, "%s.wxs" % package_name)]
wixobjs = [os.path.join(output_dir, "%s.wixobj" % package_name)]
if tmpdir:
mergemodule.prefix = tmpdir
mergemodule.write(sources[0])
for x in ['utils']:
wixobjs.append(os.path.join(output_dir, "%s.wixobj" % x))
sources.append(os.path.join(os.path.abspath(self.config.data_dir),
'wix/%s.wxs' % x))
if self._with_wine:
final_wixobjs = [to_winepath(x) for x in wixobjs]
final_sources = [to_winepath(x) for x in sources]
else:
final_wixobjs = wixobjs
final_sources = sources
candle = Candle(self.wix_prefix, self._with_wine)
candle.compile(' '.join(final_sources), output_dir)
if self.package.wix_use_fragment:
path = wixobjs[0]
else:
light = Light(self.wix_prefix, self._with_wine)
path = light.compile(final_wixobjs, package_name, output_dir, True)
# Clean up
if not keep_temp:
os.remove(sources[0])
if not self.package.wix_use_fragment:
for f in wixobjs:
os.remove(f)
try:
os.remove(f.replace('.wixobj', '.wixpdb'))
except:
pass
if keep_strip_temp_dir:
return (path, tmpdir)
elif tmpdir:
shutil.rmtree(tmpdir)
return path
def _package_name(self, version):
if self.config.variants.uwp:
platform = 'uwp'
elif self.config.variants.visualstudio:
platform = 'msvc'
else:
platform = 'mingw'
if self.config.variants.visualstudio and self.config.variants.vscrt == 'mdd':
platform += '+debug'
return "%s-%s-%s-%s" % (self.package.name, platform,
self.config.target_arch, version)
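# e.g. a 64-bit MSVC debug build of a package named 'mypackage' (hypothetical)
# at version '1.0' yields 'mypackage-msvc+debug-x86_64-1.0'.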
class MSIPackager(PackagerBase):
UI_EXT = '-ext WixUIExtension'
UTIL_EXT = '-ext WixUtilExtension'
def __init__(self, config, package, store):
PackagerBase.__init__(self, config, package, store)
self._with_wine = config.platform != Platform.WINDOWS
self.wix_prefix = get_wix_prefix()
def pack(self, output_dir, devel=False, force=False, keep_temp=False):
self.output_dir = os.path.realpath(output_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.force = force
self.keep_temp = keep_temp
paths = []
self.merge_modules = {}
# create runtime package
p = self._create_msi_installer(PackageType.RUNTIME)
paths.append(p)
# create devel package
if devel and not isinstance(self.package, App):
p = self._create_msi_installer(PackageType.DEVEL)
paths.append(p)
# create zip with merge modules
if not self.package.wix_use_fragment:
self.package.set_mode(PackageType.RUNTIME)
zipf = ZipFile(os.path.join(self.output_dir, '%s-merge-modules.zip' %
self._package_name()), 'w')
for p in self.merge_modules[PackageType.RUNTIME]:
zipf.write(p)
zipf.close()
if not keep_temp and not self.package.wix_use_fragment:
for msms in list(self.merge_modules.values()):
for p in msms:
os.remove(p)
return paths
def _package_name(self):
if self.config.variants.uwp:
platform = 'uwp'
elif self.config.variants.visualstudio:
platform = 'msvc'
else:
platform = 'mingw'
if self.config.variants.visualstudio and self.config.variants.vscrt == 'mdd':
platform += '+debug'
return "%s-%s-%s-%s" % (self.package.name, platform,
self.config.target_arch, self.package.version)
def _create_msi_installer(self, package_type):
self.package.set_mode(package_type)
self.packagedeps = self.store.get_package_deps(self.package, True)
if isinstance(self.package, App):
self.packagedeps = [self.package]
tm |
spvkgn/youtube-dl | youtube_dl/extractor/giga.py | Python | unlicense | 3,820 | 0.002357 | # coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..utils import (
qualities,
compat_str,
parse_duration,
parse_iso8601,
str_to_int,
)
class GigaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?giga\.de/(?:[^/]+/)*(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.giga.de/filme/anime-awesome/trailer/anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss/',
'md5': '6bc5535e945e724640664632055a584f',
'info_dict': {
'id': '2622086',
'display_id': 'anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss',
'ext': 'mp4',
'title': 'Anime Awesome: Chihiros Reise ins Zauberland – Das Beste kommt zum Schluss',
'description': 'md5:afdf5862241aded4718a30dff6a57baf',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 578,
'timestamp': 1414749706,
'upload_date': '20141031',
'uploader': 'Robin Schweiger',
'view_count': int,
},
}, {
| 'url': 'http://www.giga.de/games/channel/giga-top-montag/giga-topmontag-die-besten-serien-2014/',
| 'only_matching': True,
}, {
'url': 'http://www.giga.de/extra/netzkultur/videos/giga-games-tom-mats-robin-werden-eigene-wege-gehen-eine-ankuendigung/',
'only_matching': True,
}, {
'url': 'http://www.giga.de/tv/jonas-liest-spieletitel-eingedeutscht-episode-2/',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
[r'data-video-id="(\d+)"', r'/api/video/jwplayer/#v=(\d+)'],
webpage, 'video id')
playlist = self._download_json(
'http://www.giga.de/api/syndication/video/video_id/%s/playlist.json?content=syndication/key/368b5f151da4ae05ced7fa296bdff65a/'
% video_id, video_id)[0]
quality = qualities(['normal', 'hd720'])
formats = []
for format_id in itertools.count(0):
fmt = playlist.get(compat_str(format_id))
if not fmt:
break
formats.append({
'url': fmt['src'],
'format_id': '%s-%s' % (fmt['quality'], fmt['type'].split('/')[-1]),
'quality': quality(fmt['quality']),
})
self._sort_formats(formats)
title = self._html_search_meta(
'title', webpage, 'title', fatal=True)
description = self._html_search_meta(
'description', webpage, 'description')
thumbnail = self._og_search_thumbnail(webpage)
duration = parse_duration(self._search_regex(
r'(?s)(?:data-video-id="{0}"|data-video="[^"]*/api/video/jwplayer/#v={0}[^"]*")[^>]*>.+?<span class="duration">([^<]+)</span>'.format(video_id),
webpage, 'duration', fatal=False))
timestamp = parse_iso8601(self._search_regex(
r'datetime="([^"]+)"', webpage, 'upload date', fatal=False))
uploader = self._search_regex(
r'class="author">([^<]+)</a>', webpage, 'uploader', fatal=False)
view_count = str_to_int(self._search_regex(
r'<span class="views"><strong>([\d.,]+)</strong>',
webpage, 'view count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'view_count': view_count,
'formats': formats,
}
|
giorgiobornia/femus | applications/2021_fall/hdongamm/input/parametric_squre_himali2.py | Python | lgpl-2.1 | 2,834 | 0.016937 | #!/usr/bin/env python
###
### This file is generated automatically by SALOME v9.7.0 with dump python functionality
###
import sys
import salome
salome.salome_init()
import salome_notebook
notebook = salome_notebook.NoteBook()
sys.path.insert(0, r'/home/student/software/femus/applications/2021_fall/hdongamm/input')
####################################################
## Begin of NoteBook variables section ##
####################################################
notebook.set("l_x", 1)
notebook.set("l_y", 0.5)
notebook.set("l_x_half", "0.5*l_x")
notebook.set("l_y_half", "0.5*l_y")
####################################################
## End of NoteBook variables section ##
####################################################
###
### GEOM component
###
import GEOM
from salome.geom import geomBuilder
import math
import SALOMEDS
geompy = geomBuilder.New()
O = geompy.MakeVertex(0, 0, 0)
OX = geompy.MakeVectorDXDYDZ(1, 0, 0)
OY = geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ = geompy.MakeVectorDXDYDZ(0, 0, 1)
O_1 = geompy.MakeVertex(0, 0, 0)
OX_1 = geompy.MakeVectorDXDYDZ(1, 0, 0)
OY_1 = geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ_1 = geompy.MakeVectorDXDYDZ(0, 0, 1)
Face_1 = geompy.MakeFaceHW("l_x", "l_y", 1)
Translation_1 = geompy.MakeTranslation(Face_1, "l_x_half", "l_y_half", 0)
Translation_2 = geompy.MakeTranslation(Face_1, "l_x_half", "l_y_half", 0)
geompy.addToStudy( O, 'O' )
geompy.addToStudy( OX, 'OX' )
geompy.addToStudy( OY, 'OY' )
geompy.addToStudy( OZ, 'OZ' )
geompy.addToStudy( O_1, 'O' )
geompy.addToStudy( OX_1, 'OX' )
geompy.addToStudy( OY_1, 'OY' )
geompy.addToStudy( OZ_1, 'OZ' )
geompy.addToStudy( Face_1, 'Face_1' )
geompy.addToStudy( Translation_1, 'Translation_1' )
geompy.addToStudy( Translation_2, 'Translation_2' )
###
### SMESH component
###
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New()
#smesh.SetEnablePublish( False ) # Set to False to avoid publishing in the study if not needed or in some particular situations:
# multiple meshes built in parallel, complex and numerous mesh edits (performance)
Mesh_1 = smesh.Mesh(Translation_1)
NETGEN_2D = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_2D)
status = Mesh_1.RemoveHypothesis(NETGEN_2D)
MEFISTO_2D = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO)
Regular_1D = Mesh_1.Segment()
Number_of_Segments_1 = Regular_1D.NumberOfSegments(4)
Number_of_Segments_1.SetNumberOfSegments( 8 )
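# Note: the 1D hypothesis above is created with 4 segments per edge and then
# overridden to 8 via SetNumberOfSegments before Compute() builds the mesh.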
isDone = Mesh_1.Compute()
## Set names of Mesh objects
smesh.SetName(NETGEN_2D.GetAlgorithm(), 'NETGEN 2D')
smesh.SetName(Regular_1D.GetAlgorithm(), 'Regular_1D')
smesh.SetName(MEFISTO_2D.GetAlgorithm(), 'MEFISTO_2D')
smesh.SetName(Number_of_Segments_1, 'Number of Segments_1')
smesh.SetName(Mesh_1.GetMesh(), 'Mesh_1')
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser()
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpoid.py | Python | apache-2.0 | 6,179 | 0.036899 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpoid(base_resource) :
""" Configuration for SNMP Object Identifier resource. """
def __init__(self) :
self._entitytype = ""
self._name = ""
self._Snmpoid = ""
self.___count = 0
@property
def entitytype(self) :
ur"""The type of entity whose SNMP OIDs you want to displayType of entity whose SNMP OIDs you want the NetScaler appliance to display.<br/>Possible values = VSERVER, SERVICE, SERVICEGROUP.
"""
try :
return self._entitytype
except Exception as e:
raise e
@entitytype.setter
def entitytype(self, entitytype) :
ur"""The type of entity whose SNMP OIDs you want to displayType of entity who | se SNMP OIDs you want the NetScaler appliance to display.<br/>Possible values = VSERVER, SERVICE, SERVICEGROUP
"""
try :
self._entitytype = entitytype
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the entity wh | ose SNMP OID you want the NetScaler appliance to display.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the entity whose SNMP OID you want the NetScaler appliance to display.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def Snmpoid(self) :
ur"""The snmp oid.
"""
try :
return self._Snmpoid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(snmpoid_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmpoid
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the snmpoid resources that are configured on netscaler.
"""
try :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [snmpoid() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
ur""" Use this API to fetch all the snmpoid resources that are configured on netscaler.
# This uses snmpoid_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = snmpoid()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_, obj) :
ur""" Use this API to fetch filtered set of snmpoid resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client, obj) :
ur""" Use this API to count the snmpoid resources configured on NetScaler.
"""
try :
option_ = options()
option_.count = True
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_, obj) :
ur""" Use this API to count filtered the set of snmpoid resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.count = True
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Entitytype:
VSERVER = "VSERVER"
SERVICE = "SERVICE"
SERVICEGROUP = "SERVICEGROUP"
class snmpoid_response(base_response) :
def __init__(self, length=1) :
self.snmpoid = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.snmpoid = [snmpoid() for _ in range(length)]
|
AlvarBer/Persimmon | persimmon/view/util/types.py | Python | mit | 673 | 0.002972 | from enum import Enum
from abc import ABCMeta
from kivy.uix.widget import WidgetMetaclass
class AbstractWidget(ABCMeta, WidgetMetaclass):
""" Necessary because python meta | class | es do not support multiple
inheritance. """
pass
class Type(Enum):
ANY = 0.9, 0.9, 0.9
DATAFRAME = .667, .224, .224
CLASSIFICATOR = .667, .424, .224
CROSS_VALIDATOR = .133, .4, .4
STATE = .667, .667, .224
STR = .408, .624, .608
class BlockType(Enum):
IO = .667, .224, .224
CLASSIFICATOR = .667, .424, .224
MODEL_SELECTION = .176, .533, .176
CROSS_VALIDATOR = .133, .4, .4
STATE = .667, .667, .224
FIT_AND_PREDICT = .345, .165, .447
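# The numeric triples above are assumed to be RGB components in the 0-1 range
# (Kivy's color convention), e.g. Type.DATAFRAME.value == (.667, .224, .224).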
|
uian/docker-py | docker/unixconn/unixconn.py | Python | apache-2.0 | 2,986 | 0 | # Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
if six.PY3:
import http.client as httplib
else:
import httplib
import requests.adapters
import socket
try:
import urllib3
except ImportError:
import requests.packages.urllib3 as urllib3
class UnixHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, base_url, unix_socket, timeout=60):
httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.base_url.replace("http+unix:/", ""))
self.sock = sock
def _extract_path(self, url):
# remove the base_url entirely..
return url.replace(self.base_url, "")
def request(self, method, url, **kwargs):
url = self._extract_path(self.unix_socket)
super(UnixHTTPConnection, self).request(method, url, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60):
urllib3.connectionpool.HTTPConnectionPool.__init__(
self, 'localhost', timeout=timeout
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(self.base_url, self.socket_path,
self.timeout)
class UnixAdapter(requests.adapters.HTTPAdapter):
def __init__(self, base_url, timeout=60):
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
self.base_url = base_url
self.timeout = timeout
self.pools = RecentlyUsedContainer(10,
dispose_func=lambda p: p.close())
super(UnixAdapter, self).__init__()
def get_connection(self, socket_path, proxies=None):
with self.pools.lock:
pool = self.pools.get(socket_path)
if pool:
return pool
pool = UnixHTTPConnectionPool(self.base_url,
socket_path,
self.timeout)
self.pools[socket_path] = pool
return pool
def close(self):
self.pools.clear()
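# Usage sketch (assumed wiring, mirroring how docker-py mounts this adapter):
#   session = requests.Session()
#   session.mount('http+unix://', UnixAdapter('http+unix://var/run/docker.sock'))
# so requests sent through the session are routed over the Unix socket.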
|
pombredanne/or-tools | tools/setup_py3.py | Python | apache-2.0 | 2,291 | 0.014841 | from setuptools import setup, Extension
from os.path import join as pjoin
from os.path import dirname
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(pjoin(dirname(__file__), fname)).read()
dummy_module = Extension('dummy_ortools_dependency',
sources = ['dummy/dummy_ortools_dependency.cc'],
DELETEUNIX extra_link_args=['/MANIFEST'],
)
setup(
name='py3-ortools',
version='2.VVVV',
packages=[
'ortools',
'ortools.algorithms',
'ortools.constraint_solver',
'ortools.graph',
'ortools.linear_solver',],
ext_modules = [dummy_module],
install_requires = [
'protobuf >= 2.8.0'],
package_data = {
'ortools.constraint_solver' : ['_pywrapcp.dll'],
'ortools.linear_solver' : ['_pywraplp.dll'],
'ortools.graph' : ['_pywrapgraph.dll'],
'ortools.algorithms' : ['_pywrapknapsack_solver.dll'],
DELETEWIN 'ortools' : ['libortools.DLL']
},
license='Apache 2.0',
author = 'Google Inc',
author_email = 'lperron@google.com',
description = 'Google OR-Tools python libraries and modules',
keywords = ('operations research, constraint programming, ' +
'linear programming,' + 'flow algorithms,' +
'python'),
url = 'https://developers.google.com/optimization/',
download_url = 'https://github.com/google/or-tools/releases',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Topic :: Office/Business :: Scheduling',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
long_description = read('README.txt'),
)
|
jsbUSMC/django-edge-api | apps/core/renderers.py | Python | mit | 977 | 0 | import json
from rest_framework.renderers import JSONRenderer
class CustomJSONRenderer(JSONRenderer):
charset = 'utf-8'
object_label = 'object'
pagination_object_label = 'objects'
pagination_count_label = 'count'
def render(self, data, media_type=None, renderer_context=None):
if data.get('results', None) is not None:
return json.dumps({
self.pagination_object_label: data['results'],
self.pagination_count_label: data['count']
})
# If the view throws an error (such as the user can't be authenticated
# or something similar), `data` will contain an `errors` key. We want
# the default JSONRenderer to handle rendering errors, so we need to
# check for this case.
elif data.get('errors', None) is not None:
return super(CustomJSONRenderer, self).render(data)
return json.dumps({
self.object_label: data
})
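# Usage sketch (illustrative; the import path is assumed from this file's
# location): wiring this renderer through Django REST framework settings, e.g.
#   REST_FRAMEWORK = {'DEFAULT_RENDERER_CLASSES': ('apps.core.renderers.CustomJSONRenderer',)}
# makes paginated responses serialize as {"objects": [...], "count": N}.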
|
iwm911/plaso | plaso/lib/timelib_test.py | Python | apache-2.0 | 19,167 | 0.004017 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a unit test for the timelib in Plaso."""
import calendar
import datetime
import unittest
from plaso.lib import timelib
import pytz
def CopyStringToTimestamp(time_string):
"""Copies a string containing a date and time value to string.
Test function that does not rely on dateutil parser.
Args:
time_string: A string containing a date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. Both the seconds fraction
and timezone offset are optional. The default timezone is UTC.
Returns:
An integer containing the timestamp.
Raises:
ValueError: if the time string is invalid or not supported.
"""
time_string_length = len(time_string)
# The time string should at least contain 'YYYY-MM-DD hh:mm:ss'.
if (time_string_length < 19 or time_string[4] != '-' or
time_string[7] != '-' or time_string[10] != ' ' or
time_string[13] != ':' or time_string[16] != ':'):
raise ValueError(u'Invalid time string.')
try:
year = int(time_string[0:4], 10)
except ValueError:
raise ValueError(u'Unable to parse year.')
try:
month = int(time_string[5:7], 10)
except ValueError:
raise ValueError(u'Unable to parse month.')
if month not in range(1, 13):
raise ValueError(u'Month value out of bounds.')
try:
day_of_month = int(time_string[8:10], 10)
except ValueError:
raise ValueError(u'Unable to parse day of month.')
if day_of_month not in range(1, 32):
raise ValueError(u'Day of month value out of bounds.')
try:
hours = int(time_string[11:13], 10)
except ValueError:
raise ValueError(u'Unable to parse hours.')
if hours not in range(0, 24):
raise ValueError(u'Hours value out of bounds.')
try:
minutes = int(time_string[14:16], 10)
except ValueError:
raise ValueError(u'Unable to parse minutes.')
if minutes not in range(0, 60):
raise ValueError(u'Minutes value out of bounds.')
try:
seconds = int(time_string[17:19], 10)
except ValueError:
raise ValueError(u'Unable to parse seconds.')
if seconds not in range(0, 60):
raise ValueError(u'Seconds value out of bounds.')
micro_seconds = 0
timezone_offset = 0
if time_string_length > 19:
if time_string[19] != '.':
timezone_index = 19
else:
for timezone_index in range(19, time_string_length):
if time_string[timezone_index] in ['+', '-']:
break
# The calculations that follow rely on the timezone index to point
# beyond the string in case no timezone offset was defined.
if timezone_index == time_string_length - 1:
timezone_index += 1
if timezone_index > 19:
fraction_of_seconds_length = timezone_index - 20
if fraction_of_seconds_length not in [3, 6]:
raise ValueError(u'Invalid time string.')
try:
micro_seconds = int(time_string[20:timezone_index], 10)
except ValueError:
raise ValueError(u'Unable to parse fraction of seconds.')
if fraction_of_seconds_length == 3:
micro_seconds *= 1000
if timezone_index < time_string_length:
if (time_string_length - timezone_index != 6 or
time_string[timezone_index + 3] != ':'):
raise ValueError(u'Invalid time string.')
try:
timezone_offset = int(time_string[
timezone_index + 1:timezone_index + 3])
except ValueError:
raise ValueError(u'Unable to parse timezone hours offset.')
if timezone_offset not in range(0, 24):
raise ValueError(u'Timezone hours offset value out of bounds.')
# Note that when the sign of the timezone offset is negative
# the difference needs to be added. We do so by flipping the sign.
if time_string[timezone_index] == '-':
timezone_offset *= 60
else:
timezone_offset *= -60
try:
timezone_offset += int(time_string[
timezone_index + 4:timezone_index + 6])
except ValueError:
raise ValueError(u'Unable to parse timezone minutes offset.')
timezone_offset *= 60
timestamp = int(calendar.timegm((
year, month, day_of_month, hours, minutes, seconds)))
return ((timestamp + timezone_offset) * 1000000) + micro_seconds
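# Example (illustrative): CopyStringToTimestamp('2013-07-08 21:30:45') returns
# the number of microseconds since 1970-01-01 for that UTC moment; the tests
# below compare such values against timelib conversions.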
class TimeLibUnitTest(unittest.TestCase):
"""A unit test for the timelib."""
def testCocoaTime(self):
"""Tests the Cocoa timestamp conversion."""
self.assertEquals(
timelib.Timestamp.FromCocoaTime(395011845),
CopyStringToTimestamp('2013-07-08 21:30:45'))
self.assertEquals(
timelib.Timestamp.FromCocoaTime(395353142),
CopyStringToTimestamp('2013-07-12 20:19:02'))
self.assertEquals(
timelib.Timestamp.FromCocoaTime(394993669),
CopyStringToTimestamp('2013-07-08 16:27:49'))
def testHFSTimes(self):
"""Tests the HFS timestamp conversion."""
self.assertEquals(
timelib.Timestamp.FromHfsTime(
3458215528, timezone=pytz.timezone('EST5EDT'), is_dst=True),
CopyStringToTimestamp('2013-08-01 15:25:28-04:00'))
self.assertEquals(
timelib.Timestamp.FromHfsPlusTime(3458215528),
CopyStringToTimestamp('2013-08-01 15:25:28'))
self.assertEquals(
timelib.Timestamp.FromHfsPlusTime(3413373928),
CopyStringToTimestamp('2012-02-29 15:25:28'))
def testTimestampIsLeapYear(self):
"""Tests the is leap year check."""
self.assertEquals(timelib.Timestamp.IsLeapYear(2012), True)
self.assertEquals(timelib.Timestamp.IsLeapYear(2013), False)
self.assertEquals(timelib.Timestamp.IsLeapYear(2000), True)
self.assertEquals(timelib.Timestamp.IsLeapYear(1900), False)
def testTimestampDaysInMonth(self):
"""Tests the days in month function."""
self.assertEquals(timelib.Timestamp.DaysInMonth(0, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(1, 2013), 28)
self.assertEquals(timelib.Timestamp.DaysInMonth(1, 2012), 29)
self.assertEquals(timelib.Timestamp.DaysInMonth(2, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(3, 2013), 30)
self.assertEquals(timelib.Timestamp.DaysInMonth(4, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(5, 2013), 30)
self.assertEquals(timelib.Timestamp.DaysInMonth(6, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(7, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(8, 2013), 30)
self.assertEquals(timelib.Timestamp.DaysInMonth(9, 2013), 31)
self.assertEquals(timelib.Timestamp.DaysInMonth(10, 2013), 30)
self.assertEquals(timelib.Timestamp.DaysInMonth(11, 2013), 31)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(-1, 2013)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(12, 2013)
def testTimestampDaysInYear(self):
"""Test the days in year function."""
self.assertEquals(timelib.Timestamp.DaysInYear(2013), 365)
self.assertEquals(timelib.Timestamp.DaysInYear(2012), 366)
def testTimestampDayOfYear(self):
"""Test the day of year function."""
self.assertEquals(timelib.Timestamp.DayOfYear(0, 0, 2013), 0)
self.assertEquals(timelib.Timestamp.DayOfYear(0, 2, 2013), 31 + 28)
self.assertEquals(timelib.Timestamp.DayOfYear(0, 2, 2012), 31 + 29)
self.assertEquals(timelib.Tim |
ronnix/fabtools | fabtools/service.py | Python | bsd-2-clause | 3,226 | 0 | """
System services
===============
This module provides low-level tools for managing system services,
using the ``service`` command. It supports both `upstart`_ services
and traditional SysV-style ``/etc/init.d/`` scripts.
.. _upstart: http://upstart.ubuntu.com/
"""
from fabric.api import hide, settings
from fabtools import systemd
from fabtools.system import using_systemd, distrib_family
from fabtools.utils import run_as_root
def is_running(service):
"""
Check if a service is running.
::
import fabtools
if fabtools.service.is_running('foo'):
print "Service foo is running!"
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'),
warn_only=True):
if using_systemd():
return systemd.is_running(service)
else:
if distrib_family() != "gentoo":
test_upstart = run_as_root('test -f /etc/init/%s.conf' %
service)
status = _service(service, 'status')
if test_upstart.succeeded:
return 'running' in status
else:
return status.succeeded
else:
# gentoo
status = _service(service, 'status')
return ' started' in status
def start(service):
"""
Start a service.
::
import fabtools
# Start service if it is not running
if not fabtools.service.is_running('foo'):
fabtools.service.start('foo')
"""
_service(service, 'start')
def stop(service):
"""
Stop a service.
::
import fabtools
# Stop service if it is running
if fabtools.service.is_running('foo'):
fabtools.service.stop('foo')
"""
_service(service, 'stop')
def restart(service):
"""
Restart a service.
::
import fabtools
# Start service, or restart it if it is already running
if fabtools.service.is_running('foo'):
fabtools.service.restart('foo')
else:
fabtools.service.start('foo')
"""
_service(service, 'restart')
def reload(service):
"""
Reload a service.
::
import fabtools
# Reload service
fabtools.service.reload('foo')
.. warning::
The service needs to support the ``reload`` operation.
"""
_service(service, 'reload')
def force_reload(service):
"""
Force reload a service.
::
import fabtools
# Force reload service
fabtools.service.force_reload('foo')
.. warning::
The service needs to support the ``force-reload`` operation.
"""
_service(service, 'force-reload')
def _service(service, action):
"""
Compatibility layer for distros that use ``service`` and those that don't.
"""
if distrib_family() != "gentoo":
status = run_as_root('service %(service)s %(action)s' % locals(),
pty=False)
else:
# gentoo
status = run_as_root('/etc/init.d/%(service)s %(action)s' % locals(),
pty=False)
return status
|
wonder-sk/QGIS | python/plugins/processing/algs/qgis/ui/FieldsCalculatorDialog.py | Python | gpl-2.0 | 9,650 | 0.000933 | # -*- coding: utf-8 -*-
"""
***************************************************************************
FieldsCalculatorDialog.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QSettings
from qgis.PyQt.QtWidgets import QDialog, QFileDialog, QApplication, QMessageBox
from qgis.PyQt.QtGui import QCursor
from qgis.core import QgsExpressionContext, QgsExpressionContextUtils
from qgis.gui import QgsEncodingFileDialog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.gui.AlgorithmExecutor import runalg
from processing.tools import dataobjects
from processing.gui.Postprocessing import handleAlgorithmResults
pluginPath = os.path.dirname(__file__)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'DlgFieldsCalculator.ui'))
class FieldsCalculatorDialog(BASE, WIDGET):
def __init__(self, alg):
super(FieldsCalculatorDialog, self).__init__(None)
self.setupUi(self)
self.executed = False
self.alg = alg
self.layer = None
self.cmbInputLayer.currentIndexChanged.connect(self.updateLayer)
self.btnBrowse.clicked.connect(self.selectFile)
self.mNewFieldGroupBox.toggled.connect(self.toggleExistingGroup)
self.mUpdateExistingGroupBox.toggled.connect(self.toggleNewGroup)
self.mOutputFieldTypeComboBox.currentIndexChanged.connect(self.setupSpinboxes)
# Default values for field width and precision
self.mOutputFieldWidthSpinBox.setValue(10)
self.mOutputFieldPrecisionSpinBox.setValue(3)
# Output is a shapefile, so limit maximum field name length
self.mOutputFieldNameLineEdit.setMaxLength(10)
self.manageGui()
def manageGui(self):
if hasattr(self.leOutputFile, 'setPlaceholderText'):
self.leOutputFile.setPlaceholderText(
self.tr('[Save to temporary file]'))
self.mOutputFieldTypeComboBox.blockSignals(True)
for t in self.alg.type_names:
self.mOutputFieldTypeComboBox.addItem(t)
self.mOutputFieldTypeComboBox.blockSignals(False)
self.cmbInputLayer.blockSignals(True)
layers = dataobjects.getVectorLayers()
for layer in layers:
self.cmbInputLayer.addItem(layer.name())
self.cmbInputLayer.blockSignals(False)
self.builder.loadRecent('fieldcalc')
self.initContext()
self.updateLayer()
def initContext(self):
exp_context = self.builder.expressionContext()
exp_context.appendScope(QgsExpressionContextUtils.globalScope())
exp_context.appendScope(QgsExpressionContextUtils.projectScope())
exp_context.appendScope(QgsExpressionContextUtils.layerScope(self.layer))
exp_context.lastScope().setVariable("row_number", 1)
exp_context.setHighlightedVariables(["row_number"])
self.builder.setExpressionContext(exp_context)
def updateLayer(self):
self.layer = dataobjects.getObject(self.cmbInputLayer.currentText())
self.builder.setLayer(self.layer)
self.builder.loadFieldNames()
self.populateFields()
def setupSpinboxes(self, index):
if index != 0:
self.mOutputFieldPrecisionSpinBox.setEnabled(False)
else:
self.mOutputFieldPrecisionSpinBox.setEnabled(True)
if index == 0:
self.mOutputFieldWidthSpinBox.setRange(1, 20)
self.mOutputFieldWidthSpinBox.setValue(10)
self.mOutputFieldPrecisionSpinBox.setRange(0, 15)
self.mOutputFieldPrecisionSpinBox.setValue(3)
elif index == 1:
self.mOutputFieldWidthSpinBox.setRange(1, 10)
self.mOutputFieldWidthSpinBox.setValue(10)
elif index == 2:
self.mOutputFieldWidthSpinBox.setRange(1, 255)
self.mOutputFieldWidthSpinBox.setValue(80)
else:
self.mOutputFieldWidthSpinBox.setEnabled(False)
self.mOutputFieldPrecisionSpinBox.setEnabled(False)
def selectFile(self):
output = self.alg.getOutputFromName('OUTPUT_LAYER')
fileFilter = output.getFileFilter(self.alg)
settings = QSettings()
if settings.contains('/Processing/LastOutputPath'):
path = settings.value('/Processing/LastOutputPath')
else:
path = ProcessingConfig.getSetting(ProcessingConfig.OUTPUT_FOLDER)
lastEncoding = settings.value('/Processing/encoding', 'System')
fileDialog = QgsEncodingFileDialog(self,
self.tr('Save file'),
path,
fileFilter,
lastEncoding)
fileDialog.setFileMode(QFileDialog.AnyFile)
fileDialog.setAcceptMode(QFileDialog.AcceptSave)
fileDialog.setOption(QFileDialog.DontConfirmOverwrite, False)
if fileDialog.exec_() == QDialog.Accepted:
files = fileDialog.selectedFiles()
encoding = str(fileDialog.encoding())
output.encoding = encoding
filename = str(files[0])
selectedFileFilter = str(fileDialog.selectedNameFilter())
if not filename.lower().endswith(
tuple(re.findall("\*(\.[a-z]{1,10})", fileFilter))):
ext = re.search("\*(\.[a-z]{1,10})", selectedFileFilter)
if ext:
filename = filename + ext.group(1)
self.leOutputFile.setText(filename)
settings.setValue('/Processing/LastOutputPath',
os.path.dirname(filename))
settings.setValue('/Processing/encoding', encoding)
def toggleExistingGroup(self, toggled):
self.mUpdateExistingGroupBox.setChecked(not toggled)
def toggleNewGroup(self, toggled):
self.mNewFieldGroupBox.setChecked(not toggled)
def populateFields(self):
if self.layer is None:
return
self.mExistingFieldComboBox.clear()
fields = self.layer.fields()
for f in fields:
self.mExistingFieldComboBox.addItem(f.name())
def setParamValues(self):
if self.mUpdateExistingGroupBox.isChecked():
fieldName = self.mExistingFieldComboBox.currentText()
else:
fieldName = self.mOutputFieldNameLineEdit.text()
layer = dataobjects.getObjectFromName(self.cmbInputLayer.currentText())
self.alg.setParameterValue('INPUT_LAYER', layer)
self.alg.setParameterValue('FIELD_NAME', fieldName)
self.alg.setParameterValue('FIELD_TYPE',
self.mOutputFieldTypeComboBox.currentIndex())
self.alg.setParameterValue('FIELD_LENGTH',
self.mOutputFieldWidthSpinBox.value())
self.alg.setParameterValue('FIELD_PRECISION',
self.mOutputFieldPrecisionSpinBox.value())
self.alg.setParameterValue('NEW_FIELD',
self.mNewFieldGroupBox.isChecked())
|
Just-D/chromium-1 | tools/telemetry/telemetry/decorators_unittest.py | Python | bsd-3-clause | 5,452 | 0.005869 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
from telemetry.core import util
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'mock')
import mock
class FakePlatform(object):
def GetOSName(self):
return 'os_name'
def GetOSVersionName(self):
return 'os_version_name'
class FakePossibleBrowser(object):
def __init__(self):
self.browser_type = 'browser_type'
self.platform = FakePlatform()
self.supports_tab_control = False
class FakeTest(object):
def SetEnabledStrings(self, enabled_strings):
# pylint: disable=W0201
self._enabled_strings = enabled_strings
def SetDisabledStrings(self, disabled_strings):
# pylint: disable=W0201
self._disabled_strings = disabled_strings
class TestShouldSkip(unittest.TestCase):
def testEnabledStrings(self):
test = FakeTest()
possible_browser = FakePossibleBrowser()
# When no enabled_strings is given, everything should be enabled.
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_version_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_name', 'another_os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name', 'os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name', 'another_os_version_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
def testDisabledStrings(self):
test = FakeTest()
possible_browser = FakePossibleBrowser()
# When no disabled_strings is given, nothing should be disabled.
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_version_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_name', 'another_os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name', 'os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name', 'another_os_version_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
class TestDeprecation(unittest.TestCase):
@mock.patch('warnings.warn')
def testFunctionDeprecation(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
def Foo(x):
return x
Foo(1)
warn_mock.assert_called_with(
'Function Foo is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testMethodDeprecated(self, warn_mock):
class Bar(object):
@decorators.Deprecated(2015, 12, 1, 'Testing only.')
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Function Foo is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. Testing only.\n', stacklevel=4)
@mock.patch('warnings.warn')
def testClassWithoutInitDefinedDeprecated(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
class Bar(object):
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testClassWithInitDefinedDeprecated(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
class Bar(object):
def __init__(self):
pass
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testInheritedClassDeprecated(self, warn_mock):
class Ba(object):
pass
@decorators.Deprecated(2015, 12, 1)
class Bar(Ba):
def Foo(self, x):
return x
class Baz(Bar):
pass
Baz().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
def testReturnValue(self):
class Bar(object):
@decorators.Deprecated(2015, 12, 1, 'Testing only.')
def Foo(self, x):
return x
self.assertEquals(5, Bar().Foo(5))
|
petrjasek/superdesk-server | superdesk/__init__.py | Python | agpl-3.0 | 3,012 | 0.001992 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Superdesk"""
import logging
from flask import abort, json, Blueprint, current_app as app # noqa
from flask.ext.script import Command as BaseCommand, Option # noqa @UnresolvedImport
from werkzeug.exceptions import HTTPException
from eve.utils import config # noqa
from eve.methods.common import document_link # noqa
from .eve_backend import EveBackend
from .datalayer import SuperdeskDataLayer # noqa
from .services import BaseService as Service # noqa
from .resource import Resource # noqa
from .privilege import privilege, intrinsic_privilege, get_intrinsic_privileges # noqa
from .workflow import * # noqa
API_NAME = 'Superdesk API'
VERSION = (0, 0, 1)
DOMAIN = {}
COMMANDS = {}
BLUEPRINTS = []
app_components = dict()
app_models = dict()
resources = dict()
eve_backend = EveBackend()
default_user_preferences = dict()
default_session_preferences = dict()
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
The Eve framework changes introduced with https://github.com/nicolaiarocci/eve/issues/213 make the commands fail.
The reason is that Flask-Script runs the commands using test_request_context(), which is invalid.
That's the reason we are inheriting the Flask-Script's Command to overcome this issue.
"""
def __call__(self, app=None, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
def get_headers(self, environ=None):
"""Fix CORS for abort responses.
todo(petr): put in in custom flask error handler instead
"""
return [
('Content-Type', 'text/html'),
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', '*'),
]
setattr(HTTPException, 'get_headers', get_headers)
def domain(resource, res_config):
"""Register domain resource"""
DOMAIN[resource] = res_config
def command(name, command):
"""Register command"""
COMMANDS[name] = command
def blueprint(blueprint, **kwargs):
"""Register blueprint"""
blueprint.kwargs = kwargs
BLUEPRINTS.append(blueprint)
def get_backend():
"""Returns the available backend, this will be changed in a factory if needed."""
return eve_backend
def get_resource_service(resource_name):
return resources[resource_name].service
def get_resource_privileges(resource_name):
attr = getattr(resources[resource_name], 'privileges', {})
return attr
def register_default_user_preference(preference_name, preference):
default_user_preferences[preference_name] = preference
def register_default_session_preference(preference_name, preference):
default_session_preferences[preference_name] = preference
from .commands import * # noqa
|
joshzarrabi/e-mission-server | emission/clients/data/data.py | Python | bsd-3-clause | 4,698 | 0.020647 | # Standard imports
import logging
import time as systime
from datetime import datetime, time, timedelta
import json
# Our imports
import emission.analysis.result.carbon as carbon
import emission.net.api.stats as stats
from emission.core.wrapper.user import User
# BEGIN: Code to get and set client specific fields in the profile (currentScore and previousScore)
def getCarbonFootprint(user):
profile = user.getProfile()
if profile is None:
return None
return profile.get("carbon_footprint")
def setCarbonFootprint(user, newFootprint):
user.setClientSpecificProfileFields({'carbon_footprint': newFootprint})
# END: Code to get and set client specific fields in the profile (currentScore and previousScore)
def getResult(user_uuid):
# This is in here, as opposed to the top level as recommended by the PEP
# because then we don't have to worry about loading bottle in the unit tests
from bottle import template
user = User.fromUUID(user_uuid)
currFootprint = getCarbonFootprint(user)
if currFootprint is None:
currFootprint = carbon.getFootprintCompare(user_uuid)
setCarbonFootprint(user, currFootprint)
(myModeShareCount, avgModeShareCount,
myModeShareDistance, avgModeShareDistance,
myModeCarbonFootprint, avgModeCarbonFootprint,
myModeCarbonFootprintNoLongMotorized, avgModeCarbonFootprintNoLongMotorized, # ignored
myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
myOptimalCarbonFootprintNoLongMotorized, avgOptimalCarbonFootprintNoLongMotorized) = currFootprint
renderedTemplate = template("clients/data/result_template.html",
myModeShareCount = json.dumps(myModeShareCount),
avgModeShareCount = json.dumps(avgModeShareCount),
myModeShareDistance = json.dumps(myModeShareDistance),
avgModeShareDistance = json.dumps(avgModeShareDistance),
myModeCarbonFootprint = json.dumps(myModeCarbonFootprint),
avgModeCarbonFootprint = json.dumps(avgModeCarbonFootprint),
myOptimalCarbonFootprint = json.dumps(myOptimalCarbonFootprint),
avgOptimalCarbonFootprint = json.dumps(avgOptimalCarbonFootprint))
# logging.debug(renderedTemplate)
return renderedTemplate
def getCategorySum(carbonFootprintMap):
return sum(carbonFootprintMap.values())
# TODO: Change the use of runBackgroundTasks
def runBackgroundTasks(user_uuid):
today = datetime.now()
runBackgroundTasksForDay(user_uuid, today)
def runBackgroundTasksForDay(user_uuid, today):
today_dt = datetime.combine(today, time.max)
user = User.fromUUID(user_uuid)
# carbon compare results is a tuple. Tuples are converted to arrays
# by mongodb
# In [44]: testUser.setScores(('a','b', 'c', 'd'), ('s', 't', 'u', 'v'))
# In [45]: testUser.getScore()
# Out[45]: ([u'a', u'b', u'c', u'd'], [u's', u't', u'u', u'v'])
weekago = today_dt - timedelta(days=7)
carbonCompareResults = carbon.getFootprintCompareForRange(user_uuid, weekago, today_dt)
setCarbonFootprint(user, carbonCompareResults)
(myModeShareCount, avgModeShareCount,
myModeShareDistance, avgModeShareDistance,
myModeCarbonFootprint, avgModeCarbonFootprint,
myModeCarbonFootprintNoLongMotorized, avgModeCarbonFootprintNoLongMotorized, # ignored
myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
myOptimalCarbonFootprintNoLongMotorized, avgOptimalCarbonFootprintNoLongMotorized) = carbonCompareResults
# We only compute server stats in the background, because including them in
# the set call means that they may be invoked when the user makes a call and
# the cached value is None, which would potentially slow down user response time
msNow = systime.time()
stats.storeResultEntry(user_uuid, stats.STAT_MY_CARBON_FOOTPRINT, msNow, getCategorySum(myModeCarbonFootprint))
stats.storeResultEntry(user_uuid, stats.STAT_MY_CARBON_FOOTPRINT_NO_AIR, msNow, getCategorySum(myModeCarbonFootprintNoLongMotorized))
stats.storeResultEntry(user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT, msNow, getCategorySum(myOptimalCarbonFootprint))
stats.storeResultEntry(user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT_NO_AIR, msNow, getCategorySum(myOptimalCarbonFootprintNoLongMotorized))
stats.storeResultEntry(user_uuid, stats.STAT_MY_ALLDRIVE_FOOTPRINT, msNow, getCategorySum(myModeShareDistance) * (278.0/(1609 * 1000)))
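# (Assumption: the 278.0/(1609 * 1000) factor above converts the summed
# distance from meters to miles, applies roughly 278 g CO2 per mile of
# driving, and scales the result to kilograms.)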
stats.storeResultEntry(user_uuid, stats.STAT_MEAN_FOOTPRINT, msNow, getCategorySum(avgModeCarbonFootprint))
stats.storeResultEntry(user_uuid, stats.STAT_MEAN_FOOTPRINT_NO_AIR, msNow, getCategorySum(avgModeCarbonFootprintNoLongMotorized))
|
J1bz/ecoloscore | coffeecups/urls.py | Python | bsd-3-clause | 406 | 0 | # -*- coding: utf-8 -*-
from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from coffeecups.views import TakeView, ThrowView, CupPolicyView
router = DefaultRouter()
router.register(r'takes', TakeView)
router.register(r'throws', ThrowView)
router.register(r'policies', CupPolicyView)
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
)
|
otron/zenodo | zenodo/modules/deposit/tasklets/__init__.py | Python | gpl-3.0 | 922 | 0.017354 | # -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2012, 2013 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction. |
zwChan/VATEC | ~/eb-virt/Lib/site-packages/pip/index.py | Python | apache-2.0 | 37,235 | 0.000027 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing install |
theshammy/GenAn | src/main/common.py | Python | mit | 1,218 | 0 | class GeneratorAdapter:
def visit_selector_view(self, view):
pass
def visit_selector_object(self, object, property):
pass
def visit_selector_fk_object(self, object, property, fk_properties):
pass
def visit_view(self, view):
pass
def visit_page(self, page):
pass
def visit_action_selector(self, object, actions):
pass
def visit_other_selector(self, name, **kwargs):
pass
def visit_row(self, row):
pass
def visit_object(self, object):
pass
class FrontendGenerator(GeneratorAdapter):
def __init__(self, model, builtins, path):
self.model = model
self.builtins = builtins
self.path = path
self.backend_base_url = "http://localhost:8080/"
class BackendGenerator(GeneratorAdapter):
def __init__(self, model, builtins, path):
self.model = model
self.builtins = builtins
self.path = path
self.base_url = "http://localhost:8080/"
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
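# Usage sketch (illustrative): wrap terminal output in the ANSI escape codes
# above and reset with ENDC, e.g.
#   print(BColors.WARNING + 'heads up' + BColors.ENDC)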
|
shifvb/hash_photos | _tools/get_hash.py | Python | apache-2.0 | 605 | 0 | import hashlib
def get_hash(instance, abs_filename: str):
"""
Get the hexadecimal hash digest of a given file.
:param instance: object holding choose_hash_method_var (the selected hash method)
:param abs_filename: absolute path of the file
:return: hexadecimal hash digest string
"""
_value = instance.choose_hash_method_var.get()
m = None
if _value == 0:
m = hashlib.md5()
elif _value == 1:
m = hashlib.sha1()
elif _value == 2:
m = hashlib.sha256()
elif _value == 3:
m = hashlib.sha512()
with open(abs_filename, 'rb') as f:
for data in f:
m.update(data)
return m.hexdigest()
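# Example (illustrative): with choose_hash_method_var set to 0, get_hash
# streams the file through hashlib.md5() and returns a 32-character hex digest.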
|
kreatorkodi/repository.torrentbr | plugin.video.yatp/site-packages/hachoir_parser/video/__init__.py | Python | gpl-2.0 | 324 | 0.003086 | from hachoir_par | ser.video.asf import AsfFile
from hachoir_parser.video.flv import FlvFile
from hachoir_parser.video.mov import MovFile
from hachoir_parser.video.mpeg_video import MPEGVideoFile
from hachoir_parser.video.mpeg_ts import MPEG_TS
from hachoir_parser.video.avchd import AVCHDINDX, AVCHDMOBJ, AVCHDMPLS, AVC | HDCLPI
|
shearichard/membaman | membaman/fees/migrations/0005_auto_20150320_2255.py | Python | gpl-3.0 | 2,159 | 0.003242 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('members', '0012_auto_20150318_2331'),
('fees', '0004_auto_20150312_1747'),
]
operations = [
migrations.CreateModel(
name='AccountEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(max_digits=3, decimal_places=2)),
| ('date', models.DateField()),
],
options={
},
bases=(models.Model, | ),
),
migrations.CreateModel(
name='AccountDebt',
fields=[
('accountentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fees.AccountEntry')),
('invoice_reference', models.CharField(max_length=10)),
],
options={
},
bases=('fees.accountentry',),
),
migrations.CreateModel(
name='AccountPayment',
fields=[
('accountentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fees.AccountEntry')),
('payment_type', models.CharField(default=b'OT', max_length=2, choices=[(b'AT', b'Automated Payment'), (b'TR', b'Bank Transfer'), (b'CA', b'Cash'), (b'CH', b'Credit'), (b'DI', b'Discount'), (b'OT', b'Other')])),
('payment_reference', models.CharField(max_length=10)),
('description', models.CharField(max_length=10, null=True, blank=True)),
('notes', models.CharField(max_length=128, null=True, blank=True)),
],
options={
},
bases=('fees.accountentry',),
),
migrations.AddField(
model_name='accountentry',
name='member',
field=models.ForeignKey(to='members.Member'),
preserve_default=True,
),
]
|
validata/ExaminationChat | Server/Controller/Server_threading.py | Python | mit | 1,049 | 0.000953 | import threading
class Server_recv(threading.Thread):
def __init__(self, client_sock_, list_of_sockets_):
threading.Thread.__init__(self)
self.client_sock = client_sock_
self.list_of_sockets = list_of_sockets | _
def run(self):
while True:
msg_from_client = self.client_sock.recv(1024).decode()
if msg_from_client == "/quit":
self.client_sock.close()
self.list_of_sockets.remove(self.client_sock)
return
print("Message from client: "+msg_from_cl | ient)
for sock in self.list_of_sockets:
sock.send(msg_from_client.encode())
class Server_send(threading.Thread):
def __init__(self, client_sock_, list_of_sockets_):
threading.Thread.__init__(self)
self.client_sock = client_sock_
self.list_of_sockets = list_of_sockets_
def run(self):
while True:
var = input()
for sock in self.list_of_sockets:
sock.send(var.encode())
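# Usage sketch (assumed server wiring): after accepting a connection,
#   Server_recv(conn, client_socks).start()
#   Server_send(conn, client_socks).start()
# keeps one receive-and-broadcast thread per client plus a console thread
# that broadcasts stdin input to every connected socket.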
|
bdusell/pycfg | demos/cnf_html_test.py | Python | mit | 318 | 0.003145 | '''Convert a grammar to CNF form and print it to stdout in HTML.'''
from cfg import core, cnf
CFG = core.ContextFreeGrammar
CNF = cnf.ChomskyNormalForm
G = CFG('''
S -> ASA | aB
A -> B | S
B -> b |
''')
print '<h1><var>G</var>:</h1>'
print G.html()
print
print '<h1><var>G′</var>:</h1>'
print CNF(G).html()
|
obi-two/Rebelion | data/scripts/templates/object/installation/manufacture/shared_weapon_factory.py | Python | mit | 465 | 0.045161 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Installation()
result.template = "object/installation/manufacture/shared_weapon_factory.iff"
result.attribute_template_id = -1
result.stfName("installation_n","item_factory")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/models/policy_assignment.py | Python | mit | 1,773 | 0.001128 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicyAssignment(Model):
"""The policy definition.
:param display_name: The display name of the policy assignment.
| :type display_name: str
:param policy_definition_id: The ID of the policy definition.
:type policy_definition_id: str
:param scope: The scope for the policy assignment.
:type scope: str
:param id: The ID of the policy assignment.
:type id: str
:param type: The type of the policy assignment.
:type type: str
:param name: The name of the policy assignment.
:typ | e name: str
"""
_attribute_map = {
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, display_name=None, policy_definition_id=None, scope=None, id=None, type=None, name=None):
self.display_name = display_name
self.policy_definition_id = policy_definition_id
self.scope = scope
self.id = id
self.type = type
self.name = name
|
ahmadpgh/deepSimDEF | deepSimDEF_for_gene_expression.py | Python | mit | 24,970 | 0.009491 | # CUDA_VISIBLE_DEVICES=gpu-number python deepSimDEF_for_gene_expression.py arguments
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import sys
import logging
import random
import numpy as np
import pprint
import argparse
import tensorflow.keras
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from | networks import deepSimDEF_network
from datasets import gene_expression_dataset, generic_production_dataset
from dataloaders import generic_dataloader, generic_production_dataloader
from scipy.stats.stats import pearsonr, spearmanr
import datetime
from pytz import timezone
import collections
from utils import *
tz = timez | one('US/Eastern') # To monitor training time (reports start & end times in a fixed timezone when the code runs on a remote server)
pp = pprint.PrettyPrinter(indent=4)
#checkpoint = '[base_dir]/2020.03.04-23h40m37s_server_name/model_checkpoints/epoch_58'
checkpoint = None
parser = argparse.ArgumentParser(description='Calculate gene-product pairs similarity.')
# experiment arguments
parser.add_argument('--deepsimdef_mode', default='training', type=str, help='mode of the model can be either "training", "evaluation", or "production"')
parser.add_argument('--nb_fold', default=10, type=int, help='number of folds of training and evaluation in n-fold cross-validation (default: 10)')
parser.add_argument('--iea', default=True, type=str2bool, help='whether to consider "inferred from electronic annotations" or not')
parser.add_argument('--sub_ontology', default='all', type=str, help='which subontologies to consider annotations from: "bp", "cc", "mf", or "all" (default: "all")')
parser.add_argument('--inpute_file', default='default', type=str, help='input file of the gene product terms and the score(s); if not provided, the default file is used')
parser.add_argument('--production_input_file', default='', type=str, help='test file of the gene product terms used in production mode (you should provide directory too)')
parser.add_argument('--production_output_file', default='', type=str, help='result file of the gene product terms used in production mode (you should provide directory too)')
parser.add_argument('--experiment_mode', default=2, type=int, help='1: any pairs of unseen genes; 2: only pair in which both genes are unseen')
parser.add_argument('--partial_shuffle_percent', default=0.0, type=float, help='Should be more than 0.0 for "Negative Control" experiments (default: 0.0)')
parser.add_argument('--species', default='yeast', type=str, help='the species of interest for evaluation (human, yeast, etc)')
# network arguments
parser.add_argument('--dropout', default=0.3, type=float, help='dropout applied to dense layers of the network (default: 0.3)')
parser.add_argument('--embedding_dropout', default=0.15, type=float, help='dropout applied to embedding layers of the network; i.e., percentage of features dropped out completely (default: 0.15)')
parser.add_argument('--annotation_dropout', default=0.0, type=float, help='dropout applied to annotations of a gene at training time; i.e., percentage of annotations ignored (default: 0.0)')
parser.add_argument('--pretrained_embedding', default=True, type=str2bool, help='whether the GO term embeddings loaded should be computed in advance from a pretrained unsupervised model (default: True)')
parser.add_argument('--updatable_embedding', default=True, type=str2bool, help='whether the GO term embeddings should be updatable during the training (default: True)')
parser.add_argument('--activation_hidden', default='relu', type=str, help='activation function of hidden layers (default: "relu")')
parser.add_argument('--activation_highway', default='relu', type=str, help='activation function of highway layer (default: "relu")')
parser.add_argument('--activation_output', default='linear', type=str, help='activation function of last, i.e. output, layer (default: "linear")')
parser.add_argument('--embedding_dim', default=100, type=int, help='dimensionality of GO term embeddings, i.e. number of latent features (default: 100)')
parser.add_argument('--highway_layer', default=True, type=str2bool, help='True to use a highway layer instead of cosine similarity (default: True)')
parser.add_argument('--cosine_similarity', default=False, type=str2bool, help='True to use cosine similarity instead of a highway layer (default: False)')
# training arguments
parser.add_argument('--nb_epoch', default=400, type=int, help='number of epochs for training')
parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')
parser.add_argument('--loss', default='mean_squared_error', type=str, help='loss of the objective function that gets optimized ("binary_crossentropy" or "mean_squared_error")')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer algorithm, can be: "adam", "rmsprop", etc. (default: "adam")')
parser.add_argument('--learning_rate', default=0.001, type=float, help='starting learning rate for optimization')
parser.add_argument('--adaptive_lr', default=True, type=str2bool, help='whether to use an adaptive learning rate or not')
parser.add_argument('--adaptive_lr_rate', default=10, type=int, help='after how many epochs to decay the learning rate')
# checkpointting arguments
parser.add_argument('--checkpoint', default=checkpoint, help='starting from scratch or using model checkpoints')
parser.add_argument('--save_model', default=False, type=str2bool, help='model checkpointing, whether to save the models during training')
parser.add_argument('--save_embeddings', default=False, type=str2bool, help='storing weights of the embedding layers, whether to save updated embeddings')
parser.add_argument('--save_interval', default=5, type=int, help='-1 to checkpoint only when the result improves; otherwise checkpoint every save_interval epochs (default: 5)')
parser.add_argument('--log_dir', default='logs/', type=str, help='base log folder (will be created if it does not exist)')
parser.add_argument('--log_name', default='GE_test', type=str, help='prefix name to use when logging this model')
# misc arguments
parser.add_argument('--verbose', default=False, type=str2bool, help='whether to print extra information during model training')
parser.add_argument('--reproducible', default=True, type=str2bool, help='whether we want to have a reproducible result (mostly helpful with training on a CPU at the cost of training speed)')
parser.add_argument('--seed', default=2021, type=int, help='seed used for Random Number Generation if "reproducible=True"')
def fit_gene_expression(models, args):
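    """Cross-validation training loop: trains each fold's network and tracks per-epoch Pearson/Spearman correlations on the held-out genes."""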
best_epoch_pearson, best_epoch_spearman = 0, 0
final_pearson, final_spearman = [], []
start_time = datetime.datetime.now(tz)
former_iteration_endpoint = start_time
print("~~~~~~~~~~~~~ TIME ~~~~~~~~~~~~~~")
print("Time started: {}".format(start_time.strftime("%Y-%m-%d %H:%M:%S")))
"""Training loop"""
for e in range(checkpoint_baseline, args.nb_epoch):
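        # checkpoint_baseline offsets the epoch counter so a run restored from a checkpoint resumes where it left off.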
print("~~~~~~~~ {} ({}) ~~~~~~~~ EPOCH {}/{} (Embedding dimention: {}) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n".format(
'/'.join(sub_ontology_interested), args.species, e+1, args.nb_epoch, args.embedding_dim))
if args.adaptive_lr:
            learning_rate = exp_decay(epoch=e//args.adaptive_lr_rate, initial_lrate=args.learning_rate) # calculating the desired learning rate using the exponential decay formula
else:
learning_rate = args.learning_rate
epoch_pearsons, epoch_spearmans = [], []
for model_index in range(len(models)): # Going through each model one by one
# Preparing the data
train_pair, X_train, y_train, test_pair, X_test, y_test, train_gene, test_gene = generic_dataloader(
model_index, nb_test_genes, gene_shuffled_indx, gene_1, gene_2, fully_annotated_genes,
gene_1_annotation, gene_2_annotation, prediction_value, sub_ontology_interested,
args.experiment_mode)
if e==0: save_gene_pairs(logdir=logdir, model_id=model_index, train_pair=train_pair,
test_pair=test_pair, train_gene=train_gene, t |
obi-two/Rebelion | data/scripts/templates/object/tangible/medicine/shared_stimpack_sm_s1.py | Python | mit | 454 | 0.046256 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/medicine/shared_stimpack_sm_s1.iff"
result.attribute_template_id = 7
result.stfName("medicine_name","stimpack_sm_s1")
| #### BEGIN MODIFICATIONS ####
#### E | ND MODIFICATIONS ####
return result |
luftdanmark/fifo.li | config/urls.py | Python | mit | 1,716 | 0.004662 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', 'fifo.landing.views.home', name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# all auth
url(r'^accounts/', include('allauth.urls')),
# users
url(r'^users/', include('fifo.users.urls', namespace='users')),
# api
url(r'^api/v1/', include('fifo.api_v1.api', namespace='api_v1')),
# queues
url(r'^queues/', include("fifo.queues.urls", namespace="queues")),
# entries
#url(r'^entries/', include("fifo.entries.urls", namespace="entries")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$ | ', default_views.permission_denied, kwargs={'exception': Exception('Permission | Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
admin.site.site_header = 'fifo.li admin site'
admin.site.site_title = 'fifo.li staff '
|
molgor/spystats | HEC_runs/fit_fia_sppn.py | Python | bsd-2-clause | 4,595 | 0.019808 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Automatic Batch processing for GP models
========================================
Requires:
* TensorFlow
* GPFlow
* Pandas
* GeoPandas
* Numpy
* shapely
"""
__author__ = "Juan Escamilla Mólgora"
__copyright__ = "Copyright 2017, JEM"
__license__ = "GPL"
__mantainer__ = "Juan"
__email__ ="molgor@gmail.com"
import geopandas as gpd
from shapely.geometry import Point
import pandas as pd
import GPflow as gf
#import gpflow as gf
import numpy as np
import sys
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# create a file handler
handler = logging.FileHandler('batchprocess.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
def subselectDataFrame(pandasDF, minx,maxx,miny,maxy):
"""
    Selects an inner bounding-box region of the DataFrame.
    Receives: a pandas DataFrame.
    Returns: a geopandas GeoDataFrame.
"""
data = pandasDF
## Create the geometric column
data['geometry'] = data.apply(lambda z: Point(z.LON, z.LAT), axis=1)
new_data = gpd.GeoDataFrame(data)
## Subselect the data
section = new_data[lambda x: (x.LON > minx) & (x.LON < maxx) & (x.LAT > miny) & (x.LAT < maxy) ]
return section
def fitMatern12Model(X,Y,optimise=True):
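    """
    Fit a GPflow GP regression with a Matern-1/2 kernel over the two input
    coordinate columns; the likelihood variance starts at 10 and is refined
    along with the kernel parameters when optimise=True.
    """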
k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1] )
model = gf.gpr.GPR(X.as_matrix(),Y.as_matrix().reshape(len(Y),1).astype(float),k)
model.likelihood.variance = 10
if optimise:
model.optimize()
return model
def buildPredictiveSpace(inputGeoDataFrame,GPFlowModel,num_of_predicted_coordinates=100):
"""
Creates a grid based on the extent of the inputDataFrame coordinates.
It uses the information in the geometry column.
Receives:
* inputGeoDataFrame : a Geopanda instance with geometry column
* num_of_predicted_coordinates : Integer, | the size of the grid partitioning.
* GPFlowModel : a GpFlow regressor object (the model that predicts).
"""
Nn = num_of_predicted_coordinates
dsc = inputGeoDataFrame
longitudes = dsc.apply(lambda c : c.geometry.x, axis=1)
latitudes = dsc.apply(lambda c : c.geometry.y, axis=1)
predicted_x = np.linspace(min(longitudes),max(longitudes),Nn)
predicted_y = np.linspace(min(latitude | s),max(latitudes),Nn)
Xx, Yy = np.meshgrid(predicted_x,predicted_y)
predicted_coordinates = np.vstack([ Xx.ravel(), Yy.ravel()]).transpose()
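    # Flatten the Nn x Nn mesh into an (Nn*Nn, 2) array of (lon, lat) prediction points.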
#predicted_coordinate
means,variances = GPFlowModel.predict_y(predicted_coordinates)
results = pd.DataFrame([means,variances,longitudes,latitudes])
return results
def main(csv_path,minx,maxx,miny,maxy,predicted_size=300):
"""
The main batch processing
"""
logger.info("Reading Data")
#data = pd.read_csv("/RawDataCSV/idiv_share/plotsClimateData_11092017.csv")
#data = pd.read_csv("/home/hpc/28/escamill/csv_data/idiv/plotsClimateData_11092017.csv")
data = pd.read_csv(csv_path)
logger.info("Subselecting Region")
section = subselectDataFrame(data, minx, maxx, miny, maxy)
X = section[['lon','lat']]
Y = section['SppN']
logger.info("Fitting GaussianProcess Model")
model = fitMatern12Model(X, Y, optimise=True)
param_model = pd.DataFrame(model.get_parameter_dict())
param_model.to_csv('sppnmodel_parameters.csv')
logger.info("Predicting Points")
space = buildPredictiveSpace(section, model,num_of_predicted_coordinates=predicted_size)
space.to_csv('spppredicted_points.csv')
logger.info("Finished! Results in: tests1.csv")
if __name__ == "__main__":
csv_path = sys.argv[1]
minx = float(sys.argv[2])
maxx = float(sys.argv[3])
miny = float(sys.argv[4])
maxy = float(sys.argv[5])
    predicted_size = int(sys.argv[6])  # grid resolution; np.linspace needs an integer count
main(csv_path,minx,maxx,miny,maxy,predicted_size)
## For running
## python fit_fia_sppn.py /path/to/csv -90 -80 30 40 300
#python fit_fia_sppn.py /RawDataCSV/idiv_share/plotsClimateData_11092017.csv -90 -80 30 40 300
## In hec
#python fit_fia_sppn.py /home/hpc/28/escamill/csv_data/idiv/plotsClimateData_11092017.csv -90 -80 30 40 300
## Wishes
#Arguments for the DataSource
#Arguments for the output name.
#Arguments, maybe, for the kernel model.
#For now let's run the test in HEC
|
linuxwhatelse/plugin.audio.linuxwhatelse.gmusic | addon/routes/files.py | Python | gpl-3.0 | 1,076 | 0 | import mapper
from addon.routes import my_library
from addon.routes import actions
from addon.routes import generic
MPR = mapper.Mapper.get()
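# Each @MPR.s_url decorator below maps its URL pattern to the decorated handler function.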
@MPR.s_url('/track/<track_id>/')
@MPR.s_u | rl('/track/<track_id>/<title>/')
def track(track_id, title=''):
actions.play_track(track_id, track_title=title)
@MPR.s_url('/artist/<artist_id>/')
def artist(artist_id):
generic.artist_top_trac | ks(artist_id)
@MPR.s_url('/album/<album_id>/')
def album(album_id):
generic.album(album_id)
@MPR.s_url('/playlist/')
def playlist(playlist_id=None, playlist_token=None):
if playlist_id:
my_library.my_library_playlist(playlist_id)
elif playlist_token:
generic.listen_now_shared_playlist(playlist_token)
@MPR.s_url('/station/')
def station(station_id=None, station_name=None, artist_id=None, album_id=None,
genre_id=None, track_id=None, curated_station_id=None,
playlist_token=None):
generic.station(station_id, station_name, artist_id, album_id,
track_id, genre_id, curated_station_id, playlist_token)
|
nguenanhvu/bioinformatics | rosalind_reversecomplement.py | Python | mit | 1,528 | 0.007199 | #rosalind_reversecomplement
#Objective: produce a DNA complementary stra | nd
'''Why do we have to reverse?
DNA sequencing reads a strand in the 5'-end to 3'-end direction.
Producing only the complementary sequence without reversing would result in
a 3'-end to 5'-end sequence => we would be reading the result in the wrong direction.
'''
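#Example: 'ATGC' -> complement 'TACG' -> reversed 'GCAT', the reverse complement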
import os
os.chdir('/home/vu/Downloads/')
d = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}
f = | open('rosalind_revc (1).txt', 'r').read()[:-1]
s = ''.join(d[i] for i in f) #3-end to 5-end complementary sequence
s = s[::-1] #reverse the complementary sequence
print s
'''Result
AGCAATTCGTTCCGCCGTAGGAGAATGTTAAATTGTTCCCACGCGGGCCTC
TTACCCTGGGCTATTCCAAGCGCGATACCGTGAGTATCGAAGTCAATTAAT
CCCCCCCGCTTATGCTGATACATCTCTCCGATACCTTGTGCGTGAGTTTGA
ACTGTGAGAAAGAGCGGGCACAATCAGAAGTAATTTGTTTAATTTTTCTCT
ATCGCCCATCACCTTGATAGTGCGGATGCTGCTTACTTAAGTTGCACCGAA
GAGCGGACGATGAGCATCTGCATCTTGCGCGGGACGTGCAGGGTTTCTTGT
ACAGCCCTCAAACTGCATAGCTTCCGGAACTGGCGGCATCATTGTCATGCG
AGTCCTCTTCAGTCGATAACACCACATCCTACGGCTTGTTCCTGAGTCAGC
GCTGGGGCGTCCTGGGATTGTAGCCATCGAGCAAAGGGCGGCGCAGTCGAC
ATTGGCGACTCTCCTACTTAGACGTGTTGCCTTCAACTTTTAATCGGCGCA
ATTCGGCTTATCGCGCTTCTCACAACAGAAGAGGGAACAAGACATGAGAGC
GATAGCTAAGATAGAATATCACGGCCAAGACCGGCGACGTTGCTACACCCC
TAATATTTCTGTGCTAGTCGTCGTGCGATGATATTTATCCCTCCCAAGTTG
GGGTCGGATCATACGGCTAACAGTTGCAATGTACTAGCGACTTGTAAACAT
GCCGAACCCCGAGGCCAGTATGTATAGATGTTACTCCTATCTCCCAAGGTC
CACACTGGAGAGTTCTAGGACTTAGAATATCGCTCCTCTTGTCGGCTATGG
AAGTATGGAAGTGCGCCAATGGGGTTACTCGAGACCGTTGCACAGAAGCAT
TTAATTTAT'''
|