repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
gglyptodon/tmp | Pysickle/pysickle/pysickle.py | Python | gpl-3.0 | 24,572 | 0.016238 | #!/usr/bin/env python
'''
Copyright (C) 2014 Janina Mass
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
import sys
import getopt
import subprocess
import threading
import os
import shutil
import matplotlib
#don't use X:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
from distutils import spawn
#########################
# last update:
# Fr 16 Mai 2014 14:25:46 CEST
# [JMass]
#########################
global GAP
GAP = "-"
class Alignment(object):
def __init__(self, id=None, fasta = None, members = []):
self.id = id
self.fasta = fasta
self.members = []
self.gapPos = []
self.mismatchPos = []
self.matchPos = []
self.matchGapPos = []
self.attachSequences()
self.calcNumbers()
def __repr__(self):
ids = self.members
return("Alignment:{},{}".format(self.id, ids))
def __len__(self):
try:
return(len(self.members[0].sequence))
except TypeError as e:
sys.stderr.write(e)
sys.stderr.write("attachSequences first")
return(0)
def getStats(self):
res = ""
res+="{},{},{},{},{},{}".format(len(self.matchPos),len(self.matchGapPos),len(self.mismatchPos),len(self)-len(self.gapPos),len(self.gapPos),len(self) )
return(res)
def attachSequences(self):
fp = FastaParser()
print("FASTA:", self.fasta)
for f in fp.read_fasta(self.fasta):
newSeq = Sequence(id = f[0], sequence = f[1])
self.members.append(newSeq)
def calcNumbers(self):
for i in range(0, len(self)):
curpos = [m.sequence[i] for m in self.members]
if GAP in curpos:
#dynamic penalty:
tmp = "".join(curpos)
gappyness = tmp.count(GAP)/float(len(self.members))
half = len(self.members)/2.0
if gappyness > half:
toPunish = [m for m in self.members if m.sequence[i]!=GAP]
for t in toPunish:
t._dynamicPenalty+=gappyness
elif gappyness < half:
#punish gappers
toPunish = [m for m in self.members if m.sequence[i]==GAP]
for t in toPunish:
t._dynamicPenalty+=1-gappyness
else:
pass
#/dyn penalty
self.gapPos.append(i)
#sequences that cause gaps:
gappers = [m for m in self.members if m.sequence[i] == GAP]
for m in gappers:
m.gapsCaused.append(i)
#unique gaps caused:
if len(gappers) == 1:
m.uniqueGapsCaused.append(i)
#insertions
inserters = [m for m in self.members if m.sequence[i] != GAP]
for m in inserters:
m.insertionsCaused.append(i)
#unique insertions caused:
if len(inserters) == 1:
m.uniqueInsertionsCaused.append(i)
nongap = [c for c in curpos if c != GAP]
cpset = set(curpos)
if (len(cpset) >1 and GAP not in cpset):
self.mismatchPos.append(i)
for m in self.members:
m.mismatchShared.append(i)
elif (len(cpset) == 1 and GAP not in cpset):
self.matchPos.append(i)
for m in self.members:
m.matchShared.append(i)
elif (len(cpset)==2 and GAP in cpset and len(nongap)>2):
self.matchGapPos.append(i)
def showAlignment(self, numbers = False):
res = []
mmPos = []
alignmentLength = len(self.members[0].sequence)
for i in range(0, alignmentLength):
curpos = [m.sequence[i] for m in self.members]
if numbers:
res.append(str(i)+" "+" ".join(curpos))
else:
res.append(" ".join(curpos))
return("\n".join(res))
class Sequence():
def __init__(self, id = "", sequence = None, isForeground = False):
self.id = id
self.sequence = sequence
self.isForeground = isForeground
self.insertionsCaused = [] #positions
self.uniqueInsertionsCaused = []
self.gapsCaused = []#positions
self.uniqueGapsCaused = []
self.matchShared = []
self.mismatchShared = []
self._penalty = None
# penalize by site:
# > n/2 gaps (@site): penalyze inserts by gaps/n
# < n/2 gaps (@site): penalyze gaps by inserts/n
self._dynamicPenalty = 0
def setForeground(self, bool = True):
self.isForeground = bool
def __repr__(self):
return("Sequence: {}".format(self.id))
@property
def penalty(self, uniqueGapPenalty=10, uniqueInsertPenalty=10, gapPenalty = 1, insertPenalty =1 ):
self.penalty =sum([ len(self.insertionsCaused)*insertPenalty, len(self.uniqueInsertionsCaused)*uniqueGapPenalty, len(self.gapsCaused)*gapPenalty, len(self.uniqueGapsCaused)*uniqueGapPenalty])
return(self.penalty)
def summary(self):
s = ""
s+=self.id
s+="insertionsCaused:{},uniqueInsertionsCaused:{}, gapsCaused:{}, uniqueGapsCaused:{}, penalty:{}, dynPenalty:{}".format(len(self.insertionsCaused), len(self.uniqueInsertionsCaused), len(self.gapsCaused), len(self.uniqueGapsCaused), self.penalty, self._dynamicPenalty)
return(s)
def getCustomPenalty(self,gapPenalty, uniqueGapPenalty, insertionPenalty , uniqueInsertionPenalty, mismatchPenalty, matchReward):
res = (len(self.gapsCaused)-len(self.uniqueGapsCaused))* gapPenalty\
+ len(self.uniqueGapsCaused)*uniqueGapPenalty\
+ (len(self.insertionsCaused)-len(self.uniqueInserti | onsCaused)) * insertionP | enalty\
+ len(self.uniqueInsertionsCaused) * uniqueInsertionPenalty\
+ len(self.mismatchShared)* mismatchPenalty\
+ len(self.matchShared) *matchReward
return(res)
class FastaParser(object):
def read_fasta(self, fasta, delim = None, asID = 0):
"""read from fasta fasta file 'fasta'
and split sequence id at 'delim' (if set)\n
example:\n
>idpart1|idpart2\n
ATGTGA\n
and 'delim="|"' returns ("idpart1", "ATGTGA")
"""
name = ""
fasta = open(fasta, "r")
while True:
line = name or fasta.readline()
if not line:
break
seq = []
while True:
name = fasta.readline()
name = name.rstrip()
if not name or name.startswith(">"):
break
else:
seq.append(name)
joinedSeq = "".join(seq)
line = line[1:]
if delim:
line = line.split(delim)[asID]
yield (line.rstrip(), joinedSeq.rstrip())
fasta.close()
###########################################
#TODO documentation
def usage():
print ("""
######################################
# pysickle.py v0.1.1
######################################
usage:
pysickle.py -f multifasta alignment
options:
-f, --fasta=FILE multifasta alignment (eg. "align.fas")
OR
-F, --fasta_dir=DIR directory with multifasta files (needs -s SUFFIX)
-s, --suffix=SUFFIX will try to work with files that end with SUFFIX (eg ".fas")
-a, --ms |
red-hood/calendarserver | txdav/xml/extensions.py | Python | apache-2.0 | 1,673 | 0.000598 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the S | oftware, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in a | ll
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""
Implementation of draft-sanchez-webdav-current-principal-02.
"""
__all__ = []
from txdav.xml.base import WebDAVTextElement, twisted_dav_namespace
from txdav.xml.element import registerElement, registerElementClass
@registerElement
@registerElementClass
class ErrorDescription(WebDAVTextElement):
"""
The human-readable description of a failed precondition
"""
namespace = twisted_dav_namespace
name = "error-description"
protected = True
|
MobicoTIC/MongoLite | mongolite/mongo_exceptions.py | Python | bsd-3-clause | 2,399 | 0.010004 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# R | edistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in | binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# SchemaDocument Exceptions
class StructureError(Exception):pass
class BadKeyError(Exception):pass
class AuthorizedTypeError(Exception):pass
class SchemaTypeError(Exception):pass
class DefaultFieldTypeError(Exception):pass
# Mongo Document Exceptions
from bson import InvalidDocument
try:
from pymongo.connection import OperationFailure
except ImportError:
from pymongo.errors import OperationFailure
class ConnectionError(Exception):pass
class BadIndexError(Exception):pass
#class MongoAuthException(Exception):pass
#class MultipleResultsFound(Exception):pass
#class BadIndexError(Exception):pass
#class AutoReferenceError(Exception):pass
#class MaxDocumentSizeError(Exception):pass
#class OptionConflictError(Exception):pass
#class UpdateQueryError(Exception):pass
|
senttech/Cura | plugins/USBPrinting/__init__.py | Python | agpl-3.0 | 1,268 | 0.007098 | # | Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import USBPrinterOutputDeviceManager
from PyQt5.QtQml import qmlRegisterType, qmlRegisterSingletonType
from UM.i18n import i18nCatalog
i18n_catalog = i18nCatalog("cura")
def getMetaData():
return {
"type": "extension",
"plugin": {
"name": i18n_catalog.i18nc("@label", "USB printing"),
"author": "Ultimaker", |
"version": "1.0",
"api": 3,
"description": i18n_catalog.i18nc("@info:whatsthis","Accepts G-Code and sends them to a printer. Plugin can also update firmware.")
}
}
def register(app):
# We are violating the QT API here (as we use a factory, which is technically not allowed).
# but we don't really have another means for doing this (and it seems to you know -work-)
qmlRegisterSingletonType(USBPrinterOutputDeviceManager.USBPrinterOutputDeviceManager, "Cura", 1, 0, "USBPrinterManager", USBPrinterOutputDeviceManager.USBPrinterOutputDeviceManager.getInstance)
return {"extension":USBPrinterOutputDeviceManager.USBPrinterOutputDeviceManager.getInstance(), "output_device": USBPrinterOutputDeviceManager.USBPrinterOutputDeviceManager.getInstance()}
|
Geosyntec/python-tidegates | tidegates/analysis.py | Python | bsd-3-clause | 15,076 | 0.000862 | """ Top-level functions for python-tidegates.
This contains main functions to evaluate the extent of floodinga and
damage due to floods.
(c) Geosyntec Consultants, 2015.
Released under the BSD 3-clause license (see LICENSE file for more info)
Written by Paul Hobson (phobson@geosyntec.com)
"""
import os
import sys
import glob
import datetime
import numpy
import arcpy
from . import utils
METERS_PER_FOOT = 0.3048
def process_dem_and_zones(dem, zones, ID_column, cleanup=True, **verbose_options):
""" Convert DEM and Zones layers to numpy arrays.
This is a pre-processor of the DEM and Zone of Influent input data.
Both are converted to numpy arrays and returned along with a raster
template so that the arrays can be correctly georeferenced if
necessary. These returned values are intended to be fed directly to
:func:`flood_area`.
Parameters
----------
dem : str or arcpy.Raster
The topographic data needed to assess flood impacts.
zones : str or arcpy.mapping.Layer
The shapefile or feature class representing the tidegates' zones
of influence.
ID_column : str
Name of the column in the attribute table of ``zones`` that
uniquely identifies each tidegate.
cleanup : bool, optional (True)
Toggles the removal of temporary files.
Other Parameters
----------------
verbose : bool (default = False)
Toggles the printing of messages communication the progress
of the processing.
asMessage : bool (default = False)
When True, progress messages are passed through
``arcpy.AddMessage``. Otherwise, the msg is simply printed to
stdin.
Returns
-------
topo_array, zones_array : numpy.ndarray
Arrays of the topo data and zones of influence.
template : tidegates.utils.RasterTemplate
A raster template that can be used to georeference the returned
arrays.
See also
--------
flood_area
tidegates.utils.RasterTemplate
"""
utils._status('WorkSpace set to {}'.format(arcpy.env.workspace), **verbose_options)
# load the raw DEM (topo data)
raw_topo = utils.load_data(
datapath=dem,
datatype="raster",
msg='Loading DEM {}'.format(dem),
**verbose_options
)
# load the zones of influence, converting to a raster
_p2r_outfile = utils.create_temp_filename("pgon_as_rstr", filetype='raster')
zones_raster = utils.polygons_to_raster(
polygons=zones,
ID_column=ID_column,
cellsize=raw_topo.meanCellWidth,
outfile=_p2r_outfile,
msg='Processing {} polygons'.format(zones),
**verbose_options
)
template = utils.RasterTemplate.from_raster(zones_raster)
# clip the DEM to the zones raster
_cd2z_outfile = utils.create_temp_filename("clipped2zones", filetype='raster')
topo_raster = utils.clip_dem_to_zones(
dem=raw_topo,
zones=zones_raster,
outfile=_cd2z_outfile,
msg='Clipping DEM to extent of {}'.format(zones),
**verbose_options
)
# convert the clipped DEM and zones to numpy arrays
topo_array, zones_array = utils.rasters_to_arrays(
topo_raster,
zones_raster,
msg='Converting rasters to arrays',
**verbose_options
)
if cleanup:
_temp_files = []
utils.cleanup_temp_results(
_p2r_outfile,
_cd2z_outfile,
msg="Removing intermediate rasters",
**verbose_options
)
return topo_array, zones_array, template
def flood_area(topo_array, zones_array, template, ID_column, elevation_feet,
filename=None, num=0, cleanup=True, **verbose_options):
""" Mask out portions of a a tidegates area of influence below
a certain elevation.
Parameters
----------
topo_array : numpy array
Floating point array of the digital elevation model.
zones_array : numpy array
Categorical (integer) array of where each non-zero value
delineates a tidegate's zone of influence.
template : arcpy.Raster or utils.RasterTemplate
A raster or raster-like object that define the spatial extent
of the analysis area. Required attributes are:
- templatemeanCellWidth
- templatemeanCellHeight
- templateextent.lowerLeft
ID_column : str
Name of the column in the ``zones`` layer that associates
each geomstry with a tidegate.
elevation_feet: float
The theoritical flood elevation (in ft MSL) that will be
analyzed.
filename : str, optional
Filename to which the flooded zone will be saved.
cleanup : bool (default = True)
When True, temporary results are removed from disk.
Other Parameters
----------------
verbose : bool (default = False)
Toggles the printing of messages communication the progress
of the processing.
asMessage : bool (default = False)
When True, progress messages are passed through
``arcpy.AddMessage``. Otherwise, the msg is simply printed to
stdin.
Returns
-------
flood_zones : arcpy.mapping.Layer
arcpy Layer of the zones showing the extent flooded behind
each tidegate.
See also
--------
process_dem_and_zones,
assess_impact,
area_of_impacts,
count_of_impacts
"""
# convert the elevation to meters to match the DEM
elevation_meters = elevation_feet * METERS_PER_FOOT
if filename is None: # pragma: no cover
datefmt = '%Y%m%d_%H%M'
datestring = datetime.datetime.now().strftime(dat | efmt)
tem | p_filename = "_temp_FloodedZones_" + datestring
else:
temp_filename = utils.create_temp_filename(filename, filetype='shape', num=num)
# compute floods of zoned areas of topo
flooded_array = utils.flood_zones(
zones_array=zones_array,
topo_array=topo_array,
elevation=elevation_meters,
msg='Flooding areas up to {} ft'.format(elevation_feet),
**verbose_options
)
# convert flooded zone array back into a Raster
_fr_outfile = utils.create_temp_filename('floods_raster', filetype='raster', num=num)
flooded_raster = utils.array_to_raster(
array=flooded_array,
template=template,
outfile=_fr_outfile,
msg='Converting flooded array to a raster dataset',
**verbose_options
)
# convert raster into polygons
temp_polygons = utils.raster_to_polygons(
flooded_raster,
temp_filename,
newfield=ID_column,
msg='Converting raster of floods to polygons',
**verbose_options
)
# dissolve (merge) broken polygons for each tidegate
flood_zones = utils.aggregate_polygons(
polygons=temp_polygons,
ID_field=ID_column,
filename=filename,
msg="Dissolving polygons",
**verbose_options
)
if cleanup:
_temp_files = []
utils.cleanup_temp_results(
temp_polygons.dataSource,
_fr_outfile,
msg="Removing intermediate files",
**verbose_options
)
return flood_zones
def assess_impact(floods_path, flood_idcol, cleanup=False,
wetlands_path=None, wetlands_output=None,
buildings_path=None, buildings_output=None,
bldg_idcol='STRUCT_ID', **verbose_options):
""" Assess the extent of damage due to flooding in wetlands and
buildings.
Parameters
----------
floods_path : str or arcpy.mapping.Layer
The (filepath to the) layer of the extent of flooding. Ideally,
this layer should be generated with ``flood_area``.
flood_idcol : str
Name of the column in the ``floods_path`` layer that associates
each geomstry with a tidegate.
wetlands_path, buildings_path : str
Paths to layers containing wetlands and building footprints.
wetlands_output, buildings_output : str
Path to where the final output of the assessed damage to the
wetlands and building |
aocks/ox_herd | ox_herd/core/plugins/awstools_plugin/test_awstools.py | Python | bsd-2-clause | 1,442 | 0 | """Tests for awstools plugin.
"""
import shutil
import os
import tempfile
from ox_herd.core.plugins.awstools_plugin import core
class TestableBackupTask(core.BackupPostgresToAWS):
"""Sub-class of BackupPostgresToAWS for testing.
"""
@classmethod
def make_dump_cmdline(cls, ox_herd_task, outfile):
"""Override to just cat file for dump to simplify testing.
"""
return ['cp', cls.get_conn_string(ox_herd_task), outfile]
@staticmethod
def get_conn_string(ox_herd_task):
"""Provide simplified version for testing in case we want override.
Helpful if we deal with a sqlite connection string.
"""
return ox_herd_task.conn_string.split('://')[-1]
def test_basic_operation():
"Test simplified version of backup task."
try:
db_loc = tempfile.mktemp()
backup_loc = tempfile.mktemp()
os.mkdir(backup_loc)
open(db_loc, 'w').write('test_data.txt')
task = TestableBackupTask(
name='test_task', conn_string=db_loc, prefix='test',
| bucket_name='@'+backup_loc)
result = task.main_call(task)
assert result['return_value'].split()[:4] == [
'Finished', 'dump:', 'extra', 'messages="b\'\'".']
finally:
for name in [db_loc]:
if os.path.exists(name):
os.remove(name)
i | f os.path.exists(backup_loc):
shutil.rmtree(backup_loc)
|
kallewoof/bitcoin | test/functional/wallet_send.py | Python | mit | 29,974 | 0.004704 | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the send RPC command."""
from decimal import Decimal, getcontext
from itertools import product
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif
class WalletSendTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["-whitelist=127.0.0.1","-walletrbf=1"],
["-whitelist=127.0.0.1","-walletrbf=1"],
]
getcontext().prec = 8 # Satoshi precision for Decimal
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,
arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,
conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,
inputs=None, add_inputs=None, include_unsafe=None, change_address=None, change_position=None, change_type=None,
include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,
expect_error=None, solving_data=None):
assert (amount is None) != (data is None)
from_balance_before = from_wallet.getbalances()["mine"]["trusted"]
if include_unsafe:
from_balance_before += from_wallet.getbalances()["mine"]["untrusted_pending"]
if to_wallet is None:
assert amount is None
else:
to_untrusted_pending_before = to_wallet.getbalances()["mine"]["untrusted_pending"]
if amount:
dest = to_wallet.getnewaddress()
outputs = {dest: amount}
else:
outputs = {"data": data}
# Con | struct options dictionary
options = {}
if add_to_wallet is not None:
options["add_to_wallet"] = add_to_wallet
else:
| if psbt:
add_to_wallet = False
else:
add_to_wallet = from_wallet.getwalletinfo()["private_keys_enabled"] # Default value
if psbt is not None:
options["psbt"] = psbt
if conf_target is not None:
options["conf_target"] = conf_target
if estimate_mode is not None:
options["estimate_mode"] = estimate_mode
if fee_rate is not None:
options["fee_rate"] = fee_rate
if inputs is not None:
options["inputs"] = inputs
if add_inputs is not None:
options["add_inputs"] = add_inputs
if include_unsafe is not None:
options["include_unsafe"] = include_unsafe
if change_address is not None:
options["change_address"] = change_address
if change_position is not None:
options["change_position"] = change_position
if change_type is not None:
options["change_type"] = change_type
if include_watching is not None:
options["include_watching"] = include_watching
if locktime is not None:
options["locktime"] = locktime
if lock_unspents is not None:
options["lock_unspents"] = lock_unspents
if replaceable is None:
replaceable = True # default
else:
options["replaceable"] = replaceable
if subtract_fee_from_outputs is not None:
options["subtract_fee_from_outputs"] = subtract_fee_from_outputs
if solving_data is not None:
options["solving_data"] = solving_data
if len(options.keys()) == 0:
options = None
if expect_error is None:
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
else:
try:
assert_raises_rpc_error(expect_error[0], expect_error[1], from_wallet.send,
outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
except AssertionError:
# Provide debug info if the test fails
self.log.error("Unexpected successful result:")
self.log.error(arg_conf_target)
self.log.error(arg_estimate_mode)
self.log.error(arg_fee_rate)
self.log.error(options)
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
self.log.error(res)
if "txid" in res and add_to_wallet:
self.log.error("Transaction details:")
try:
tx = from_wallet.gettransaction(res["txid"])
self.log.error(tx)
self.log.error("testmempoolaccept (transaction may already be in mempool):")
self.log.error(from_wallet.testmempoolaccept([tx["hex"]]))
except JSONRPCException as exc:
self.log.error(exc)
raise
return
if locktime:
return res
if from_wallet.getwalletinfo()["private_keys_enabled"] and not include_watching:
assert_equal(res["complete"], True)
assert "txid" in res
else:
assert_equal(res["complete"], False)
assert not "txid" in res
assert "psbt" in res
from_balance = from_wallet.getbalances()["mine"]["trusted"]
if include_unsafe:
from_balance += from_wallet.getbalances()["mine"]["untrusted_pending"]
if add_to_wallet and not include_watching:
# Ensure transaction exists in the wallet:
tx = from_wallet.gettransaction(res["txid"])
assert tx
assert_equal(tx["bip125-replaceable"], "yes" if replaceable else "no")
# Ensure transaction exists in the mempool:
tx = from_wallet.getrawtransaction(res["txid"], True)
assert tx
if amount:
if subtract_fee_from_outputs:
assert_equal(from_balance_before - from_balance, amount)
else:
assert_greater_than(from_balance_before - from_balance, amount)
else:
assert next((out for out in tx["vout"] if out["scriptPubKey"]["asm"] == "OP_RETURN 35"), None)
else:
assert_equal(from_balance_before, from_balance)
if to_wallet:
self.sync_mempools()
if add_to_wallet:
if not subtract_fee_from_outputs:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before + Decimal(amount if amount else 0))
else:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before)
return res
def run_test(self):
self.log.info("Setup wallets...")
# w0 is a wallet with coinbase rewards
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
# w1 is a regular wallet
self.nodes[1].createwallet(wallet_name="w1")
w1 = self.nodes[1].get_wallet_rpc("w1")
# w2 contains the private keys for w3
self.nodes[1].createwallet(wallet_name="w2", blank=True)
w2 = self.nodes[1].get_wallet_rpc("w2")
xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
xpub = "tp |
limscoder/amfast | examples/auth/python/utils.py | Python | mit | 1,088 | 0.001838 | """Utility functions."""
import sys
import logging
import amfast
from amfast.encoder import Encoder
from amfast.decoder import Decoder
from amfast.remoting import Service, CallableTarget
import controller
def setup_channel_set(channel_set):
"""Configures an amfast.remoting.channel.ChannelSet object."""
# Send log messages to STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
amfast.logger.addHandler(handler)
# Map service targets to controller methods
cont_obj = controller.Controller()
service = Service('ExampleService')
# Set secure=True to raise an exception
# when an un-authenticated user attempts
# to access the target.
service.mapTarget | (CallableTarget(cont_obj.echo, 'echo', secure=True))
channel_set.service_mapper.mapService(service)
# Set the ChannelSet's 'checkCredentials' attribute
# to enable authentication.
#
# In this example, we're usi | ng a method from the
# controller to check credentials.
channel_set.checkCredentials = cont_obj.checkCredentials
|
rproepp/spykeutils | spykeutils/plugin/data_provider_neo.py | Python | bsd-3-clause | 40,506 | 0.000321 | import os
import sys
from copy import copy
from collections import OrderedDict
import traceback
import atexit
import neo
from data_provider import DataProvider
from .. import conversions as convert
class NeoDataProvider(DataProvider):
""" Base class for data providers using NEO"""
# Dictionary of block lists, indexed by (filename, block index) tuples
loaded_blocks = {}
# Dictionary of index in file, indexed by block object
block_indices = {}
# Dictionary of io, indexed by block object
block_ios = {}
# Dictionary of io (IO name, read paramters) tuples for loaded blocks
block_read_params = {}
# Mode for data lazy loading:
# 0 - Full load
# 1 - Lazy load
# 2 - Caching lazy load
data_lazy_mode = 0
# Mode for lazy cascade
cascade_lazy = False
# Forced IO class for all files. If None, determine by file extension.
forced_io = None
# Active IO read parameters (dictionary indexed by IO class)
io_params = {}
def __init__(self, name, progress):
super(NeoDataProvider, self).__init__(name, progress)
@classmethod
def clear(cls):
""" Clears cached blocks
"""
cls.loaded_blocks.clear()
cls.block_indices.clear()
cls.block_read_params.clear()
ios = set()
for io in cls.block_ios.itervalues():
if io in ios:
continue
if hasattr(io, 'close'):
io.close()
ios.add(io)
cls.block_ios.clear()
@classmethod
def get_block(cls, filename, index, lazy=None, force_io=None,
read_params=None):
""" Return the block at the given index in the specified file.
:param str filename: Path to the file from which to load the block.
:param int index: The index of the block in the file.
:param int lazy: Override global lazy setting if not ``None``:
0 regular load, 1 lazy load, 2 caching lazy load.
:param force_io: Override global forced_io for the Neo IO class
to use when loading the file. If ``None``, the global
forced_io is used.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
if lazy is None:
lazy = cls.data_lazy_mode > 0
else:
lazy = lazy > 0
if force_io is None:
force_io = cls.forced_io
if filename in cls.loaded_blocks:
return cls.loaded_blocks[filename][index]
io, blocks = cls._load_neo_file(filename, lazy, force_io, read_params)
if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'):
io.close()
if blocks is None:
return None
return blocks[index]
@classmethod
def get_blocks(cls, filename, lazy=None, force_io=None,
read_params=None):
""" Return a list of blocks loaded from the specified file
:param str filename: Path to the file from which to load the blocks.
:param int lazy: Override global lazy setting if not ``None``:
0 regular load, 1 lazy load, 2 caching lazy load.
:param force_io: Override global forced_io for the Neo IO class
to use when loading the file. If ``None``, the global
forced_io is used.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
if lazy is None:
lazy = cls.data_lazy_mode > 0
else:
lazy = lazy > 0
if force_io is None:
force_io = cls.forced_io
if filename in cls.loaded_blocks:
return cls.loaded_blocks[filename]
io, blocks = cls._load_ne | o_file(filename, lazy, force_io, read_params)
if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'):
io.close()
return blocks
@classmethod
def _load_neo_file(cls, filename, lazy, force_io, read_params):
        """ Returns a NEO io object and a list of contained blocks for a
file name. This function also caches all loaded blocks
:param str filename: The full path of the file (relative or absolute).
:param bool lazy: Determines if lazy mode is used for Neo io.
:param force_io: IO class to use for loading. If None, determined
by file extension or through trial and error for directories.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
cascade = 'lazy' if cls.cascade_lazy else True
if os.path.isdir(filename):
if force_io:
try:
n_io = force_io(filename)
if read_params is None:
rp = cls.io_params.get(force_io, {})
else:
rp = read_params
content = n_io.read(lazy=lazy, cascade=cascade, **rp)
if force_io == neo.TdtIO and \
isinstance(content, neo.Block) and \
not content.segments:
# TdtIO can produce empty blocks for invalid dirs
sys.stderr.write(
'Could not load any blocks from "%s"' % filename)
return None, None
return cls._content_loaded(
content, filename, lazy, n_io, rp)
except Exception, e:
sys.stderr.write(
'Load error for directory "%s":\n' % filename)
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == n_io):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
else:
for io in neo.io.iolist:
if io.mode == 'dir':
try:
n_io = io(filename)
if read_params is None:
rp = cls.io_params.get(force_io, {})
else:
rp = read_params
content = n_io.read(lazy=lazy, cascade=cascade, **rp)
if io == neo.TdtIO and \
isinstance(content, neo.Block) and \
not content.segments:
# TdtIO can produce empty blocks for invalid dirs
continue
return cls._content_loaded(
content, filename, lazy, n_io, rp)
except Exception, e:
sys.stderr.write(
'Load error for directory "%s":\n' % filename)
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == n_io):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
else:
if force_io:
if read_params is None:
rp = cls.io_params.get(force_io, {})
else:
rp = read_params
return cls._load_file_with_io(filename, force_io, lazy, rp)
extension = filename.split('.')[-1]
for io in neo.io.iolist:
if extension in io.extens |
k04la/ijust_server | project/controllers/api_1/contest.py | Python | gpl-3.0 | 51,483 | 0.001049 | # -*- coding: utf-8 -*-
__author__ = 'AminHP'

# python imports
import os
import shutil
import zipfile
import StringIO
import base64

# flask imports
from flask import jsonify, request, g, send_file, abort

# project imports
from project import app
from project.extensions import db, auth
from project.modules.datetime import utcnowts
from project.modules.paginator import paginate
from project.models.contest import Contest, Problem, ContestDateTimeError
from project.models.team import Team
from project.models.user import User
from project.forms.problem import UploadProblemBody, UploadTestCase
@app.api_route('', methods=['POST'])
@app.api_validate('contest.create_schema')
@auth.authenticate
def create():
    """
    Create Contest
    ---
    tags:
      - contest
    parameters:
      - name: body
        in: body
        description: Contest information
        required: true
        schema:
          id: ContestCreation
          required:
            - name
            - starts_at
            - ends_at
          properties:
            name:
              type: string
              example: babyknight
              minLength: 1
              maxLength: 32
            starts_at:
              type: integer
              description: Contest starts_at (utc timestamp)
            ends_at:
              type: integer
              description: Contest ends_at (utc timestamp)
            recaptcha:
              type: string
      - name: Access-Token
        in: header
        type: string
        required: true
        description: Token of current user
    responses:
      201:
        description: Successfully created
        schema:
          $ref: "#/definitions/api_1_contest_list_owner_get_ContestInfo"
      400:
        description: Bad request
      401:
        description: Token is invalid or has expired
      406:
        description: EndTime must be greater than StartTime and StartTime must be greater than CreationTime
      409:
        description: Contest already exists
    """
    # Body already passed JSON-schema validation (contest.create_schema),
    # so only database-level constraints can still fail below.
    json = request.json
    try:
        obj = Contest()
        # The authenticated user (set by @auth.authenticate in flask.g)
        # becomes the contest owner.
        obj.owner = User.objects.get(pk=g.user_id)
        obj.populate(json)
        obj.save()
        return jsonify(obj.to_json()), 201
    except db.NotUniqueError:
        # Unique index on the contest name.
        return abort(409, "Contest already exists")
    except ContestDateTimeError:
        return abort(406, "EndTime must be greater than StartTime and StartTime must be greater than CreationTime")
@app.api_route('<string:cid>', methods=['GET'])
@auth.authenticate
def info(cid):
    """
    Get Contest Info
    ---
    tags:
      - contest
    parameters:
      - name: cid
        in: path
        type: string
        required: true
        description: Id of contest
      - name: Access-Token
        in: header
        type: string
        required: true
        description: Token of current user
    responses:
      200:
        description: Contest information
        schema:
          id: ContestInfoUser
          type: object
          properties:
            id:
              type: string
              description: Contest id
            name:
              type: string
              description: Contest name
            owner:
              description: Owner info
              schema:
                id: ContestOwnerInfo
                type: object
                properties:
                  id:
                    type: string
                    description: Owner id
                  username:
                    type: string
                    description: Owner username
            created_at:
              type: integer
              description: Contest created_at (utc timestamp)
            starts_at:
              type: integer
              description: Contest starts_at (utc timestamp)
            ends_at:
              type: integer
              description: Contest ends_at (utc timestamp)
            is_active:
              type: boolean
              description: Contest is_active
            is_ended:
              type: boolean
              description: Contest is_ended
            is_owner:
              type: boolean
              description: Contest is_owner
            is_admin:
              type: boolean
              description: Contest is_admin
            pending_teams_num:
              type: integer
              description: Contest number of pending teams
            accepted_teams_num:
              type: integer
              description: Contest number of accepted teams
            joining_status:
              type: object
              description: Contest user joining status
              schema:
                properties:
                  status:
                    type: integer
                    description: joining status (0=not_joined, 1=waiting, 2=joined)
                  team:
                    schema:
                      id: TeamAbsInfo
                      properties:
                        id:
                          type: string
                          description: Team id
                        name:
                          type: string
                          description: Team name
                        owner:
                          description: Owner info
                          schema:
                            $ref: "#/definitions/api_1_team_info_get_UserAbsInfo"
      401:
        description: Token is invalid or has expired
      404:
        description: Contest does not exist
    """
    # Any authenticated user may view a contest; the user object is only
    # needed for the per-user fields (is_owner, is_admin, joining_status).
    try:
        obj = Contest.objects.get(pk=cid)
        user_obj = User.objects.get(pk=g.user_id)
        return jsonify(obj.to_json_user(user_obj)), 200
    except (db.DoesNotExist, db.ValidationError):
        # ValidationError covers malformed ObjectIds passed in the URL.
        return abort(404, "Contest does not exist")
@app.api_route('<string:cid>', methods=['PUT'])
@app.api_validate('contest.edit_schema')
@auth.authenticate
def edit(cid):
    """
    Edit Contest
    ---
    tags:
      - contest
    parameters:
      - name: cid
        in: path
        type: string
        required: true
        description: Id of contest
      - name: body
        in: body
        description: Contest information
        required: true
        schema:
          id: ContestEdition
          properties:
            name:
              type: string
              example: babyknight
              minLength: 1
              maxLength: 32
            starts_at:
              type: integer
              description: Contest starts_at (utc timestamp)
            ends_at:
              type: integer
              description: Contest ends_at (utc timestamp)
      - name: Access-Token
        in: header
        type: string
        required: true
        description: Token of current user
    responses:
      200:
        description: Successfully edited
        schema:
          $ref: "#/definitions/api_1_contest_list_owner_get_ContestInfo"
      400:
        description: Bad request
      401:
        description: Token is invalid or has expired
      403:
        description: You aren't owner or admin of the contest
      404:
        description: Contest does not exist
      406:
        description: EndTime must be greater than StartTime and StartTime must be greater than CreationTime
      409:
        description: Contest name already exists
    """
    json = request.json
    try:
        obj = Contest.objects.get(pk=cid)
        user_obj = User.objects.get(pk=g.user_id)
        # Only the owner or a contest admin may edit.
        if (user_obj != obj.owner) and (not user_obj in obj.admins):
            return abort(403, "You aren't owner or admin of the contest")
        obj.populate(json)
        obj.save()
        return jsonify(obj.to_json()), 200
    except db.NotUniqueError:
        return abort(409, "Contest name already exists")
    except (db.DoesNotExist, db.ValidationError):
        # ValidationError covers malformed ObjectIds passed in the URL.
        return abort(404, "Contest does not exist")
    except ContestDateTimeError:
        return abort(406, "EndTime must be greater than StartTime and StartTime must be greater than CreationTime")
@app.api_route('<string:cid>/', methods=['DELETE'])
@auth.authenticate
def delete(cid):
"""
Contest Delete
---
tags:
- contest
|
tyb0807/angr | angr/engines/vex/dirty.py | Python | bsd-2-clause | 14,622 | 0.005061 | import claripy
import logging
import time
from ... import sim_options as o
l = logging.getLogger("angr.engines.vex.dirty")
#####################
# Dirty calls
#####################
# they return retval, constraints
# Reference:
# http://www-inteng.fnal.gov/Integrated_Eng/GoodwinDocs/pdf/Sys%20docs/PowerPC/PowerPC%20Elapsed%20Time.pdf
# and
# http://www.cap-lore.com/code/TB/
def ppcg_dirtyhelper_MFTB(state):
    """Model the PowerPC MFTB (move-from-timebase) instruction.

    TODO: This is an incorrect implementation (always returns the same
    constant instead of a running timebase). Fix it later!
    """
    timebase = state.se.BVV(0x200, 64)
    return timebase, []
def ppc32g_dirtyhelper_MFSPR_287(state):
    """Model reading SPR 287 (PVR) on 32-bit PowerPC; constant placeholder."""
    spr_value = state.se.BVV(0x200, 32)
    return spr_value, []
def amd64g_dirtyhelper_RDTSC(state):
    """Model the RDTSC instruction (read timestamp counter).

    With USE_SYSTEM_TIMES enabled, a concrete, monotonically increasing
    value derived from the host clock is returned; otherwise the counter
    is left fully symbolic.
    """
    if o.USE_SYSTEM_TIMES in state.options:
        # time.clock() was removed in Python 3.8; fall back to it only on
        # old interpreters that lack perf_counter.
        clock = getattr(time, 'perf_counter', None) or time.clock
        val = state.solver.BVV(int(clock() * 1000000) + 12345678, 64)
    else:
        val = state.solver.BVS('RDTSC', 64, key=('hardware', 'rdtsc'))
    return val, []

x86g_dirtyhelper_RDTSC = amd64g_dirtyhelper_RDTSC
# For all the CPUID helpers: we've implemented the very nice CPUID functions, but we don't use them.
# we claim to be a much dumber cpu than we can support because otherwise we get bogged down doing
# various tasks in the libc initializers.
# Copied basically directly from the vex source
def amd64g_dirtyhelper_CPUID_baseline(state, _):
    """Minimal CPUID model claiming to be an old AMD Opteron.

    A deliberately dumb CPU is reported so that libc initializers do not
    take optimized code paths.  The first, unconditional SET_ABCD is the
    fallback for unknown leaves; each following store only takes effect
    when the requested leaf (old RAX) matches its condition.

    FIX: two lines of this table were corrupted by stray " | " separator
    tokens in the source dump; the hex constants are restored.
    """
    old_eax = state.regs.rax[31:0]

    def SET_ABCD(a, b, c, d, condition=None):
        # Store the four CPUID result registers, optionally guarded by a
        # symbolic equality condition on the requested leaf.
        if condition is None:
            state.registers.store('rax', a, size=8)
            state.registers.store('rbx', b, size=8)
            state.registers.store('rcx', c, size=8)
            state.registers.store('rdx', d, size=8)
        else:
            cond = old_eax == condition
            state.registers.store('rax', a, size=8, condition=cond)
            state.registers.store('rbx', b, size=8, condition=cond)
            state.registers.store('rcx', c, size=8, condition=cond)
            state.registers.store('rdx', d, size=8, condition=cond)

    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000)
    SET_ABCD(0x00000001, 0x72676e41, 0x21444955, 0x50432079, 0)
    SET_ABCD(0x00000f5a, 0x01000800, 0x00000000, 0x078bfbff, 1)
    SET_ABCD(0x80000018, 0x68747541, 0x444d4163, 0x69746e65, 0x80000000)
    SET_ABCD(0x00000f5a, 0x00000505, 0x00000000, 0x21d3fbff, 0x80000001)
    SET_ABCD(0x20444d41, 0x6574704f, 0x206e6f72, 0x296d7428, 0x80000002)
    SET_ABCD(0x6f725020, 0x73736563, 0x3820726f, 0x00003834, 0x80000003)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000004)
    SET_ABCD(0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140, 0x80000005)
    SET_ABCD(0x00000000, 0x42004200, 0x04008140, 0x00000000, 0x80000006)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x80000007)
    SET_ABCD(0x00003028, 0x00000000, 0x00000000, 0x00000000, 0x80000008)
    return None, []

amd64g_dirtyhelper_CPUID_avx_and_cx16 = amd64g_dirtyhelper_CPUID_baseline
amd64g_dirtyhelper_CPUID_avx2 = amd64g_dirtyhelper_CPUID_baseline
def CORRECT_amd64g_dirtyhelper_CPUID_avx_and_cx16(state, _):
    # Faithful CPUID model for an AVX+CX16 capable Intel Core i5-2300
    # (Sandy Bridge).  Currently unused: the simpler *_baseline model is
    # aliased in its place to keep libc initializers on dumb code paths.
    # Some leaves are additionally conditioned on the sub-leaf in RCX.
    old_eax = state.regs.rax[31:0]
    old_ecx = state.regs.rcx[31:0]
    def SET_ABCD(a, b, c, d, condition=None, condition2=None):
        # No condition: unconditional fallback store for unknown leaves.
        # One condition: guarded on the leaf (RAX).  Two conditions:
        # guarded on leaf (RAX) and sub-leaf (RCX) simultaneously.
        if condition is None:
            state.registers.store('rax', a, size=8)
            state.registers.store('rbx', b, size=8)
            state.registers.store('rcx', c, size=8)
            state.registers.store('rdx', d, size=8)
        elif condition2 is None:
            cond = old_eax == condition
            state.registers.store('rax', a, size=8, condition=cond)
            state.registers.store('rbx', b, size=8, condition=cond)
            state.registers.store('rcx', c, size=8, condition=cond)
            state.registers.store('rdx', d, size=8, condition=cond)
        else:
            cond = claripy.And(old_eax == condition, old_ecx == condition2)
            state.registers.store('rax', a, size=8, condition=cond)
            state.registers.store('rbx', b, size=8, condition=cond)
            state.registers.store('rcx', c, size=8, condition=cond)
            state.registers.store('rdx', d, size=8, condition=cond)
    # Values transcribed from the vex source for this CPU model; the
    # store order matters (fallback first, then leaf-specific overrides).
    SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000)
    SET_ABCD(0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0x00000000)
    SET_ABCD(0x000206a7, 0x00100800, 0x1f9ae3bf, 0xbfebfbff, 0x00000001)
    SET_ABCD(0x76035a01, 0x00f0b0ff, 0x00000000, 0x00ca0000, 0x00000002)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004)
    SET_ABCD(0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0x00000004, 0x00000000)
    SET_ABCD(0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0x00000004, 0x00000001)
    SET_ABCD(0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0x00000004, 0x00000002)
    SET_ABCD(0x1c03c163, 0x02c0003f, 0x00001fff, 0x00000006, 0x00000004, 0x00000003)
    SET_ABCD(0x00000040, 0x00000040, 0x00000003, 0x00001120, 0x00000005)
    SET_ABCD(0x00000077, 0x00000002, 0x00000009, 0x00000000, 0x00000006)
    SET_ABCD(0x00000000, 0x00000800, 0x00000000, 0x00000000, 0x00000007)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009)
    SET_ABCD(0x07300803, 0x00000000, 0x00000000, 0x00000603, 0x0000000a)
    SET_ABCD(0x00000000, 0x00000000, old_ecx, 0x00000000, 0x0000000b)
    SET_ABCD(0x00000001, 0x00000001, 0x00000100, 0x00000000, 0x0000000b, 0x00000000)
    SET_ABCD(0x00000004, 0x00000004, 0x00000201, 0x00000000, 0x0000000b, 0x00000001)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000c)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000d)
    SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000, 0x0000000d, 0x00000000)
    SET_ABCD(0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x00000001)
    SET_ABCD(0x00000100, 0x00000240, 0x00000000, 0x00000000, 0x0000000d, 0x00000002)
    SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000, 0x0000000e)
    SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000, 0x0000000f)
    SET_ABCD(0x80000008, 0x00000000, 0x00000000, 0x00000000, 0x80000000)
    SET_ABCD(0x00000000, 0x00000000, 0x00000001, 0x28100800, 0x80000001)
    SET_ABCD(0x20202020, 0x20202020, 0x65746e49, 0x2952286c, 0x80000002)
    SET_ABCD(0x726f4320, 0x4d542865, 0x35692029, 0x3033322d, 0x80000003)
    SET_ABCD(0x50432030, 0x20402055, 0x30382e32, 0x007a4847, 0x80000004)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000005)
    SET_ABCD(0x00000000, 0x00000000, 0x01006040, 0x00000000, 0x80000006)
    SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000100, 0x80000007)
    SET_ABCD(0x00003024, 0x00000000, 0x00000000, 0x00000000, 0x80000008)
    return None, [ ]
def amd64g_dirtyhelper_IN(state, portno, sz): #pylint:disable=unused-argument
    """Model the IN instruction: a port read yields an unconstrained value."""
    port_value = state.se.Unconstrained('IN', 64, key=('hardware', 'in'))
    return port_value, []
def amd64g_dirtyhelper_OUT(state, portno, data, sz): #pylint:disable=unused-argument
    """Model the OUT instruction as a no-op: port writes have no visible effect."""
    return None, []
def amd64g_dirtyhelper_SxDT(state, addr, op): #pylint:disable=unused-argument
    """Model SIDT/SGDT: store an unconstrained descriptor to memory.

    These are the only instructions dealt with by vex here, and both store
    80 bits of data.  See AMD APM v3 (24594), page 377:
    http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2008/10/24594_APM_v3.pdf
    """
    descriptor = state.se.Unconstrained('SxDT', 80)
    state.memory.store(addr, descriptor)
    return None, []
def x86g_dirtyhelper_CPUID_sse0(state, _):
old_eax = state.regs.eax
def SET_ABCD(a, b, c, d, condition=None, condition2=None):
if condition is None:
state.registers.store('eax', a, size=4)
state.registers.store('ebx', b, size=4)
state.registers.store('ecx', c, size=4)
state.registers.store('edx', d, size=4)
elif condition2 is None:
cond = old_eax == condition
state.registers.store('eax', a, size=4, condition=cond)
state.registers.store('ebx', b, size=4, condition=cond)
state.registers.store('ecx', c, size=4, condition=cond)
state.registers.store('edx', d, size=4, condition=cond)
SET_ABCD(0x543, 0, 0, 0x8001bf)
SET_ABCD(0x1, 0x72676e41, 0x21444955, 0x50432079, 0)
re |
dbbhattacharya/kitsune | vendor/packages/pylint/test/input/func_noerror_except_pass.py | Python | bsd-3-clause | 204 | 0.004902 | """
#3205: W0704 (except doesn't do anything) false positive if some statements
follow a | "pass" |
"""
__revision__ = None
try:
A = 2
except ValueError:
pass # pylint: disable-msg=W0107
print A
|
oscurart/BlenderAddons | oscurart_tools/oscurart_animation.py | Python | gpl-2.0 | 2,647 | 0.001511 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from mathutils import Matrix
# ---------------------------QUICK PARENT------------------
def DefQuickParent(inf, out):
    """Bake a temporary parent relationship over a frame range.

    The active object (or active pose bone when the active object is an
    armature) is keyframed on every frame in [inf, out) so that it follows
    the other selected object as if parented to it.

    FIX: restored two lines corrupted by stray " | " separator tokens in
    the source dump, and collapsed the two near-identical branches (the
    only difference was 'matrix' for pose bones vs 'matrix_world' for
    plain objects).
    """
    context = bpy.context
    source = context.object
    use_bone = source.type == "ARMATURE"
    # Pose bones expose their transform as 'matrix'; objects as 'matrix_world'.
    ob = context.active_pose_bone if use_bone else context.object
    attr = "matrix" if use_bone else "matrix_world"
    # The parent target is the other selected object.
    target = [obj for obj in context.selected_objects if obj != source][0]
    target.select = False
    # Capture the offset between target and child at the start frame.
    context.scene.frame_set(frame=context.scene.quick_animation_in)
    inv_target = Matrix(target.matrix_world)
    inv_target.invert()
    offset = Matrix(getattr(ob, attr))
    for frame in range(inf, out):
        context.scene.frame_set(frame=frame)
        setattr(ob, attr, target.matrix_world * inv_target * offset)
        bpy.ops.anim.keyframe_insert(type="LocRotScale")
class QuickParent(bpy.types.Operator):
    """Creates a parent from one object to other in a selected frame range"""
    # Blender operator registration metadata.
    bl_idname = "anim.quick_parent_osc"
    bl_label = "Quick Parent"
    bl_options = {"REGISTER", "UNDO"}
    def execute(self, context):
        # Bake over the frame range configured via the scene properties
        # quick_animation_in / quick_animation_out.
        DefQuickParent(
            bpy.context.scene.quick_animation_in,
            bpy.context.scene.quick_animation_out,
        )
        return {'FINISHED'}
|
DuinoDu/label_ellipse | label.py | Python | mit | 6,224 | 0.01446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python label.py [imgdir]
Usage:
d -> delete current label
n -> next picture
q -> exit
'''
import os, sys
import cv2
import numpy as np
import math
import copy
import xml.etree.ElementTree as ET
# Endpoints of the major-axis line currently being drawn.
x1,y1, x2, y2 = 0,0,0,0
# Mouse state machine: 0=idle, 1=dragging axis, 2=sizing minor axis, 3=done.
state = 0
# ellipse parameter
x0, y0 = 0, 0  # ellipse centre
angle = 0      # rotation in degrees
a, b = 0, 0    # semi-major / semi-minor axis lengths
im = np.zeros((100,100,3))  # image currently shown in the window
filename = ""  # basename (without extension) of the current image
def onmouse(event, x, y, flags, param):
    """Mouse callback implementing the ellipse-labeling state machine.

    state 0: first click fixes one end of the major axis.
    state 1: the mouse tracks the other end; the next click fixes it,
             the centre and the rotation angle.
    state 2: the mouse distance to the axis sets the minor axis; a final
             click finishes the ellipse.
    state 3: finished; keep drawing the result.

    Fixes over the original:
      * the degenerate-angle branches were swapped (a horizontal axis was
        reported as 90 degrees and a vertical one as 0);
      * the point-to-line distance used the Ax+By+1=0 form, which divides
        by zero whenever the axis line passes through the origin.
    """
    global im
    global x1, y1, x2, y2
    global x0, y0, a, b, angle
    global state
    img = im.copy()
    if state == 0:
        # click one: anchor the axis and reset any previous ellipse
        if event == cv2.EVENT_LBUTTONDOWN:
            x1, y1 = x, y
            x0, y0 = 0, 0
            angle = 0
            a, b = 0, 0
        elif event == cv2.EVENT_LBUTTONUP:
            state = 1
    elif state == 1:
        if event == cv2.EVENT_MOUSEMOVE:
            x2, y2 = x, y
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # click two: fix the axis, derive centre and angle
        elif event == cv2.EVENT_LBUTTONDOWN:
            state = 2
            x0 = int((x1 + x2) / 2)
            y0 = int((y1 + y2) / 2)
            if x1 != x2 and y1 != y2:
                # float() guards against integer division under Python 2
                angle = math.degrees(math.atan(float(y1 - y2) / (x1 - x2)))
            elif x1 == x2:
                # vertical major axis
                angle = 90
            else:
                # horizontal major axis
                angle = 0
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    elif state == 2:
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # click three: mouse distance to the axis defines the minor axis
        if event == cv2.EVENT_MOUSEMOVE:
            a = int(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) / 2)
            # Distance from (x, y) to the line through (x1,y1)-(x2,y2);
            # this form has no singularity at the origin.
            denom = math.hypot(y2 - y1, x2 - x1)
            if denom:
                b = int(abs((y2 - y1) * x - (x2 - x1) * y
                            + x2 * y1 - x1 * y2) / denom)
            else:
                b = 0
            cv2.ellipse(img, (x0, y0), (a, b), angle, 0, 360, (0, 255, 255))
        if event == cv2.EVENT_LBUTTONDOWN:
            state = 3
    elif state == 3:
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.ellipse(img, (x0, y0), (a, b), angle, 0, 360, (0, 255, 255))
    if x0 != 0 and y0 != 0:
        cv2.ellipse(img, (x0, y0), (a, b), angle, 0, 360, (0, 255, 255))
    cv2.imshow('img', img)
def clear():
    """Reset all labeling globals so a fresh annotation can be drawn."""
    global x1, y1, x2, y2
    global x0, y0, a, b, angle
    global state
    x1 = y1 = x2 = y2 = 0
    x0 = y0 = 0
    angle = 0
    a = b = 0
    state = 0
# XML templates (PASCAL-VOC style) filled in by save().
Annnotation = """<annotation>
    <folder>FetalHead</folder>
    <filename>{}</filename>
    <source>
        <database>The VOC2007 Database</database>
        <annotation>PASCAL VOC2007</annotation>
        <image>flickr</image>
        <flickrid>341012865</flickrid>
    </source>
    <size>
        <width>{}</width>
        <height>{}</height>
        <depth>3</depth>
    </size>
    <segmented>0</segmented>
    {}
</annotation>
"""

Object = """
    <object>
        <name>{}</name>
        <pose>Left</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <x>{}</x>
            <y>{}</y>
            <a>{}</a>
            <b>{}</b>
            <angle>{}</angle>
        </bndbox>
    </object>
"""


def save(annodir):
    """Write the current ellipse annotation to <annodir>/<filename>.xml.

    Reads the module-level globals set by the mouse callback (x0, y0, a,
    b, angle), the loaded image `im` (for width/height) and `filename`.
    """
    global x0, y0, a, b, angle
    global filename
    global im
    height, width = im.shape[:2]
    name = 'head'
    # str.format already returns a new string, so the original
    # copy.deepcopy() of the (immutable) templates was pointless.
    obj_xml = Object.format(name, int(x0), int(y0), int(a), int(b), int(angle))
    anno_xml = Annnotation.format(filename, width, height, obj_xml)
    xmlfile = os.path.join(annodir, '{}.xml'.format(filename))
    # Mode 'w' truncates, so no need to os.remove() an existing file first.
    with open(xmlfile, 'w') as fid:
        fid.write(anno_xml)
    print('Image No. {} saved.'.format(filename))
def load(xmlfile):
    """Draw every ellipse stored in a VOC-style XML file on top of `im`.

    Also leaves the last ellipse's parameters in the module globals so a
    subsequent save() re-writes the same annotation.  (Removed the unused
    width/height/name/index locals the original parsed and ignored.)
    """
    global x0, y0, a, b, angle
    global im
    img = im.copy()
    tree = ET.parse(xmlfile)
    for obj in tree.findall('object'):
        bbox = obj.find('bndbox')
        x0 = int(bbox.find('x').text)
        y0 = int(bbox.find('y').text)
        a = int(bbox.find('a').text)
        b = int(bbox.find('b').text)
        angle = int(bbox.find('angle').text)
        cv2.ellipse(img, (x0, y0), (a, b), angle, 0, 360, (0, 255, 255))
    cv2.imshow('img', img)
def label(argv):
    """Interactive labeling loop.

    Iterates the images in <root>/JPEGImages and lets the user draw one
    ellipse per image; annotations are written to <root>/Annotations.
    Keys: n = save & next, b = save & back, d = discard current drawing,
    q = quit.

    FIX: restored two lines corrupted by stray " | " separator tokens in
    the source dump (os.path.join / os.makedirs) and dropped a redundant
    inner sorted() around os.listdir().
    """
    root = os.path.abspath(argv[1])
    if not os.path.exists(os.path.join(root, 'JPEGImages')):
        print("Use https://github.com/DuinoDu/BBox-Label-Tool/blob/master/tools/createDS.py to convert images to voc-format")
        return
    imgdir = os.path.join(root, 'JPEGImages')
    # Sorting the joined paths already yields filename order (same prefix).
    imgfiles = sorted(os.path.join(imgdir, x) for x in os.listdir(imgdir)
                      if x.endswith('.JPG') or x.endswith('.jpg'))
    annodir = os.path.join(root, 'Annotations')
    if not os.path.exists(annodir):
        os.makedirs(annodir)

    global im
    global filename
    cv2.namedWindow('img')
    cv2.setMouseCallback('img', onmouse)
    img_ind = 0
    while True:
        if img_ind >= len(imgfiles):
            print("Finish.")
            break
        img_ind = 0 if img_ind < 0 else img_ind  # clamp after 'b' on first image
        f = imgfiles[img_ind]
        print(f)
        filename = os.path.basename(f)[:-4]
        im = cv2.imread(f)
        xmlfile = os.path.join(annodir, '{}.xml'.format(filename))
        if os.path.exists(xmlfile):
            # Show the existing annotation for review/editing.
            load(xmlfile)
        else:
            cv2.imshow('img', im)
        while True:
            ch = cv2.waitKey(0) & 0xff
            if ch == ord('q'):
                break
            elif ch == ord('n'):
                save(annodir)
                clear()
                img_ind += 1
                break
            elif ch == ord('b'):
                save(annodir)
                clear()
                img_ind -= 1
                break
            elif ch == ord('d'):
                # Discard the current drawing: reload the raw image.
                im = cv2.imread(f)
                cv2.imshow('img', im)
                clear()
        if ch == ord('q'):
            break
def main():
    """Entry point: require exactly one argument (the dataset root)."""
    import sys
    if len(sys.argv) == 2:
        label(sys.argv)
    else:
        print(__doc__)

if __name__ == "__main__":
    main()
|
parkbyte/electrumparkbyte | gui/qt/qrwindow.py | Python | mit | 3,180 | 0.001887 | #!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import platform
from decimal import Decimal
from urllib import quote
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum.i18n import _
# Pick a reasonable monospace font per platform (address/QR text display).
if platform.system() == 'Windows':
    MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
    MONOSPACE_FONT = 'Monaco'
else:
    MONOSPACE_FONT = 'monospace'
# NOTE(review): column_index appears unused in this chunk -- confirm callers.
column_index = 4
class QR_Window(QWidget):
    """Standalone window showing a payment request as a QR code next to
    the address, label and amount.

    FIX: restored two __init__ lines corrupted by stray " | " separator
    tokens in the source dump (QHBoxLayout / self.qrw assignment).
    """

    def __init__(self, win):
        QWidget.__init__(self)
        self.win = win
        self.setWindowTitle('Electrum - '+_('Payment Request'))
        self.setMinimumSize(800, 250)
        self.address = ''
        self.label = ''
        self.amount = 0
        self.setFocusPolicy(QtCore.Qt.NoFocus)

        # QR code on the left, textual details stacked on the right.
        main_box = QHBoxLayout()
        self.qrw = QRCodeWidget()
        main_box.addWidget(self.qrw, 1)
        vbox = QVBoxLayout()
        main_box.addLayout(vbox)
        self.address_label = QLabel("")
        #self.address_label.setFont(QFont(MONOSPACE_FONT))
        vbox.addWidget(self.address_label)
        self.label_label = QLabel("")
        vbox.addWidget(self.label_label)
        self.amount_label = QLabel("")
        vbox.addWidget(self.amount_label)
        vbox.addStretch(1)
        self.setLayout(main_box)

    def set_content(self, address, amount, message, url):
        """Update the three labels and re-render the QR code for `url`."""
        address_text = "<span style='font-size: 18pt'>%s</span>" % address if address else ""
        self.address_label.setText(address_text)
        if amount:
            amount = self.win.format_amount(amount)
            amount_text = "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>%s</span> " % (amount, self.win.base_unit())
        else:
            amount_text = ''
        self.amount_label.setText(amount_text)
        label_text = "<span style='font-size: 21pt'>%s</span>" % message if message else ""
        self.label_label.setText(label_text)
        self.qrw.setData(url)
|
Uruwolf/pyshop | products/admin.py | Python | gpl-3.0 | 1,218 | 0.00821 | '''
This file is part of pyShop
pyShop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyShop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyShop. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) Steve "Uru" West 2012 <uruwolf@gmail.com>
'''
from products.models import Catergory, Product
from django.contrib import admin
#Catergories get the generic admin treatment
admin.site.register(Catergory)
class ProductAdmin(admin.ModelAdmin):
    '''Admin panel settings for Product objects.

    Displays the name and category, allows filtering by category, and is
    searchable via name and description.  (The model field is spelled
    "catergory" in the schema, hence the spelling below.)
    '''
    list_display = ('name', 'catergory')
    list_filter = ['catergory']
    search_fields = ['name', 'description']
admin.site.register(Product, ProductAdmin)
|
PaddlePaddle/Paddle | python/paddle/fluid/layers/tensor.py | Python | apache-2.0 | 67,174 | 0.002799 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import numpy
import warnings
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated
from .utils import check_shape
from paddle import _C_ops
# Public API of this module; names not listed here are internal.
__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]
def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the create tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    # FIX: the whitelist previously contained 'int32' twice and omitted
    # 'int16', contradicting the documented supported dtypes above.
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)
def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
	:api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operator by your self. instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    # Validate shape elements individually so numpy integer scalars are
    # accepted alongside plain ints.
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')
    # LayerHelper consumes the full local namespace (shape, dtype, attr...).
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)
def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable.
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable.
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    # Validate the shape container and each of its elements before use.
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')
    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')
    # NOTE: **locals() forwards every local bound above (including the loop
    # variable `item`) to LayerHelper -- do not rename/add locals before this.
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)  # the variable itself is not trained
    # Fill the new variable with the requested constant value.
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))
    return var
def cast(x, dtype):
"""
This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
to the output with :attr:`dtype`. It's meaningless if the output dtype
equals the input dtype, but it's fine if you do so.
Args:
x(Tensor): An input N-D Tensor with data type bool, float16,
float32, float64, int32, int64, uint8.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
bool, float16, float32, float64, int8, int32, int64, uint8.
Returns:
Tensor: A Tensor with the same shape as input's.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], 'float64')
y = paddle.cast(x, 'uint8')
"""
if in_dy |
mastizada/kuma | vendor/packages/ipython/IPython/rlineimpl.py | Python | mpl-2.0 | 1,760 | 0.002841 | # -*- coding: utf-8 -*-
""" Imports and provides the 'correct' version of readline for the platform.
Readline is used throughout IPython as 'import IPython.rlineimpl as readline'.
In addition to normal readline stuff, this module provides have_readline
boolean and _outputfile variable used in genutils.
"""
import sys
try:
from readline import *
import readline as _rl
have_readline = True
except ImportError:
try:
from pyreadline import *
import pyreadline as _rl
have_readline = True
except ImportError:
have_readline = False
if sys.platform == 'win32' and have_readline:
try:
_outputfile=_rl.GetOutputFile()
except AttributeError:
print "Failed GetOutputFile"
have_readline = False
# Test to see if libedit is being used instead of GNU readline.
# Thanks to Boyd Waters for this patch.
uses_libedit = False
if sys.platform == 'darwin' and have_readline:
import commands
(status, result) = commands.getstatusoutput( "otool -L %s | grep l | ibedit" % _rl.__file__ )
if status == 0 and len(result) > 0:
# we are bound to libedit - new in Leopard
_rl.parse_and_bind("bind ^I rl_complete")
print "Leopard libedit detected."
uses_libedit = True
# the clear_history() function was only introduced in Python 2.4 and is
# actually optional in the readline API, so we must explicitly check for its
# existence. Some known platforms actually don't have it. This thread:
# http://mail.python.org/pipermail/python-dev/2003-August/037845.html
# has the original discussion.
if have_readline:
try:
_rl.clear_history
except AttributeError:
def clear_history(): pass
_rl.clear_history = clear_history
|
syci/ingadhoc-odoo-addons | report_extended/models/__init__.py | Python | agpl-3.0 | 983 | 0.001017 | # -*- coding: utf-8 -*-
##############################################################################
#
# Ingenieria ADHOC - ADHOC SA
# https://launchpad.net/~ingenieria-adhoc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report
import company
|
astagi/chickenfoot | chickenfoot/modules_register.py | Python | mit | 443 | 0.009029 | from modules.front_motor import FrontMotor
from modules.back_motor import BackMotor
from services.temperature_sensor import TemperatureSensor
# Registry mapping module names to the classes that implement them.
modules = {}


def get_module(module_name, **data):
    """Instantiate and return the class registered under *module_name*.

    All keyword arguments are forwarded to the class constructor.
    Raises KeyError if the name was never registered.
    """
    return modules[module_name](**data)


def register_module(module_name, cls):
    """Register *cls* under *module_name* for later lookup via get_module."""
    modules[module_name] = cls
# Register the built-in hardware modules by their public names.
register_module('FrontMotor', FrontMotor)
register_module('BackMotor', BackMotor)
register_module('TemperatureSensor', TemperatureSensor)
timberline-secondary/hackerspace | src/djcytoscape/tests/test_views.py | Python | gpl-3.0 | 5,010 | 0.002994 | from django.contrib.auth import get_user_model
# from django.urls import reverse
from model_bakery import baker
from tenant_schemas.test.cases import TenantTestCase
from tenant_schemas.test.client import TenantClient
# from siteconfig.models import SiteConfig
from hackerspace_online.tests.utils import ViewTestUtilsMixin
from djcytoscape.models import CytoScape
User = get_user_model()
class ViewTests(ViewTestUtilsMixin, TenantTestCase):
    """Smoke-tests every djcytoscape URL for the three access levels:
    anonymous (redirect to login), student, and staff/teacher."""

    def setUp(self):
        self.client = TenantClient(self.tenant)
        # need a teacher and a student with known password so tests can log in as each, or could use force_login()?
        self.test_password = "password"
        # need a teacher before students can be created or the profile creation will fail when trying to notify
        self.test_teacher = User.objects.create_user('test_teacher', password=self.test_password, is_staff=True)
        self.test_student1 = User.objects.create_user('test_student', password=self.test_password)
        self.map = baker.make('djcytoscape.CytoScape')

    def test_all_page_status_codes_for_anonymous(self):
        ''' If not logged in then all views should redirect to home page '''
        self.assertRedirectsLogin('djcytoscape:index')
        self.assertRedirectsLogin('djcytoscape:primary')
        self.assertRedirectsLogin('djcytoscape:quest_map', args=[1])
        self.assertRedirectsLogin('djcytoscape:quest_map_personalized', args=[1, 1])
        self.assertRedirectsLogin('djcytoscape:quest_map_interlink', args=[1, 1, 1])
        self.assertRedirectsLogin('djcytoscape:list')
        self.assertRedirectsAdmin('djcytoscape:regenerate', args=[1])
        self.assertRedirectsAdmin('djcytoscape:regenerate_all')
        self.assertRedirectsAdmin('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
        self.assertRedirectsAdmin('djcytoscape:generate_unseeded')
        self.assertRedirectsAdmin('djcytoscape:update', args=[1])
        self.assertRedirectsAdmin('djcytoscape:delete', args=[1])

    def test_all_page_status_codes_for_students(self):
        success = self.client.login(username=self.test_student1.username, password=self.test_password)
        self.assertTrue(success)
        self.assert200('djcytoscape:index')
        self.assert200('djcytoscape:quest_map_personalized', args=[self.map.id, self.test_student1.id])
        # need to build interlinked maps to test this. Do in own test
        # self.assert200('djcytoscape:quest_map_interlink', args=[1, 1, 1])
        self.assert200('djcytoscape:list')
        self.assert200('djcytoscape:primary')
        self.assert200('djcytoscape:quest_map', args=[self.map.id])
        self.assertRedirectsAdmin('djcytoscape:update', args=[self.map.id])
        self.assertRedirectsAdmin('djcytoscape:delete', args=[self.map.id])
        self.assertRedirectsAdmin('djcytoscape:regenerate', args=[self.map.id])
        self.assertRedirectsAdmin('djcytoscape:regenerate_all')
        self.assertRedirectsAdmin('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
        self.assertRedirectsAdmin('djcytoscape:generate_unseeded')

    def test_all_page_status_codes_for_teachers(self):
        # log in a teacher
        success = self.client.login(username=self.test_teacher.username, password=self.test_password)
        self.assertTrue(success)
        self.assert200('djcytoscape:index')
        self.assert200('djcytoscape:quest_map_personalized', args=[self.map.id, self.test_student1.id])
        # need to build interlinked maps to test this. Do in own test
        # self.assert200('djcytoscape:quest_map_interlink', args=[1, 1, 1])
        self.assert200('djcytoscape:list')
        self.assert200('djcytoscape:primary')
        self.assert200('djcytoscape:quest_map', args=[self.map.id])
        self.assert200('djcytoscape:update', args=[self.map.id])
        self.assert200('djcytoscape:delete', args=[self.map.id])
        # These will need their own tests:
        # self.assert200('djcytoscape:regenerate', args=[self.map.id])
        # self.assert200('djcytoscape:regenerate_all')
        # self.assert200('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
        # self.assert200('djcytoscape:generate_unseeded')
class PrimaryViewTests(ViewTestUtilsMixin, TenantTestCase):
    """Tests lazy generation of the primary ("Main") quest map."""
    def test_initial_map_generated_on_first_view(self):
        """First access to the primary view should auto-create the Main map."""
        # shouldn't be any maps from the start
        self.assertFalse(CytoScape.objects.exists())
        # log in anyone (any authenticated user can trigger generation)
        self.client = TenantClient(self.tenant)
        anyone = User.objects.create_user('anyone', password="password")
        success = self.client.login(username=anyone.username, password="password")
        self.assertTrue(success)
        # Access the primary map view
        self.assert200('djcytoscape:primary')
        # Should have generated the "Main" map
        self.assertEqual(CytoScape.objects.count(), 1)
        self.assertTrue(CytoScape.objects.filter(name="Main").exists())
guillon/mdi | examples/mini/scripts/generate_executions.py | Python | mit | 3,906 | 0.003584 | #!/usr/bin/env python
#
# Machine Description Interface C API
#
# This software is delivered under the terms of the MIT License
#
# Copyright (c) 2016 STMicroelectronics
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
|
from __future__ imp | ort print_function
import sys
class ENUM:
    """One machine instruction description; instances register themselves in
    the class-level ``instructions_list`` so the code generator can emit a C
    execution function per instruction, in registration order."""
    # Class-level registry: shared by all instances, appended to in __init__.
    instructions_list = []
    def __init__(self, ID, mnemonic, properties, parsing, encoding, short_desc, execution, description):
        self.ID = ID
        self.mnemonic = mnemonic
        self.properties = properties
        self.parsing = parsing
        self.encoding = encoding
        self.short_desc = short_desc
        self.execution = execution  # C statement(s) implementing the semantics
        self.description = description
        # Register this instruction for later code generation.
        self.instructions_list.append(self)
    @staticmethod
    def emit_execution(out):
        """Write the generated C execution functions to the file path *out*."""
        with open(out, "w") as outf:
            print("/* BEGIN: Generated executions */", file=outf)
            ENUM._emit_executions(outf)
            print("/* END: Generated executions */", file=outf)
    @staticmethod
    def _emit_executions(out):
        """Emit helper macros, one ``_execution_<i>`` C function per registered
        instruction, and a function-pointer dispatch table ``_executions``."""
        idx = 0;
        print("#define P(idx) EXE_OPS(_operands,idx)", file=out)
        print("#define NEXT_PC() (RR(PC,0) + _op_size)", file=out)
        print("#define RR(rf,idx) EXE_CPU_RR((*_cpu_prev),rf,idx)", file=out)
        print("#define RS(rf,idx) EXE_CPU_RS(_cpu,rf,idx)", file=out)
        print("#define MR32(idx) EXE_MEM_FETCH32(_mem,idx)", file=out)
        print("#define MS32(idx) EXE_MEM_SLICE32(_mem,idx)", file=out)
        for inst in ENUM.instructions_list:
            print("", file=out)
            print("static int32_t _execution_%i /* %s */ (EXE_CTX_T _context, EXE_OPS_T _operands, size_t _op_size)" %
                  (idx, inst.ID), file=out)
            print("{", file=out)
            print("    CPU_T _cpu, *_cpu_prev = EXE_CTX_CPU(_context);", file=out)
            print("    MEM_T _mem, *_mem_prev = EXE_CTX_MEM(_context);", file=out)
            print("    EXE_CPU_CLONE(_cpu, _cpu_prev);", file=out)
            print("    EXE_MEM_CLONE(_mem, _mem_prev);", file=out)
            print("    RS(PC,0) = NEXT_PC();", file=out)
            print("    %s;" % inst.execution, file=out)
            print("    EXE_CPU_UPDATE(*_cpu_prev, &_cpu);", file=out);
            print("    EXE_CPU_UPDATE(*_mem_prev, &_mem);", file=out);
            print("    return 0;", file=out)
            print("}", file=out);
            idx += 1
        # NOTE(review): the emitted #undef names (RF, MEM) do not match the
        # macros #define'd above (P, NEXT_PC, RR, RS, MR32, MS32) -- looks like
        # a leftover from an earlier macro set; confirm against the C consumer.
        print("#undef RF", file=out)
        print("#undef MEM", file=out)
        print("typedef int32_t (*EXE_FUNC_T)(EXE_CTX_T _context, EXE_OPS_T _operands, size_t _op_size);", file=out);
        print("static const EXE_FUNC_T _executions[] = {", file=out)
        idx = 0
        for inst in ENUM.instructions_list:
            print("    _execution_%i /* %s */," % (idx, inst.ID), file=out)
            idx += 1
        print("};", file=out)
# Execute the instruction-description file (argv[1]); it instantiates ENUM(...)
# entries, which self-register. Then emit the generated C code to argv[2].
execfile(sys.argv[1])
ENUM.emit_execution(sys.argv[2])
|
Kleptobismol/scikit-bio | skbio/format/sequences/tests/test_fastq.py | Python | bsd-3-clause | 1,232 | 0 | #!/usr/bin/env python
import numpy as np
from unittest import TestCase, main
from skbio.format.sequences import format_fastq_record
from skbio.format.sequences.fastq import _phred_to_ascii33, _phred_to_ascii64
class FASTQFormatTests(TestCase):
    """Unit tests for FASTQ record formatting and Phred-to-ASCII encoding."""

    def setUp(self):
        # Shared fixture: three quality scores; int8 matches scikit-bio storage.
        self.qual_scores = np.array([38, 39, 40], dtype=np.int8)
        self.args = (b'abc', b'def', self.qual_scores)

    def test_format_fastq_record_phred_offset_33(self):
        exp = b"@abc\ndef\n+\nGHI\n"
        obs = format_fastq_record(*self.args, phred_offset=33)
        self.assertEqual(obs, exp)

    def test_format_fastq_record_phred_offset_64(self):
        exp = b"@abc\ndef\n+\nfgh\n"
        obs = format_fastq_record(*self.args, phred_offset=64)
        self.assertEqual(obs, exp)

    def test_format_fastq_record_invalid_phred_offset(self):
        # Only offsets 33 and 64 are valid.
        with self.assertRaises(ValueError):
            format_fastq_record(*self.args, phred_offset=42)

    def test_phred_to_ascii33(self):
        obs = _phred_to_ascii33(self.qual_scores)
        self.assertEqual(obs, b'GHI')

    def test_phred_to_ascii64(self):
        obs = _phred_to_ascii64(self.qual_scores)
        self.assertEqual(obs, b'fgh')
if __name__ == '__main__':
main()
|
BenoitDherin/data-analysis-with-R | website/plugins/gzip_cache/gzip_cache.py | Python | mit | 1,902 | 0.002629 | '''
Copyright (c) 2012 Matt Layman
Gzip cache
----------
A plugin to create .gz cache files for optimization.
'''
import gzip
import logging
import os
from pelican import signals
logger = logging.getLogger(__name__)
# A list of file types to exclude from possible compression
EXCLUDE_TYPES = [
# Compressed types
'.bz2',
'.gz',
# Audio types
'.aac',
'.flac',
'.mp3',
'.wma',
# Image types
'.gif',
'.jpg',
'.jpeg',
'.png',
# Video types
'.avi',
'.mov',
'.mp4',
]
def create_gzip_cache(pelican):
    '''Create a gzip cache file for every file that a webserver would
    reasonably want to cache (e.g., text type files).

    Walks the generated output tree and writes a sibling ``.gz`` file next to
    each compressible file.

    :param pelican: The Pelican instance
    '''
    for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
        for name in filenames:
            if should_compress(name):
                filepath = os.path.join(dirpath, name)
                create_gzip_file(filepath)
def should_compress(filename):
    '''Check if the filename is a type of file that should be compressed.

    Returns False for any extension listed in EXCLUDE_TYPES (already
    compressed archives, audio, images and video), True otherwise.

    :param filename: A file name to check against
    '''
    # str.endswith accepts a tuple of suffixes, so one call replaces the loop.
    return not filename.endswith(tuple(EXCLUDE_TYPES))
def create_gzip_file(filepath):
    '''Create a gzipped file in the same directory with a filepath.gz name.

    Compression failures are logged as critical rather than raised, so one
    bad file does not abort the whole cache build.

    :param filepath: A file to compress
    '''
    compressed_path = filepath + '.gz'
    with open(filepath, 'rb') as uncompressed:
        try:
            logger.debug('Compressing: %s' % filepath)
            # Context manager guarantees the gzip handle is closed; the old
            # try/finally raised NameError when gzip.open itself failed,
            # because `compressed` was never bound.
            with gzip.open(compressed_path, 'wb') as compressed:
                compressed.writelines(uncompressed)
        except Exception as ex:
            logger.critical('Gzip compression failed: %s' % ex)
def register():
    '''Register the plugin: compress the output once Pelican has finished.'''
    signals.finalized.connect(create_gzip_cache)
|
kelle/astropy | astropy/vo/client/tests/test_conesearch.py | Python | bsd-3-clause | 9,454 | 0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for `astropy.vo.client.conesearch` and `astropy.vo.client.async`."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# STDLIB
import os
import time
import warnings
# THIRD-PARTY
import pytest
import numpy as np
# LOCAL
from .. import conesearch, vos_catalog
from ..exceptions import VOSError, ConeSearchError
from ... import conf
from .... import units as u
from ....coordinates import ICRS, SkyCoord
from ....tests.helper import remote_data
from ....utils.data import get_pkg_data_filename
from ....utils.exceptions import AstropyDeprecationWarning
from ....utils import data
__doctest_skip__ = ['*']
# Global variables for TestConeSearch
SCS_RA = 0
SCS_DEC = 0
SCS_SR = 0.1
SCS_CENTER = ICRS(SCS_RA * u.degree, SCS_DEC * u.degree)
SCS_RADIUS = SCS_SR * u.degree
def setup_module():
    """Ignore all deprecation warnings here."""
    # This module deliberately exercises the deprecated astropy.vo API.
    warnings.simplefilter('ignore', AstropyDeprecationWarning)
def teardown_module():
    # Restore default warning filters for subsequent test modules.
    warnings.resetwarnings()
@remote_data
class TestConeSearch(object):
"""Test Cone Search on a pre-defined access URL.
.. note::
This test will fail if the URL becomes inaccessible,
which is beyond AstroPy's control. When this happens,
change the test to use a different URL.
At the time this was written, ``pedantic=True`` will
not yield any successful search.
"""
def setup_class(self):
# If this link is broken, use the next in database that works
self.url = ('http://vizier.u-strasbg.fr/viz-bin/votable/-A?-out.all&'
'-source=I/252/out&')
self.catname = 'USNO-A2'
# Avoid downloading the full database
conf.conesearch_dbname = 'conesearch_simple'
# Sometimes 3s is not enough
data.conf.remote_timeout = 10
self.verbose = False
self.pedantic = False
def test_cat_listing(self):
assert (conesearch.list_catalogs() ==
['BROKEN', 'USNO ACT', 'USNO NOMAD', 'USNO-A2', 'USNO-B1'])
assert (conesearch.list_catalogs(pattern='usno*a') ==
['USNO ACT', 'USNO NOMAD', 'USNO-A2'])
def test_no_result(self):
with pytest.raises(VOSError):
conesearch.conesearch(
SCS_CENTER, 0.001, catalog_db=self.url,
pedantic=self.pedantic, verbose=self.verbose)
@pytest.mark.parametrize(('center', 'radius'),
[((SCS_RA, SCS_DEC), SCS_SR),
(SCS_CENTER, SCS_RADIUS)])
def test_one_search(self, center, radius):
"""This does not necessarily uses ``self.url`` because of
unordered dict in JSON tree.
"""
tab_1 = conesearch.conesearch(
center, radius, pedantic=None, verbose=self.verbose)
assert tab_1.array.size > 0
def test_sky_coord(self):
"""
Check that searching with a SkyCoord works too
"""
sc_cen = SkyCoord(SCS_CENTER)
tab = conesearch.conesearch(
sc_cen, SCS_RADIUS, catalog_db=self.url,
pedantic=self.pedantic, verbose=self.verbose)
assert tab.array.size > 0
def test_timeout(self):
"""Test time out error."""
with pytest.raises(VOSError) as e:
with data.conf.set_temp('remote_timeout', 0.001):
conesearch.conesearch(
SCS_CENTER, SCS_RADIUS, pedantic=self.pedantic,
verbose=self.verbose, catalog_db=self.url, cache=False)
assert 'timed out' in e.info, 'test_timeout failed'
def test_searches(self):
tab_2 = conesearch.conesearch(
SCS_CENTER, SCS_RADIUS, catalog_db=self.url,
pedantic=self.pedantic, verbose=self.verbose)
tab_3 = conesearch.conesearch(
SCS_CENTER, SCS_RADIUS,
catalog_db=[self.catname, self.url],
pedantic=self.pedantic, verbose=self.verbose)
tab_4 = conesearch.conesearch(
SCS_CENTER, SCS_RADIUS,
catalog_db=vos_catalog.get_remote_catalog_db(
conf.conesearch_dbname),
pedantic=self.pedantic, verbose=self.verbose)
assert tab_2.url == tab_3.url
np.testing.assert_array_equal(tab_2.array, tab_3.array)
# If this fails, it is because of dict hashing, no big deal.
if tab_2.url == tab_4.url:
np.testing.assert_array_equal(tab_2.array, tab_4.array)
else:
pytest.xfail('conesearch_simple.json used a different URL')
@pytest.mark.parametrize(('center', 'radius'),
[((SCS_RA, SCS_DEC), SCS_SR),
(SCS_CENTER, SCS_RADIUS)])
def test_search_all(self, center, radius):
all_results = conesearch.search_all(
center, radius, catalog_db=['BROKEN', self.url],
pedantic=self.pedantic, verbose=self.verbose)
assert len(all_results) == 1
tab_1 = all_results[self.url]
assert tab_1.array.size > 0
def test_async(self):
async_search = conesearch.AsyncConeSearch(
SCS_CENTER, SCS_RADIUS, pedantic=self.pedantic)
# Wait a little for the instance to set up properly
time.sleep(1)
tab = async_search.get(timeout=data.conf.remote_timeout)
try:
assert async_search.done()
except AssertionError as exc:
pytest.xfail(str(exc))
else:
assert tab.array.size > 0
def test_async_all(self):
async_search_all = conesearch.AsyncSearchAll(
SCS_CENTER, SCS_RADIUS, pedantic=self.pedantic)
# Wait a little for the instance to set up properly
time.sleep(1)
all_results = async_search_all.get(timeout=data.conf.remote_timeout*3)
try: |
assert async_search_all.done()
except AssertionError as exc:
pytest.xfail(str(exc))
else:
for tab in all_results.values():
assert tab.array.size > 0
@pytest.mark.parametrize(('center', 'radius'),
[((SCS_RA, SCS_DEC), 0.8),
(SCS_CENTER, 0.8 * u.degree)])
def test_prediction(self, center, radius):
"""Prediction tests a | re not very accurate but will have to do."""
t_1, tab_1 = conesearch.conesearch_timer(
center, radius, catalog_db=self.url,
pedantic=self.pedantic, verbose=self.verbose)
n_1 = tab_1.array.size
t_2, n_2 = conesearch.predict_search(
self.url, center, radius,
pedantic=self.pedantic, verbose=self.verbose)
assert n_2 > 0 and n_2 <= n_1 * 1.5
# Timer depends on network latency as well, so upper limit is very lax.
assert t_2 > 0 and t_2 <= t_1 * 10
def test_prediction_neg_radius(self):
with pytest.raises(ConeSearchError):
t, n = conesearch.predict_search(
self.url, SCS_CENTER, -1, pedantic=self.pedantic,
verbose=self.verbose)
def teardown_class(self):
conf.reset('conesearch_dbname')
data.conf.reset('remote_timeout')
class TestErrorResponse(object):
"""Test Cone Search error response handling.
This is defined in Section 2.3 of Simple Cone Search Version 1.03,
IVOA Recommendation, 22 February 2008.
Also see https://github.com/astropy/astropy/issues/1001
"""
def setup_class(self):
self.datadir = 'data'
self.pedantic = False
self.conesearch_errmsg = {
'conesearch_error1.xml': 'Error in input RA value: as3f',
'conesearch_error2.xml': 'Error in input RA value: as3f',
'conesearch_error3.xml': 'Invalid data type: text/html',
'conesearch_error4.xml': 'Invalid data type: text/html'}
def conesearch_compare(self, xmlfile, msg):
"""Bypassing Cone Search query and just imitating the reply,
then check if appropriate error message is caught.
"""
# conesearch_error4.xml |
peterayeni/dash | dash/orgs/models.py | Python | bsd-3-clause | 12,481 | 0.001602 | from __future__ import absolute_import, unicode_literals
from datetime import datetime
import json
import random
import pytz
from smartmin.models import SmartModel
from temba import TembaClient
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.cache import cache
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text, python_2_unicode_compatible
from dash.api import API
from dash.dash_email import send_dash_email
from dash.utils import datetime_to_ms
STATE = 1
DISTRICT = 2
# we cache boundary data for a month at a time
BOUNDARY_CACHE_TIME = getattr(settings, 'API_BOUNDARY_CACHE_TIME', 60 * 60 * 24 * 30)
BOUNDARY_CACHE_KEY = 'org:%d:boundaries'
BOUNDARY_LEVEL_1_KEY = 'geojson:%d'
BOUNDARY_LEVEL_2_KEY = 'geojson:%d:%s'
@python_2_unicode_compatible
class Org(SmartModel):
name = models.CharField(
verbose_name=_("Name"), max_length=128,
help_text=_("The name of this organization"))
logo = models.ImageField(
upload_to='logos', null=True, blank=True,
help_text=_("The logo that should be used for this organization"))
administrators = models.ManyToManyField(
User, verbose_name=_("Administrators"), related_name="org_admins",
help_text=_("The administrators in your organization"))
viewers = models.ManyToManyField(
User, verbose_name=_("Viewers"), related_name="org_viewers",
help_text=_("The viewers in your organization"))
editors = models.ManyToManyField(
User, verbose_name=_("Editors"), related_name="org_editors",
help_text=_("The editors in your organization"))
language = models.CharField(
verbose_name=_("Language"), max_length=64, null=True, blank=True,
help_text=_("The main language used by this organization"))
subdomain = models.SlugField(
verbose_name=_("Subdomain"), null=True, blank=True, max_length=255, unique=True,
error_messages=dict(unique=_("This subdomain is not available")),
help_text=_("The subdomain for this organization"))
domain = models.CharField(
verbose_name=_("Domain"), null=Tr | ue, blank=True, max_length=255, unique=True,
error_messages=dict(unique=_("This domain is not available")),
help_text=_("The custom domain for this organization"))
timezone = models.CharField(
verbose_name=_("Timezone"), max_length=64, default='UTC',
help_te | xt=_("The timezone your organization is in."))
api_token = models.CharField(
max_length=128, null=True, blank=True,
help_text=_("The API token for the RapidPro account this dashboard "
"is tied to"))
config = models.TextField(
null=True, blank=True,
help_text=_("JSON blob used to store configuration information "
"associated with this organization"))
def set_timezone(self, timezone):
self.timezone = timezone
self._tzinfo = None
def get_timezone(self):
tzinfo = getattr(self, '_tzinfo', None)
if not tzinfo:
# we need to build the pytz timezone object with a context of now
tzinfo = timezone.now().astimezone(pytz.timezone(self.timezone)).tzinfo
self._tzinfo = tzinfo
return tzinfo
    def get_config(self, name):
        """Return the value stored under *name* in the org's JSON config blob,
        or None when the key (or the blob itself) is absent."""
        # The parsed config dict is memoized on the instance (self._config)
        # so the JSON is only decoded once per object.
        config = getattr(self, '_config', None)
        if config is None:
            if not self.config:
                return None
            config = json.loads(self.config)
            self._config = config
        return config.get(name, None)
    def set_config(self, name, value, commit=True):
        """Store *name* -> *value* in the JSON config blob and refresh the
        memoized copy; persists via save() unless commit=False."""
        if not self.config:
            config = dict()
        else:
            config = json.loads(self.config)
        config[name] = value
        # Keep the serialized field and the cached dict in sync.
        self.config = json.dumps(config)
        self._config = config
        if commit:
            self.save()
def get_org_admins(self):
return self.administrators.all()
def get_org_editors(self):
return self.editors.all()
def get_org_viewers(self):
return self.viewers.all()
def get_org_users(self):
org_users = self.get_org_admins() | self.get_org_editors() | self.get_org_viewers()
return org_users.distinct()
def get_user_org_group(self, user):
if user in self.get_org_admins():
user._org_group = Group.objects.get(name="Administrators")
elif user in self.get_org_editors():
user._org_group = Group.objects.get(name="Editors")
elif user in self.get_org_viewers():
user._org_group = Group.objects.get(name="Viewers")
else:
user._org_group = None
return getattr(user, '_org_group', None)
def get_user(self):
user = self.administrators.filter(is_active=True).first()
if user:
org_user = user
org_user.set_org(self)
return org_user
def get_temba_client(self):
host = getattr(settings, 'SITE_API_HOST', None)
agent = getattr(settings, 'SITE_API_USER_AGENT', None)
if not host:
host = '%s/api/v1' % settings.API_ENDPOINT # UReport sites use this
return TembaClient(host, self.api_token, user_agent=agent)
def get_api(self):
return API(self)
def build_host_link(self, user_authenticated=False):
host_tld = getattr(settings, "HOSTNAME", 'locahost')
is_secure = getattr(settings, 'SESSION_COOKIE_SECURE', False)
prefix = 'http://'
if self.domain and is_secure and not user_authenticated:
return prefix + str(self.domain)
if is_secure:
prefix = 'https://'
if self.subdomain == '':
return prefix + host_tld
return prefix + force_text(self.subdomain) + "." + host_tld
@classmethod
def rebuild_org_boundaries_task(cls, org):
from dash.orgs.tasks import rebuild_org_boundaries
rebuild_org_boundaries.delay(org.pk)
def build_boundaries(self):
this_time = datetime.now()
temba_client = self.get_temba_client()
client_boundaries = temba_client.get_boundaries()
# we now build our cached versions of level 1 (all states) and level 2
# (all districts for each state) geojson
states = []
districts_by_state = dict()
for boundary in client_boundaries:
if boundary.level == STATE:
states.append(boundary)
elif boundary.level == DISTRICT:
osm_id = boundary.parent
if osm_id not in districts_by_state:
districts_by_state[osm_id] = []
districts = districts_by_state[osm_id]
districts.append(boundary)
# mini function to convert a list of boundary objects to geojson
def to_geojson(boundary_list):
features = [dict(type='Feature',
geometry=dict(type=b.geometry.type,
coordinates=b.geometry.coordinates),
properties=dict(name=b.name, id=b.boundary, level=b.level))
for b in boundary_list]
return dict(type='FeatureCollection', features=features)
boundaries = dict()
boundaries[BOUNDARY_LEVEL_1_KEY % self.id] = to_geojson(states)
for state_id in districts_by_state.keys():
boundaries[BOUNDARY_LEVEL_2_KEY % (self.id, state_id)] = to_geojson(
districts_by_state[state_id])
key = BOUNDARY_CACHE_KEY % self.pk
value = {'time': datetime_to_ms(this_time), 'results': boundaries}
cache.set(key, value, BOUNDARY_CACHE_TIME)
return boundaries
def get_boundaries(self):
key = BOUNDARY_CACHE_KEY % self.pk
cached_value = cache.get(key, None)
if cached_value:
return cached_value['results']
Org.rebuild_org_boundaries_task(self)
def get_country_geojson(self):
boundaries = self.get_boundaries()
i |
litui/openparliament | parliament/accounts/models.py | Python | agpl-3.0 | 3,041 | 0.004604 | from base64 import urlsafe_b64encode
import datetime
import os
from django.core import urlresolvers
from django.core.mail import send_mail
from django.db import models
from django.template import loader
from jsonfield import JSONField
class User(models.Model):
    """Account record keyed by email; login happens via emailed LoginTokens,
    so there is no password field."""
    email = models.EmailField(unique=True, db_index=True)
    email_bouncing = models.BooleanField(default=False)
    email_bounce_reason = models.TextField(blank=True)
    name = models.CharField(max_length=250, blank=True)
    created = models.DateTimeField(default=datetime.datetime.now)
    last_login = models.DateTimeField(blank=True, null=True)
    # Use the dict *callable* as the default: a literal {} is a single shared
    # object that every unsaved instance would mutate in place.
    data = JSONField(blank=True, default=dict)

    def __unicode__(self):
        return self.email

    def log_in(self, request):
        """Mark the request as authenticated for this email and stamp last_login."""
        request.authenticated_email = self.email
        # Update via queryset so only last_login is written, avoiding a full
        # save() that could clobber concurrent changes to other fields.
        self.__class__.objects.filter(id=self.id).update(last_login=datetime.datetime.now())
def _random_token():
return urlsafe_b64encode(os.urandom(15))
class TokenError(Exception):
    """Login-token failure which optionally carries the affected email address."""

    def __init__(self, message, email=None):
        Exception.__init__(self, message)
        self.email = email
class LoginToken(models.Model):
    """Single-use login link emailed to a user; valid for MAX_TOKEN_AGE."""
    token = models.CharField(max_length=40, primary_key=True,
        default=_random_token)
    email = models.EmailField()
    created = models.DateTimeField(default=datetime.datetime.now)
    requesting_ip = models.GenericIPAddressField()
    used = models.BooleanField(default=False)
    login_ip = models.GenericIPAddressField(blank=True, null=True)
    post_login_url = models.TextField(blank=True)

    # Tokens are accepted for 8 hours after being requested.
    MAX_TOKEN_AGE = datetime.timedelta(seconds=60 * 60 * 8)

    def __unicode__(self):
        return "%s for %s" % (self.token, self.email)

    @classmethod
    def generate(cls, email, requesting_ip):
        """Create a token for *email* and send the login link by email."""
        lt = cls.objects.create(email=email, requesting_ip=requesting_ip)
        login_url = urlresolvers.reverse('token_login', kwargs={'token': lt.token})
        ctx = {'login_url': login_url, 'email': email}
        t = loader.get_template("accounts/token_login.txt")
        send_mail(subject=u'Log in to openparliament.ca',
            message=t.render(ctx),
            from_email='alerts@contact.openparliament.ca',
            recipient_list=[email])
        return lt

    @classmethod
    def validate(cls, token, login_ip):
        """Look up *token*, enforce single use and expiry, and mark it used.

        Raises TokenError (with .email set when the token is known) on failure.
        """
        try:
            lt = cls.objects.get(token=token)
        except cls.DoesNotExist:
            raise TokenError("That login code couldn't be found. Try cutting and pasting it directly "
                "from your email to your browser's address bar.")
        if lt.used:
            raise TokenError("That login code has already been used. You can request another login email on this page.",
                email=lt.email)
        if (datetime.datetime.now() - lt.created) > cls.MAX_TOKEN_AGE:
            raise TokenError("That login code has expired. Please enter your email again, then click the link within a few hours.", email=lt.email)
        lt.login_ip = login_ip
        lt.used = True
        lt.save()
        return lt
|
alexras/bread | docs/source/conf.py | Python | mit | 8,030 | 0.007347 | # -*- coding: utf-8 -*-
#
# Bread documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 7 20:51:04 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bread'
copyright = u'2013, Alex Rasmussen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1.0'
# The full version, including alpha/beta/rc tags.
release = '3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebarwidth': 330
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/powdered-toast-man.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch descriptio | n file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhel | p_basename = 'Breaddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bread.tex', u'Bread Documentation',
u'Alex Rasmussen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bread', u'Bread Documentation',
[u'Alex Rasmussen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bread', u'Bread Documentation',
u'Alex Rasmussen', 'Bread', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
ganadist/adb_downloader | lib/adb.py | Python | apache-2.0 | 2,027 | 0.008387 | # coding: utf8
import sys, os
import subprocess
class AdbException(Exception):
def __init__(self, code, msg):
Exception.__init__(self, code, msg)
def __repr__(self):
code, msg = self.args
return 'AdbException: code:%s %s'%(code, msg)
class Adb:
SERIAL_NO = None
def adb(f):
def wrap(*args, **kwds):
| cmd = f(*args, **kwds)
if not cmd:
cmd = (f.__name__, ) + args
if Adb.SERIAL_NO:
cmd = (g.ADB, '-s', Adb.SERIAL_NO) + cmd
else:
cmd = (g.ADB, ) + cmd
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
proc.wait()
output = proc.stdout.read().strip().decode('utf8')
if proc.returncode != 0:
raise AdbException(proc.returncode, output)
retu | rn output
return wrap
@staticmethod
@adb
def devices(*args): pass
@staticmethod
@adb
def push(*args): pass
@staticmethod
@adb
def root(*args): pass
@staticmethod
@adb
def shell(*args): pass
@staticmethod
@adb
def forward(src, dst = None):
if dst is None:
dst = src
return ('forward', 'tcp:%d'%src, 'tcp:%d'%dst)
@staticmethod
@adb
def getprop(*args):
return ('shell', 'getprop') + args
@staticmethod
@adb
def serial(*args):
return ('get-serialno', )
@staticmethod
@adb
def wait(*args):
return ('wait-for-device', )
def select_device(serial_no = None):
devices = tuple(map(lambda x: x.split()[0], Adb.devices().split('\n')[1:]))
if not devices:
raise AdbException(-1, 'there is no connected android device')
if not serial_no:
serial_no = os.getenv('ANDROID_SERIAL', devices[0])
print('selected device is', serial_no)
if not serial_no in devices:
raise AdbException(-1, 'android device named %s is not connected'%serial_no)
return serial_no
|
tabdon/crmeasyapp | crmapp/communications/admin.py | Python | mit | 207 | 0.009662 | from django.contrib import admin
from .models import Communication
class CommunicationAdmin(admin.ModelAdmin):
list_d | isplay = ('subject', 'uui | d')
admin.site.register(Communication, CommunicationAdmin)
|
itielshwartz/BackendApi | lib/pyasn1_modules/pkcs12.py | Python | apache-2.0 | 1,204 | 0.004983 | #
# PKCS#12 syntax
#
# ASN.1 source from:
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12.asn
#
# Sample captures could be obtained with "openssl pkcs12" command
#
from pyasn1_modules.rfc2459 import *
from pyasn1_modules import rfc2251
class Attributes(univ.SetOf):
componentType = rfc2251.Attribute()
class Version(univ.Integer): pass
class CertificationRequestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype. | NamedType('attributes',
Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Signature(univ.BitString): pass
class SignatureAlgorithmIdentifier(AlgorithmIdentifier): pass
class CertificationRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
namedtype.NamedType('signatureAlgorithm', Sig | natureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
|
NeCTAR-RC/horizon | openstack_dashboard/test/integration_tests/tests/test_router.py | Python | apache-2.0 | 8,645 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
@decorators.services_required("neutron")
class TestRouters(helpers.TestCase):
ROUTER_NAME = helpers.gen_random_resource_name("router")
@property
def routers_page(self):
return self.home_pg.go_to_project_network_routerspage()
def _create_router(self):
routers_page = self.routers_page
routers_page.create_router(self.ROUTER_NAME)
self.assertTrue(
routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
def _delete_router(self):
routers_page = self.routers_page
routers_page.delete_router(self.ROUTER_NAME)
self.assertTrue(
routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(routers_page.is_router_present(self.ROUTER_NAME))
def test_router_create(self):
"""tests the router creation and deletion functionalities:
* creates a new router for public network
* verifies the router appears in the routers table as active
* deletes the newly created router
* verifies the router does not appear in the table after deletion
"""
self._create_router()
self._delete_router()
def _create_interface(self, interfaces_page):
interfaces_page.create_interface()
interface_name = interfaces_page.interfaces_names[0]
self.assertTrue(
interfaces_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
interfaces_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(interfaces_page.is_interface_present(interface_name))
self.assertTrue(interfaces_page.is_interface_status(
interface_name, 'Down'))
def _delete_interface(self, interfaces_page, interface_name):
interfaces_page.delete_interface(interface_name)
self.assertTrue(
interfaces_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
interfaces_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(interfaces_page.is_interface_present(interface_name))
@decorators.skip_because(bugs=['1792028'])
def test_router_add_delete_interface(self):
"""Tests the router interface creation and deletion functionalities:
* Follows the steps to create a new router
* Clicks on the new router name from the routers table
* Moves to the Interfaces page/tab
* Adds a new Interface for the first subnet id available
* Verifies the new interface is in the routers table by checking that
the interface is present in the table
* Deletes the newly created interface
* Verifies the interface is no longer in the interfaces table
* Switches to the routers view by clicking on the breadcrumb link
* Follows the steps to delete the router
"""
self._create_router()
routers_page = self.routers_page
router_interfaces_page = routers_page. \
go_to_interfaces_page(self.ROUTER_NAME)
self._create_interface(router_interfaces_page)
interface_name = router_interfaces_page.interfaces_names[0]
self._delete_interface(router_interfaces_page, interface_name)
router_interfaces_page.switch_to_routers_page()
self._delete_router()
@decorators.skip_because(bugs=['1792028'])
def test_router_delete_interface_by_row(self):
"""Tests the router interface creation and deletion by row action:
* Follows the steps to create a new router
* Clicks on the new router name from the routers table
* Moves to the Interfaces page/tab
* Adds a new Interface for the first subnet id available
* Verifies the new interface is in the routers table
* Deletes the newly created interface by row action
* Verifies the interface is no longer in the interfaces table
* Switches to the routers view by clicking on the breadcrumb link
* Follows the steps to delete the router
"""
self._create_router()
routers_page = self.routers_page
router_interfaces_page = routers_page. \
go_to_interfaces_page(self.ROUTER_NAME)
self._create_interface(router_interfaces_page)
interface_name = router_interfaces_page.interfaces_names[0]
router_interfaces_page.delete_interface_by_row_action(interface_name)
router_interfaces_page.switch_to_routers_page()
self._delete_router()
@decorators.skip_because(bugs=['1792028'])
def test_router_overview_data(self):
self._create_router()
routers_page = self.routers_page
router_overview_page = routers_page.\
go_to_overview_page(self.ROUTER_NAME)
self.assertTrue(router_overview_page.
is_router_name_present(self.ROUTER_NAME))
self.assertTrue(router_overview_page.is_router_status("Active"))
network_overview_page = router_overview_page.go_to_router_network()
# By default the router is created in the 'public' network so the line
# below checks that such name is present in the network
# details/overview page
self.assertTrue(network_overview_page.is_network_name_present())
self.assertTrue(network_overview_page.is_network_status("Active"))
self._delete_router()
class TestAdminRouters(helpers.AdminTestCase):
ROUTER_NAME = helpers.gen_random_resource_name("router")
@decorators.skip_because(bugs=['1792028'])
@decorators.services_required("neutron")
def test_router_create_admin(self):
"""tests the router creation and deletion functionalities:
* creates a new router for public network
* verifies the router appears in the routers table as active
* edits router name
* checks router name was updated properly
* deletes the newly created router
* verifies the router does not appear in the table after deletion
"""
routers_page = self.home_pg.go_to_project_network_routerspage()
routers_page.create_router(self.ROUTER_NAME)
self.assertTrue(
routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
self.assertTrue(rout | ers_page.is_router_active(self.ROUTER_NAME))
self.home_pg.go_to_admin_overviewpage()
admin_routers_page = self.home_pg.go_to_admin_network_routerspage()
self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
new_name = "edited_" + self.ROUTER_NAME
admin_routers_page.edit_router(self.ROUTER_NAME, new_name=new_name)
| self.assertTrue(
admin_routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
admin_routers_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(
|
whardier/holder.graphics | passenger_wsgi.py | Python | mit | 3,866 | 0.006467 | import os
import tempfile
import re
import bottle
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
formats = {
'png': {
'format': 'PNG',
'mimetype': 'image/png'
},
'jpg': {
'format': 'JPEG',
'mimetype': 'image/jpeg'
},
'gif': {
'format': 'GIF',
'mimetype': 'image/gif'
},
}
guides = {
'qqvga': [160, 120],
'hqvga': [240, 160],
'qvga': [320, 240],
'wqvga': [400, 240],
'hvga': [480, 320],
'vga': [640, 480],
'wvga': [768, 480],
'fwvga': [854, 480],
'svga': [800, 600],
'dvga': [960, 640],
'wsvga': [1024, 600],
'xga': [1024, 768],
'wxga': [1366, 768],
'fwxga': [1366, 768],
'xga+': [1152, 864],
'wxga+': [1440, 900],
'sxga': [1280, 1024],
'sxga+': [1400, 1050],
'wsxga+': [1680, 1050],
'uxga': [1600, 1200],
'wuxga': [1920, 1200],
'1080': [1920, 1080],
'720': [1280, 720],
}
@bottle.route('/')
def index():
return "it works!"
@bottle.route('/<width>/<height>')
@bottle.route('/<width>/<height>/')
def image(width=320, height=240):
width=int(width)
height=int(height)
format = bottle.request.query.get('f', 'png').lower()
bg_color = bottle.request.query.get('bgcolor', 'aaaaaa').lower()
fg_color = bottle.request.query.get('fgcolor', 'ffffff').lower()
text = bottle.request.query.get('t', str(width) + 'x' + str(height)).lower()
text_size = int(bottle.request.query.get('ts', 60))
guide_list = [[int(y) for y in x.lower().split(',')] if ',' in x else x.lower() for x in bottle.request.query.getall('g')]
guide_color = bottle.request.query.get('gcolor', fg_color).lower()
try:
if int(bg_color, 16):
bg_color = '#' + bg_color
except:
pass
try:
if int(fg_color, 16):
fg_color = '#' + | fg_color
except:
pass
try:
if int(guide_color, 16):
guide_color = '#' + guide_color
except:
pass
if not format in | formats:
return bottle.HTTPError(code=404, output="That format is not supported")
image_file = tempfile.NamedTemporaryFile(suffix='.' + format, dir='/home/spencersr/holder.graphics/tmp/images/', delete=True)
image = PIL.Image.new('RGB', size=(width, height), color=bg_color)
draw = PIL.ImageDraw.Draw(image)
print(text_size)
font = PIL.ImageFont.truetype("/usr/share/fonts/truetype/ttf-bitstream-vera/VeraBd.ttf", text_size)
guide_font = PIL.ImageFont.truetype("/usr/share/fonts/truetype/ttf-bitstream-vera/VeraBd.ttf", int(text_size/4.0))
for guide in guide_list:
guide_width, guide_height = guides.get(str(guide), guide)
guide_offset_width = (width - guide_width) / 2.0
guide_offset_height = (height - guide_height) / 2.0
draw.rectangle(((guide_offset_width, guide_offset_height), (guide_offset_width + guide_width, guide_offset_height + guide_height)), fill=None, outline=guide_color)
draw.text((guide_offset_width + 4, guide_offset_height + 4), str(guide_width) + 'x' + str(guide_height), fill=guide_color, font=guide_font)
# Draw Center Text
text_width, text_height = font.getsize(text)
draw.text(((width - text_width) / 2.0, (height - text_height) / 2.0), text, fill=fg_color, font=font)
image.save(image_file.file, formats[format]['format'])
bottle.response.image_file = image_file
return bottle.static_file(os.path.basename(image_file.name), root='/home/spencersr/holder.graphics/tmp/images/', mimetype=formats[format]['mimetype'])
def application(environ, start_response):
app = bottle.default_app()
return app.wsgi(environ, start_response)
if __name__ == "__main__":
bottle.debug(True)
app = bottle.default_app()
bottle.run(app, host='0.0.0.0', port='8685', reloader=True)
|
p2pu/learning-circles | studygroups/migrations/0109_auto_20190308_1617.py | Python | mit | 1,852 | 0.00054 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-03-08 16:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0108_auto_20181204_0729'),
]
operations = [
migrations.AddField(
model_name='course',
name='discourse_topic_url',
field=models.URLField(blank=True),
),
migrations.AddField(
model_name='course',
name='overall_rating',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=3, null=True),
),
migrations.AddField(
model_name='course',
name='platform',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='course',
name='rating_step_counts',
field=models.TextField(default='{}'),
),
| migrations.AddField(
model_name='course',
name='tagdorsement_counts',
field=models.TextField(default='{}'),
),
migrations.AddField(
model_name='course',
name='tagdorsements',
field=models.CharF | ield(blank=True, max_length=256),
),
migrations.AddField(
model_name='course',
name='total_ratings',
field=models.SmallIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='course',
name='total_reviewers',
field=models.SmallIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='application',
name='goal_met',
field=models.SmallIntegerField(blank=True, null=True),
),
]
|
bcongdon/Data-Science-Projects | election_tweets/tweet_tokenizer.py | Python | gpl-3.0 | 1,301 | 0.01691 | import re,json
import numpy as | np
import scipy.stats as sp
emoticons_str = r"""
(?:
[ | :=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
return tokens_re.findall(s)
def preprocess(s, lowercase=False):
tokens = tokenize(s)
if lowercase:
tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
return tokens
if __name__ == "__main__":
total_tokens = list()
with open('output.json','r') as f:
for line in f:
tweet = json.loads(line)
tokens = preprocess(tweet['text'])
total_tokens += tokens
nptok = np.array(total_tokens)
freq = sp.itemfreq(nptok)
print freq
|
abigailStev/stingray | stingray/pulse/pulsar.py | Python | mit | 25,604 | 0.000273 | """
Basic pulsar-related functions and statistics.
"""
import functools
from collections.abc import Iterable
import warnings
from scipy.optimize import minimize, basinhopping
import numpy as np
import matplotlib.pyplot as plt
from .fftfit import fftfit as taylor_fftfit
from ..utils import simon, jit
from . import HAS_PINT, get_model, toa
__all__ = ['pulse_phase', 'phase_exposure', 'fold_events', 'profile_stat',
'z_n', 'fftfit', 'get_TOA', 'z_n_binned_events', 'z_n_gauss',
'z_n_events', 'htest', 'z_n_binned_events_all', 'z_n_gauss_all',
'z_n_events_all', 'get_orbital_correction_from_ephemeris_file']
def _default_value_if_no_key(dictionary, key, default):
try:
return dictionary[key]
except:
return default
def p_to_f(*period_derivatives):
"""Convert periods into frequencies, and vice versa.
For now, limited to third derivative. Raises when a
fourth derivative is passed.
Parameters
----------
p, pdot, pddot, ... : floats
period derivatives, starting from zeroth and in
increasing order
Examples
--------
>>> p_to_f() == []
True
>>> np.allclose(p_to_f(1), [1])
True
>>> np.allclose(p_to_f(1, 2), [1, -2])
True
>>> np.allclose(p_to_f(1, 2, 3), [1, -2, 5])
True
>>> np.allclose(p_to_f(1, 2, 3, 4), [1, -2, 5, -16])
True
>>> np.allclose(p_to_f(1, 2, 3, 4, 32, 22), [1, -2, 5, -16, 0, 0])
True
"""
nder = len(period_derivatives)
if nder == 0:
return []
fder = np.zeros_like(period_derivatives)
p = period_derivatives[0]
fder[0] = 1 / p
if nder > 1:
pd = period_derivatives[1]
fder[1] = -1 / p**2 * pd
if nder > 2:
pdd = period_derivatives[2]
fder[2] = 2 / p**3 * pd**2 - 1 / p**2 * pdd
if nder > 3:
pddd = period_derivatives[3]
fder[3] = - 6 / p**4 * pd ** 3 + 6 / p**3 * pd * pdd - \
1 / p**2 * pddd
if nder > 4:
warnings.warn("Derivatives above third are not supported")
return fder
def pulse_phase(times, *frequency_derivatives, **opts):
"""
Calculate pulse phase from the frequency and its derivatives.
Parameters
----------
times : array of floats
The times at which the phase is calculated
*frequency_derivatives: floats
List of derivatives in increasing order, starting from zero.
Other Parameters
----------------
ph0 : float
The starting phase
to_1 : bool, default True
Only return the fractional part of the phase, normalized from 0 to 1
Returns
-------
phases : array of floats
The absolute pulse phase
"""
ph0 = _default_value_if_no_key(opts, "ph0", 0)
to_1 = _default_value_if_no_key(opts, "to_1", True)
ph = ph0
for i_f, f in enumerate(frequency_derivatives):
ph += 1 / np.math.factorial(i_f + 1) * times**(i_f + 1) * f
if to_1:
ph -= np.floor(ph)
return ph
def phase_exposure(start_time, stop_time, period, nbin=16, gtis=None):
"""Calculate the exposure on each phase of a pulse profile.
Parameters
----------
start_time, stop_time : float
Starting and stopping time (or phase if ``period==1``)
period : float
The pulse period (if 1, equivalent to phases)
Other parameters
----------------
nbin : int, optional, default 16
The number of bins in the profile
gtis : [[gti00, gti01], [gti10, gti11], ...], optional, default None
Good Time Intervals
Returns
-------
expo : array of floats
The normalized exposure of each bin in the pulse profile (1 is the
highest exposure, 0 the lowest)
"""
if gtis is None:
gtis = np.array([[start_time, stop_time]])
# Use precise floating points -------------
start_time = np.longdouble(start_time)
stop_time = np.longdouble(stop_time)
period = np.longdouble(period)
gtis = np.array(gtis, dtype=np.longdouble)
# -----------------------------------------
expo = np.zeros(nbin)
phs = np.linspace(0, 1, nbin + 1)
phs = np.array(list(zip(phs[0:-1], phs[1:])))
# Discard gtis outside [start, stop]
good = np.logical_and(gtis[:, 0] < stop_time, gtis[:, 1] > start_time)
gtis = gtis[good]
for g in gtis:
g0 = g[0]
g1 = g[1]
if g0 < start_time:
# If the start of the fold is inside a gti, start from there
g0 = start_time
if g1 > stop_time:
# If the end of the fold is inside a gti, end there
g1 = stop_time
length = g1 - g0
# How many periods inside this length?
nraw = length / period
# How many integer periods?
nper = nraw.astype(int)
# First raw exposure: the number of periods
expo += nper / nbin
# FRACTIONAL PART =================
# What remains is additiona | l exposure for part of the profile.
start_phase = np.fmod(g0 / period, 1)
end_phase = nraw - nper + start_phase
limits = [[start_phase, end_phase]]
# start_phase is always < 1. end_phase not always. In this case...
if end_phase > 1:
limits = [[0, end_phase - 1], [start_phase, 1]]
for l in limits:
l0 = l[0]
l1 = l[1]
# Discards bins untouched by these limits
goodbins = np.logical_and(phs[:, 0] <= l | 1, phs[:, 1] >= l0)
idxs = np.arange(len(phs), dtype=int)[goodbins]
for i in idxs:
start = np.max([phs[i, 0], l0])
stop = np.min([phs[i, 1], l1])
w = stop - start
expo[i] += w
return expo / np.max(expo)
def fold_events(times, *frequency_derivatives, **opts):
'''Epoch folding with exposure correction.
Parameters
----------
times : array of floats
f, fdot, fddot... : float
The frequency and any number of derivatives.
Other Parameters
----------------
nbin : int, optional, default 16
The number of bins in the pulse profile
weights : float or array of floats, optional
The weights of the data. It can either be specified as a single value
for all points, or an array with the same length as ``time``
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...], optional
Good time intervals
ref_time : float, optional, default 0
Reference time for the timing solution
expocorr : bool, default False
Correct each bin for exposure (use when the period of the pulsar is
comparable to that of GTIs)
Returns
-------
phase_bins : array of floats
The phases corresponding to the pulse profile
profile : array of floats
The pulse profile
profile_err : array of floats
The uncertainties on the pulse profile
'''
nbin = _default_value_if_no_key(opts, "nbin", 16)
weights = _default_value_if_no_key(opts, "weights", 1)
gtis = _default_value_if_no_key(opts, "gtis",
np.array([[times[0], times[-1]]]))
ref_time = _default_value_if_no_key(opts, "ref_time", 0)
expocorr = _default_value_if_no_key(opts, "expocorr", False)
if not isinstance(weights, Iterable):
weights *= np.ones(len(times))
gtis = gtis - ref_time
times = times - ref_time
# This dt has not the same meaning as in the Lightcurve case.
# it's just to define stop_time as a meaningful value after
# the last event.
dt = np.abs(times[1] - times[0])
start_time = times[0]
stop_time = times[-1] + dt
phases = pulse_phase(times, *frequency_derivatives, to_1=True)
gti_phases = pulse_phase(gtis, *frequency_derivatives, to_1=False)
start_phase, stop_phase = pulse_phase(np.array([start_time, stop_time]),
*frequency_derivatives,
to_1=False)
raw_profile, bins = np.histogram(phases,
bins=np.linspace(0, 1, nbin + 1),
weights=weights)
|
notthatbreezy/phl-play | django/phlplay/wsgi.py | Python | apache-2.0 | 452 | 0.002212 | """
WSGI config for gis_photo p | roject.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling, MediaCling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "phlplay.settings")
application = Cling(MediaCling(get_wsgi_applica | tion())) |
hypriot/compose | tests/unit/project_test.py | Python | apache-2.0 | 8,045 | 0.000994 | from __future__ import unicode_literals
from .. import unittest
from compose.service import Service
from compose.project import Project
from compose.container import Container
from compose import config
import mock
import docker
class ProjectTest(unittest.TestCase):
    """Unit tests for compose.project.Project: construction from dicts/config,
    dependency ordering, service lookup, and volumes_from/net resolution."""
    def test_from_dict(self):
        project = Project.from_dicts('composetest', [
            {
                'name': 'web',
                'image': 'busybox:latest'
            },
            {
                'name': 'db',
                'image': 'busybox:latest'
            },
        ], None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
    def test_from_dict_sorts_in_dependency_order(self):
        # web links to db, db mounts volumes from volume -> expected order is
        # volume, db, web (dependencies first).
        project = Project.from_dicts('composetest', [
            {
                'name': 'web',
                'image': 'busybox:latest',
                'links': ['db'],
            },
            {
                'name': 'db',
                'image': 'busybox:latest',
                'volumes_from': ['volume']
            },
            {
                'name': 'volume',
                'image': 'busybox:latest',
                'volumes': ['/tmp'],
            }
        ], None)
        self.assertEqual(project.services[0].name, 'volume')
        self.assertEqual(project.services[1].name, 'db')
        self.assertEqual(project.services[2].name, 'web')
    def test_from_config(self):
        dicts = config.from_dictionary({
            'web': {
                'image': 'busybox:latest',
            },
            'db': {
                'image': 'busybox:latest',
            },
        })
        project = Project.from_dicts('composetest', dicts, None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
    def test_get_service(self):
        web = Service(
            project='composetest',
            name='web',
            client=None,
            image="busybox:latest",
        )
        project = Project('test', [web], None)
        self.assertEqual(project.get_service('web'), web)
    def test_get_services_returns_all_services_without_args(self):
        web = Service(
            project='composetest',
            name='web',
        )
        console = Service(
            project='composetest',
            name='console',
        )
        project = Project('test', [web, console], None)
        self.assertEqual(project.get_services(), [web, console])
    def test_get_services_returns_listed_services_with_args(self):
        web = Service(
            project='composetest',
            name='web',
        )
        console = Service(
            project='composetest',
            name='console',
        )
        project = Project('test', [web, console], None)
        self.assertEqual(project.get_services(['console']), [console])
    def test_get_services_with_include_links(self):
        db = Service(
            project='composetest',
            name='db',
        )
        web = Service(
            project='composetest',
            name='web',
            links=[(db, 'database')]
        )
        cache = Service(
            project='composetest',
            name='cache'
        )
        console = Service(
            project='composetest',
            name='console',
            links=[(web, 'web')]
        )
        project = Project('test', [web, db, cache, console], None)
        # Transitive deps of console (web -> db) are included; cache is not.
        self.assertEqual(
            project.get_services(['console'], include_deps=True),
            [db, web, console]
        )
    def test_get_services_removes_duplicates_following_links(self):
        db = Service(
            project='composetest',
            name='db',
        )
        web = Service(
            project='composetest',
            name='web',
            links=[(db, 'database')]
        )
        project = Project('test', [web, db], None)
        self.assertEqual(
            project.get_services(['web', 'db'], include_deps=True),
            [db, web]
        )
    def test_use_volumes_from_container(self):
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        mock_client = mock.create_autospec(docker.Client)
        mock_client.inspect_container.return_value = container_dict
        project = Project.from_dicts('test', [
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['aaa']
            }
        ], mock_client)
        self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id])
    def test_use_volumes_from_service_no_container(self):
        container_name = 'test_vol_1'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": 'busybox:latest'
            }
        ]
        project = Project.from_dicts('test', [
            {
                'name': 'vol',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['vol']
            }
        ], mock_client)
        self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name])
    @mock.patch.object(Service, 'containers')
    def test_use_volumes_from_service_container(self, mock_return):
        container_ids = ['aabbccddee', '12345']
        mock_return.return_value = [
            mock.Mock(id=container_id, spec=Container)
            for container_id in container_ids]
        project = Project.from_dicts('test', [
            {
                'name': 'vol',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['vol']
            }
        ], None)
        self.assertEqual(project.get_service('test')._get_volumes_from(), container_ids)
    def test_use_net_from_container(self):
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        mock_client = mock.create_autospec(docker.Client)
        mock_client.inspect_container.return_value = container_dict
        project = Project.from_dicts('test', [
            {
                'name': 'test',
                'image': 'busybox:latest',
                'net': 'container:aaa'
            }
        ], mock_client)
        service = project.get_service('test')
        self.assertEqual(service._get_net(), 'container:' + container_id)
    def test_use_net_from_service(self):
        container_name = 'test_aaa_1'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": 'busybox:latest'
            }
        ]
        project = Project.from_dicts('test', [
            {
                'name': 'aaa',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'net': 'container:aaa'
            }
        ], mock_client)
        service = project.get_service('test')
        self.assertEqual(service._get_net(), 'container:' + container_name)
|
ricsatjr/mplstereonet | mplstereonet/contouring.py | Python | mit | 9,471 | 0.002428 | import numpy as np
from . import stereonet_math
def _count_points(lons, lats, func, sigma, gridsize=(100,100), weights=None):
    """This function actually calculates the point density of the input ("lons"
    and "lats") points at a series of counter stations. Creates "gridsize"
    regular grid of counter stations in lat-long space, calculates the distance
    to all input points at each counter station, and then calculates the
    density using "func". Each input point is weighted by the corresponding
    item of "weights". The weights are normalized to 1 before calculation."""
    lons = np.atleast_1d(np.squeeze(lons))
    lats = np.atleast_1d(np.squeeze(lats))
    if weights in (None, False):
        weights = 1
    # Normalize the weights. Use the builtin ``float`` dtype: the ``np.float``
    # alias used here previously was deprecated in NumPy 1.20 and removed in
    # NumPy 1.24, so it now raises AttributeError.
    weights = np.asarray(weights, dtype=float)
    weights /= weights.mean()
    # Generate a regular grid of "counters" to measure on...
    bound = np.pi / 2.0
    nrows, ncols = gridsize
    xmin, xmax, ymin, ymax = -bound, bound, -bound, bound
    lon, lat = np.mgrid[xmin : xmax : ncols * 1j, ymin : ymax : nrows * 1j]
    xyz_counters = stereonet_math.sph2cart(lon.ravel(), lat.ravel())
    xyz_counters = np.vstack(xyz_counters).T
    xyz_points = stereonet_math.sph2cart(lons, lats)
    xyz_points = np.vstack(xyz_points).T
    # Basically, we can't model this as a convolution as we're not in cartesian
    # space, so we have to iterate through and call the kernel function at
    # each "counter".
    totals = np.zeros(xyz_counters.shape[0], dtype=float)
    for i, xyz in enumerate(xyz_counters):
        # |cos| of angular distance from this counter to every input point.
        cos_dist = np.abs(np.dot(xyz, xyz_points.T))
        density, scale = func(cos_dist, sigma)
        density *= weights
        totals[i] = (density.sum() - 0.5) / scale
    # Traditionally, the negative values (while valid, as they represent areas
    # with less than expected point-density) are not returned.
    totals[totals < 0] = 0
    counter_lon, counter_lat = stereonet_math.cart2sph(*xyz_counters.T)
    for item in [counter_lon, counter_lat, totals]:
        item.shape = gridsize
    return counter_lon, counter_lat, totals
def density_grid(*args, **kwargs):
    """
    Estimates point density of the given linear orientation measurements
    (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
    based on the `measurement` keyword argument.). Returns a regular (in
    lat-long space) grid of density estimates over a hemispherical surface.
    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        By default, this will be expected to be ``strike`` & ``dip``, both
        array-like sequences representing poles to planes.  (Rake measurements
        require three parameters, thus the variable number of arguments.) The
        ``measurement`` kwarg controls how these arguments are interpreted.
    measurement : string, optional
        Controls how the input arguments are interpreted. Defaults to
        ``"poles"``.  May be one of the following:
            ``"poles"`` : strikes, dips
                Arguments are assumed to be sequences of strikes and dips of
                planes. Poles to these planes are used for contouring.
            ``"lines"`` : plunges, bearings
                Arguments are assumed to be sequences of plunges and bearings
                of linear features.
            ``"rakes"`` : strikes, dips, rakes
                Arguments are assumed to be sequences of strikes, dips, and
                rakes along the plane.
            ``"radians"`` : lon, lat
                Arguments are assumed to be "raw" longitudes and latitudes in
                the stereonet's underlying coordinate system.
    method : string, optional
        The method of density estimation to use. Defaults to
        ``"exponential_kamb"``. May be one of the following:
        ``"exponential_kamb"`` : Kamb with exponential smoothing
            A modified Kamb method using exponential smoothing [1]_. Units are
            in numbers of standard deviations by which the density estimate
            differs from uniform.
        ``"linear_kamb"`` : Kamb with linear smoothing
            A modified Kamb method using linear smoothing [1]_.  Units are in
            numbers of standard deviations by which the density estimate
            differs from uniform.
        ``"kamb"`` : Kamb with no smoothing
            Kamb's method [2]_ with no smoothing. Units are in numbers of
            standard deviations by which the density estimate differs from
            uniform.
        ``"schmidt"`` : 1% counts
            The traditional "Schmidt" (a.k.a. 1%) method. Counts points within
            a counting circle comprising 1% of the total area of the
            hemisphere. Does not take into account sample size.  Units are in
            points per 1% area.
    sigma : int or float, optional
        The number of standard deviations defining the expected number of
        standard deviations by which a random sample from a uniform
        distribution of points would be expected to vary from being evenly
        distributed across the hemisphere.  This controls the size of the
        counting circle, and therefore the degree of smoothing.  Higher sigmas
        will lead to more smoothing of the resulting density distribution. This
        parameter only applies to Kamb-based methods.  Defaults to 3.
    gridsize : int or 2-item tuple of ints, optional
        The size of the grid that the density is estimated on. If a single int
        is given, it is interpreted as an NxN grid. If a tuple of ints is given
        it is interpreted as (nrows, ncols).  Defaults to 100.
    weights : array-like, optional
        The relative weight to be applied to each input measurement. The array
        will be normalized to sum to 1, so absolute value of the weights do not
        affect the result.  Defaults to None.
    Returns
    -------
    xi, yi, zi : 2D arrays
        The longitude, latitude and density values of the regularly gridded
        density estimates. Longitude and latitude are in radians.
    See Also
    ---------
    mplstereonet.StereonetAxes.density_contourf
    mplstereonet.StereonetAxes.density_contour
    References
    ----------
    .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
       Orientation Data Using a Modified Kamb Method. Computers &
       Geosciences, Vol. 21, No. 1, pp. 31--49.
    .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
       Washington, in Relation to Theory and Experiment. Journal of
       Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
    """
    # Identity transform for measurement="radians": args are already lon/lat.
    def do_nothing(x, y):
        return x, y
    measurement = kwargs.get('measurement', 'poles')
    gridsize = kwargs.get('gridsize', 100)
    weights = kwargs.get('weights', None)
    # A scalar gridsize means an NxN grid; a tuple falls through unchanged.
    try:
        gridsize = int(gridsize)
        gridsize = (gridsize, gridsize)
    except TypeError:
        pass
    # First dispatch: convert the raw measurements to stereonet lon/lat.
    func = {'poles':stereonet_math.pole,
            'lines':stereonet_math.line,
            'rakes':stereonet_math.rake,
            'radians':do_nothing}[measurement]
    lon, lat = func(*args)
    method = kwargs.get('method', 'exponential_kamb')
    sigma = kwargs.get('sigma', 3)
    # Second dispatch: pick the counting kernel.
    # NOTE(review): 'square_kamb' is accepted here but is not documented in
    # the docstring above -- confirm whether it is part of the public API.
    func = {'linear_kamb':_linear_inverse_kamb,
            'square_kamb':_square_inverse_kamb,
            'schmidt':_schmidt_count,
            'kamb':_kamb_count,
            'exponential_kamb':_exponential_kamb,
            }[method]
    lon, lat, z = _count_points(lon, lat, func, sigma, gridsize, weights)
    return lon, lat, z
def _kamb_radius(n, sigma):
"""Radius of kernel for Kamb-style smoothing."""
a = sigma**2 / (float(n) + sigma**2)
return (1 - a)
def _kamb_units(n, radius):
"""Normalization function for Kamb-style counting."""
return np.sqrt(n * radius * (1 - radius))
# All of the following kernel functions return an _unsummed_ distribution and
# a normalization factor
def _exponential_kamb(cos_dist, sigma=3):
"""Kernel function from Vollmer for exponential smoothing."""
n = float(cos_dist.size)
f = 2 * (1.0 + n / sigma**2)
count = np.exp(f * (cos_dist - 1))
|
dps/simplescheduler | tests/test_scheduler.py | Python | bsd-2-clause | 2,897 | 0.041767 | # -*- coding: utf-8 -*-
from simplescheduler import Scheduler, Job
from datetime import timedelta
import unittest
class FakeRedis(object):
    """Minimal in-memory stand-in for a Redis client.

    Implements only the commands the scheduler uses (string get/set, set add,
    sorted-set add/range/remove-range, expire) and records every call as a
    human-readable string in a ledger so tests can assert on the command
    sequence.
    """
    def __init__(self):
        self._r = {}        # key -> value / set / list of (score, member)
        self._ledger = []   # chronological log of every command issued
    def sadd(self, key, member):
        """Add ``member`` to the set stored at ``key``."""
        self._ledger.append('SADD %s:%s' % (key, member))
        # ``dict.has_key`` was removed in Python 3; ``in`` works everywhere.
        if key not in self._r:
            self._r[key] = set()
        self._r[key].add(member)
    def get(self, key):
        """Return the value stored at ``key`` (None when absent)."""
        value = self._r.get(key)
        self._ledger.append('GET %s (%s)' % (key, value))
        return value
    def set(self, key, value):
        """Store ``value`` at ``key``."""
        self._ledger.append('SET %s=%s' % (key, value))
        self._r[key] = value
    def zadd(self, key, weight, value):
        """Append (weight, value) to the sorted set stored at ``key``."""
        self._ledger.append('ZADD %s %s %s' % (key, value, weight))
        if key not in self._r:
            self._r[key] = []
        self._r[key].append((weight, value))
    def zrangebyscore(self, key, start, end):
        """Return members whose score is within [start, end], score-ordered."""
        self._ledger.append('ZRANGEBYSCORE %s %d %d' % (key, start, end))
        ordered = sorted(self._r[key], key=lambda item: item[0])
        return [member for score, member in ordered if start <= score <= end]
    def zremrangebyscore(self, key, start, end):
        """Remove members with score in [start, end]; return removal count."""
        self._ledger.append('ZREMRANGEBYSCORE %s %d %d' % (key, start, end))
        ordered = sorted(self._r[key], key=lambda item: item[0])
        doomed = [item for item in ordered if start <= item[0] <= end]
        for item in doomed:
            self._r[key].remove(item)
        return len(doomed)
    def expire(self, key, seconds):
        """Record an EXPIRE call; expiry itself is not simulated."""
        self._ledger.append('EXPIRE %s %d' % (key, seconds))
    def clear_ledger(self):
        """Discard the recorded command log."""
        self._ledger = []
    def get_ledger(self):
        """Return the recorded command log."""
        return self._ledger
# Shared scratch dict: each job function below records that it ran by setting
# a key here, and SchedulerUnitTests asserts on those keys.
job_data = {}
def job1():
    # Scheduler test payload: mark job1 as executed.
    print 'job1'
    job_data['job1'] = True
def job2():
    # Scheduler test payload: mark job2 as executed.
    print 'job2'
    job_data['job2'] = True
def job4():
    # Scheduler test payload: mark job4 as executed.
    # (Defined before job3 in the file; definition order does not matter.)
    print 'job4'
    job_data['job4'] = True
def job3():
    # Scheduler test payload: mark job3 as executed.
    print 'job3'
    job_data['job3'] = True
class SchedulerUnitTests(unittest.TestCase):
    """End-to-end test of Scheduler against FakeRedis with a manual clock."""
    def setUp(self):
        self.fake_redis = FakeRedis()
        # Manual clock: tests advance ``self.secs`` to simulate time passing.
        self.secs = 0
        self.clock_source = lambda : self.secs
        self.sleeper = lambda x : True
    def tearDown(self):
        pass
    def testRunScheduler(self):
        """Jobs fire once their scheduled time is reached, and not before."""
        s = Scheduler(custom_redis=self.fake_redis,
                      clock_source=self.clock_source,
                      sleeper=self.sleeper)
        job = Job('tests.test_scheduler.job1')
        s.schedule_now(job)
        self.secs = 1
        j2 = Job('tests.test_scheduler.job2')
        s.schedule_now(j2)
        j3 = Job('tests.test_scheduler.job3')
        s.schedule_in(j3, timedelta(seconds=5))
        j4 = Job('tests.test_scheduler.job4')
        s.schedule(j4, 60 * 1e6)
        self.secs = 2
        s._run(once=True)
        assert(job_data['job1'] == True)
        assert(job_data['job2'] == True)
        # ``dict.has_key`` was removed in Python 3; ``in`` works everywhere.
        assert('job3' not in job_data)
        self.secs = 10
        s._run(once=True)
        assert(job_data['job3'] == True)
        assert('job4' not in job_data)
        self.secs = 120
        s._run(once=True)
        assert(job_data['job4'] == True)
|
dcorbacho/libcloud | libcloud/test/compute/test_digitalocean_v2.py | Python | apache-2.0 | 11,147 | 0.000179 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json # NOQA
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.compute.base import NodeImage
from libcloud.compute.drivers.digitalocean import DigitalOceanNodeDriver
from libcloud.test import LibcloudTestCase, MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIGITALOCEAN_v1_PARAMS
from libcloud.test.secrets import DIGITALOCEAN_v2_PARAMS
# class DigitalOceanTests(unittest.TestCase, TestCaseMixin):
class DigitalOcean_v2_Tests(LibcloudTestCase):
    """Tests for the DigitalOcean v2 driver against mocked HTTP responses."""
    def setUp(self):
        DigitalOceanNodeDriver.connectionCls.conn_classes = \
            (None, DigitalOceanMockHttp)
        DigitalOceanMockHttp.type = None
        self.driver = DigitalOceanNodeDriver(*DIGITALOCEAN_v2_PARAMS)
    def test_v2_uses_v1_key(self):
        # Passing v1-style credentials to the v2 driver must be rejected.
        self.assertRaises(InvalidCredsError, DigitalOceanNodeDriver,
                          *DIGITALOCEAN_v1_PARAMS, api_version='v2')
    def test_authentication(self):
        DigitalOceanMockHttp.type = 'UNAUTHORIZED'
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)
    def test_list_images_success(self):
        images = self.driver.list_images()
        self.assertTrue(len(images) >= 1)
        image = images[0]
        self.assertTrue(image.id is not None)
        self.assertTrue(image.name is not None)
    def test_list_sizes_success(self):
        sizes = self.driver.list_sizes()
        self.assertTrue(len(sizes) >= 1)
        size = sizes[0]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, '512mb')
        self.assertEqual(size.ram, 512)
        size = sizes[1]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, '1gb')
        self.assertEqual(size.ram, 1024)
    def test_list_locations_success(self):
        locations = self.driver.list_locations()
        self.assertTrue(len(locations) >= 1)
        location = locations[0]
        self.assertEqual(location.id, 'nyc1')
        self.assertEqual(location.name, 'New York 1')
    def test_list_nodes_success(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].name, 'example.com')
        self.assertEqual(nodes[0].public_ips, ['104.236.32.182'])
        self.assertEqual(nodes[0].extra['image']['id'], 6918990)
        self.assertEqual(nodes[0].extra['size_slug'], '512mb')
    def test_create_node_invalid_size(self):
        image = NodeImage(id='invalid', name=None, driver=self.driver)
        size = self.driver.list_sizes()[0]
        location = self.driver.list_locations()[0]
        DigitalOceanMockHttp.type = 'INVALID_IMAGE'
        expected_msg = \
            r'You specified an invalid image for Droplet creation. \(code: 404\)'
        self.assertRaisesRegexp(Exception, expected_msg,
                                self.driver.create_node,
                                name='test', size=size, image=image,
                                location=location)
    def test_reboot_node_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'REBOOT'
        result = self.driver.reboot_node(node)
        self.assertTrue(result)
    def test_create_image_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'SNAPSHOT'
        result = self.driver.create_image(node, 'My snapshot')
        self.assertTrue(result)
    def test_get_image_success(self):
        image = self.driver.get_image(12345)
        self.assertEqual(image.name, 'My snapshot')
        self.assertEqual(image.id, '12345')
        self.assertEqual(image.extra['distribution'], 'Ubuntu')
    def test_delete_image_success(self):
        image = self.driver.get_image(12345)
        DigitalOceanMockHttp.type = 'DESTROY'
        result = self.driver.delete_image(image)
        self.assertTrue(result)
    def test_ex_power_on_node_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'POWERON'
        result = self.driver.ex_power_on_node(node)
        self.assertTrue(result)
    def test_ex_shutdown_node_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'SHUTDOWN'
        result = self.driver.ex_shutdown_node(node)
        self.assertTrue(result)
    def test_destroy_node_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'DESTROY'
        result = self.driver.destroy_node(node)
        self.assertTrue(result)
    def test_ex_rename_node_success(self):
        node = self.driver.list_nodes()[0]
        DigitalOceanMockHttp.type = 'RENAME'
        result = self.driver.ex_rename_node(node, 'fedora helios')
        self.assertTrue(result)
    def test_list_key_pairs(self):
        keys = self.driver.list_key_pairs()
        self.assertEqual(len(keys), 1)
        self.assertEqual(keys[0].extra['id'], 7717)
        self.assertEqual(keys[0].name, 'test1')
        self.assertEqual(keys[0].public_key,
                         "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDGk5 example")
    def test_create_key_pair(self):
        DigitalOceanMockHttp.type = 'CREATE'
        key = self.driver.create_key_pair(
            name="test1",
            public_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQsxRiUKn example"
        )
        self.assertEqual(key.name, "test1")
        self.assertEqual(key.fingerprint,
                         "f5:d1:78:ed:28:72:5f:e1:ac:94:fd:1f:e0:a3:48:6d")
    def test_delete_key_pair(self):
        key = self.driver.list_key_pairs()[0]
        result = self.driver.delete_key_pair(key)
        self.assertTrue(result)
    def test__paginated_request_single_page(self):
        nodes = self.driver._paginated_request('/v2/droplets', 'droplets')
        self.assertEqual(nodes[0]['name'], 'example.com')
        self.assertEqual(nodes[0]['image']['id'], 6918990)
        self.assertEqual(nodes[0]['size_slug'], '512mb')
    def test__paginated_request_two_pages(self):
        DigitalOceanMockHttp.type = 'PAGE_ONE'
        nodes = self.driver._paginated_request('/v2/droplets', 'droplets')
        self.assertEqual(len(nodes), 2)
class DigitalOceanMockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('digitalocean_v2')
def _v2_regions(self, method, url, body, headers):
body = self.fixtures.load('list_locations.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v2_images(self, method, url, body, headers):
body = self.fixtures.load('list_images.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v2_sizes(self, method, url, body, headers):
body = self.fixtures.load('list_sizes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v2_droplets(self, method, url, body, headers):
body = self.fixtures.load('list_nodes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v2_droplets_INVALID_IMAGE(self, method, url, body, headers):
body = self.fixtures.load('error_invalid_image.json')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def |
MDXDave/ModernWebif | plugin/controllers/ER.py | Python | gpl-2.0 | 2,362 | 0.019475 | # -*- coding: utf-8 -*-
##############################################################################
# 2013 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
from twisted.web import static, resource, http, server
class ERController(resource.Resource):
def __init__(self, session, path = ""):
resource.Resource.__init__(self)
self.session = session
try:
from Plugins.Extensions.EPGRefresh.EPGRefreshResource import EPGRefreshSettingsResource, \
EPGRefreshChangeSettingsResource, \
EPGRefreshListServicesResource, EPGRefreshAddRemoveServiceResource, \
EPGRefreshStartRefreshResource, API_VERSION
except ImportError:
print "ER plugin not found"
return
self.putChild('get', EPGRefreshSettingsResource())
self.putChild('set', EPGRefreshChangeSettingsResource())
self.putChild('refresh', EPGRefreshStartRefreshResource())
self.putChild(' | add', EPGRefreshAddRemoveServiceResource(EPGRefreshAddRemoveServiceResource.TYPE_ADD))
self.putChild('del', EPGRefreshAddRemoveServiceResource(EPGRefreshAddRemoveServiceResource.TYPE_DEL))
try:
from Plugins.Extensions.EPGRefresh.EPGRefreshResource import EPGRefreshPreviewServicesResource
except ImportError:
pass
else:
self.putChild('preview', EPGRefreshPreviewServicesResource())
def render(self, | request):
request.setResponseCode(http.OK)
request.setHeader('Content-type', 'application/xhtml+xml')
request.setHeader('charset', 'UTF-8')
try:
from Plugins.Extensions.EPGRefresh.EPGRefresh import epgrefresh
request.setHeader('Content-type', 'application/xhtml+xml')
request.setHeader('charset', 'UTF-8')
return ''.join(epgrefresh.buildConfiguration(webif = True))
except ImportError:
return '<?xml version="1.0" encoding="UTF-8" ?><e2simplexmlresult><e2state>false</e2state><e2statetext>EPG Refresh Plugin not found</e2statetext></e2simplexmlresult>'
|
CodeforLeipzig/fog | fog/config/settings/public.py | Python | bsd-3-clause | 1,254 | 0.001595 | from configurations import values
from . import common, databases, email
from .. import __version__
class Raven(object):
    """Report uncaught exceptions to the Sentry server."""
    INSTALLED_APPS = common.Common.INSTALLED_APPS + ('raven.contrib.django.raven_compat',)
    # Sentry client configuration; the DSN comes from the environment.
    RAVEN_CONFIG = {
        'dsn': values.URLValue(environ_name='RAVEN_CONFIG_DSN'),
        'release': __version__,
    }
class Sentry404(Raven):
    """Log 404 events to the Sentry server."""
    # The catch middleware must run first so it sees the final 404 response.
    MIDDLEWARE_CLASSES = (
        'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
    ) + common.Common.MIDDLEWARE_CLASSES
class Public(email.Email, databases.Databases, common.Common):
    """General settings for public projects."""
    # Must be supplied via the environment; no default is baked into code.
    SECRET_KEY = values.SecretValue()
    # Hardening: keep the CSRF cookie inaccessible to JavaScript.
    CSRF_COOKIE_HTTPONLY = True
    SECURE_BROWSER_XSS_FILTER = True
    SECURE_CONTENT_TYPE_NOSNIFF = True
    # Refuse to render the site inside any frame (clickjacking defence).
    X_FRAME_OPTIONS = 'DENY'
    # Django system-check IDs to silence; configurable per deployment.
    SILENCED_SYSTEM_CHECKS = values.ListValue([])
class Stage(Public):
    """Settings for staging server."""
    # Identical to Public for now; kept as a separate class so staging-only
    # overrides have an obvious home.
    pass
class SSL(object):
    """Settings for SSL."""
    # Canonical HTTPS host to redirect to; override per deployment.
    SECURE_SSL_HOST = values.Value('www.example.com')
    # Force all plain-HTTP requests to be redirected to HTTPS.
    SECURE_SSL_REDIRECT = True
class Prod(Public, SSL):
    """Settings for production server."""
    # Production is Public plus forced SSL; the MRO consults Public before SSL.
    pass
|
riolet/SAM | spec/python/test_server.py | Python | gpl-3.0 | 3,851 | 0.000519 | from spec.python import db_connection
import sam.common
import sam.constants
import web
# Build the web.py application under test and back its session with the test
# database so handlers that touch session state work outside a live server.
app = web.application(sam.constants.urls, globals(), autoreload=False)
sam.common.session_store = web.session.DBStore(db_connection.db, 'sessions')
sam.common.session = web.session.Session(app, sam.common.session_store)
# TODO: these commands ping the prod server instead of the test server for the session table.
#       If the prod server is missing, these fail.
#       I'm not sure why they do that.
def test_404():
    """Unknown endpoints must return 404 for both GET and POST."""
    with db_connection.env(login_active=False):
        req = app.request('/invalidendpoint', method='GET')
        assert req.status == "404 Not Found"
        req = app.request('/invalidendpoint', method='POST')
        assert req.status == "404 Not Found"
# Smoke tests: each endpoint responds with the expected status per HTTP verb.
def test_exists_map():
    """/map serves GET (with a query) and rejects POST."""
    with db_connection.env(login_active=False):
        req = app.request('/map', method='POST')
        assert req.status == "405 Method Not Allowed"
        req = app.request('/map?q=42', method='GET')
        assert req.status == "200 OK"
def test_exists_stats():
    """/stats serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/stats', 'GET')
        assert req.status == "200 OK"
        req = app.request('/stats', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_nodes():
    """/nodes serves both GET and POST."""
    with db_connection.env(login_active=False):
        req = app.request('/nodes', 'GET')
        assert req.status == "200 OK"
        req = app.request('/nodes', 'POST')
        assert req.status == "200 OK"
def test_exists_links():
    """/links serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/links', 'GET')
        assert req.status == "200 OK"
        req = app.request('/links', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_details():
    """/details serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/details', 'GET')
        assert req.status == "200 OK"
        req = app.request('/details', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_portinfo():
    """/portinfo serves both GET and POST."""
    with db_connection.env(login_active=False):
        req = app.request('/portinfo', 'GET')
        assert req.status == "200 OK"
        req = app.request('/portinfo', 'POST')
        assert req.status == "200 OK"
def test_exists_metadata():
    """/metadata serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/metadata', 'GET')
        assert req.status == "200 OK"
        req = app.request('/metadata', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_table():
    """/table serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/table', 'GET')
        assert req.status == "200 OK"
        req = app.request('/table', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_settings():
    """/settings serves both GET and POST."""
    with db_connection.env(login_active=False):
        req = app.request('/settings', 'GET')
        assert req.status == "200 OK"
        req = app.request('/settings', 'POST')
        assert req.status == "200 OK"
def test_exists_settings_page():
    """/settings_page serves GET only."""
    with db_connection.env(login_active=False):
        req = app.request('/settings_page', 'GET')
        assert req.status == "200 OK"
        req = app.request('/settings_page', 'POST')
        assert req.status == "405 Method Not Allowed"
def test_exists_login():
    """/login serves both GET and POST when login is active."""
    with db_connection.env(login_active=True):
        req = app.request('/login', 'GET')
        assert req.status == "200 OK"
        req = app.request('/login', 'POST')
        assert req.status == "200 OK"
def test_exists_logout():
    """/logout redirects on GET and rejects POST."""
    with db_connection.env(login_active=True, mock_session=True):
        req = app.request('/logout', 'GET')
        assert req.status == "303 See Other"
        req = app.request('/logout', 'POST')
        assert req.status == "405 Method Not Allowed"
jdemel/gnuradio | gr-channels/python/channels/impairments.py | Python | gpl-3.0 | 4,926 | 0.004669 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Radio Impairments Model
# Author: mettus
# Generated: Thu Aug 1 12:46:10 2013
##################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from gnuradio import analog
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter i | mport firdes
import math
#Import locally
from .phase_noise_gen import phase_noise_gen
from .iqbal_gen import iqbal_gen
from .distortion_2_gen import distortion_2_gen
from .distortion_3_gen import distortion_3_gen
class impairments(gr.hier_block2):
def __init__(self, phase_noise_mag=0, mag | bal=0, phasebal=0, q_ofs=0, i_ofs=0, freq_offset=0, gamma=0, beta=0):
gr.hier_block2.__init__(
self, "Radio Impairments Model",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
##################################################
self.phase_noise_mag = phase_noise_mag
self.magbal = magbal
self.phasebal = phasebal
self.q_ofs = q_ofs
self.i_ofs = i_ofs
self.freq_offset = freq_offset
self.gamma = gamma
self.beta = beta
##################################################
# Blocks
##################################################
self.channels_phase_noise_gen_0_0 = phase_noise_gen(math.pow(10.0,phase_noise_mag / 20.0), .01)
self.channels_iqbal_gen_0 = iqbal_gen(magbal, phasebal)
self.channels_distortion_3_gen_0 = distortion_3_gen(beta)
self.channels_distortion_2_gen_0 = distortion_2_gen(gamma)
self.blocks_multiply_xx_0_0 = blocks.multiply_vcc(1)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_conjugate_cc_0 = blocks.conjugate_cc()
self.blocks_add_const_vxx_0 = blocks.add_const_vcc((i_ofs + q_ofs* 1j, ))
self.analog_sig_source_x_0 = analog.sig_source_c(1.0, analog.GR_COS_WAVE, freq_offset, 1, 0)
##################################################
# Connections
##################################################
self.connect((self.channels_phase_noise_gen_0_0, 0), (self.channels_distortion_3_gen_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_multiply_xx_0_0, 0), (self.channels_phase_noise_gen_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_conjugate_cc_0, 0))
self.connect((self, 0), (self.blocks_multiply_xx_0_0, 1))
self.connect((self.blocks_conjugate_cc_0, 0), (self.blocks_multiply_xx_0_0, 0))
self.connect((self.channels_iqbal_gen_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.channels_distortion_3_gen_0, 0), (self.channels_distortion_2_gen_0, 0))
self.connect((self.channels_distortion_2_gen_0, 0), (self.channels_iqbal_gen_0, 0))
# QT sink close method reimplementation
def get_phase_noise_mag(self):
return self.phase_noise_mag
def set_phase_noise_mag(self, phase_noise_mag):
self.phase_noise_mag = phase_noise_mag
self.channels_phase_noise_gen_0_0.set_noise_mag(math.pow(10.0,self.phase_noise_mag / 20.0))
def get_magbal(self):
return self.magbal
def set_magbal(self, magbal):
self.magbal = magbal
self.channels_iqbal_gen_0.set_magnitude(self.magbal)
def get_phasebal(self):
return self.phasebal
def set_phasebal(self, phasebal):
self.phasebal = phasebal
self.channels_iqbal_gen_0.set_phase(self.phasebal)
def get_q_ofs(self):
return self.q_ofs
def set_q_ofs(self, q_ofs):
self.q_ofs = q_ofs
self.blocks_add_const_vxx_0.set_k((self.i_ofs + self.q_ofs* 1j, ))
def get_i_ofs(self):
return self.i_ofs
def set_i_ofs(self, i_ofs):
self.i_ofs = i_ofs
self.blocks_add_const_vxx_0.set_k((self.i_ofs + self.q_ofs* 1j, ))
def get_freq_offset(self):
return self.freq_offset
def set_freq_offset(self, freq_offset):
self.freq_offset = freq_offset
self.analog_sig_source_x_0.set_frequency(self.freq_offset)
def get_gamma(self):
return self.gamma
def set_gamma(self, gamma):
self.gamma = gamma
self.channels_distortion_2_gen_0.set_beta(self.gamma)
def get_beta(self):
return self.beta
def set_beta(self, beta):
self.beta = beta
self.channels_distortion_3_gen_0.set_beta(self.beta)
|
wavefrontHQ/python-client | wavefront_api_client/models/response_status.py | Python | apache-2.0 | 5,837 | 0.000171 | # coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class | ResponseStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program | .
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'int',
'message': 'str',
'result': 'str'
}
attribute_map = {
'code': 'code',
'message': 'message',
'result': 'result'
}
def __init__(self, code=None, message=None, result=None, _configuration=None): # noqa: E501
"""ResponseStatus - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._code = None
self._message = None
self._result = None
self.discriminator = None
self.code = code
if message is not None:
self.message = message
self.result = result
@property
def code(self):
"""Gets the code of this ResponseStatus. # noqa: E501
HTTP Response code corresponding to this response # noqa: E501
:return: The code of this ResponseStatus. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ResponseStatus.
HTTP Response code corresponding to this response # noqa: E501
:param code: The code of this ResponseStatus. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and code is None:
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
self._code = code
@property
def message(self):
"""Gets the message of this ResponseStatus. # noqa: E501
Descriptive message of the status of this response # noqa: E501
:return: The message of this ResponseStatus. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ResponseStatus.
Descriptive message of the status of this response # noqa: E501
:param message: The message of this ResponseStatus. # noqa: E501
:type: str
"""
self._message = message
@property
def result(self):
"""Gets the result of this ResponseStatus. # noqa: E501
:return: The result of this ResponseStatus. # noqa: E501
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this ResponseStatus.
:param result: The result of this ResponseStatus. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and result is None:
raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501
allowed_values = ["OK", "ERROR"] # noqa: E501
if (self._configuration.client_side_validation and
result not in allowed_values):
raise ValueError(
"Invalid value for `result` ({0}), must be one of {1}" # noqa: E501
.format(result, allowed_values)
)
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseStatus):
return True
return self.to_dict() != other.to_dict()
|
bendtherules/pontoon | pontoon/mocking.py | Python | mit | 9,262 | 0.002483 | # -*- coding: utf-8 -*-
import re
import sys
import contextlib
from random import randrange
from datetime import datetime, timedelta
from .exceptions import ClientException
# Python 2/3 compatibility for capture_stdout
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Data(object):
pass
def timestamp(hours=0):
"""Mocked Digital Ocean timestamp"""
return (datetime.utcnow() + timedelta(
hours=hours)).strftime("%Y-%m-%dT%H:%M:%SZ")
def get_builtins():
"""Python 2.x and 3.x have different names for accessing builtins"""
try:
__import__('builtins')
return 'builtins'
except ImportError:
return '__builtin__'
def _raise(ex=None):
"""Wrapper for exceptions so they can be thrown from inside lambdas"""
if ex:
exception.append(ex)
if len(exception):
raise exception.pop()
def event_response():
return {
'event_id': randrange(9999),
'status': 'OK',
}
@contextlib.contextmanager
def capture_stdout():
"""Captures STDOUT and turns it into an object"""
old = sys.stdout
capturer = StringIO()
sys.stdout = capturer
data = Data()
yield data
sys.stdout = old
data.result = capturer.getvalue()
exception = []
mocked = {
'droplets': [
{
'id': 1, 'name': 'foo', 'image_id': 1, 'region_id': 2,
'backups_active': False, 'status': 'active',
'ip_address': '192.0.2.1', 'size_id': 3, 'locked': False,
'created_at': timestamp(-200), 'private_ip_address': None,
},
{
'id': 2, 'name': 'bar', 'image_id': 1, 'region_id': 2,
'backups_active': False, 'status': 'active',
'ip_address': '192.0.2.2', 'size_id': 2, 'locked': False,
'created_at': timestamp(-5), 'private_ip_address': None,
},
{
'id': 3, 'name': 'baz', 'image_id': 1, 'region_id': 1,
'backups_active': False, 'status': 'active',
'ip_address': '192.0.2.3', 'size_id': 1, 'locked': False,
'created_at': timestamp(), 'private_ip_address': None,
},
],
'regions': [
{
'id': 1, 'name': 'Foo York 1',
},
{
'id': 2, 'name': 'Bardam 1',
},
{
'id': 3, 'name': 'Foo Barbaz 1',
},
{
'id': 4, 'name': 'Foo York 2',
},
],
'images': [
{
'id': 1, 'name': 'Foobuntu 12.04 x64', 'distribution': 'Foobuntu',
},
{
'id': 2, 'name': 'Foobuntu 12.04 x32', 'distribution': 'Foobuntu',
},
{
'id': 3, 'name': 'Bar 6.0 x64', 'distribution': 'Bar',
},
| {
'id': 4, 'name': 'Bar 6.0 x32', 'distribution': 'Bar',
},
],
'snapshots': [
{
'id': 1024, 'name': 'snapshot-foo',
'distribution': 'Foobuntu',
},
{
'id': 2048, 'name | ': 'snapshot-bar-2013-10-10',
'distribution': 'Foobuntu',
},
{
'id': 4096, 'name': 'snapshot-baz-pre-install',
'distribution': 'Foobuntu',
},
],
'sizes': [
{
'id': 1, 'name': '512MB',
},
{
'id': 2, 'name': '1GB',
},
{
'id': 3, 'name': '2GB',
},
],
'ssh_keys': [
{
'id': 1, 'name': 'foobarbaz',
'ssh_pub_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1on8gxCGJJWSRT'
'4uOrR13mUaUk0hRf4RzxSZ1zRbYYFw8pfGesIFoEuVth4HKyF8'
'k1y4mRUnYHP1XNMNMJl1JcEArC2asV8sHf6zSPVffozZ5TT4Sf'
'sUu/iKy9lUcCfXzwre4WWZSXXcPff+EHtWshahu3WzBdnGxm5X'
'oi89zcE= test@example.com'
},
],
'events': [
{
'id': 999, 'action_status': 'done', 'droplet_id': 1,
'event_type_id': 1, 'percentage': 100,
},
{
'id': 888, 'action_status': 'done', 'droplet_id': 2,
'event_type_id': 2, 'percentage': 100,
},
{
'id': 777, 'action_status': 'done', 'droplet_id': 3,
'event_type_id': 3, 'percentage': 100,
},
],
}
def _respond(target, method='GET', params={}):
"""Fake out responses from Digital Ocean."""
if len(params) > 0:
if re.match("^/images$", target):
if 'filter' in params:
return {'images': (mocked.get('snapshots') +
mocked.get('images'))}
if re.match("^/images/\d+/transfer$", target):
if 'region_id' in params:
return event_response()
elif re.match("^/droplets/\d+/resize$", target):
if 'size_id' in params:
return event_response()
elif re.match("^/droplets/\d+/snapshot$", target):
if 'name' in params:
return event_response()
elif re.match("^/droplets/\d+/rebuild$", target):
if 'image_id' in params:
return event_response()
elif re.match("^/droplets/\d+/rename$", target):
if 'name' in params:
return event_response()
elif re.match("^/droplets/\d+/restore$", target):
if 'image_id' in params:
return event_response()
elif re.match("^/droplets/new$", target):
new = {
'id': randrange(100, 999),
'name': params['name'],
'image_id': params['image_id'],
'region_id': params['region_id'],
'backups_active': False,
'status': 'new',
'ip_address': '192.0.2.%s' % randrange(10, 255),
'size_id': params['size_id'],
'locked': False,
'created_at': timestamp(),
'private_ip_address': None
}
mocked['droplets'].append(new)
return {'droplet': new}
elif re.match("^/ssh_keys/new$", target):
new = {
'id': randrange(100, 999),
'name': params['name'],
'ssh_pub_key': params['ssh_pub_key']
}
mocked['ssh_keys'].append(new)
return {'ssh_key': new}
elif re.match("^/ssh_keys/\d+/edit$", target):
r = re.match("^/ssh_keys/(\d+)/edit$", target)
id = int(r.group(1))
replaced = []
new = {
'id': id,
'ssh_pub_key': params['ssh_pub_key']
}
for key in mocked.get('ssh_keys'):
if key['id'] == id:
new['name'] = key['name']
replaced.append(new)
else:
replaced.append(key)
mocked['ssh_keys'] = replaced
return {'ssh_key': next((
k for k in mocked.get(
'ssh_keys') if k['id'] == id), None)}
elif re.match("^/droplets/\d+/destroy$", target):
if 'scrub_data' in params:
return event_response()
else:
options = {
'/droplets': {'droplets': mocked.get('droplets')},
'/regions': {'regions': mocked.get('regions')},
'/images': {'images': (mocked.get('images') +
mocked.get('snapshots'))},
'/sizes': {'sizes': mocked.get('sizes')},
'/ssh_keys': {'ssh_keys': mocked.get('ssh_keys')},
'/droplets/\d+/reboot': event_response(),
'/droplets/(\d+)': lambda x: {'droplet': next(
(d for d in mocked.get('droplets') if d['id'] == x), None)},
'/droplets/\d+/power_cycle': event_response(),
'/droplets/\d+/power_off': event_response(),
'/droplets/\d+/power_on': event_response(),
'/droplets/\d+/enable_backups': event_response(),
'/droplets/\d+/disable_backups': event_response(),
'/droplets/\d+/shutdown': event_response(),
|
blancltd/blanc-basic-news | blanc_basic_news/feeds.py | Python | bsd-3-clause | 1,350 | 0.001481 | from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404
from django.utils import timezone
from .models import Category, Post
class BasicNewsFeed(Feed):
title = getattr(settings, 'NEWS_TITLE', 'News')
link = reverse_lazy('blanc_basic_news:post-list')
def items(self):
feed_limit = getattr(settings, 'NEWS_FEED_LIMIT', 10)
return Post.objects.filter(published=True, date__lte=timezone.now())[:feed_limit]
def item_description(self, obj):
return obj.content
def item_pub | date(self, obj):
return obj.date
def item_guid(self, obj):
return '%s:news:%d' % (Site.objects.get_current().domain, obj.pk)
class Basic | NewsCategoryFeed(BasicNewsFeed):
def get_object(self, request, slug):
return get_object_or_404(Category, slug=slug)
def title(self, obj):
return '%s - %s' % (getattr(settings, 'NEWS_TITLE', 'News'), obj.title)
def link(self, obj):
return obj.get_absolute_url()
def items(self, obj):
feed_limit = getattr(settings, 'NEWS_FEED_LIMIT', 10)
return Post.objects.filter(
published=True, date__lte=timezone.now(), category=obj)[:feed_limit]
|
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/idlelib/ScriptBinding.py | Python | mit | 7,992 | 0.000375 | """Extension to execute code outside the Python shell window.
This adds the following commands:
- Check module does a full syntax check of the current module.
It also runs the tabnanny to catch any inconsistent tabs.
- Run module executes the module's code in the __main__ namespace. The window
must have been saved previously. The module is added to sys.modules, and is
also added to the __main__ namespace.
XXX GvR Redesign this interface (yet again) as follows:
- Present a dialog box for ``Run Module''
- Allow specify command line arguments in the dialog box
"""
import os
import re
import string
import tabnanny
import tokenize
import tkMessageBox
from idlelib import PyShell
from idlelib.configHandler import idleConf
IDENTCHARS = string.ascii_letters + string.digits + "_"
indent_message = """Error: Inconsistent indentation detected!
1) Your indentation is outright incorrect (easy to fix), OR
2) Your indentation mixes tabs and spaces.
To fix case 2, change all tabs to spaces by using Edit->Select All followed \
by Format->Untabify Region and specify the number of columns used by each tab.
"""
class ScriptBinding:
menudefs = [
('run', [None,
('Check Module', '<<check-module>>'),
('Run Module', '<<run-module>>'), ]), ]
def __init__(self, editwin):
self.editwin = editwin
# Provide instance variables referenced by Debugger
# XXX This should be done differently
self.flist = self.editwin.flist
self.root = self.editwin.root
def check_module_event(self, event):
filename = self.getfilename()
if not filename:
return 'break'
if not self.checksyntax(filename):
return 'break'
if not self.tabnanny(filename):
return 'break'
def tabnanny(self, filename):
f = open(filename, 'r')
try:
tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
msgtxt, (lineno, start) = msg
self.editwin.gotoline(lineno)
self.errorbox("Tabnanny Tokenizing Error",
"Token Error: %s" % msgtxt)
return False
except tabnanny.NannyNag, nag:
# The error messages from tabnanny are too confusing...
self.editwin.gotoline(nag.get_lineno())
self.errorbox("Tab/space error", indent_message)
return False
return True
def checksyntax(self, filename):
self.shell = shell = self.flist.open_shell()
saved_stream = shell.get_warning_stream()
shell.set_warning_stream(shell.stderr)
f = open(filename, 'r')
source = f.read()
f.close()
if '\r' in source:
source = re.sub(r"\r\n", "\n", source)
source = re.sub(r"\r", "\n", source)
if source and source[-1] != '\n':
source = source + '\n'
text = self.editwin.text
text.tag_remove("ERROR", "1.0", "end")
try:
try:
# If successful, return the compiled code
return compile(source, filename, "exec")
except (SyntaxError, OverflowError), err:
try:
msg, (errorfilename, lineno, offset, line) = err
if not errorfilename:
err.args = msg, (filename, lineno, offset, line)
err.filename = filename
self.colorize_syntax_error(msg, lineno, offset)
except:
msg = "*** " + str(err)
self.errorbox("Syntax error",
"There's an error in your program:\n" + msg)
return False
finally:
shell.set_warning_stream(saved_stream)
def colorize_syntax_error(self, msg, lineno, offset):
text = self.editwin.text
pos = "0.0 + %d lines + %d chars" % (lineno-1, offset-1)
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
def run_module_event(self, event):
"""Run the module after setting up the environment.
First check the syntax. If OK, make sure the shell is active and
then transfer the arguments, set the run environment's working
directory to the directory of the module being executed and also
add that directory to its sys.path if not already included.
"""
filename = self.getfilename()
if not filename:
return 'break'
code = self.checksyntax(filename)
if not code:
return 'break'
if not self.tabnanny(filename):
return 'break'
shell = self.shell
interp = shell.interp
if PyShell.use_subprocess:
shell.restart_shell()
dirname = os.path.dirname(filename)
# XXX Too often this discards arguments the user just set...
interp.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import basename as _basename
if (not _sys.argv or
_basename(_sys.argv[0]) != _basename(_filename)):
_sys.argv = [_filename]
import os as _os
_os.chdir(%r)
del _filename, _sys, _basename, _os
\n""" % (filename, dirname))
interp.prepend_syspath(filename)
# XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still
# go to __stderr__. With subprocess, they go to the shell.
# Need to change streams in PyShell.ModifiedInterpreter.
interp.runcode(code)
return 'break'
def getfilename(self):
"""Get source filename. If not saved, offer to save (or create) file
The debugger requires a source file. Make sure there is one, and that
the current version of the source buffer has been saved. If the user
declines to save or cancels the Save As dialog, return None.
If the user has configured IDLE for Autosave, the file will be
silently saved if it already exists and is dirty.
"""
filename = self.editwin.io.filename
if not self.editwin.get_saved():
autosave = idleConf.GetOption('main', 'General',
'autosave', type='bool')
if autosave and filename:
self.editwin.io.save(None)
else:
reply = self.ask_save_dialog()
self.editwin.text.focus_set()
if reply == "ok":
self.editwin.io.save(None)
filename = self.editwin.io.filename
else:
filename = None
return filename
def ask_save_dialog(self):
msg = "Source Must Be Saved\n" + 5*' ' + "OK to Save?"
mb = tkMessageBox.Message(title="Save Before Run or Check", |
message=msg,
icon=tkMessageBox.QUESTION,
| type=tkMessageBox.OKCANCEL,
default=tkMessageBox.OK,
master=self.editwin.text)
return mb.show()
def errorbox(self, title, message):
# XXX This should really be a function of EditorWindow...
tkMessageBox.showerror(title, message, master=self.editwin.text)
self.editwin.text.focus_set()
|
scheib/chromium | testing/trigger_scripts/PRESUBMIT.py | Python | bsd-3-clause | 883 | 0 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for testing/trigger_scripts.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools | .
"""
USE_PYTHON3 = True
def CommonChecks(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api,
output_api,
'.',
files_to_check=['.*test.py'],
run_on_python2=not USE_PYTHON3,
run_on_python3=USE_PYTHON3,
skip_shebang_ch | eck=True)
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
|
koenbok/Cactus | cactus/tests/data/skeleton/plugins/version.py | Python | bsd-3-clause | 1,339 | 0.004481 | from __future__ import print_function
import os
INFO = {
'name': 'Version | Updater',
'description': 'Add a version to /versions.txt after each deploy'
}
# Set up extra django template tags
def templateTags():
pass
# Build actions
# def preBuild(site):
# print 'preBuild'
#
# def postBuild(site):
# print 'postBuild'
# Build page actions
# def preBuildPage(site, path, context, data):
# print 'preBuildPage', path
# return context, data
#
# def postBuildPage(site, path):
# print 'postBuildPage', path
# pass
# Deploy actions
|
def preDeploy(site):
# Add a deploy log at /versions.txt
import urllib2
import datetime
import platform
import codecs
import getpass
url = site.config.get('aws-bucket-website')
data = u''
# If this is the first deploy we don't have to fetch the old file
if url:
try:
data = urllib2.urlopen('http://%s/versions.txt' % url, timeout=8.0).read() + u'\n'
except:
print("Could not fetch the previous versions.txt, skipping...")
return
data += u'\t'.join([datetime.datetime.now().isoformat(), platform.node(), getpass.getuser()])
codecs.open(os.path.join(site.paths['build'], 'versions.txt'), 'w', 'utf8').write(data)
def postDeploy(site):
pass
|
pombredanne/similarityPy | tests/algorihtm_tests/find_nearest_test.py | Python | mit | 651 | 0.001536 | from unittest import TestCase
from similarityPy.algorithms.find_nearest import FindNearest
from tests import te | st_logger
__author__ = 'cenk'
class FindNearestTest(TestCase):
def setUp(self):
pass
def test_algorithm(self):
test_logger.debug("FindNearestTest - test_algorithm Starts")
points = "abcdef"
point = "abcdefg"
with self.assertRaises(TypeError) as context:
FindNearest(points, point, "")
self.assertEqual("You must initialize array and a point",
| context.exception.message)
test_logger.debug("FindNearestTest - test_algorithm Starts") |
pipermerriam/flex | tests/loading/definition/schema/test_read_only.py | Python | mit | 1,126 | 0 | import pytest
from flex.constants import (
STRING,
BOOLEAN,
INTEGER,
)
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.schema import schema_validator
from tests.utils import (
assert_path_no | t_in_er | rors,
assert_message_in_errors,
)
def test_read_only_is_not_required():
try:
schema_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('readOnly', errors)
@pytest.mark.parametrize(
'value',
(None, {'a': 1}, 1, 1.1, 'abc'),
)
def test_read_only_with_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({'readOnly': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'readOnly.type',
)
def test_read_only_with_valid_value():
try:
schema_validator({'readOnly': True})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('readOnly', errors)
|
plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2d/_xsrc.py | Python | mit | 388 | 0.002577 | import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
| def __init__(self, plotly_name="xsrc", parent_name="histogram2d", **kwargs):
super(XsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
| **kwargs
)
|
kshcherban/acme-nginx | acme_nginx/DigitalOcean.py | Python | gpl-3.0 | 3,177 | 0.000315 | import json
from os import getenv
try:
from urllib.request import urlopen, Request # Python 3
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, Request # Python 2
from urllib2 import HTTPError
class DigitalOcean(object):
def __init__(self):
self.token = getenv("API_TOKEN")
self.api = "https://api.digitalocean.com/v2/domains"
if not self.token:
raise Exception("API_TOKEN not found in environment")
def determine_domain(self, domain):
"""Determine registered domain in API"""
request_headers = {
"Content-Type": "application/json",
"Authorization": "Bearer {0}".format(self.token),
}
response = urlopen(Request(self.api, headers=request_headers))
if response.getcode( | ) != 200:
raise Exception(json.loads(response.read().decode("utf8")))
domains = json.loads(response.read().decode("utf8"))["domains"]
for d in domains:
if d["name"] in domain:
return d["name"]
def create_record(self, name, data, domain):
"""
Create DNS record
Params:
name, string, record name
data, string, | record data
domain, string, dns domain
Return:
record_id, int, created record id
"""
registered_domain = self.determine_domain(domain)
api = self.api + "/" + registered_domain + "/records"
request_headers = {
"Content-Type": "application/json",
"Authorization": "Bearer {0}".format(self.token),
}
request_data = {"type": "TXT", "ttl": 300, "name": name, "data": data}
try:
response = urlopen(
Request(
api,
data=json.dumps(request_data).encode("utf8"),
headers=request_headers,
)
)
except HTTPError as e:
raise Exception(e.read().decode("utf8"))
if response.getcode() != 201:
raise Exception(json.loads(response.read().decode("utf8")))
return json.loads(response.read().decode("utf8"))["domain_record"]["id"]
def delete_record(self, record, domain):
"""
Delete DNS record
Params:
record, int, record id number
domain, string, dns domain
"""
registered_domain = self.determine_domain(domain)
api = self.api + "/" + registered_domain + "/records/" + str(record)
request_headers = {
"Content-Type": "application/json",
"Authorization": "Bearer {0}".format(self.token),
}
request = Request(
api, data=json.dumps({}).encode("utf8"), headers=request_headers
)
# this is hack around urllib to send DELETE request
request.get_method = lambda: "DELETE"
try:
response = urlopen(request)
except HTTPError as e:
raise Exception(e.read().decode("utf8"))
if response.getcode() != 204:
raise Exception(json.loads(response.read().decode("utf8")))
|
kwilliams-mo/iris | lib/iris/tests/test_pp_to_cube.py | Python | gpl-3.0 | 10,648 | 0.001784 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import os
import iris
import iris.fileformats.pp
import iris.fileformats.pp_rules
import iris.fileformats.rules
import iris.io
import iris.util
import iris.tests.stock
@iris.tests.skip_data
class TestPPLoadCustom(tests.IrisTest):
def setUp(self):
self.subcubes = iris.cube.CubeList()
filename = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
self.template = iris.fileformats.pp.load(filename).next()
def _field_to_cube(self, field):
cube, _, _ = iris.fileformats.rules._make_cube(
field, iris.fileformats.pp_rules.convert)
return cube
def test_lbtim_2(self):
for delta in range(10):
field = self.template.copy()
field.lbtim = 2
field.lbdat += delta
cube = self._field_to_cube(field)
self.subcubes.append(cube)
cube = self.subcubes.merge()[0]
self.assertCML(cube, ('pp_rules', 'lbtim_2.cml'))
def _ocean_depth(self, bounded=False):
lbuser = list(self.template.lbuser)
lbuser[6] = 2
lbuser[3] = 101
lbuser = tuple(lbuser)
for level_and_depth in enumerate([5.0, 15.0, 25.0, 35.0, 45.0]):
field = self.template.copy()
field.lbuser = lbuser
field.lbvc = 2
field.lbfc = 601
field.lblev, field.blev = level_and_depth
if bounded:
brsvd = list(field.brsvd)
brsvd[0] = field.blev - 1
field.brsvd = tuple(brsvd)
field.brlev = field.blev + 1
cube = self._field_to_cube(field)
self.subcubes.append(cube)
def test_ocean_depth(self):
self._ocean_depth()
cube = self.subcubes.merge()[0]
self.assertCML(cube, ('pp_rules', 'ocean_depth.cml'))
def test_ocean_depth_bounded(self):
self._ocean_depth(bounded=True)
cube = self.subcubes.merge()[0]
self.assertCML(cube, ('pp_rules', 'ocean_depth_bounded.cml'))
def test_invalid_units(self):
# UM to CF rules are mapped to the invalid unit "1e3 psu @0.035"
# for the STASH code m02s00i102.
lbuser = list(self.template.lbuser)
lbuser[6] = 2
lbuser[3] = 102
self.template.lbuser = tuple(lbuser)
cube = self._field_to_cube(self.template)
self.assertCML(cube, ('pp_rules', 'invalid_units.cml'))
class TestReferences(tests.IrisTest):
def setUp(self):
target = iris.tests.stock.simple_2d()
target.data = target.data.astype('f4')
self.target = target
self.ref = target.copy()
def test_regrid_missing_coord(self):
# If the target cube is missing one of the source dimension
# coords, ensure the re-grid fails nicely - i.e. returns None.
self.target.remove_coord('bar')
new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref,
self.target)
self.assertIsNone(new_ref)
def test_regrid_codimension(self):
# If the target cube has two of the source dimension coords
# sharing the same dimension (e.g. a trajectory) then ensure
# the re-grid fails nicely - i.e. returns None.
self.target.remove_coord('foo')
new_foo = self.target.coord('bar').copy()
new_foo.rename('foo')
self.target.add_aux_coord(new_foo, 0)
new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref,
self.target)
self.assertIsNone(new_ref)
def test_regrid_identity(self):
new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref,
self.target)
# Bounds don't make it through the re-grid process
self.ref.coord('bar').bounds = None
self.ref.coord('foo').bounds = None
self.assertEqual(new_ref, self.ref)
@iris.tests.skip_data
class TestPPLoading(tests.IrisTest):
def test_simple(self):
cube = iris.tests.stock.simple_pp()
self.assertCML(cube, ('cube_io', 'pp', 'load', 'global.cml'))
@iris.tests.skip_data
class TestPPLoadRules(tests.IrisTest):
def test_pp_load_rules(self):
# Test PP loading and rule evaluation.
cube = iris.tests.stock.simple_pp()
self.assertCML(cube, ('pp_rules', 'global.cml'))
data_path = tests.get_data_path(('PP', 'rotated_uk', 'rotated_uk.pp'))
cube = iris.load(data_path)[0]
self.assertCML(cube, ('pp_rules', 'rotated_uk.cml'))
def test_lbproc(self):
data_path = tests.get_data_path(('PP', 'meanMaxMin', '200806081200__qwpb.T24.pp'))
# Set up standard name and T+24 constraint
constraint = iris.Constraint('air_temperature', forecast_period=24)
cubes = iris.load(data_path, constraint)
cubes = iris.cube.CubeList([cubes[0], cubes[3], cubes[1], cubes[2], cubes[4]])
self.assertCML(cubes, ('pp_rules', 'lbproc_mean_max_min.cml'))
def test_custom_rules(self):
# Test custom rule evaluation.
# Default behaviour
data_path = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
cube = iris.load_cube(data_path)
self.assertEqual(cube.standard_name, 'air_temperature')
# Custom behaviour
temp_path = iris.util.create_temp_filename()
f = open(temp_path, 'w')
f.write('\n'.join((
'IF',
'f.lbuser[3] == 16203',
'THEN',
'CMAttribute("standard_name", None)',
'CMAttribute("long_name", "customised")')))
f.close()
iris.fileformats.pp.add_load_rules(temp_path)
cube = iris.load_cube(data_path)
self.assertEqual(cube.name(), 'customised')
os.remove(temp_path)
# Back to default
iris.fileformats.pp.reset_load_rules()
cube = | iris.load_cube(data_path)
self.assertEqual(cube.standard_name, 'air_temperature')
def test_cell_methods(self):
# Test cell methods are created for correct values of lbproc
orig_file = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
# Values that result in cell methods being created
cell_method_values = {128 : "mean", 4096 : "minimum" | , 8192 : "maximum"}
# Make test values as list of single bit values and some multiple bit values
single_bit_values = list(iris.fileformats.pp.LBPROC_PAIRS)
multiple_bit_values = [(128 + 64, ""), (4096 + 2096, ""), (8192 + 1024, "")]
test_values = list(single_bit_values) + multiple_bit_values
for value, _ in test_values:
f = iris.fileformats.pp.load(orig_file).next()
f.lbproc = value # set value
# Write out pp file
temp_filename = iris.util.create_temp_filename(".pp")
f.save(open(temp_filename, 'wb'))
# Load pp file
cube = iris.load_cube(temp_filename)
if value in cell_method_values:
# Check for cell method on cube
self.assertEqual(cube.cell_methods[0].method, cell_method_values[value])
else:
# Check no cell method was created for values other than 128, 4096, 8192
self.assertEqual(len(cube.cell |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/route_filter_py3.py | Python | mit | 2,701 | 0.002962 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteFilter(Resource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2017_08_01.models.RouteFilterRule]
:param peerings: A collection of references to express route circuit
peerings.
:type peerings:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
| 'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': | 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, rules=None, peerings=None, **kwargs) -> None:
super(RouteFilter, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.rules = rules
self.peerings = peerings
self.provisioning_state = None
self.etag = None
|
chregu/cf-php-varnish-buildpack | extensions/composer/extension.py | Python | apache-2.0 | 16,172 | 0.000804 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension
Downl | oads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
from build_pack_utils import utils
from build_pack_utils import stream_output
from extension_helpers import ExtensionHelper
from build_pack_utils.compile_extens | ions import CompileExtensions
_log = logging.getLogger('composer')
def find_composer_paths(ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
json_path = None
lock_path = None
json_paths = [
os.path.join(build_dir, 'composer.json'),
os.path.join(build_dir, webdir, 'composer.json')
]
lock_paths = [
os.path.join(build_dir, 'composer.lock'),
os.path.join(build_dir, webdir, 'composer.lock')
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
json_paths = json_paths + [
os.path.join(build_dir, env_path, 'composer.json'),
os.path.join(build_dir, webdir, env_path, 'composer.json')
]
lock_paths = lock_paths + [
os.path.join(build_dir, env_path, 'composer.lock'),
os.path.join(build_dir, webdir, env_path, 'composer.lock')
]
for path in json_paths:
if os.path.exists(path):
json_path = path
for path in lock_paths:
if os.path.exists(path):
lock_path = path
return (json_path, lock_path)
class ComposerConfiguration(object):
def __init__(self, ctx):
self._ctx = ctx
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
(self.json_path, self.lock_path) = \
find_composer_paths(self._ctx)
def read_exts_from_path(self, path):
exts = []
if path:
req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
ext_pat = re.compile(r'"ext-(.*?)"')
with open(path, 'rt') as fp:
data = fp.read()
for req_match in req_pat.finditer(data):
for ext_match in ext_pat.finditer(req_match.group(1)):
exts.append(ext_match.group(1))
return exts
def pick_php_version(self, requested):
selected = None
if requested is None:
selected = self._ctx['PHP_VERSION']
elif requested == '5.5.*' or requested == '>=5.5':
selected = self._ctx['PHP_55_LATEST']
elif requested == '5.6.*' or requested == '>=5.6':
selected = self._ctx['PHP_56_LATEST']
elif requested == '7.0.*' or requested == '>=7.0':
selected = self._ctx['PHP_70_LATEST']
elif requested.startswith('5.5.'):
selected = requested
elif requested.startswith('5.6.'):
selected = requested
elif requested.startswith('7.0.'):
selected = requested
else:
selected = self._ctx['PHP_VERSION']
return selected
def read_version_from_composer(self, key):
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None:
composer = json.load(open(json_path, 'r'))
require = composer.get('require', {})
return require.get(key, None)
if lock_path is not None:
composer = json.load(open(lock_path, 'r'))
platform = composer.get('platform', {})
return platform.get(key, None)
return None
def configure(self):
if self.json_path or self.lock_path:
exts = []
# include any existing extensions
exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
# add 'openssl' extension
exts.append('openssl')
# add platform extensions from composer.json & composer.lock
exts.extend(self.read_exts_from_path(self.json_path))
exts.extend(self.read_exts_from_path(self.lock_path))
# update context with new list of extensions,
# if composer.json exists
php_version = self.read_version_from_composer('php')
self._log.debug('Composer picked PHP Version [%s]',
php_version)
self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
self._ctx['PHP_VM'] = 'php'
class ComposerExtension(ExtensionHelper):
def __init__(self, ctx):
ExtensionHelper.__init__(self, ctx)
self._log = _log
def _defaults(self):
manifest_file_path = os.path.join(self._ctx["BP_DIR"], "manifest.yml")
compile_ext = CompileExtensions(self._ctx["BP_DIR"])
_, default_version = compile_ext.default_version_for(manifest_file_path=manifest_file_path, dependency="composer")
return {
'COMPOSER_VERSION': default_version,
'COMPOSER_PACKAGE': 'composer.phar',
'COMPOSER_DOWNLOAD_URL': '/composer/'
'{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
'COMPOSER_CACHE_DIR': '{CACHE_DIR}/composer'
}
def _should_compile(self):
(json_path, lock_path) = \
find_composer_paths(self._ctx)
return (json_path is not None or lock_path is not None)
def _compile(self, install):
self._builder = install.builder
self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
self.move_local_vendor_folder()
self.install()
self.run()
def move_local_vendor_folder(self):
vendor_path = os.path.join(self._ctx['BUILD_DIR'],
self._ctx['WEBDIR'],
'vendor')
if os.path.exists(vendor_path):
self._log.debug("Vendor [%s] exists, moving to LIBDIR",
vendor_path)
(self._builder.move()
.under('{BUILD_DIR}/{WEBDIR}')
.into('{BUILD_DIR}/{LIBDIR}')
.where_name_matches('^%s/.*$' % vendor_path)
.done())
def install(self):
self._builder.install().package('PHP').done()
if self._ctx['COMPOSER_VERSION'] == 'latest':
dependencies_path = os.path.join(self._ctx['BP_DIR'],
'dependencies')
if os.path.exists(dependencies_path):
raise RuntimeError('"COMPOSER_VERSION": "latest" ' \
'is not supported in the cached buildpack. Please vendor your preferred version of composer with your app, or use the provided default composer version.')
self._ctx['COMPOSER_DOWNLOAD_URL'] = \
'https://getcomposer.org/composer.phar'
self._builder.install()._installer.install_binary_direct(
self._ctx['COMPOSER_DOWNLOAD_URL'], None,
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
else:
self._builder.install()._installer._install_binary_from_manifest(
self._ctx['COMPOSER_DOWNLOAD_URL'],
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
|
EnTeQuAk/dotfiles | sublime-text-3/Packages/shellenv/all/shellenv/_osx/open_directory.py | Python | unlicense | 4,675 | 0.001497 | # coding: utf-8
from __future__ import unicode_litera | ls, division, absolute_import, print_function
from getpass import getuser
import ctypes
from ctypes.util import find_library
from ctypes import c_voi | d_p, c_uint32, POINTER, c_bool, byref
from .core_foundation import CoreFoundation, unicode_to_cfstring, cfstring_to_unicode
from .._types import str_cls, type_name
od_path = find_library('OpenDirectory')
OpenDirectory = ctypes.CDLL(od_path, use_errno=True)
ODAttributeType = CoreFoundation.CFStringRef
ODMatchType = c_uint32
ODRecordType = CoreFoundation.CFStringRef
ODSessionRef = c_void_p
ODNodeRef = c_void_p
ODQueryRef = c_void_p
ODRecordRef = c_void_p
OpenDirectory.ODSessionCreate.argtypes = [
CoreFoundation.CFAllocatorRef,
CoreFoundation.CFDictionaryRef,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODSessionCreate.restype = ODSessionRef
OpenDirectory.ODNodeCreateWithName.argtypes = [
CoreFoundation.CFAllocatorRef,
ODSessionRef,
CoreFoundation.CFStringRef,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODNodeCreateWithName.restype = ODNodeRef
OpenDirectory.ODQueryCreateWithNode.argtypes = [
CoreFoundation.CFAllocatorRef,
ODNodeRef,
CoreFoundation.CFTypeRef,
ODAttributeType,
ODMatchType,
CoreFoundation.CFTypeRef,
CoreFoundation.CFTypeRef,
CoreFoundation.CFIndex,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODQueryCreateWithNode.restype = ODQueryRef
OpenDirectory.ODQueryCopyResults.argtypes = [
ODQueryRef,
c_bool,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODQueryCopyResults.restype = CoreFoundation.CFArrayRef
OpenDirectory.ODRecordCopyValues.argtypes = [
ODRecordRef,
ODAttributeType,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODRecordCopyValues.restype = CoreFoundation.CFArrayRef
kODMatchEqualTo = ODMatchType(0x2001)
kODRecordTypeUsers = ODRecordType.in_dll(OpenDirectory, 'kODRecordTypeUsers')
kODAttributeTypeRecordName = ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeRecordName')
kODAttributeTypeUserShell = ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeUserShell')
_login_shells = {}
def get_user_login_shell(username=None):
"""
Uses OS X's OpenDirectory.framework to get the user's login shell
:param username:
A unicode string of the user to get the shell for - None for the
current user
:return:
A unicode string of the user's login shell
"""
if username is None:
username = getuser()
if not isinstance(username, str_cls):
username = username.decode('utf-8')
if not isinstance(username, str_cls):
raise TypeError('username must be a unicode string, not %s' % type_name(username))
if username not in _login_shells:
error_ref = CoreFoundation.CFErrorRef()
session = OpenDirectory.ODSessionCreate(
CoreFoundation.kCFAllocatorDefault,
None,
byref(error_ref)
)
if bool(error_ref):
raise OSError('Error!')
node = OpenDirectory.ODNodeCreateWithName(
CoreFoundation.kCFAllocatorDefault,
session,
unicode_to_cfstring("/Local/Default"),
byref(error_ref)
)
if bool(error_ref):
raise OSError('Error!')
query = OpenDirectory.ODQueryCreateWithNode(
CoreFoundation.kCFAllocatorDefault,
node,
kODRecordTypeUsers,
kODAttributeTypeRecordName,
kODMatchEqualTo,
unicode_to_cfstring(username),
kODAttributeTypeUserShell,
1,
byref(error_ref)
)
if bool(error_ref):
raise OSError('Error!')
results = OpenDirectory.ODQueryCopyResults(
query,
False,
byref(error_ref)
)
if bool(error_ref):
raise OSError('Error!')
login_shell = None
num_results = CoreFoundation.CFArrayGetCount(results)
if num_results == 1:
od_record = CoreFoundation.CFArrayGetValueAtIndex(results, 0)
attributes = OpenDirectory.ODRecordCopyValues(od_record, kODAttributeTypeUserShell, byref(error_ref))
if bool(error_ref):
raise OSError('Error!')
num_attributes = CoreFoundation.CFArrayGetCount(results)
if num_attributes == 1:
string_ref = CoreFoundation.CFArrayGetValueAtIndex(attributes, 0)
login_shell = cfstring_to_unicode(string_ref)
_login_shells[username] = login_shell
return _login_shells.get(username)
|
pytest-dev/pytest | scripts/publish-gh-release-notes.py | Python | mit | 3,049 | 0.001968 | """
Script used to publish GitHub release notes extracted from CHANGELOG.rst.
This script is meant to be executed after a successful deployment in GitHub actions.
Uses the following environment variables:
* GIT_TAG: the name of the tag of the current commit.
* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions.
Create one at:
https://github.com/settings/tokens
This token should be set in a secret in the repository, which is exposed as an
environment variable in the main.yml workflow file.
The script also requires ``pandoc`` to be previously installed in the system.
Requires Python3.6+.
"""
import os
import re
import sys
from pathlib import Path
import github3
import pypandoc
def publish_github_release(slug, token, tag_name, body):
github = github3.login(token=token)
owner, repo = slug.split("/")
repo = github.repository(owner, repo)
return repo.create_release(tag_name=tag_name, body=body)
def parse_changelog(tag_name):
p = Path(__file__).parent.parent / | "doc/en/changelog.rst"
changelog_lines = p.read_text(encoding="UTF-8").splitlines()
title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
consuming_version = False
version_lines = []
for line in changelog_lines:
m = title_regex.match(line)
if m:
# found the version we want: start | to consume lines until we find the next version title
if m.group(1) == tag_name:
consuming_version = True
# found a new version title while parsing the version we want: break out
elif consuming_version:
break
if consuming_version:
version_lines.append(line)
return "\n".join(version_lines)
def convert_rst_to_md(text):
return pypandoc.convert_text(
text, "md", format="rst", extra_args=["--wrap=preserve"]
)
def main(argv):
if len(argv) > 1:
tag_name = argv[1]
else:
tag_name = os.environ.get("GITHUB_REF")
if not tag_name:
print("tag_name not given and $GITHUB_REF not set", file=sys.stderr)
return 1
if tag_name.startswith("refs/tags/"):
tag_name = tag_name[len("refs/tags/") :]
token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
if not token:
print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
return 1
slug = os.environ.get("GITHUB_REPOSITORY")
if not slug:
print("GITHUB_REPOSITORY not set", file=sys.stderr)
return 1
rst_body = parse_changelog(tag_name)
md_body = convert_rst_to_md(rst_body)
if not publish_github_release(slug, token, tag_name, md_body):
print("Could not publish release notes:", file=sys.stderr)
print(md_body, file=sys.stderr)
return 5
print()
print(f"Release notes for {tag_name} published successfully:")
print(f"https://github.com/{slug}/releases/tag/{tag_name}")
print()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
blckshrk/Weboob | modules/vlille/test.py | Python | agpl-3.0 | 1,148 | 0.000871 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/> | .
from weboob.tools.test import BackendTest
class Vlille | Test(BackendTest):
BACKEND = 'vlille'
def test_vlille(self):
l = list(self.backend.iter_gauges())
self.assertTrue(len(l) > 0)
gauge = l[0]
s = list(self.backend.iter_sensors(gauge))
self.assertTrue(len(s) > 0)
sensor = s[0]
self.assertTrue(self.backend.get_last_measure(sensor.id) is not None)
|
gnocchixyz/gnocchi | gnocchi/tests/test_carbonara.py | Python | apache-2.0 | 38,695 | 0 | # -*- encoding: utf-8 -*-
#
# Copyright © 2014-2016 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag | reed to in writing, software
# distributed under the License is distributed on | an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import math
import operator
import fixtures
import iso8601
import numpy
import six
from gnocchi import carbonara
from gnocchi.tests import base
def datetime64(*args):
return numpy.datetime64(datetime.datetime(*args))
class TestBoundTimeSerie(base.BaseTestCase):
def test_benchmark(self):
self.useFixture(fixtures.Timeout(300, gentle=True))
carbonara.BoundTimeSerie.benchmark()
@staticmethod
def test_base():
carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
def test_block_size(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 5),
datetime64(2014, 1, 1, 12, 0, 9)],
[5, 6],
block_size=numpy.timedelta64(5, 's'))
self.assertEqual(2, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3),
(datetime64(2014, 1, 1, 12, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(2, len(ts))
def test_block_size_back_window(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6],
block_size=numpy.timedelta64(5, 's'),
back_window=1)
self.assertEqual(3, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3),
(datetime64(2014, 1, 1, 12, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(3, len(ts))
def test_block_size_unordered(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 5),
datetime64(2014, 1, 1, 12, 0, 9)],
[5, 23],
block_size=numpy.timedelta64(5, 's'))
self.assertEqual(2, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 11), 3),
(datetime64(2014, 1, 1, 12, 0, 10), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(2, len(ts))
def test_duplicate_timestamps(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 9)],
[10, 23])
self.assertEqual(2, len(ts))
self.assertEqual(10.0, ts[0][1])
self.assertEqual(23.0, ts[1][1])
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 13, 0, 10), 3),
(datetime64(2014, 1, 1, 13, 0, 11), 9),
(datetime64(2014, 1, 1, 13, 0, 11), 8),
(datetime64(2014, 1, 1, 13, 0, 11), 7),
(datetime64(2014, 1, 1, 13, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(4, len(ts))
self.assertEqual(10.0, ts[0][1])
self.assertEqual(23.0, ts[1][1])
self.assertEqual(3.0, ts[2][1])
self.assertEqual(9.0, ts[3][1])
class TestAggregatedTimeSerie(base.BaseTestCase):
def test_benchmark(self):
self.useFixture(fixtures.Timeout(300, gentle=True))
carbonara.AggregatedTimeSerie.benchmark()
def test_fetch_basic(self):
ts = carbonara.AggregatedTimeSerie.from_data(
timestamps=[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
values=[3, 5, 6],
aggregation=carbonara.Aggregation(
"mean", numpy.timedelta64(1, 's'), None))
self.assertEqual(
[(datetime64(2014, 1, 1, 12), 3),
(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch()))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=datetime64(2014, 1, 1, 12, 0, 4))))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=numpy.datetime64(iso8601.parse_date(
"2014-01-01 12:00:04")))))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=numpy.datetime64(iso8601.parse_date(
"2014-01-01 13:00:04+01:00")))))
def test_before_epoch(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(1950, 1, 1, 12),
datetime64(2014, 1, 1, 12),
datetime64(2014, 1, 1, 12)],
[3, 5, 6])
self.assertRaises(carbonara.BeforeEpochError,
ts.group_serie, 60)
@staticmethod
def _resample(ts, sampling, agg, derived=False):
aggregation = carbonara.Aggregation(agg, sampling, None)
grouped = ts.group_serie(sampling)
if derived:
grouped = grouped.derived()
return carbonara.AggregatedTimeSerie.from_grouped_serie(
grouped, aggregation)
def test_derived_mean(self):
ts = carbonara.TimeSerie.from_data(
[datetime.datetime(2014, 1, 1, 12, 0, 0),
datetime.datetime(2014, 1, 1, 12, 0, 4),
datetime.datetime(2014, 1, 1, 12, 1, 2),
datetime.datetime(2014, 1, 1, 12, 1, 14),
datetime.datetime(2014, 1, 1, 12, 1, 24),
datetime.datetime(2014, 1, 1, 12, 2, 4),
datetime.datetime(2014, 1, 1, 12, 2, 35),
datetime.datetime(2014, 1, 1, 12, 2, 42),
datetime.datetime(2014, 1, 1, 12, 3, 2),
datetime.datetime(2014, 1, 1, 12, 3, 22), # Counter reset
datetime.datetime(2014, 1, 1, 12, 3, 42),
datetime.datetime(2014, 1, 1, 12, 4, 9)],
[50, 55, 65, 66, 70, 83, 92, 103, 105, 5, 7, 23])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean',
derived=True)
self.assertEqual(5, len(ts))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 0), 5),
(datetime64(2014, 1, 1, 12, 1, 0), 5),
(datetime64(2014, 1, 1, 12, 2, 0), 11),
(datetime64(2014, 1, 1, 12, 3, 0), -32),
(datetime64(2014, 1, 1, 12, 4, 0), 16)],
list(ts.fetch(
from_timestamp=datetime64(2014, 1, 1, 12))))
def test_derived_hole(self):
ts = carbonara.TimeSerie.from_data(
[datetime.datetime(2014, 1, 1, 12, 0, 0),
datetime.datetime(2014, 1, 1, 12, 0, 4),
datetime.datetime(2014, 1, 1, 12, 1, 2),
datetime.datetime(2014, 1, 1, 12, 1, 14),
datetime.datetime(2014, 1, 1, 12, 1, 24),
datetime.datetime(2014, 1, 1, 12, 3, 2),
datetime.datetime(2014, 1, 1, 12, 3, 22),
datetime.datetime(2014, 1, 1, 12, 3, 42),
datetime.datetime(2014, 1, 1, 12, 4, 9)],
[50, 55, |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_op_function_generator.py | Python | apache-2.0 | 3,921 | 0.000255 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.dygraph.jit import TracedLayer
import numpy as np
from paddle import _C_ops
class TestTracedLayer(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(TestTracedLayer, self).__init__(name_scope)
def forward(self, input):
return _C_ops.relu(input)
class TestVariable(unittest.TestCase):
def setUp(self):
self.shape = [512, 768]
self.dtype = np.float32
self.array = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
def test_elementwise_add(self):
with fluid.dygraph.guard():
a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
x = fluid.dygraph.to_variable(a)
y = fluid.dygraph.to_variable(b)
x.stop_gradient | = False
res1 = layers.elementwise_add(x, y)
res2 = _C_ops.elementwise_add(x, y)
self.assertTrue(np.array_eq | ual(res1.numpy(), res2.numpy()))
def test_elementwise_mul(self):
with fluid.dygraph.guard():
a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
x = fluid.dygraph.to_variable(a)
y = fluid.dygraph.to_variable(b)
res1 = layers.elementwise_mul(x, y)
res2 = _C_ops.elementwise_mul(x, y)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
def test_relu(self):
with fluid.dygraph.guard():
a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
x = fluid.dygraph.to_variable(a)
res1 = layers.relu(x)
res2 = _C_ops.relu(x)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
def test_trace_backward(self):
with fluid.dygraph.guard():
a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
x = fluid.dygraph.to_variable(a)
y = fluid.dygraph.to_variable(b)
x.stop_gradient = False
y.stop_gradient = False
loss = _C_ops.elementwise_mul(x, y)
loss.backward()
x_grad = x.gradient()
y_grad = y.gradient()
self.assertTrue(np.array_equal(x_grad, loss.gradient() * b))
self.assertTrue(np.array_equal(y_grad, loss.gradient() * a))
def test_traced_layer(self):
with fluid.dygraph.guard():
layer = TestTracedLayer("test_traced_layer")
a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
x = fluid.dygraph.to_variable(a)
res_dygraph, static_layer = TracedLayer.trace(
layer, inputs=x) # dygraph out
res_static_graph = static_layer([x])[0]
self.assertTrue(
np.array_equal(res_dygraph.numpy(), res_static_graph))
if __name__ == '__main__':
unittest.main()
|
Micronaet/micronaet-sql | base_mssql_accounting/__openerp__.py | Python | agpl-3.0 | 1,509 | 0.001988 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################# | ##############################################
{
'name': 'Base MS SQL import accounting',
'version': '0.0.1',
'category': 'Generic Modules/Customization',
'description': """
MS SQL acco | unting ETL module:
provide query for open standar import tables
""",
'author': 'Micronaet s.r.l.',
'website': 'http://www.micronaet.it',
'depends': [
'base',
'base_mssql',
'product',
],
'init_xml': [],
'data': ['security/ir.model.access.csv', ],
'demo_xml': [],
'active': False,
'installable': True,
}
|
kapy2010/treeherder | tests/webapp/api/test_job_log_url_api.py | Python | mpl-2.0 | 1,524 | 0 | from django.core.urlresolvers import reverse
from tests.test_utils import create_generic_job
from treeherder.model.models import JobLog
def test_get_job_log_urls(test_repository, result_set_stored,
failure_classifications,
generic_reference_data, webapp):
job1 = create_generic_job('1234', test_repository, 1, 1,
generic_reference_data)
job2 = create_generic_job('5678', test_repository, 1, 2,
generic_reference_data)
JobLog.objects.create(job=job1,
name='test_log_1',
url='http://google.com',
status=JobLog.PENDING)
JobLog.objects.create(job=job1,
name='test_log_2',
url='http://yahoo.com',
status=JobLog.PARSED)
JobLog.objects.create(job=job2,
name='test_log_3',
url= | 'http://yahoo.com',
status=JobLog.PARSED)
resp = webapp.get(reverse('job-log-url-list',
kwargs={"project": test_repository.na | me}) +
'?job_id=1')
assert resp.status_int == 200
assert len(resp.json) == 2
resp = webapp.get(reverse('job-log-url-list',
kwargs={"project": test_repository.name}) +
'?job_id=1&job_id=2')
assert resp.status_int == 200
assert len(resp.json) == 3
|
otlet/JestemGraczem.pl | stream/url_converters.py | Python | agpl-3.0 | 584 | 0.003425 | # path('mixer/(?P<username>v+)/', views.mixer, name='stream.mixer'),
# p | ath('twitch/(?P<username>[a-zA-Z0-9_]+)/', views.twitch, name='stream.twitch'),
# path('live/', cache_page(60 * 1)(views.stream_api), name='stream.live'),
# path('live/esport', cache_page(60 * 10)(views.esport_stream_api), name='stream.live.esport'),
# path('(?P<username>[a-zA-Z0-9_]+)/', views.streamer, name='stream.s
class Username:
regex = '[a-zA-Z0-9_]'
def to_python(self, value):
| return int(value)
def to_url(self, value):
return str(value)
|
huggingface/transformers | src/transformers/models/deberta/tokenization_deberta.py | Python | apache-2.0 | 10,189 | 0.004318 | # coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model DeBERTa."""
from typing import List, Optional
from ...tokenization_utils import AddedToken
from ...utils import logging
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/vocab.json",
"microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/vocab.json",
"microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/vocab.json",
"microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/vocab.json",
"microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/vocab.json",
"microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/vocab.json",
},
"merges_file": {
"microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/merges.txt",
"microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/merges.txt",
"microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/merges.txt",
"microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/merges.txt",
"microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/merges.txt",
"microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/deberta-base": 512,
"microsoft/deberta-large": 512,
"microsoft/deberta-xlarge": 512,
"microsoft/deberta-base-mnli": 512,
"microsoft/deberta-large-mnli": 512,
"microsoft/deberta-xlarge-mnli": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/deberta-base": {"do_lower_case": False},
"microsoft/deberta-large": {"do_lower_case": False},
}
class DebertaTokenizer(GPT2Tokenizer):
r"""
Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="[CLS]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        cls_token="[CLS]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        mask_token="[MASK]",
        add_prefix_space=False,
        **kwargs
    ):
        """Normalize special tokens to ``AddedToken`` objects and defer to the GPT-2 tokenizer."""
        # Plain-string special tokens are wrapped without stripping the
        # surrounding spaces so they are matched as exact symbols.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`L | ist[int]`, *optional*):
Optional second list of IDs for s | equence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list |
mfraezz/osf.io | osf/management/commands/addon_deleted_date.py | Python | apache-2.0 | 3,358 | 0.001787 | import datetime
import logging
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from framework.celery_tasks import app as celery_app
logger = logging.getLogger(__name__)
# Addon settings tables whose soft-delete flag (``is_deleted``) needs its new
# ``deleted`` timestamp column backfilled from ``modified``.
TABLES_TO_POPULATE_WITH_MODIFIED = [
    'addons_zotero_usersettings',
    'addons_dropbox_usersettings',
    'addons_dropbox_nodesettings',
    'addons_figshare_nodesettings',
    'addons_figshare_usersettings',
    'addons_forward_nodesettings',
    'addons_github_nodesettings',
    'addons_github_usersettings',
    'addons_gitlab_nodesettings',
    'addons_gitlab_usersettings',
    'addons_googledrive_nodesettings',
    'addons_googledrive_usersettings',
    'addons_mendeley_nodesettings',
    'addons_mendeley_usersettings',
    'addons_onedrive_nodesettings',
    'addons_onedrive_usersettings',
    'addons_osfstorage_nodesettings',
    'addons_osfstorage_usersettings',
    'addons_bitbucket_nodesettings',
    'addons_bitbucket_usersettings',
    'addons_owncloud_nodesettings',
    'addons_box_nodesettings',
    'addons_owncloud_usersettings',
    'addons_box_usersettings',
    # Restored from garbled source lines ('addons_dataverse_node | settings').
    'addons_dataverse_nodesettings',
    'addons_dataverse_usersettings',
    'addons_s3_nodesettings',
    'addons_s3_usersettings',
    'addons_twofactor_usersettings',
    'addons_wiki_nodesettings',
    'addons_zotero_nodesettings'
]

# Batched UPDATE: format slots are (table, table, page_size); RETURNING id lets
# the caller detect whether any rows were touched in this batch.
UPDATE_DELETED_WITH_MODIFIED = """UPDATE {} SET deleted=modified
WHERE id IN (SELECT id FROM {} WHERE is_deleted AND deleted IS NULL LIMIT {}) RETURNING id;"""
@celery_app.task(name='management.commands.addon_deleted_date')
def populate_deleted(dry_run=False, page_size=1000):
    """Backfill the ``deleted`` timestamp for all addon settings tables.

    Executes one batched UPDATE per table inside a single transaction.

    :param bool dry_run: when True, abort the transaction on purpose so
        nothing is committed.
    :param int page_size: maximum number of rows each UPDATE touches.
    """
    with transaction.atomic():
        for table in TABLES_TO_POPULATE_WITH_MODIFIED:
            run_statements(UPDATE_DELETED_WITH_MODIFIED, page_size, table)
        # Raising inside the atomic block forces a rollback of everything above.
        if dry_run:
            raise RuntimeError('Dry Run -- Transaction rolled back')
def run_statements(statement, page_size, table):
    """Run one batched backfill *statement* against *table*.

    Logs when the batch returned rows, i.e. another pass is still needed.
    """
    logger.info('Populating deleted column in table {}'.format(table))
    sql = statement.format(table, table, page_size)
    with connection.cursor() as cursor:
        cursor.execute(sql)
        if cursor.fetchall():
            logger.info('Table {} still has rows to populate'.format(table))
class Command(BaseCommand):
    """Management-command wrapper around :func:`populate_deleted`."""

    help = '''Populates new deleted field for various models. Ensure you have run migrations
    before running this script.'''

    def add_arguments(self, parser):
        """Register the command-line flags accepted by this command."""
        parser.add_argument(
            '--dry_run',
            type=bool,
            default=False,
            help='Run queries but do not write files',
        )
        parser.add_argument(
            '--page_size',
            type=int,
            default=1000,
            help='How many rows to process at a time',
        )

    def handle(self, *args, **options):
        """Entry point: time the backfill and honour --dry_run / --page_size."""
        started = datetime.datetime.now()
        logger.info('Script started time: {}'.format(started))
        logger.debug(options)
        if options['dry_run']:
            logger.info('DRY RUN')
        populate_deleted(options['dry_run'], options['page_size'])
        finished = datetime.datetime.now()
        logger.info('Script finished time: {}'.format(finished))
        logger.info('Run time {}'.format(finished - started))
|
spookylukey/django-nyt | setup.py | Python | apache-2.0 | 2,187 | 0.000914 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
import os
from django_nyt import VERSION
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
    """Return *fname* resolved relative to this file's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, fname)


def read(fname):
    """Return the text content of *fname* looked up next to this file.

    Uses a ``with`` block so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with open(get_path(fname)) as handle:
        return handle.read()
packages = find_packages()
try:
import pypandoc
long_description = pypandoc.convert(get_path('README.md'), 'rst')
long_description = long_description.split(
'<!---Illegal PyPi RST data -->')[0]
f = open(get_path('README.rst'), 'w')
f.write(long_description)
f.close()
print("Successfully converted README.md to README.rst")
except (IOError, ImportError):
# No long description... but nevermind, it's only for PyPi uploads.
long_description = ""
setup(
name="django-nyt",
version=VERSION,
author="Benjamin Bach",
author_email="benjamin@overtag.dk",
url="https://github.com/benjaoming/django-nyt",
description="A pluggable notification system written for the Django framework.",
license="Apache License 2.0",
keywords="django notification system",
packages=find_packages(exclude=["testproject", "testproject.*"]),
# long_description=long_description,
zip_safe=False,
install_requires=read('requirements.txt').split("\n"),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
include_package_data=True,
)
|
bptripp/grasp-convnet | py/visualize.py | Python | mit | 741 | 0.012146 | __author__ = 'bptripp'
import numpy as np
import matplotlib.pyplot as plt
def plot_kernels(weights):
    """Show every first-channel kernel of *weights* on a square subplot grid.

    Assumes weights has shape (n_kernels, channels, h, w) -- only channel 0
    is displayed; TODO confirm against callers.
    """
    print(weights.shape)
    side = int(np.ceil(np.sqrt(weights.shape[0])))
    print(side)
    plt.figure()
    for i in range(weights.shape[0]):
        # subplot indices are 1-based; the original passed i starting at 0,
        # which matplotlib rejects.
        plt.subplot(side, side, i + 1)
        plt.imshow(weights[i, 0, :, :])
        plt.axis('off')
    plt.tight_layout()
    plt.show()
def plot_mesh(matrix):
    """Plot a 2D array as a 3D wireframe.

    Assumes *matrix* is square: both axes use matrix.shape[0] -- TODO confirm.
    """
    # Restored from garbled source ('import mat | plotlib', 'proj | ection').
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    im_width = matrix.shape[0]
    fig = plt.figure()
    X = np.arange(0, im_width)
    Y = np.arange(0, im_width)
    X, Y = np.meshgrid(X, Y)
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.plot_wireframe(X, Y, matrix)
    plt.show()
|
dvu4/Data-Wrangling-with-MongoDB | Lesson_5_Analyzing_Data/14-Using_push/push.py | Python | agpl-3.0 | 2,726 | 0.012839 | #!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
    """Connect to the local MongoDB server and return database *db_name*."""
    from pymongo import MongoClient
    return MongoClient('localhost:27017')[db_name]
def make_pipeline():
    """Build the aggregation pipeline for the exercise.

    Groups tweets by user screen name, accumulating every tweet text with
    $push and counting tweets with $sum, then keeps the 5 most active users.
    (Rebuilt cleanly: the original contained a garbled ``|`` token and a dead
    commented-out pipeline literal.)
    """
    pipeline = [
        {
            "$group": {
                "_id": "$user.screen_name",
                "tweet_texts": {"$push": "$text"},
                "count": {"$sum": 1},
            }
        },
        {"$sort": {"count": -1}},
        {"$limit": 5},
    ]
    return pipeline
def aggregate(db, pipeline):
    """Run *pipeline* against the ``tweets`` collection of *db*."""
    return db.tweets.aggregate(pipeline)
if __name__ == '__main__':
    # Smoke-test the pipeline against a local 'twitter' database.
    db = get_db('twitter')
    pipeline = make_pipeline()
    result = aggregate(db, pipeline)
    # NOTE(review): indexing result["result"] matches the legacy pymongo 2.x
    # aggregate() return shape -- verify against the installed driver version.
    assert len(result["result"]) == 5
    assert result["result"][0]["count"] > result["result"][4]["count"]
    import pprint
    pprint.pprint(result)
|
ungarj/mapchete | mapchete/io/_geometry_operations.py | Python | mit | 10,253 | 0.000975 | from fiona.transform import transform_geom
import logging
import pyproj
from rasterio.crs import CRS
from shapely.errors import TopologicalError
from shapely.geometry import (
box,
GeometryCollection,
shape,
mapping,
MultiPoint,
MultiLineString,
MultiPolygon,
Polygon,
LinearRing,
LineString,
base,
)
from shapely.validation import explain_validity
from mapchete.errors import GeometryTypeError
from mapchete.validate import validate_crs
logger = logging.getLogger(__name__)
CRS_BOUNDS = {
# http://spatialreference.org/ref/epsg/wgs-84/
"epsg:4326": (-180.0, -90.0, 180.0, 90.0),
# unknown source
"epsg:3857": (-180.0, -85.0511, 180.0, 85.0511),
# http://spatialreference.org/ref/epsg/3035/
"epsg:3035": (-10.6700, 34.5000, 31.5500, 71.0500),
}
def reproject_geometry(
geometry,
src_crs=None,
dst_crs=None,
clip_to_crs_bounds=True,
error_on_clip=False,
segmentize_on_clip=False,
segmentize=False,
segmentize_fraction=100,
validity_check=True,
antimeridian_cutting=False,
retry_with_clip=True,
):
"""
Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
clip_to_crs_bounds : bool
Always clip geometries to CRS bounds. (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
"""
logger.debug("reproject geometry from %s to %s", src_crs, dst_crs)
src_crs = validate_crs(src_crs)
dst_crs = validate_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting,
)
)
return _repair(out_geom) if validity_check else out_geom
def _segmentize_value(geometry, segmentize_fraction):
height = geometry.bounds[3] - geometry.bounds[1]
width = geometry.bounds[2] - geometry.bounds[0]
return min([height, width]) / segmentize_fraction
# return repaired geometry if no reprojection needed
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
# geometry needs to be clipped to its CRS bounds
elif (
clip_to_crs_bounds
and dst_crs.is_epsg_code
and dst_crs.get("init")
!= "epsg:4326" # and is not WGS84 (does not need clipping)
and (
dst_crs.get("init") in CRS_BOUNDS
or pyproj.CRS(dst_crs.to_epsg()).area_of_use.bounds
)
):
wgs84_crs = CRS().from_epsg(4326)
# get dst_crs boundaries
crs_bbox = box(
*CRS_BOUNDS.get(
dst_crs.get("init"), pyproj.CRS(dst_crs.to_epsg()).area_of_use.bounds
)
)
# reproject geometry to WGS84
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
# raise error if geometry has to be clipped
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
clipped = crs_bbox.intersection(geometry_4326)
# segmentize clipped geometry using one 100th of with or height depending on
# which is shorter
if segmentize_on_clip or segmentize:
clipped = segmentize_geometry(
clipped, _segmentize_value(clipped, segmentize_fraction)
)
# clip geometry dst_crs boundaries and return
return _reproject_geom(clipped, wgs84_crs, dst_crs)
# return without clipping if destination CRS does not have defined bounds
else:
try:
if segmentize:
return _reproject_geom(
segmentize_geometry(
geometry, _segmentize_value(geometry, segmentize_fraction)
),
src_crs,
dst_crs,
)
else:
return _reproject_geom(geometry, src_crs, dst_crs)
except TopologicalError: # pragma: no cover
raise
except ValueError as exc: # pragma: no cover
if retry_with_clip:
logger.error(
"error when transforming %s from %s to %s: %s, trying to use CRS bounds clip",
geometry,
src_crs,
dst_crs,
exc,
)
return reproject_geometry(
geometry,
src_crs=src_crs,
dst_crs=dst_crs,
clip_to_crs_bounds=True,
error_on_clip=error_on_clip,
segmentize_on_clip=segmentize_on_clip,
segmentize=segmentize,
segmentize_fraction=segmentize_fraction,
validity_check=validity_check,
antimeridian_cutting=antimeridian_cutting,
retry_with_clip=False,
)
else:
raise
def _repair(geom):
    """Attempt to fix an invalid (multi)polygon via the ``buffer(0)`` trick.

    Raises ``TopologicalError`` when the geometry is still invalid afterwards.
    """
    if geom.geom_type in ["Polygon", "MultiPolygon"]:
        repaired = geom.buffer(0)
    else:
        repaired = geom
    if not repaired.is_valid:
        raise TopologicalError(
            "geometry is invalid (%s) and cannot be repaired"
            % explain_validity(repaired)
        )
    return repaired
def segmentize_geometry(geometry, segmentize_value):
    """
    Segmentize Polygon outer ring by segmentize value.

    Only the Polygon geometry type is supported. Interior rings (holes) are
    discarded: the result is rebuilt from the exterior ring alone.

    Parameters
    ----------
    geometry : ``shapely.geometry``
        Input polygon whose exterior ring gets densified.
    segmentize_value : float
        Target spacing between interpolated vertices along each edge.

    Returns
    -------
    geometry : ``shapely.geometry``
        New Polygon with extra vertices interpolated along each edge.
    """
    if geometry.geom_type != "Polygon":
        raise TypeError("segmentize geometry type must be Polygon")
    # Walk the exterior ring edge by edge and insert evenly spaced points.
    return Polygon(
        LinearRing(
            [
                p
                # pick polygon linestrings
                for l in map(
                    lambda x: LineString([x[0], x[1]]),
                    zip(geometry.exterior.coords[:-1], geometry.exterior.coords[1:]),
                )
                # interpolate additional points in between and don't forget end point
                for p in [
                    l.interpolate(segmentize_value * i).coords[0]
                    for i in range(int(l.length / segmentize_value))
                ]
                + [l.coords[1]]
            ]
        )
    )
def to_shape(geom):
    """
    Convert geometry to shapely geometry if necessary.

    Non-dict inputs are assumed to already be shapely geometries and are
    returned unchanged.

    Parameters
    ----------
    geom : shapely geometry or GeoJSON mapping

    Returns
    -------
    shapely geometry
    """
    if isinstance(geom, dict):
        return shape(geom)
    return geom
def multipart_to_singleparts(geom):
    """
    Yield single part geometries if geom is multipart, otherwise yield geom.

    Inputs that are not shapely geometries yield nothing.

    Parameters
    ----------
    geom : shapely geometry

    Returns
    -------
    shapely single part geometries
    """
    if not isinstance(geom, base.BaseGeometry):
        return
    if hasattr(geom, "geoms"):
        yield from geom.geoms
    else:
        yield geom
def clean_geometry_type(
geometry, target_type, allow_multipart=True, raise_exception=True
):
"""
Return geometry of |
nekia/incubator-superset-dev | superset/connectors/sqla/views.py | Python | apache-2.0 | 11,596 | 0.000604 | """Views used by the SqlAlchemy connector"""
import logging
from past.builtins import basestring
from flask import Markup, flash, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
import sqlalchemy as sa
from flask_babel import lazy_gettext as _
from flask_babel import gettext as __
from superset import appbuilder, db, utils, security, sm
from superset.utils import has_access
from superset.connectors.base.views import DatasourceModelView
from superset.views.base import (
SupersetModelView, ListWidgetWithCheckboxes, DeleteMixin, DatasourceFilter,
get_datasource_exist_error_mgs,
)
from . import models
import superset.models.core as mcore
log_this = mcore.Log.log_this
class TableColumnInlineView(CompactCRUDMixin, SupersetModelView): # noqa
datamodel = SQLAInterface(models.TableColumn)
list_title = _('List Columns')
show_title = _('Show Column')
add_title = _('Add Column')
edit_title = _('Edit Column')
can_delete = False
list_widget = ListWidgetWithCheckboxes
edit_columns = [
'column_name', 'verbose_name', 'description',
'type', 'groupby', 'filterable',
'table', 'count_distinct', 'sum', 'min', 'max', 'expression',
'is_dttm', 'python_date_format', 'database_expression']
add_columns = edit_columns
list_columns = [
'column_name', 'verbose_name', 'type', 'groupby', 'filterable', 'count_distinct',
'sum', 'min', 'max', 'is_dttm']
page_size = 500
description_columns = {
'is_dttm': _(
"Whether to make this column available as a "
"[Time Granularity] option, column has to be DATETIME or "
"DATETIME-like"),
'filterable': _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."),
'type': _(
"The data type that was inferred by the database. "
"It may be necessary to input a type manually for "
"expression-defined columns in some cases. In most case "
"users should not need to alter this."),
'expression': utils.markdown(
"a valid SQL expression as supported by the underlying backend. "
"Example: `substr(name, 1, 1)`", True),
'python_date_format': utils.markdown(Markup(
"The pattern of timestamp format, use "
"<a href='https://docs.python.org/2/library/"
"datetime.html#strftime-strptime-behavior'>"
"python datetime string pattern</a> "
"expression. If time is stored in epoch "
"format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` "
"below empty if timestamp is stored in "
"String or Integer(epoch) type"), True),
'database_expression': utils.markdown(
"The database expression to cast internal datetime "
"constants to database date/timestamp type according to the DBAPI. "
"The expression should follow the pattern of "
"%Y-%m-%d %H:%M:%S, based on different DBAPI. "
"The string should be a python string formatter \n"
"`Ex: TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')` for Oracle"
"Superset uses default expression based on DB URI if this "
"field is blank.", True),
}
label_columns = {
'column_name': _("Column"),
'verbose_name': _("Verbose Name"),
'description': _("Description"),
'groupby': _("Groupable"),
'filterable': _("Filterable"),
'table': _("Table"),
'count_distinct': _("Count Distinct"),
'sum': _("Sum"),
'min': _("Min"),
'max': _("Max"),
'expression': _("Expression"),
'is_dttm': _("Is temporal"),
'python_date_format': _("Datetime Format"),
'database_expression': _("Database Expression"),
'type': _('Type'),
}
appbuilder.add_view_no_menu(TableColumnInlineView)
class SqlMetricInlineView(CompactCRUDMixin, SupersetModelView): # noqa
datamodel = SQLAInterface(models.SqlMetric)
list_title = _('List Metrics')
show_title = _('Show Metric')
add_title = _('Add Metric')
edit_title = _('Edit Metric')
list_columns = ['metric_name', 'verbose_name', 'metric_type']
edit_columns = [
'metric_name', 'description', 'verbose_name', 'metric_type',
'expression', 'table', 'd3format', 'is_restricted']
description_columns = {
'expression': utils.markdown(
"a valid SQL expre | ssion as supported by the underlying backend. "
"Example: `count(DISTINCT userid)`", True),
'is_restricted': _("Whether the access to this metric is restricted "
"to certain roles. Only roles with the permission "
"'metric access on XXX (the name of this metric)' "
"are allowed to access this metric"),
| 'd3format': utils.markdown(
"d3 formatting string as defined [here]"
"(https://github.com/d3/d3-format/blob/master/README.md#format). "
"For instance, this default formatting applies in the Table "
"visualization and allow for different metric to use different "
"formats", True
),
}
add_columns = edit_columns
page_size = 500
label_columns = {
'metric_name': _("Metric"),
'description': _("Description"),
'verbose_name': _("Verbose Name"),
'metric_type': _("Type"),
'expression': _("SQL Expression"),
'table': _("Table"),
'd3format': _("D3 Format"),
'is_restricted': _('Is Restricted')
}
def post_add(self, metric):
if metric.is_restricted:
security.merge_perm(sm, 'metric_access', metric.get_perm())
def post_update(self, metric):
if metric.is_restricted:
security.merge_perm(sm, 'metric_access', metric.get_perm())
appbuilder.add_view_no_menu(SqlMetricInlineView)
class TableModelView(DatasourceModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.SqlaTable)
list_title = _('List Tables')
show_title = _('Show Table')
add_title = _('Add Table')
edit_title = _('Edit Table')
list_columns = [
'link2', 'database',
'changed_by_', 'modified']
order_columns = [
'link2', 'database', 'changed_on_']
add_columns = ['database', 'schema', 'table_name']
edit_columns = [
'table_name', 'sql', 'filter_select_enabled', 'slices',
'fetch_values_predicate', 'database', 'schema',
'description', 'owner',
'main_dttm_col', 'default_endpoint', 'offset', 'cache_timeout']
show_columns = edit_columns + ['perm']
related_views = [TableColumnInlineView, SqlMetricInlineView]
base_order = ('changed_on', 'desc')
search_columns = (
'database', 'schema', 'table_name', 'owner',
)
description_columns = {
'slices': _(
"The list of slices associated with this table. By "
"altering this datasource, you may change how these associated "
"slices behave. "
"Also note that slices need to point to a datasource, so "
"this form will fail at saving if removing slices from a "
"datasource. If you want to change the datasource for a slice, "
"overwrite the slice from the 'explore view'"),
'offset': _("Timezone offset (in hours) for this datasource"),
'table_name': _(
"Name of the table that exists in the source database"),
'schema': _(
"Schema, as used only in some databases like Postgres, Redshift "
"and DB2"),
'description': Markup(
"Supports <a href='https://daringfireball.net/projects/markdown/'>"
"markdown</a>"),
'sql': _(
"This fields acts a Superset view, meaning that Superset will "
"run a query against this string as a subquery."
),
'fetch_values_predicate': _(
"Predicate ap |
gohackfelipe/luiza | luiza.py | Python | mit | 1,285 | 0.007004 | import asyncio
import sys
import config
import sender
import receiver
print(sys.argv)
async def receiveMessageFromSerial():
    """Placeholder coroutine standing in for a real serial-port read."""
    message = "Message"
    return message
def help():
    """Print usage information and exit the process via quit().

    NOTE: shadows the ``help`` builtin; kept because the script below calls
    it by this name. Restored from garbled source ('p | rint', 'from | Luiza').
    """
    print('Luiza 1.0 - (luiza.santost@hotmail.com)')
    print('Usage: python3 app.py [Options][Message][source][dest]')
    print('')
    print('SENDING MESSAGE')
    print(' You will send a message from source to dest. 3 containing the text "Sending Message from Luiza"')
    print(' python3 app.py --send "Sending Message from Luiza" 1 3')
    print('RECEIVING MESSAGE')
    print(' You will receive a message using the address 3')
    print(' python3 app.py --read 3')
    quit()
# No arguments at all: show usage (help() exits the process via quit()).
if len(sys.argv) == 1:
    help()

if sys.argv[1] == '--send':
    # --send needs <message> <source> <dest>: argv indices 2, 3 and 4.
    # The original only checked ``len < 3`` and then crashed with IndexError
    # on sys.argv[3]/sys.argv[4].
    if len(sys.argv) < 5:
        print('ERR: An error occurred. The command was Invalid.')
        help()
    else:
        if len(sys.argv[2]) < 10:
            # The check rejects SHORT messages (the documented 26-char example
            # is valid), so the old "must be less than 10" text was wrong.
            print('ERR: Message size must be at least 10.')
            quit()
        sender.message(sys.argv[3], sys.argv[4], sys.argv[2])

if sys.argv[1] == '--read':
    if len(sys.argv) < 3:
        print('ERR: An error occurred. The command was Invalid. Dest to read not informed !!')
        help()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(receiver.start())
    loop.close()
googleapis/python-aiplatform | .sample_configs/param_handlers/predict_sample.py | Python | apache-2.0 | 1,015 | 0.003941 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_endpoint(endpoint: str) -> str:
    """Pass the endpoint resource name through unchanged (sample symmetry)."""
    return endpoint
def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]:
    """Wrap a single instance dict into the one-element list the API expects.

    Restored from garbled source ('in | stance = ...').
    """
    instance = to_protobuf_value(instance_dict)
    instances = [instance]
    return instances
def make_parameters() -> google.protobuf.struct_pb2.Value:
    """Build an empty parameters Value for the predict request."""
    empty: dict = {}
    return to_protobuf_value(empty)
|
SunDwarf/KCProxy | config_gunicorn.py | Python | mit | 712 | 0.001404 | import multiprocessing
# Address and port gunicorn listens on.
bind = "0.0.0.0:7869"

# Rule of thumb from the gunicorn docs: (2 x num_cores) + 1 workers.
workers = multiprocessing.cpu_count() * 2 + 1

# Choose one as appropriate.
# worker_class = "sync"
# worker_class = "eventlet"
# worker_class = "gevent"
# worker_class = "tornado"
# Restored from garbled source ('| worker_class = "gthread"').
worker_class = "gthread"

# Change to true to enable daemonising.
daemon = False

# Change to specify the user gunicorn will run as.
# user = "nobody"

# Change to specify the group gunicorn will run as.
# group = "nogroup"

# SSL settings.
# If you are running the server without a reverse proxy (nginx or apache), this is highly recommended.
# keyfile = "ssl/server.key"
# certfile = "ssl/server.crt"
def when_ready(server):
    """Gunicorn server hook: log the listen address once workers are ready.

    Restored from garbled source ('| def when_ready').
    """
    print("Server ready on address {}.".format(bind))
|
kineticadb/kinetica-api-python | gpudb/packages/avro/avro_py3/tool.py | Python | mit | 5,599 | 0.012859 | #!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from future import standard_library
standard_library.install_aliases()
import sys
import urllib
#from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from http.server import HTTPServer, BaseHTTPRequestHandler
from avro import io
from avro import datafile
from avro import protocol
from avro import ipc
class GenericResponder(ipc.Responder):
    """Responder that answers a single named message with a fixed datum,
    then asks the server to shut down."""

    def __init__(self, proto, msg, datum):
        # Py3 fix: file() no longer exists; use open() and close the handle.
        with open(proto, 'r') as f:
            proto_json = f.read()
        ipc.Responder.__init__(self, protocol.Parse(proto_json))
        self.msg = msg
        self.datum = datum

    def invoke(self, message, request):
        """Return self.datum for the expected message (None otherwise)."""
        if message.name == self.msg:
            # Py3 fix: `print >> sys.stderr` is not valid in Python 3.
            print("Message: %s Datum: %s" % (message.name, self.datum),
                  file=sys.stderr)
            # server will shut down after processing a single Avro request
            global server_should_shutdown
            server_should_shutdown = True
            return self.datum
class GenericHandler(BaseHTTPRequestHandler):
    """HTTP handler that frames/unframes Avro RPC messages over POST."""

    def do_POST(self):
        # `responder` is a module-level global set up by run_server().
        self.responder = responder
        call_request_reader = ipc.FramedReader(self.rfile)
        call_request = call_request_reader.read_framed_message()
        resp_body = self.responder.respond(call_request)
        self.send_response(200)
        self.send_header('Content-Type', 'avro/binary')
        self.end_headers()
        resp_writer = ipc.FramedWriter(self.wfile)
        resp_writer.write_framed_message(resp_body)
        if server_should_shutdown:
            # Py3 fix: `print >> sys.stderr` is not valid in Python 3.
            print("Shutting down server.", file=sys.stderr)
            self.server.force_stop()
class StoppableHTTPServer(HTTPServer):
    """HTTPServer that can be stopped from within a request handler.

    Pre-dates HTTPServer.shutdown (added in Python 2.6), hence the
    hand-rolled stop flag.
    """
    stopped = False
    allow_reuse_address = True

    def __init__(self, *args, **kw):
        HTTPServer.__init__(self, *args, **kw)
        self.allow_reuse_address = True

    def serve_forever(self):
        # Handle one request at a time until force_stop() flips the flag.
        while not self.stopped:
            self.handle_request()

    def force_stop(self):
        self.server_close()
        self.stopped = True
        # Returns immediately now that self.stopped is True.
        self.serve_forever()
def run_server(uri, proto, msg, datum):
    """Serve Avro RPC requests at `uri`, answering `msg` with `datum`.

    Blocks until the responder flags shutdown after the first request.
    """
    url_obj = urllib.parse.urlparse(uri)
    server_addr = (url_obj.hostname, url_obj.port)
    global responder
    global server_should_shutdown
    server_should_shutdown = False
    responder = GenericResponder(proto, msg, datum)
    server = StoppableHTTPServer(server_addr, GenericHandler)
    print("Port: %s" % server.server_port)
    sys.stdout.flush()
    server.allow_reuse_address = True
    # Py3 fix: `print >> sys.stderr` is not valid in Python 3.
    print("Starting server.", file=sys.stderr)
    server.serve_forever()
def send_message(uri, proto, msg, datum):
    """Send one Avro RPC message `msg` with `datum` to `uri`; print the reply."""
    url_obj = urllib.parse.urlparse(uri)
    client = ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
    # Py3 fix: file() no longer exists; use open() and close the handle.
    with open(proto, 'r') as f:
        proto_json = f.read()
    requestor = ipc.Requestor(protocol.Parse(proto_json), client)
    print(requestor.request(msg, datum))
def file_or_stdin(f):
    """Return sys.stdin for the conventional "-" argument, else an open file.

    Py3 fix: the Python 2 builtin file() no longer exists; use open().
    The caller owns (and should close) the returned handle.
    """
    if f == "-":
        return sys.stdin
    return open(f)
def _read_datum_arg(args, usage_str):
    """Parse the optional trailing (-data d | -file f) pair.

    Returns (ok, datum); ok is False when the arguments were invalid
    (an explanation has already been printed).
    """
    if len(args) <= 5:
        return True, None
    if args[5] == "-file":
        # Read the first datum from the Avro container file.
        with open(args[6], 'rb') as reader:
            dfr = datafile.DataFileReader(reader, io.DatumReader())
            # Py3 fix: dfr.next() -> next(dfr).
            return True, next(dfr)
    if args[5] == "-data":
        print("JSON Decoder not yet implemented.")
        return False, None
    print(usage_str)
    return False, None


def main(args=sys.argv):
    """Command-line entry point; returns a process exit code (0 on success)."""
    if len(args) == 1:
        print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
        return 1
    if args[1] == "dump":
        if len(args) != 3:
            print("Usage: %s dump input_file" % args[0])
            return 1
        for d in datafile.DataFileReader(file_or_stdin(args[2]), io.DatumReader()):
            print(repr(d))
    elif args[1] in ("rpcreceive", "rpcsend"):
        usage_str = "Usage: %s %s uri protocol_file " % (args[0], args[1])
        usage_str += "message_name (-data d | -file f)"
        if len(args) not in [5, 7]:
            print(usage_str)
            return 1
        uri, proto, msg = args[2:5]
        ok, datum = _read_datum_arg(args, usage_str)
        if not ok:
            return 1
        if args[1] == "rpcreceive":
            run_server(uri, proto, msg, datum)
        else:
            send_message(uri, proto, msg, datum)
    # Unknown sub-commands fall through and exit 0, as in the original.
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
zurfyx/simple | simple/projects/activities/abstract_models.py | Python | mit | 2,648 | 0 | from django.db import models
from django.utils import timezone
from config import constants as globalConstants
from core.fields import RestrictedFile
from projects.abstract_models import AbstractTimeStamped
from projects.models import Project
from users.models import User
class AbstractProjectActivity(AbstractTimeStamped):
    """
    Project Activities for Students.
    Start_date & due_date represent the first and last date for users to hand
    in their Activity Replies.
    Allowed submissions represents the number of edits a user can do to their
    reply before their due date.
    """
    project = models.ForeignKey(Project)
    user = models.ForeignKey(User)
    title = models.CharField(max_length=200, unique=True)
    body = models.CharField(max_length=2000, blank=True, null=True)
    start_date = models.DateTimeField()
    due_date = models.DateTimeField()
    responses = models.ManyToManyField(User, through='ProjectActivityResponse',
                                       related_name='responded_activity')

    def is_closed(self):
        # True when "now" falls outside the [start_date, due_date] window.
        now = timezone.now()
        return now < self.start_date or now > self.due_date

    def __str__(self):
        return self.title

    class Meta:
        abstract = True
        verbose_name_plural = 'project activities'
class AbstractProjectActivityAttachment(models.Model):
    """File attachment uploaded alongside a ProjectActivity."""
    activity = models.ForeignKey('activities.ProjectActivity',
                                 related_name='activity_attachments')
    # NOTE(review): field name shadows the `object` builtin; kept for
    # backward compatibility with existing migrations/queries.
    object = RestrictedFile(
        upload_to=globalConstants.MediaFile.PROJECT_ATTACHMENT.path,
        max_upload_size=globalConstants.MediaFile.PROJECT_ATTACHMENT.max_size,
        blank=True, null=True)

    class Meta:
        abstract = True
class AbstractProjectActivityResponse(AbstractTimeStamped):
    """
    Project Activity Response from User.

    One response per (user, activity) pair; number_submissions tracks how
    many times the reply has been (re)submitted.
    """
    user = models.ForeignKey(User)
    activity = models.ForeignKey('ProjectActivity')
    body = models.CharField(max_length=20000)
    number_submissions = models.PositiveIntegerField(default=0)

    def __str__(self):
        return self.body

    class Meta:
        abstract = True
        unique_together = ('user', 'activity')
class AbstractProjectActivityResponseAttachment(models.Model):
    """File attachment uploaded alongside a ProjectActivityResponse."""
    response = models.ForeignKey('activities.ProjectActivityResponse',
                                 related_name='activity_response_attachments')
    # NOTE(review): field name shadows the `object` builtin; kept for
    # backward compatibility with existing migrations/queries.
    object = RestrictedFile(
        upload_to=globalConstants.MediaFile.PROJECT_ATTACHMENT.path,
        max_upload_size=globalConstants.MediaFile.PROJECT_ATTACHMENT.max_size,
        blank=True, null=True)

    class Meta:
        abstract = True
|
andrewv587/pycharm-project | myFCN/myVgg16.py | Python | apache-2.0 | 3,915 | 0.001277 | # -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import absolute_import
from __future__ import print_function
from keras.engine import Input
from keras.engine import Model
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def get_conv(channel, name):
    """Return a named 3x3 same-padding ReLU Conv2D layer with `channel` filters."""
    return Conv2D(channel, (3, 3), activation='relu', padding='same', name=name)
def get_pool(name):
    """Return a named 2x2 stride-2 max-pooling layer."""
    return MaxPooling2D((2, 2), strides=(2, 2), name=name)
class myVGG16(object):
    """VGG16 backbone built from shared layer objects.

    Layers are created once in __init__ so the same weights can be applied
    to several input tensors (create_model / get_layer reuse them).
    """

    def __init__(self):
        # Block 1: 2 x conv(64) + pool
        self.block1_conv1 = get_conv(64, 'block1_conv1')
        self.block1_conv2 = get_conv(64, 'block1_conv2')
        self.block1_pool = get_pool('block1_pool')
        # Block 2: 2 x conv(128) + pool
        self.block2_conv1 = get_conv(128, 'block2_conv1')
        self.block2_conv2 = get_conv(128, 'block2_conv2')
        self.block2_pool = get_pool('block2_pool')
        # Block 3: 3 x conv(256) + pool
        self.block3_conv1 = get_conv(256, 'block3_conv1')
        self.block3_conv2 = get_conv(256, 'block3_conv2')
        self.block3_conv3 = get_conv(256, 'block3_conv3')
        self.block3_pool = get_pool('block3_pool')
        # Block 4: 3 x conv(512) + pool
        self.block4_conv1 = get_conv(512, 'block4_conv1')
        self.block4_conv2 = get_conv(512, 'block4_conv2')
        self.block4_conv3 = get_conv(512, 'block4_conv3')
        self.block4_pool = get_pool('block4_pool')
        # Block 5: 3 x conv(512) + pool
        self.block5_conv1 = get_conv(512, 'block5_conv1')
        self.block5_conv2 = get_conv(512, 'block5_conv2')
        self.block5_conv3 = get_conv(512, 'block5_conv3')
        self.block5_pool = get_pool('block5_pool')
        # self.block5_conv1.trainable=False

    def create_model(self, input):
        """Apply the full VGG16 stack to `input`; return the block5 pool output."""
        x = self.block1_conv1(input)
        x = self.block1_conv2(x)
        x = self.block1_pool(x)
        x = self.block2_conv1(x)
        x = self.block2_conv2(x)
        x = self.block2_pool(x)
        x = self.block3_conv1(x)
        x = self.block3_conv2(x)
        x = self.block3_conv3(x)
        x = self.block3_pool(x)
        x = self.block4_conv1(x)
        x = self.block4_conv2(x)
        x = self.block4_conv3(x)
        x = self.block4_pool(x)
        x = self.block5_conv1(x)
        x = self.block5_conv2(x)
        x = self.block5_conv3(x)
        x = self.block5_pool(x)
        return x

    def load_weigth(self, input_size=(224, 224, 3), trainable=False):
        """Build a Model, load ImageNet (no-top) weights, set trainability.

        NOTE: misspelled name kept for backward compatibility with callers.
        """
        x = Input(input_size)
        y = self.create_model(x)
        model = Model(x, y)
        weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                WEIGHTS_PATH_NO_TOP,
                                cache_subdir='models')
        model.load_weights(weights_path)
        for layer in model.layers:
            layer.trainable = trainable
        return model

    def get_layer(self, input):
        """Apply the stack and also return intermediate activations.

        Returns (c, [l1..l5]) where c is the block3_conv2 output and l1..l5
        are the first conv activation of each block.
        """
        x = self.block1_conv1(input)
        l1 = x
        x = self.block1_conv2(x)
        x = self.block1_pool(x)
        x = self.block2_conv1(x)
        l2 = x
        x = self.block2_conv2(x)
        x = self.block2_pool(x)
        x = self.block3_conv1(x)
        l3 = x
        x = self.block3_conv2(x)
        c = x
        x = self.block3_conv3(x)
        x = self.block3_pool(x)
        x = self.block4_conv1(x)
        l4 = x
        x = self.block4_conv2(x)
        x = self.block4_conv3(x)
        x = self.block4_pool(x)
        x = self.block5_conv1(x)
        l5 = x
        x = self.block5_conv2(x)
        x = self.block5_conv3(x)
        x = self.block5_pool(x)
        return c, [l1, l2, l3, l4, l5]
|
kansanmuisti/kamu | utils/populate-cms.py | Python | agpl-3.0 | 4,030 | 0.001985 | #!/usr/bin/python
# A script to write initial content data into database
import sys
import os
import codecs
from datetime import datetime
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
kamu_path = os.path.normpath(my_path + '/..')
# Change this if you have your initial content elsewhere
content_path = os.path.normpath(my_path + '/../Content/cms')
sys.path.insert(1, kamu_path)
sys.path.insert(2, os.path.normpath(kamu_path + '/..'))
allowed_markups = ['html', 'markdown', 'plain']
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from kamu.cms.models import Category, Newsitem, Item, Content, Revision
def process_file(root, filename, category_name, mu_type):
    """Import one content file as a Revision, creating Category/Item/Content
    rows as needed.

    Files named news_<date>_<order> become Newsitem entries; everything else
    becomes a plain Item under its category.  Reads the module-level
    `language` global set by the directory walk below.
    """
    print("Processing file %s" % os.path.join(root, filename))
    # Special case for newsitems
    if filename.startswith("news"):
        (category_name, newsdate, order) = filename.split("_")
    category = Category.objects.filter(name=category_name)
    if category.count() == 0:
        print("Creating category %s" % category_name)
        category = Category(name=category_name)
        category.save()
    else:
        assert(category.count() == 1)
        category = category[0]
    if category_name == "news":
        print("Processing newsitem with date %s" % newsdate)
        item = Newsitem.objects.filter(category=category, date=newsdate)
        # FIXME: Many newsitems per date
        if not item.count():
            item = Newsitem(category=category, date=newsdate)
            item.save()
        else:
            assert(item.count() == 1)
            item = item[0]
    else:
        item = Item.objects.filter(category=category)
        if item.count() == 0:
            print("Creating_item under category %s" % category_name)
            item = Item(category=category)
            item.save()
        else:
            item = item[0]
    full_fname = os.path.join(root, filename)
    content = Content.objects.filter(item=item, language=language)
    if content.count() == 0:
        print("Creating content for item %s with lang %s" % (item, language))
        content = Content(item=item, language=language)
        content.save()
    else:
        content = content[0]
    revision = content.get_latest_revision()
    mtime = datetime.fromtimestamp(os.path.getmtime(full_fname))
    if revision and revision.date >= mtime:
        # Up to date already; nothing to import.
        print("\tSkipping based on file mtime")
        return
    with codecs.open(full_fname, mode="r", encoding="utf8") as f:
        if category_name == "news":
            # Newsfiles contain the subject as first line
            subject = f.readline()[:-1]
            # Summary is defined as being all the text after subject until
            # marker "[full]" alone at the beginning of line with LF at end
            summary = ""
            for line in f:
                if line == "[full]\n":
                    break
                summary += line
        else:
            subject = "Initial commit for %s in %s" % (category, language)
            summary = ""
        # Read the remainder of the file as the revision body.
        content_data = f.read().strip()
    revision = Revision(content=content, subject=subject, summary=summary,
                        summary_markup_type=mu_type, data=content_data,
                        data_markup_type=mu_type)
    revision.save()
    print("Prepared revision %s" % revision)
# FIXME. This loop does not handle ordering of newsitems within
# the same day.
# Walk the content tree: <content_path>/<language>/<category>.<markup>
for root, dirs, files in os.walk(content_path):
    if not files:
        continue
    (head,tail) = os.path.split(root)
    # The leaf directory name doubles as the content language code.
    language = tail
    for filename in files:
        (category_name, mu_type) = os.path.splitext(filename)
        # Drop the leading dot from the extension to get the markup type.
        mu_type = mu_type[1:]
        if mu_type not in allowed_markups:
            print("Ignoring file %s" % filename)
            continue
        process_file(root, filename, category_name, mu_type)
|
HumanBrainProject/neuroglancer-scripts | src/neuroglancer_scripts/file_accessor.py | Python | mit | 6,613 | 0 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
"""Access to a Neuroglancer pre-computed dataset on the local filesystem.
See the :mod:`~neuroglancer_scripts.accessor` module for a description of the
API.
"""
import gzip
import os
import pathlib
import neuroglancer_scripts.accessor
from neuroglancer_scripts.accessor import _CHUNK_PATTERN_FLAT, DataAccessError
__all__ = [
"FileAccessor",
]
_CHUNK_PATTERN_SUBDIR = "{key}/{0}-{1}/{2}-{3}/{4}-{5}"
NO_COMPRESS_MIME_TYPES = {
"application/json",
"image/jpeg",
"image/png",
}
class FileAccessor(neuroglancer_scripts.accessor.Accessor):
    """Access a Neuroglancer pre-computed pyramid on the local file system.

    :param str base_dir: path to the directory containing the pyramid
    :param bool flat: use a flat file layout (see :ref:`layouts`)
    :param bool gzip: compress chunks losslessly with gzip
    """
    can_read = True
    can_write = True

    def __init__(self, base_dir, flat=False, gzip=True):
        self.base_path = pathlib.Path(base_dir)
        if flat:
            self.chunk_pattern = _CHUNK_PATTERN_FLAT
        else:
            self.chunk_pattern = _CHUNK_PATTERN_SUBDIR
        self.gzip = gzip

    def file_exists(self, relative_path):
        """Return True if the file exists (plain or gzip-compressed)."""
        relative_path = pathlib.Path(relative_path)
        file_path = self.base_path / relative_path
        if ".." in file_path.relative_to(self.base_path).parts:
            raise ValueError("only relative paths pointing under base_path "
                             "are accepted")
        try:
            if file_path.is_file():
                return True
            elif file_path.with_name(file_path.name + ".gz").is_file():
                return True
        except OSError as exc:
            raise DataAccessError(
                "Error fetching {0}: {1}".format(file_path, exc)) from exc
        return False

    def fetch_file(self, relative_path):
        """Return the file's contents, transparently decompressing .gz."""
        relative_path = pathlib.Path(relative_path)
        file_path = self.base_path / relative_path
        if ".." in file_path.relative_to(self.base_path).parts:
            raise ValueError("only relative paths pointing under base_path "
                             "are accepted")
        try:
            if file_path.is_file():
                f = file_path.open("rb")
            elif file_path.with_name(file_path.name + ".gz").is_file():
                f = gzip.open(str(file_path.with_name(file_path.name + ".gz")),
                              "rb")
            else:
                raise DataAccessError("Cannot find {0} in {1}".format(
                    relative_path, self.base_path))
            with f:
                return f.read()
        except OSError as exc:
            raise DataAccessError(
                "Error fetching {0}: {1}".format(file_path, exc)) from exc

    def store_file(self, relative_path, buf,
                   mime_type="application/octet-stream",
                   overwrite=False):
        """Write `buf` under base_path, gzipping unless the MIME type says not to."""
        relative_path = pathlib.Path(relative_path)
        file_path = self.base_path / relative_path
        if ".." in file_path.relative_to(self.base_path).parts:
            raise ValueError("only relative paths pointing under base_path "
                             "are accepted")
        # "xb" makes open() fail if the file already exists.
        mode = "wb" if overwrite else "xb"
        try:
            os.makedirs(str(file_path.parent), exist_ok=True)
            if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
                with gzip.open(
                        str(file_path.with_name(file_path.name + ".gz")),
                        mode) as f:
                    f.write(buf)
            else:
                with file_path.open(mode) as f:
                    f.write(buf)
        except OSError as exc:
            raise DataAccessError("Error storing {0}: {1}"
                                  .format(file_path, exc)) from exc

    def fetch_chunk(self, key, chunk_coords):
        """Return the raw bytes of one chunk, probing both layouts and .gz."""
        f = None
        try:
            # Try the flat layout first, then the sub-directory layout.
            for pattern in _CHUNK_PATTERN_FLAT, _CHUNK_PATTERN_SUBDIR:
                chunk_path = self._chunk_path(key, chunk_coords, pattern)
                if chunk_path.is_file():
                    f = chunk_path.open("rb")
                elif chunk_path.with_name(chunk_path.name + ".gz").is_file():
                    f = gzip.open(
                        str(chunk_path.with_name(chunk_path.name + ".gz")),
                        "rb"
                    )
            if f is None:
                raise DataAccessError(
                    "Cannot find chunk {0} in {1}" .format(
                        self._flat_chunk_basename(key, chunk_coords),
                        self.base_path)
                )
            with f:
                return f.read()
        except OSError as exc:
            raise DataAccessError(
                "Error accessing chunk {0} in {1}: {2}" .format(
                    self._flat_chunk_basename(key, chunk_coords),
                    self.base_path, exc)) from exc

    def store_chunk(self, buf, key, chunk_coords,
                    mime_type="application/octet-stream",
                    overwrite=True):
        """Write one chunk, gzipping unless the MIME type says not to."""
        chunk_path = self._chunk_path(key, chunk_coords)
        mode = "wb" if overwrite else "xb"
        try:
            os.makedirs(str(chunk_path.parent), exist_ok=True)
            if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
                with gzip.open(
                        str(chunk_path.with_name(chunk_path.name + ".gz")),
                        mode) as f:
                    f.write(buf)
            else:
                with chunk_path.open(mode) as f:
                    f.write(buf)
        except OSError as exc:
            raise DataAccessError(
                "Error storing chunk {0} in {1}: {2}" .format(
                    self._flat_chunk_basename(key, chunk_coords),
                    self.base_path, exc)) from exc

    def _chunk_path(self, key, chunk_coords, pattern=None):
        # Build the on-disk path for a chunk using the configured layout.
        if pattern is None:
            pattern = self.chunk_pattern
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        chunk_filename = pattern.format(
            xmin, xmax, ymin, ymax, zmin, zmax, key=key)
        return self.base_path / chunk_filename

    def _flat_chunk_basename(self, key, chunk_coords):
        # Human-readable chunk name used in error messages.
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        chunk_filename = _CHUNK_PATTERN_FLAT.format(
            xmin, xmax, ymin, ymax, zmin, zmax, key=key)
        return chunk_filename
|
sqlalchemy/alembic | alembic/migration.py | Python | mit | 41 | 0 | from .runtime.migration | import * # noqa
| |
UManPychron/pychron | pychron/envisage/tasks/editor_task.py | Python | apache-2.0 | 6,344 | 0.000631 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.api import IEditor, IEditorAreaPane
from pyface.tasks.task_layout import PaneItem, Splitter
from traits.api import Property, Instance
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.envisage.tasks.advanced_editor_area_pane import myAdvancedEditorAreaPane
from pychron.envisage.tasks.base_task import BaseManagerTask, BaseExtractionLineTask
class BaseEditorTask(BaseManagerTask):
    """Manager task that owns an editor area pane plus helpers for opening,
    saving, activating and closing editors."""
    active_editor = Property(Instance(IEditor),
                             depends_on='editor_area.active_editor')
    editor_area = Instance(IEditorAreaPane)

    def set_editor_layout(self, layout):
        """Apply a TaskLayout to the editor area."""
        ea = self.editor_area
        ea.set_layout(layout)

    def split_editors(self, a, b, h1=-1, h2=-1, orientation='horizontal'):
        """Show editors `a` and `b` side by side (or stacked)."""
        layout = Splitter(PaneItem(id=a, height=h1),
                          PaneItem(id=b, height=h2),
                          orientation=orientation)
        self.set_editor_layout(layout)

    def db_save_info(self):
        self.information_dialog('Changes saved to the database')

    def get_editor(self, name, key='name'):
        """Return the first editor whose `key` attribute equals `name`."""
        return next((e for e in self.editor_area.editors if getattr(e, key) == name), None)

    def get_editor_names(self):
        return [e.name for e in self.editor_area.editors]

    def iter_editors(self, klass):
        """Iterate over open editors that are instances of `klass`."""
        return (e for e in self.editor_area.editors if isinstance(e, klass))

    def has_active_editor(self, klass=None):
        """Return the active editor, or None (with a dialog) if absent/wrong type."""
        if not self.active_editor:
            self.information_dialog('No active tab. Please open a tab')
        elif klass:
            if not isinstance(self.active_editor, klass):
                name = str(klass).split('.')[-1][:-2].replace('Editor', '')
                self.information_dialog('No active tab. Please open a "{}" tab'.format(name))
                return
        return self.active_editor

    def get_editors(self, klass):
        return (ei for ei in self.editor_area.editors if isinstance(ei, klass))

    def close_editor(self, editor):
        try:
            self.editor_area.remove_editor(editor)
        except AttributeError:
            # Editor area may already be gone during shutdown.
            pass

    def activate_editor(self, editor):
        if self.editor_area:
            try:
                self.editor_area.activate_editor(editor)
            except AttributeError:
                pass

    def create_central_pane(self):
        # self.editor_area = AdvancedEditorAreaPane()
        self.editor_area = myAdvancedEditorAreaPane()
        return self.editor_area

    def open(self, path=None, **kw):
        """
        Shows a dialog to open a file.
        """
        if path is None or not os.path.isfile(path):
            path = self.open_file_dialog()
        if path:
            return self._open_file(path, **kw)
        else:
            self._open_abort()

    def save(self, path=None):
        """
        if the active_editor doesnt have a path e.g not yet saved
        do a save as
        """
        if self.active_editor:
            if not path:
                if hasattr(self.active_editor, 'path'):
                    if self.active_editor.path:
                        path = self.active_editor.path
                else:
                    return
            if not path:
                self.save_as()
            else:
                if self._save_file(path):
                    self.active_editor.dirty = False
                    self.active_editor.path = path

    def new(self):
        pass

    def save_as(self):
        """Prompt for a destination and save the active editor there."""
        kw = {}
        df = self._generate_default_filename()
        if df:
            kw['default_filename'] = df
        path = self._get_save_path(**kw)
        if path:
            if self._save_file(path):
                self.active_editor.path = path
                self.active_editor.dirty = False
                return True

    def close_all(self):
        for e in self.editor_area.editors:
            self.close_editor(e)

    # private
    def _get_save_path(self, **kw):
        return self.save_file_dialog(**kw)

    def _generate_default_filename(self):
        return

    def _save_file(self, path):
        pass

    def _open_file(self, path, **kw):
        pass

    def _open_abort(self):
        pass

    def _pre_open_hook(self):
        pass

    def _open_editor(self, editor, activate=True, **kw):
        if self.editor_area:
            if editor not in self.editor_area.editors:
                self.editor_area.add_editor(editor)
            if activate:
                self.editor_area.activate_editor(editor)

    def _get_active_editor(self):
        if self.editor_area is not None:
            return self.editor_area.active_editor
        return None

    def _prompt_for_save(self):
        """Ask the user what to do about unsaved editors; returns the choice."""
        if self.editor_area is None:
            return True
        dirty_editors = {editor.name: editor
                         for editor in self.editor_area.editors
                         if editor.dirty}
        if not dirty_editors:
            return True
        message = 'You have unsaved files. Would you like to save them?'
        ret = self._handle_prompt_for_save(message)
        if ret == 'save':
            for editor in dirty_editors.values():
                editor.save(editor.path)
        return ret
class EditorTask(BaseExtractionLineTask, BaseEditorTask):
    """BaseEditorTask combined with extraction-line support."""
    pass
# ============= EOF =============================================
|
zhimin711/nova | nova/tests/unit/objects/test_image_meta.py | Python | apache-2.0 | 12,384 | 0.000242 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import exception
from nova import objects
from nova import test
class TestImageMeta(test.NoDBTestCase):
    """Tests for ImageMeta.from_dict conversion and null handling."""

    def test_basic_attrs(self):
        image = {'status': 'active',
                 'container_format': 'bare',
                 'min_ram': 0,
                 'updated_at': '2014-12-12T11:16:36.000000',
                 # Testing string -> int conversion
                 'min_disk': '0',
                 'owner': '2d8b9502858c406ebee60f0849486222',
                 # Testing string -> bool conversion
                 'protected': 'yes',
                 'properties': {
                     'os_type': 'Linux',
                     'hw_video_model': 'vga',
                     'hw_video_ram': '512',
                     'hw_qemu_guest_agent': 'yes',
                     'hw_scsi_model': 'virtio-scsi',
                 },
                 'size': 213581824,
                 'name': 'f16-x86_64-openstack-sda',
                 'checksum': '755122332caeb9f661d5c978adb8b45f',
                 'created_at': '2014-12-10T16:23:14.000000',
                 'disk_format': 'qcow2',
                 'id': 'c8b1790e-a07d-4971-b137-44f2432936cd'
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('active', image_meta.status)
        self.assertEqual('bare', image_meta.container_format)
        self.assertEqual(0, image_meta.min_ram)
        self.assertIsInstance(image_meta.updated_at, datetime.datetime)
        self.assertEqual(0, image_meta.min_disk)
        self.assertEqual('2d8b9502858c406ebee60f0849486222', image_meta.owner)
        self.assertTrue(image_meta.protected)
        self.assertEqual(213581824, image_meta.size)
        self.assertEqual('f16-x86_64-openstack-sda', image_meta.name)
        self.assertEqual('755122332caeb9f661d5c978adb8b45f',
                         image_meta.checksum)
        self.assertIsInstance(image_meta.created_at, datetime.datetime)
        self.assertEqual('qcow2', image_meta.disk_format)
        self.assertEqual('c8b1790e-a07d-4971-b137-44f2432936cd', image_meta.id)
        self.assertIsInstance(image_meta.properties, objects.ImageMetaProps)

    def test_no_props(self):
        # An empty dict still yields a properties object.
        image_meta = objects.ImageMeta.from_dict({})
        self.assertIsInstance(image_meta.properties, objects.ImageMetaProps)

    def test_volume_backed_image(self):
        # Volume-backed images report None for format/size/checksum.
        image = {'container_format': None,
                 'size': 0,
                 'checksum': None,
                 'disk_format': None,
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('', image_meta.container_format)
        self.assertEqual(0, image_meta.size)
        self.assertEqual('', image_meta.checksum)
        self.assertEqual('', image_meta.disk_format)

    def test_null_substitution(self):
        # None values are replaced with type-appropriate empty values.
        image = {'name': None,
                 'checksum': None,
                 'owner': None,
                 'size': None,
                 'virtual_size': None,
                 'container_format': None,
                 'disk_format': None,
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('', image_meta.name)
        self.assertEqual('', image_meta.checksum)
        self.assertEqual('', image_meta.owner)
        self.assertEqual(0, image_meta.size)
        self.assertEqual(0, image_meta.virtual_size)
        self.assertEqual('', image_meta.container_format)
        self.assertEqual('', image_meta.disk_format)
class TestImageMetaProps(test.NoDBTestCase):
def test_normal_props(self):
props = {'os_type': 'windows',
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
# Fill sane values for the rest here
}
virtprops = objects.ImageMetaProps.from_dict(props)
self.assertEqual('windows', virtprops.os_type)
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
def test_default_props(self):
props = {}
virtprops = objects.ImageMetaProps.from_dict(props)
for prop in virtprops.fields:
self.assertIsNone(virtprops.get(prop))
def test_default_prop_value(self):
props = {}
virtprops = objects.ImageMetaProps.from_dict(props)
self.assertEqual("hvm", virtprops.get("hw_vm_mode", "hvm"))
def test_non_existent_prop(self):
props = {}
virtprops = objects.ImageMetaProps.from_dict(props)
self.assertRaises(AttributeError,
virtprops.get,
"doesnotexist")
def test_legacy_compat(self):
legacy_props = {
'architecture': 'x86_64',
'owner_id': '123',
'vmware_adaptertype': 'lsiLogic',
'vmware_disktype': 'preallocated',
'vmware_image_version': '2',
'vmware_ostype': 'rhel3_64Guest',
'auto_disk_config': 'yes',
'ipxe_boot': 'yes',
'xenapi_device_id': '3',
'xenapi_image_compression_level': '2',
'vmware_linked_clone': 'false',
'xenapi_use_agent': 'yes',
'xenapi_skip_agent_inject_ssh': 'no',
'xenapi_skip_agent_inject_files_at_boot': 'no',
'cache_in_nova': 'yes',
'vm_mode': 'hvm',
'bittorrent': 'yes',
'mappings': [],
'block_device_mapping': [],
'bdm_v2': 'yes',
'root_device_name': '/dev/vda',
'hypervisor_version_requires': '>=1.5.3',
'hypervisor_type': 'qemu',
}
image_meta = objects.ImageMetaProps.from_dict(legacy_props)
self.assertEqual('x86_64', image_meta.hw_architecture)
self.assertEqual('123', image_meta.img_owner_id)
self.assertEqual('lsilogic', image_meta.hw_scsi_model)
self.assertEqual('preallocated', image_meta.hw_disk_type)
self.assertEqual(2, image_meta.img_version)
self.assertEqual('rhel3_64Guest', image_meta.os_distro)
self.assertTrue(image_meta.hw_auto_disk_config)
self.assertTrue(image_meta.hw_ipxe_boot)
self.assertEqual(3, image_meta.hw_device_id)
self.assertEqual(2, image_meta.img_compression_level)
self.assertFalse(image_meta.img_linked_clone)
self.assertTrue(image_meta.img_use_agent)
self.assertFalse(image_meta.os_skip_agent_inject_ssh)
self.assertFalse(image_meta.os_skip_agent_inject_files_at_boot)
self.assertTrue(image_meta.img_cache_in_nova)
self.assertTrue(image_meta.img_bittorrent)
self.assertEqual([], image_meta.img_mappings)
self.assertEqual([], image_meta.img_block_device_mapping)
self.assertTrue(image_meta.img_bdm_v2)
self.assertEqual("/dev/vda", image_meta.img_root_device_name)
self.assertEqual('>=1.5.3', image_meta.img_hv_requested_version)
self.assertEqual('qemu', image_meta.img_hv_type)
def test_legacy_compat_vmware_adapter_types(self):
legacy_types = ['lsiLogic', 'busLogic', 'ide', 'lsiLogicsas',
'paraVirtual', None, '']
for legacy_type in legacy_types:
legacy_props = {
'vmware_adaptertype': legacy_type,
}
image_meta = objects.ImageMetaProps.from_dict(legacy_props)
if legacy_type == 'ide':
self.assertEqual('ide', image_meta.hw_disk_bus)
|
pglauner/misc | src/cs212/pig_game.py | Python | gpl-2.0 | 3,539 | 0.003956 | '''
Created on Oct 12, 2013
@author: pglauner
'''
import random
from my_decorators import memo
from uncertainty import best_action
# Target score for the game and a map from a player index to the other one.
goal = 40
other = {1: 0, 0: 1}


def roll(state, d):
    """Apply the roll action to a state (and a die roll d) to yield a new state:
    a roll of 1 pigs out (score 1 point, lose pending, pass the turn);
    any other roll accumulates in 'pending'."""
    player, me, you, pending = state
    if d == 1:
        # Pig out: bank a single point and hand the dice over.
        return (other[player], you, me + 1, 0)
    return (player, me, you, pending + d)


def hold(state):
    """Apply the hold action to a state: bank the 'pending' points and
    pass the turn to the other player."""
    player, me, you, pending = state
    return (other[player], you, me + pending, 0)
def dierolls():
    """Yield an endless stream of fair six-sided die rolls."""
    while True:
        yield random.randint(1, 6)
def Q_pig(state, action, Pwin):
    """The expected value of choosing `action` in `state`, where `Pwin`
    scores the resulting states for the player then on the move."""
    if action == 'hold':
        return 1 - Pwin(hold(state))
    if action == 'roll':
        # Average over the six die faces; a 1 passes the turn, so its
        # contribution is the complement of the opponent's win value.
        total = 1 - Pwin(roll(state, 1))
        for d in (2, 3, 4, 5, 6):
            total += Pwin(roll(state, d))
        return total / 6.0
    raise ValueError
def pig_actions(state):
    """The legal actions from a state: rolling is always allowed; holding
    only makes sense when there are pending points to bank."""
    pending = state[3]
    if pending:
        return ['roll', 'hold']
    return ['roll']
@memo
def Pwin(state):
    """Probability that the player to move can win from `state`, assuming
    both sides play optimally (memoized recursion over the state space)."""
    _, me, you, pending = state
    if me + pending >= goal:
        return 1
    if you >= goal:
        return 0
    return max(Q_pig(state, act, Pwin) for act in pig_actions(state))
@memo
def win_diff(state):
    """Utility of `state`: the optimal final score differential for the
    player to move (positive or negative)."""
    _, me, you, pending = state
    if me + pending >= goal or you >= goal:
        return me + pending - you
    return max(Q_pig(state, act, win_diff) for act in pig_actions(state))
def max_diffs(state):
    """A strategy that maximizes the expected difference between my final
    score and my opponent's (delegates to best_action with win_diff as
    the utility function)."""
    return best_action(state, pig_actions, Q_pig, win_diff)
def max_wins(state):
    """The optimal pig strategy: choose the action with the highest win
    probability (delegates to best_action with Pwin as the utility)."""
    return best_action(state, pig_actions, Q_pig, Pwin)
def play_pig(A, B, dierolls=dierolls()):
    """Play a game of pig between two players, represented by their strategies.

    Each time through the main loop we ask the current player for one decision,
    which must be 'hold' or 'roll', and we update the state accordingly.
    When one player's score exceeds the goal, return that player.

    Note: the default die stream is a single generator created once at import
    time, so successive calls share (and advance) the same stream; pass an
    explicit iterator for reproducible games.
    """
    strategies = [A, B]
    state = (0, 0, 0, 0)  # (player to move, mover's score, opponent's score, pending)
    while True:
        (p, me, you, _) = state
        if me >= goal:
            return strategies[p]
        elif you >= goal:
            return strategies[other[p]]
        else:
            action = strategies[p](state)
            if action == 'hold':
                state = hold(state)
            elif action == 'roll':
                state = roll(state, next(dierolls))
            else:
                # Any illegal action forfeits the game to the opponent.
                return strategies[other[p]]
def test():
    # Pit the win-probability-maximizing strategy against the
    # score-difference-maximizing one; return the winner's function name.
    winner = play_pig(max_wins, max_diffs)
    return winner.__name__
# Python 2 script entry point: play one game and print the winner's name.
if __name__ == '__main__':
    print test()
|
dallascard/guac | core/lda/lda_preprocessing.py | Python | apache-2.0 | 4,593 | 0.004137 | from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def main():
usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name)
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"}
pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'}
transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = | 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
| n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
text = re.sub('\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
# Script entry point.
if __name__ == '__main__':
    main()
|
opennode/nodeconductor-assembly-waldur | src/waldur_openstack/openstack_tenant/migrations/0016_internalip_device_info.py | Python | mit | 605 | 0 | # Generated by Django 2.2.13 on 2021-01-12 11:54
from | django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two optional text columns to the InternalIP model.
    # NOTE(review): the names match the OpenStack Neutron port attributes
    # device_id / device_owner — presumably synced from Neutron; confirm.

    dependencies = [
        ('openstack_tenant', '0015_add_fixed_ips'),
    ]

    operations = [
        migrations.AddField(
            model_name='internalip',
            name='device_id',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='internalip',
            name='device_owner',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
knights-lab/NINJA-DOJO | dojo/scripts/extract_ncbi_tid_from_mp2_gold.py | Python | gpl-2.0 | 1,651 | 0.001817 | #!/usr/bin/env python
import os
from collections import defaultdict
from glob import glob
import click
import csv
from ninja_utils.parsers import FASTQ
from dojo.taxonomy.maps import RefseqCatalogMap
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True)
def extract_ncbi_tid(path, verbose):
    """For every *.gold file in *path*, rewrite the matching FASTQ so each
    kept read's header starts with its NCBI taxonomy id, and write a CSV of
    per-taxon read counts.

    Reads whose RefSeq accession maps to taxonomy id 0 are dropped.
    NOTE(review): *verbose* is accepted but currently unused.
    """
    rf = RefseqCatalogMap()
    for file in glob(os.path.join(os.path.abspath(path), '*.gold')):
        tid_counts = defaultdict(int)
        # All related input/output files share the .gold file's basename stem.
        stem = os.path.basename(file).split('.')[0]
        with open(os.path.join(path, '%s.fastq' % stem)) as fastq_fh:
            with open(os.path.join(path, '%s.ninja.fastq' % stem), 'w') as outf:
                for header, seq, qual in FASTQ(fastq_fh).read():
                    header_arr = header.split('|')
                    if len(header_arr) > 3:
                        # Field 3 is the RefSeq accession; strip the version.
                        refseq_acc = header_arr[3]
                        acc = refseq_acc[:refseq_acc.find('.')]
                        tid = rf.refseq_accession2ncbi_tid[acc]
                        if not tid == 0:
                            outf.write('@ncbi_tid|%s|%s\n' % (tid, header))
                            outf.write(seq + '\n')
                            outf.write('+\n%s\n' % qual)
                            tid_counts[tid] += 1
        with open(os.path.join(path, '%s.ninja.gold' % stem), 'w') as outf:
            writer = csv.writer(outf)
            writer.writerow(('ncbi_tid', 'count'))
            for kv in tid_counts.items():
                writer.writerow(kv)
# Click parses command-line arguments when invoked as a script.
if __name__ == '__main__':
    extract_ncbi_tid()
|
0vercl0k/rp | src/third_party/beaengine/tests/0f3a0b.py | Python | mit | 2,498 | 0.000801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly checks for opcode 0F 3A 0B and its VEX/EVEX forms
    (roundsd / vroundsd / vrndscalesd)."""

    def test(self):
        # 66 0F 3A 0b /r ib
        # roundsd xmm1, xmm2/m64, imm8  (memory operand form)
        Buffer = bytes.fromhex('660f3a0b2011')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Opcode, 0x0f3a0b)
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'roundsd')
        assert_equal(myDisasm.repr(), 'roundsd xmm4, qword ptr [rax], 11h')

        # Register operand form (modrm = C0).
        Buffer = bytes.fromhex('660f3a0bc011')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Opcode, 0x0f3a0b)
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'roundsd')
        assert_equal(myDisasm.repr(), 'roundsd xmm0, xmm0, 11h')

        # VEX.LIG.66.0F3A.WIG 0b /r ib
        # vroundsd xmm1, xmm2, xmm3/m64, imm8
        myVEX = VEX('VEX.LIG.66.0F3A.WIG')
        Buffer = bytes.fromhex('{}0b1033'.format(myVEX.c4()))
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Opcode, 0x0b)
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vroundsd')
        assert_equal(myDisasm.repr(), 'vroundsd xmm10, xmm0, qword ptr [r8], 33h')

        # EVEX.LIG.66.0F3A.W0 0b /r ib
        # vrndscalesd xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
        myEVEX = EVEX('EVEX.LIG.66.0F3A.W0')
        Buffer = bytes.fromhex('{}0b2011'.format(myEVEX.prefix()))
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Opcode, 0x0b)
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vrndscalesd')
        assert_equal(myDisasm.repr(), 'vrndscalesd xmm28, xmm16, qword ptr [r8], 11h')
|
Scan-o-Matic/scanomatic | scanomatic/models/phases_models.py | Python | gpl-3.0 | 1,102 | 0.001815 | from __future__ import | absolute_import
import scanomatic.generics.model as model
class SegmentationModel(model.Model):
    """Holds one growth curve together with the derivative data and phase
    labels produced while segmenting it into growth phases."""

    def __init__(self, dydt=None, dydt_ranks=None, dydt_signs=None, d2yd2t=None,
                 d2yd2t_signs=None, phases=None, offset=0, log2_curve=None, times=None,
                 plate=None, pos=None):

        self.log2_curve = log2_curve
        """:type : numpy.ndarray"""

        self.times = times
        """:type : numpy.ndarray"""

        self.plate = plate
        """:type : int"""

        self.pos = pos
        """:type : (int, int)"""

        self.dydt = dydt
        """:type : numpy.ndarray"""

        self.dydt_ranks = dydt_ranks
        """:type : numpy.ndarray"""

        self.dydt_signs = dydt_signs
        """:type : numpy.ndarray"""

        self.d2yd2t = d2yd2t
        """:type : numpy.ndarray"""

        self.d2yd2t_signs = d2yd2t_signs
        """:type : numpy.ndarray"""

        self.offset = offset
        """:type : int"""

        self.phases = phases
        """:type : numpy.ndarray"""

        super(SegmentationModel, self).__init__()
|
tensorflow/tfx | tfx/utils/topsort_test.py | Python | apache-2.0 | 5,388 | 0.002598 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.topsort."""
import attr
import tensorflow as tf
from tfx.utils import topsort
@attr.s
class Node:
  """Minimal test graph node: a name plus parent/child name lists."""
  # Some users topsort PipelineNode protos, which are not hashable.
  # To accommodate this, do not make this attrs class hashable.
  name = attr.ib()
  upstream_nodes = attr.ib()
  downstream_nodes = attr.ib()
class TopsortTest(tf.test.TestCase):
  """Tests for topsort.topsorted_layers."""

  def test_topsorted_layers_DAG(self):
    """A DAG is partitioned into dependency layers in topological order."""
    nodes = [
        Node('A', [], ['B', 'C', 'D']),
        Node('B', ['A'], []),
        Node('C', ['A'], ['D']),
        Node('D', ['A', 'C', 'F'], ['E']),
        Node('E', ['D'], []),
        Node('F', [], ['D'])
    ]
    node_map = {node.name: node for node in nodes}
    layers = topsort.topsorted_layers(
        nodes,
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=(
            lambda n: [node_map[name] for name in n.upstream_nodes]),
        get_child_nodes=(
            lambda n: [node_map[name] for name in n.downstream_nodes]))
    self.assertEqual([['A', 'F'], ['B', 'C'], ['D'], ['E']],
                     [[node.name for node in layer] for layer in layers])

  def test_topsorted_layers_error_if_cycle(self):
    """A graph containing a cycle raises InvalidDAGError."""
    nodes = [
        Node('A', [], ['B', 'E']),
        Node('B', ['A', 'D'], ['C']),
        Node('C', ['B'], ['D']),
        Node('D', ['C'], ['B']),
        Node('E', ['A'], [])
    ]
    node_map = {node.name: node for node in nodes}
    with self.assertRaisesRegex(topsort.InvalidDAGError, 'Cycle detected.'):
      topsort.topsorted_layers(
          nodes,
          get_node_id_fn=lambda n: n.name,
          get_parent_nodes=(
              lambda n: [node_map[name] for name in n.upstream_nodes]),
          get_child_nodes=(
              lambda n: [node_map[name] for name in n.downstream_nodes]))

  def test_topsorted_layers_ignore_unknown_parent_node(self):
    """Parent references to nodes outside the input set are ignored."""
    nodes = [
        Node('A', [], ['B']),
        Node('B', ['A'], ['C']),
        Node('C', ['B'], []),
    ]
    node_map = {node.name: node for node in nodes}
    # Exclude node A. Node B now has a parent node 'A' that should be ignored.
    layers = topsort.topsorted_layers(
        [node_map['B'], node_map['C']],
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=(
            lambda n: [node_map[name] for name in n.upstream_nodes]),
        get_child_nodes=(
            lambda n: [node_map[name] for name in n.downstream_nodes]))
    self.assertEqual([['B'], ['C']],
                     [[node.name for node in layer] for layer in layers])

  def test_topsorted_layers_ignore_duplicate_parent_node(self):
    """Duplicate parent references do not distort the layering."""
    nodes = [
        Node('A', [], ['B']),
        Node('B', ['A', 'A'], []),  # Duplicate parent node 'A'
    ]
    node_map = {node.name: node for node in nodes}
    layers = topsort.topsorted_layers(
        nodes,
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=(
            lambda n: [node_map[name] for name in n.upstream_nodes]),
        get_child_nodes=(
            lambda n: [node_map[name] for name in n.downstream_nodes]))
    self.assertEqual([['A'], ['B']],
                     [[node.name for node in layer] for layer in layers])

  def test_topsorted_layers_ignore_unknown_child_node(self):
    """Child references to nodes outside the input set are ignored."""
    nodes = [
        Node('A', [], ['B']),
        Node('B', ['A'], ['C']),
        Node('C', ['B'], []),
    ]
    node_map = {node.name: node for node in nodes}
    # Exclude node C. Node B now has a child node 'C' that should be ignored.
    layers = topsort.topsorted_layers(
        [node_map['A'], node_map['B']],
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=(
            lambda n: [node_map[name] for name in n.upstream_nodes]),
        get_child_nodes=(
            lambda n: [node_map[name] for name in n.downstream_nodes]))
    self.assertEqual([['A'], ['B']],
                     [[node.name for node in layer] for layer in layers])

  def test_topsorted_layers_ignore_duplicate_child_node(self):
    """Duplicate child references do not distort the layering."""
    nodes = [
        Node('A', [], ['B', 'B']),  # Duplicate child node 'B'
        Node('B', ['A'], []),
    ]
    node_map = {node.name: node for node in nodes}
    layers = topsort.topsorted_layers(
        nodes,
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=(
            lambda n: [node_map[name] for name in n.upstream_nodes]),
        get_child_nodes=(
            lambda n: [node_map[name] for name in n.downstream_nodes]))
    self.assertEqual([['A'], ['B']],
                     [[node.name for node in layer] for layer in layers])

  def test_topsorted_layers_empty(self):
    """An empty node list yields an empty layering."""
    layers = topsort.topsorted_layers(
        nodes=[],
        get_node_id_fn=lambda n: n.name,
        get_parent_nodes=lambda n: [],
        get_child_nodes=lambda n: [])
    self.assertEqual([], layers)
if __name__ == '__main__':
  tf.test.main()  # Run all TopsortTest cases under the TF test runner.
|
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/VERSION/GL_4_1.py | Python | lgpl-3.0 | 13,612 | 0.057229 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_4_1'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_4_1',error_checker=_errors._error_checker)
GL_ACTIVE_PROGRAM=_C('GL_ACTIVE_PROGRAM',0x8259)
GL_ALL_SHADER_BITS=_C('GL_ALL_SHADER_BITS',0xFFFFFFFF)
GL_FIXED=_C('GL_FIXED',0x140C)
GL_FRAGMENT_SHADER_BIT=_C('GL_FRAGMENT_SHADER_BIT',0x00000002)
GL_GEOMETRY_SHADER_BIT=_C('GL_GEOMETRY_SHADER_BIT',0x00000004)
GL_HIGH_FLOAT=_C('GL_HIGH_FLOAT',0x8DF2)
GL_HIGH_INT=_C('GL_HIGH_INT',0x8DF5)
GL_IMPLEMENTATION_COLOR_READ_FORMAT=_C('GL_IMPLEMENTATION_COLOR_READ_FORMAT',0x8B9B)
GL_IMPLEMENTATION_COLOR_READ_TYPE=_C('GL_IMPLEMENTATION_COLOR_READ_TYPE',0x8B9A)
GL_LAYER_PROVOKING_VERTEX=_C('GL_LAYER_PROVOKING_VERTEX',0x825E)
GL_LOW_FLOAT=_C('GL_LOW_FLOAT',0x8DF0)
GL_LOW_INT=_C('GL_LOW_INT',0x8DF3)
GL_MAX_FRAGMENT_UNIFORM_VECTORS=_C('GL_MAX_FRAGMENT_UNIFORM_VECTORS',0x8DFD)
GL_MAX_VARYING_VECTORS=_C('GL_MAX_VARYING_VECTORS',0x8DFC)
GL_MAX_VERTEX_UNIFORM_VECTORS=_C('GL_MAX_VERTEX_UNIFORM_VECTORS',0x8DFB)
GL_MAX_VIEWPORTS=_C('GL_MAX_VIEWPORTS',0x825B)
GL_MEDIUM_FLOAT=_C('GL_MEDIUM_FLOAT',0x8DF1)
GL_MEDIUM_INT=_C('GL_MEDIUM_INT',0x8DF4)
GL_NUM_PROGRAM_BINARY_FORMATS=_C('GL_NUM_PROGRAM_BINARY_FORMATS',0x87FE)
GL_NUM_SHADER_BINARY_FORMATS=_C('GL_NUM_SHADER_BINARY_FORMATS',0x8DF9)
GL_PROGRAM_BINARY_FORMATS=_C('GL_PROGRAM_BINARY_FORMATS',0x87FF)
GL_PROGRAM_BINARY_LENGTH=_C('GL_PROGRAM_BINARY_LENGTH',0x8741)
GL_PROGRAM_BINARY_RETRIEVABLE_HINT=_C('GL_PROGRAM_BINARY_RETRIEVABLE_HINT',0x8257)
GL_PROGRAM_PIPELINE_BINDING=_C('GL_PROGRAM_PIPELINE_BINDING',0x825A)
GL_PROGRAM_SEPARABLE=_C('GL_PROGRAM_SEPARABLE',0x8258)
GL_RGB565=_C('GL_RGB565',0x8D62)
GL_SHADER_BINARY_FORMATS=_C('GL_SHADER_BINARY_FORMATS',0x8DF8)
GL_SHADER_COMPILER=_C('GL_SHADER_COMPILER',0x8DFA)
GL_TESS_CONTROL_SHADER_BIT=_C('GL_TESS_CONTROL_SHADER_BIT',0x00000008)
GL_TESS_EVALUATION_SHADER_BIT=_C('GL_TESS_EVALUATION_SHADER_BIT',0x00000010)
GL_UNDEFINED_VERTEX=_C('GL_UNDEFINED_VERTEX',0x8260)
GL_VERTEX_SHADER_BIT=_C('GL_VERTEX_SHADER_BIT',0x00000001)
GL_VIEWPORT_BOUNDS_RANGE=_C('GL_VIEWPORT_BOUNDS_RANGE',0x825D)
GL_VIEWPORT_INDEX_PROVOKING_VERTEX=_C('GL_VIEWPORT_INDEX_PROVOKING_VERTEX',0x825F)
GL_VIEWPORT_SUBPIXEL_BITS=_C('GL_VIEWPORT_SUBPIXEL_BITS',0x825C)
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glActiveShaderProgram(pipeline,program):pass
@_f
@_p.types(None,_cs.GLuint)
def glBindProgramPipeline(pipeline):pass
@_f
@_p.types(None,_cs.GLfloat)
def glClearDepthf(d):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLsizei,ctypes.POINTER( ctypes.POINTER( _cs.GLchar )))
def glCreateShaderProgramv(type,count,strings):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeleteProgramPipelines(n,pipelines):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLdoubleArray)
def glDepthRangeArrayv(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble,_cs.GLdouble)
def glDepthRangeIndexed(index,n,f):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat)
def glDepthRangef(n,f):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenProgramPipelines(n,pipelines):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLdoubleArray)
def glGetDoublei_v(target,index,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLfloatArray)
def glGetFloati_v(target,index,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLuintArray,ctypes.c_void_p)
def glGetProgramBinary(program,bufSize,length,binaryFormat,binary):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetProgramPipelineInfoLog(pipeline,bufSize,length,infoLog):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetProgramPipelineiv(pipeline,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray,arrays.GLintArray)
def glGetShaderPrecisionFormat(shadertype,precisiontype,range,precision):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLdoubleArray)
def glGetVertexAttribLdv(index,pname,params):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsProgramPipeline(pipeline):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,ctypes.c_void_p,_cs.GLsizei)
def glProgramBinary(program,binaryFormat,binary,length):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint)
def glProgramParameteri(program,pname,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLdouble)
def glProgramUniform1d(program,location,v0):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glProgramUniform1dv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLfloat)
def glProgramUniform1f(program,location,v0):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glProgramUniform1fv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint)
def glProgramUniform1i(program,location,v0):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glProgramUniform1iv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLuint)
def glProgramUniform1ui(program,location,v0):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLuintArray)
def glProgramUniform1uiv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLdouble,_cs.GLdouble)
def glProgramUniform2d(program,location,v0,v1):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glProgramUniform2dv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLfloat,_cs.GLfloat)
def glProgramUniform2f(program,location,v0,v1):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glProgramUniform2fv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint)
def glProgramUniform2i(program,location,v0,v1):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glProgramUniform2iv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLuint,_cs.GLuint)
def glProgramUniform2ui(program,location,v0,v1):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLuintArray)
def glProgramUniform2uiv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glProgramUniform3d(program,location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glProgramUniform3dv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glProgramUniform3f(program,location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glProgramUniform3fv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint)
def glProgramUniform3i(program,location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glProgramUniform3iv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glProgramUniform3ui(program,location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLuintArray)
def glProgramUniform3uiv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glProgramUniform4d(program,location,v0,v1,v2,v3):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glProgramUniform4dv(program,location,count,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glProgramUniform4f(program,location,v0,v1,v2,v3):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glProgramUniform4fv(program,location,count,value):pass
@_f
|
manuelm/pyload | module/common/packagetools.py | Python | gpl-3.0 | 4,869 | 0.001027 | #!/usr/bin/env python
# JDownloader/src/jd/controlling/LinkGrabberPackager.java
import re
from urlparse import urlparse
def matchFirst(string, *args):
    """Try every pattern list in *args* against ``string``.

    Returns the first capture group of the first matching pattern, or the
    unmodified string when nothing matches.
    """
    for patterns in args:
        for regex in patterns:
            hit = regex.search(string)
            if hit:
                return hit.group(1)
    return string
def parseNames(files):
    """Group download links into packages by deriving a package name.

    :param files: list of (name, url) tuples
    :return: dict mapping derived package names to lists of urls
    """
    packs = {}
    # Known archive/media file extensions used by the split-archive patterns.
    endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"

    # Multi-part archive naming schemes, tried in order.
    rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
               re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
               re.compile("(.*)\\.rar$", re.I),
               re.compile("(.*)\\.r\\d+$", re.I),
               re.compile("(.*)(\\.|_|-)\\d+$", re.I)]

    zipPats = [re.compile("(.*)\\.zip$", re.I),
               re.compile("(.*)\\.z\\d+$", re.I),
               re.compile("(?is).*\\.7z\\.[\\d]+$", re.I),
               re.compile("(.*)\\.a.$", re.I)]

    ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
                re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]

    iszPats = [re.compile("(.*)\\.isz$", re.I),
               re.compile("(.*)\\.i\\d{2}$", re.I)]

    pat1 = re.compile("(\\.?CD\\d+)", re.I)       # CD number fragments
    pat2 = re.compile("(\\.?part\\d+)", re.I)     # partNN fragments
    pat3 = re.compile("(.+)[\\.\\-_]+$")          # trailing separators
    pat4 = re.compile("(.+)\\.\\d+\\.xtm$")       # xtremsplit scheme

    for file, url in files:
        patternMatch = False
        if file is None:
            continue

        # Remove trailing / and keep only the last path component.
        name = file.rstrip('/')
        split = name.rsplit("/", 1)
        if len(split) > 1:
            name = split.pop(1)

        # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
        before = name
        name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
        if before != name:
            patternMatch = True

        # xtremsplit pattern
        r = pat4.search(name)
        if r is not None:
            name = r.group(1)

        # Strip CD / part fragments.
        r = pat1.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        r = pat2.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        # additional checks if extension pattern matched
        if patternMatch:
            # Remove a short (<= 3 chars) extension, if any.
            index = name.rfind(".")
            if index <= 0:
                index = name.rfind("_")
            if index > 0:
                length = len(name) - index
                if length <= 4:
                    name = name[:-length]

            # Remove trailing separators like . _ -
            r = pat3.search(name)
            if r is not None:
                name = r.group(1)

            # Replace . and _ with spaces.
            name = name.replace(".", " ")
            name = name.replace("_", " ")
            name = name.strip()
        else:
            name = ""

        # Fallback: package by hoster.
        if not name:
            name = urlparse(file).hostname
            if name:
                name = name.replace("www.", "")

        # Fallback: default name.
        if not name:
            name = "unknown"

        # Build the mapping.
        if name in packs:
            packs[name].append(url)
        else:
            packs[name] = [url]

    return packs
# Manual smoke test (Python 2): read sample links and print the grouping.
if __name__ == "__main__":
    from os.path import join
    from pprint import pprint
    f = open(join("..", "..", "testlinks2.txt"), "rb")
    urls = [(x.strip(), x.strip()) for x in f.readlines() if x.strip()]
    f.close()
    print "Having %d urls." % len(urls)
    packs = parseNames(urls)
    pprint(packs)
    print "Got %d urls." % sum([len(x) for x in packs.itervalues()])
|
Endika/event | website_event_sale_legal/__openerp__.py | Python | agpl-3.0 | 742 | 0 | # -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería, S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo module manifest (a bare dict, as Odoo expects).
{
    "name": "Legal terms per event",
    "summary": "Make attendees to accept legal terms per event",
    "version": "8.0.1.0.0",
    "category": "Marketing",
    "website": "http://www.antiun.com",
    "author": "Antiun Ingeniería S.L., Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "auto_install": True,
    "depends": [
        "website_event_sale",
        "website_sale_product_legal",
    ],
    "data": [
        "views/event_event_view.xml",
        "views/legal_term_view.xml",
        "views/templates.xml",
    ],
}
|
arnaudsj/milk | milk/supervised/multi_view.py | Python | mit | 1,957 | 0.009198 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
import numpy as np
__all__ = [
'multi_view_learner',
]
class multi_view_model(object):
    """Combines one trained model per feature view into a single classifier."""

    def __init__(self, models):
        self.models = models

    def apply(self, features):
        """Classify one example given its per-view feature lists."""
        if len(features) != len(self.models):
            raise ValueError('milk.supervised.two_view: Nr of features does not match training data (got %s, expected %s)' % (len(features) ,len(self.models)))
        probs = np.array([m.apply(f) for m, f in zip(self.models, features)])
        # Degenerate probabilities decide immediately (<= 0 wins over >= 1).
        if np.any(probs <= 0.):
            return False
        if np.any(probs >= 1.):
            return True
        # Binary combination of independent views: vote positive iff
        # prod p_i > prod (1 - p_i), i.e. iff sum log(p_i / (1 - p_i)) > 0.
        log_odds = np.log(probs / (1 - probs))
        return log_odds.sum() > 0
class multi_view_learner(object):
    '''
    Multi View Learner

    This learner learns different classifiers on multiple sets of features and
    combines them for classification.
    '''
    def __init__(self, bases):
        self.bases = bases

    def train(self, features, labels, normalisedlabels=False):
        """Train one base learner per feature view.

        :param features: sequence of per-sample feature tuples, one entry
            per view (sample-major; transposed internally to view-major)
        :param labels: sequence of labels
        :param normalisedlabels: unused; kept for the milk learner interface
        :return: a multi_view_model combining the per-view models
        """
        # Transpose sample-major input into view-major lists; list() keeps
        # len() working on Python 3 as well.
        features = list(zip(*features))
        if len(features) != len(self.bases):
            raise ValueError('milk.supervised.multi_view_learner: ' +
                    'Nr of features does not match classifiser construction (got %s, expected %s)'
                    % (len(features) ,len(self.bases)))
        models = []
        for basis, f in zip(self.bases, features):
            try:
                f = np.array(f)
            except Exception:
                # Ragged per-view features cannot form a rectangular array;
                # fall back to an object-dtype array.
                f = np.array(f, dtype=object)
            models.append(basis.train(f, labels))
        return multi_view_model(models)
multi_view_classifier = multi_view_learner
|
webrian/ProcessingDiagramPlugin | DiagramAlgorithmProvider.py | Python | gpl-2.0 | 4,094 | 0.001221 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Adrian Weber
Email : webrian at gmx dot net
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Adrian Weber'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Adrian Weber'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import *
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import Setting, ProcessingConfig
from processing.tools import system
#from diagram.SwissStyleAlgorithm import SwissStyleAlgorithm
from diagram.PiechartDiagramAlgorithm import PiechartDiagramAlgorithm
from diagram.DividedWingchartDiagramAlgorithm import DividedWingchartDiagramAlgorithm
from diagram.BarchartDiagramAlgorithm import BarchartDiagramAlgorithm
from diagram.DivergentBarchartDiagramAlgorithm import DivergentBarchartDiagramAlgorithm
from diagram.DividedBarchartDiagramAlgorithm import DividedBarchartDiagramAlgorithm
class DiagramAlgorithmProvider(AlgorithmProvider):
    """QGIS Processing provider exposing the diagram creation algorithms."""

    alglist = []

    def __init__(self):
        AlgorithmProvider.__init__(self)
        # Provider is deactivated by default; the user enables it from the
        # Processing options dialog.
        self.activate = False
        # Load algorithms
        self.alglist = [
            PiechartDiagramAlgorithm(),
            DividedWingchartDiagramAlgorithm(),
            BarchartDiagramAlgorithm(),
            DivergentBarchartDiagramAlgorithm(),
            DividedBarchartDiagramAlgorithm(),
            ]
        for alg in self.alglist:
            alg.provider = self

    def initializeSettings(self):
        """In this method we add settings needed to configure our
        provider.

        Do not forget to call the parent method, since it takes care
        of automatically adding a setting for activating or
        deactivating the algorithms in the provider.
        """
        AlgorithmProvider.initializeSettings(self)

    def unload(self):
        """Settings should be removed here, so they do not appear anymore
        when the plugin is unloaded.
        """
        AlgorithmProvider.unload(self)

    def getName(self):
        """This is the name that will appear on the toolbox group.

        It is also used to create the command line name of all the
        algorithms from this provider.
        """
        return 'diagram'

    def getDescription(self):
        """This is the provider's full, human-readable name.
        """
        return 'Diagrams creation algorithms'

    def getIcon(self):
        """Return the provider icon (logo.png next to this file).
        """
        filepath = os.path.dirname(__file__) + "/logo.png"
        return QIcon(filepath)

    def _loadAlgorithms(self):
        """Here we fill the list of algorithms in self.algs.

        This method is called whenever the list of algorithms should
        be updated. If the list of algorithms can change (for instance,
        if it contains algorithms from user-defined scripts and a new
        script might have been added), you should create the list again
        here.

        In this case, since the list is always the same, we assign from
        the pre-made list. This assignment has to be done in this method
        even if the list does not change, since the self.algs list is
        cleared before calling this method.
        """
        self.algs = self.alglist
|
DanteLore/national-rail | loadstations.py | Python | mit | 1,412 | 0.002833 | import argparse
from osgb import osgb_to_lonlat
from osgb.convert import eastnorth_to_osgb
from utils.database import insert_into_db, empty_table, execute_sql
# Loads data from here: https://data.gov.uk/dataset/naptan
def read_stations(filename):
    """Yield station records parsed from a NaPTAN RailReferences CSV.

    Skips the header row. Each yielded dict has ``crs``, ``name``,
    ``easting`` and ``northing`` keys (OS grid coordinates as ints).

    NOTE(review): the naive comma split is kept from the original --
    station names containing quoted commas would need the ``csv`` module.
    """
    with open(filename, 'r') as input_file:
        next(input_file, None)  # skip the header row; stream the rest lazily
        for line in input_file:
            splits = line.strip().split(",")
            yield {
                "crs": splits[2],
                "name": splits[3],
                # int() instead of the Python-2-only long(); Python 2 ints
                # auto-promote to long, so behaviour is unchanged there.
                "easting": int(splits[6]),
                "northing": int(splits[7])
            }
def convert(row):
    """Attach WGS84 latitude/longitude to a station row, in place.

    NOTE(review): assumes the ``osgb`` helpers convert OS grid
    easting/northing to lon/lat -- confirm against the osgb package docs.
    Returns the same (mutated) dict for convenient use with ``map``.
    """
    grid_ref = eastnorth_to_osgb(row["easting"], row["northing"], digits=4)
    lon, lat = osgb_to_lonlat(grid_ref)
    row["latitude"] = lat
    row["longitude"] = lon
    return row
if __name__ == "__main__":
    # Command-line entry point: parse the NaPTAN rail reference CSV,
    # convert OS grid coordinates to lat/lon and store the result in SQLite.
    parser = argparse.ArgumentParser(description='National Rail Data Collector')
    parser.add_argument('--filename', help='Input CSV file', default="data/RailReferences.csv")
    parser.add_argument('--db', help='SQLite DB Name', default="data/trains.db")
    args = parser.parse_args()
    # Idempotent schema setup; the table is emptied before each reload.
    execute_sql(args.db, "create table if not exists stations (crs TEXT, name TEXT, easting INT, northing INT, latitude DOUBLE, longitude DOUBLE);")
    rows = read_stations(args.filename)
    # Materialize as a list so the data survives re-iteration on Python 3,
    # where map() would return a one-shot iterator.
    stations = [convert(row) for row in rows]
    empty_table(args.db, "stations")
    insert_into_db(args.db, "stations", stations)
zpace/SparsePak-SFH | ppxf_util.py | Python | mit | 12,608 | 0.003411 | #######################################################################
#
# Copyright (C) 2001-2014, Michele Cappellari
# E-mail: cappellari_at_astro.ox.ac.uk
#
# This software is provided as is without any warranty whatsoever.
# Permission to use, for non-commercial purposes is granted.
# Permission to modify for personal or internal use is granted,
# provided this copyright and disclaimer are included unchanged
# at the beginning of the file. All other rights are reserved.
#
#######################################################################
#
# NAME:
# LOG_REBIN
#
# PURPOSE:
# Logarithmically rebin a spectrum, while rigorously conserving the flux.
# Basically the photons in the spectrum are simply redistributed according
# to a new grid of pixels, with non-uniform size in the spectral direction.
#
# This routine makes the `standard' zero-order assumption that the spectrum
# is *constant* within each pixel. It is possible to perform log-rebinning
# by assuming the spectrum is represented by a piece-wise polynomial of
# higher degree, while still obtaining a uniquely defined linear problem,
# but this reduces to a deconvolution and amplifies noise.
#
# This same routine can be used to compute approximate errors
# of the log-rebinned spectrum. To do this type the command
#
# LOG_REBIN, lamRange, err^2, err2New
#
# and the desired errors will be given by SQRT(err2New).
# NB: This rebinning of the error-spectrum is very *approximate* as
# it does not consider the correlation introduced by the rebinning!
#
# CALLING SEQUENCE:
# LOG_REBIN, lamRange, spec, specNew, logLam, $
# OVERSAMPLE=oversample, VELSCALE=velScale, /FLUX
#
# INPUTS:
# LAMRANGE: two elements vector containing the central wavelength
# of the first and last pixels in the spectrum, which is assumed
# to have constant wavelength scale! E.g. from the values in the
# standard FITS keywords: LAMRANGE = CRVAL1 + [0,CDELT1*(NAXIS1-1)].
# It must be LAMRANGE[0] < LAMRANGE[1].
# SPEC: input spectrum.
#
# OUTPUTS:
# SPECNEW: logarithmically rebinned spectrum.
# LOGLAM: log(lambda) (*natural* logarithm: ALOG) of the central
# wavelength of each pixel. This is the log of the geometric
# mean of the borders of each pixel.
#
# KEYWORDS:
# FLUX: Set this keyword to preserve total flux. In this case the
# log rebinning changes the pixels flux in proportion to their
# dLam so the following command will show large differences
# beween the spectral shape before and after LOG_REBIN:
#
# plot, exp(logLam), specNew # Plot log-rebinned spectrum
# oplot, range(lamRange[0],lamRange[1],n_elements(spec)), spec
#
# By default, when this keyword is *not* set, the above two lines
# produce two spectra that almost perfectly overlap each other.
# OVERSAMPLE: Oversampling can be done, not to lose spectral resolution,
# especially for extended wavelength ranges and to avoid aliasing.
# Default: OVERSAMPLE=1 ==> Same number of output pixels as input.
# VELSCALE: velocity scale in km/s per pixels. If this variable is
# not defined, then it will contain in output the velocity scale.
# If this variable is defined by the user it will be used
# to set the output number of pixels and wavelength scale.
#
# MODIFICATION HISTORY:
# V1.0.0: Using interpolation. Michele Cappellari, Leiden, 22 October 2001
# V2.0.0: Analytic flux conservation. MC, Potsdam, 15 June 2003
# V2.1.0: Allow a velocity scale to be specified by the user.
# MC, Leiden, 2 August 2003
# V2.2.0: Output the optional logarithmically spaced wavelength at the
# geometric mean of the wavelength at the border of each pixel.
# Thanks to Jesus Falcon-Barroso. MC, Leiden, 5 November 2003
# V2.2.1: Verify that lamRange[0] < lamRange[1].
# MC, Vicenza, 29 December 2004
# V2.2.2: Modified the documentation after feedback from James Price.
# MC, Oxford, 21 October 2010
# V2.3.0: By default now preserve the shape of the spectrum, not the
# total flux. This seems what most users expect from the procedure.
# Set the keyword /FLUX to preserve flux like in previous version.
# MC, Oxford, 30 November 2011
# V3.0.0: Translated from IDL into Python. MC, Santiago, 23 November 2013
# V3.1.0: Fully vectorized log_rebin. Typical speed up by two orders of magnitude.
# MC, Oxford, 4 March 2014
#
#----------------------------------------------------------------------
from __future__ import print_function
import numpy as np
def log_rebin(lamRange, spec, oversample=False, velscale=None, flux=False):
    """
    Logarithmically rebin a spectrum, while rigorously conserving the flux.
    Basically the photons in the spectrum are simply redistributed according
    to a new grid of pixels, with non-uniform size in the spectral direction.

    :param lamRange: [lamMin, lamMax], central wavelengths of the first and
        last pixel of the linearly-sampled input spectrum.
    :param spec: one-dimensional input spectrum (constant wavelength step).
    :param oversample: optional factor multiplying the number of output pixels.
    :param velscale: km/s per output pixel; derived from the input when None.
    :param flux: if True, conserve total flux per pixel instead of the
        spectral shape.
    :return: (specNew, logLam, velscale) -- the rebinned spectrum, the
        *natural* log of each output pixel's geometric-mean wavelength,
        and the velocity scale actually used.
    """
    lamRange = np.asarray(lamRange)
    if len(lamRange) != 2:
        raise ValueError('lamRange must contain two elements')
    if lamRange[0] >= lamRange[1]:
        raise ValueError('It must be lamRange[0] < lamRange[1]')
    s = spec.shape
    if len(s) != 1:
        raise ValueError('input spectrum must be a vector')
    n = s[0]
    if oversample:
        m = int(n*oversample)
    else:
        m = int(n)  # Default: same number of output pixels as input
    dLam = np.diff(lamRange)/(n - 1.)  # Assume constant dLam
    lim = lamRange/dLam + [-0.5, 0.5]  # All in units of dLam
    borders = np.linspace(*lim, num=n+1)  # Linearly spaced input pixel borders
    logLim = np.log(lim)
    c = 299792.458  # Speed of light in km/s
    if velscale is None:  # No velscale supplied: derive it from the sampling
        velscale = np.diff(logLim)/m*c  # Only for output
    else:  # Velocity scale is set by the user
        logScale = velscale/c
        m = int(np.diff(logLim)/logScale)  # Number of output pixels
        logLim[1] = logLim[0] + m*logScale
    newBorders = np.exp(np.linspace(*logLim, num=m+1))  # Logarithmically spaced
    k = (newBorders - lim[0]).clip(0, n-1).astype(int)
    specNew = np.add.reduceat(spec, k)[:-1]  # Do analytic integral
    specNew *= np.diff(k) > 0  # fix for design flaw of reduceat()
    specNew += np.diff((newBorders - borders[k])*spec[k])
    if not flux:
        specNew /= np.diff(newBorders)
    # Output log(wavelength): log of geometric mean of the pixel borders
    logLam = np.log(np.sqrt(newBorders[1:]*newBorders[:-1])*dLam)
    return specNew, logLam, velscale
#----------------------------------------------------------------------
#
# PPXF_DETERMINE_GOODPIXELS: Example routine to generate the vector of goodPixels
# to be used as input keyword for the routine PPXF. This is useful to mask
# gas emission lines or atmospheric absorptions.
# It can be trivially adapted to mask different lines.
#
# INPUT PARAMETERS:
# - LOGLAM: Natural logarithm ALOG(wave) of the wavelength in Angstrom
# of each pixel of the log rebinned *galaxy* spectrum.
# - LAMRANGETEMP: Two elements vectors [lamMin2,lamMax2] with the minimum and
# maximum wavelength in Angstrom in the stellar *template* used in PPXF.
# - VEL: Estimate of the galaxy velocity in km/s.
#
# V1.0.0: Michele Cappellari, Leiden, 9 September 2005
# V1.0.1: Made a separate routine and included additional common emission lines.
# MC, Oxford 12 January 2012
# V2.0.0: Translated from IDL into Python. MC, Oxford, 10 December 2013
# V2.0.1: Updated line list. MC, Oxford, 8 January 2014
def determine_goodpixels(logLam, lamRangeTemp, vel, mask_elines = True):
"""
Generates a list of goodpixels to mask a given set of gas emission
lines. This is meant to be used as input for PPXF.
"""
# -----[OII]----- Hdelta Hgamma Hbeta -----[OIII]----- [OI] -----[NII]----- Halpha -----[SII]-----
lines = np.array([3726.03, 3728.82, 4101.76, 4340.47, 4861.33, 4958.92, 5006.84, 6300.30, 6548.03, 6583.41, 6562.80, 6716.47, 6730.85])
#sky lines NaI OI HgI Atm
#lines = lines.append([5683.88, 5577., 5461., 5199.])
dv = lines*0 + 1200 # width/2 of masked gas emission region in km/s
c = 299792.458 # sp |
genomoncology/related | tests/ex06_json/models.py | Python | mit | 765 | 0 | from enum import Enum, unique
import related
@unique
class DayType(Enum):
    """Enumeration distinguishing normal trading days from holidays."""
    NORMAL = "Normal"
    HOLIDAY = "Holiday"
@related.immutable
class DayData(object):
    """Immutable record of a single day's activity at a store."""
    # NOTE: a stray "|" extraction artifact before the class keyword was
    # removed; it made the original line a syntax error.
    date = related.DateField()
    logged_on = related.TimeField("%H:%M")  # explicit HH:MM parse format
    open_at = related.TimeField()
    closed_on = related.TimeField()
    customers = related.IntegerField()
    day_type = related.ChildField(DayType)
    sales = related.FloatField(required=False)  # the only optional field
@related.immutable
class StoreData(object):
    """Immutable summary of a store together with its per-day records."""
    # NOTE: a stray "|" extraction artifact before data_to was removed;
    # it made the original line a syntax error.
    name = related.StringField()
    id = related.IntegerField()
    created_on = related.DateTimeField("%m/%d/%Y %H:%M:%S")  # explicit format
    data_from = related.DateTimeField()
    data_to = related.DateTimeField()
    days = related.SequenceField(DayData)
    price = related.DecimalField()
|
mlperf/training_results_v0.7 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/utils/c2_model_loading.py | Python | apache-2.0 | 7,055 | 0.002268 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
import sys
from collections import OrderedDict

import torch

from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.registry import Registry
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
# Affine-Channel -> BatchNorm enaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
# GroupNorm
layer_keys = [k.replace("conv1.gn.s", "bn1.weight") for k in layer_keys]
layer_keys = [k.replace("conv1.gn.bias", "bn1.bias") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.s", "bn2.weight") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.bias", "bn2.bias") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.s", "bn3.weight") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.bias", "bn3.bias") for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.s", "downsample.1.weight") \
for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.bias", "downsample.1.bias") \
for k in layer_keys]
return layer_keys
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
    """Translate a Caffe2 blob dict into a PyTorch-style OrderedDict.

    Builds the old-name -> new-name mapping via the basic ResNet and FPN
    renaming helpers, drops SGD momentum blobs, and converts every
    remaining array to a torch tensor under its remapped name.
    """
    c2_keys = sorted(weights.keys())
    mapped_keys = sorted(weights.keys())

    # For X-101, rename the output to fc1000 to avoid conflicts afterwards.
    mapped_keys = ["fc1000_b" if key == "pred_b" else key for key in mapped_keys]
    mapped_keys = ["fc1000_w" if key == "pred_w" else key for key in mapped_keys]

    # Basic renaming (_ -> . etc.), then FPN-specific renaming.
    mapped_keys = _rename_basic_resnet_weights(mapped_keys)
    mapped_keys = _rename_fpn_weights(mapped_keys, stage_names)

    # Mask R-CNN / Keypoint R-CNN heads, then nest RPN under rpn.head.
    head_renames = (
        ("mask.fcn.logits", "mask_fcn_logits"),
        (".[mask].fcn", "mask_fcn"),
        ("conv5.mask", "conv5_mask"),
        ("kps.score.lowres", "kps_score_lowres"),
        ("kps.score", "kps_score"),
        ("conv.fcn", "conv_fcn"),
        ("rpn.", "rpn.head."),
    )
    for old, new in head_renames:
        mapped_keys = [key.replace(old, new) for key in mapped_keys]

    key_map = dict(zip(c2_keys, mapped_keys))

    logger = logging.getLogger(__name__)
    logger.info("Remapping C2 weights")
    # Column width for aligned logging (momentum blobs are skipped below).
    max_c2_key_size = max(len(key) for key in c2_keys if "_momentum" not in key)

    new_weights = OrderedDict()
    for c2_name in c2_keys:
        if "_momentum" in c2_name:
            continue  # SGD momentum buffers are not model parameters
        tensor = torch.from_numpy(weights[c2_name])
        logger.info("C2 name: {: <{}} mapped name: {}".format(
            c2_name, max_c2_key_size, key_map[c2_name]))
        new_weights[key_map[c2_name]] = tensor
    return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
if torch._six.PY3:
data = pickle.load(f, encoding="latin1")
else:
data = pickle.load(f)
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
# Caffe2 stage-name suffixes (last block index per residual stage), keyed
# by backbone depth; consumed by _rename_fpn_weights via stage_names.
_C2_STAGE_NAMES = {
    "R-50": ["1.2", "2.3", "3.5", "4.2"],
    "R-101": ["1.2", "2.3", "3.22", "4.2"],
    "R-152": ["1.2", "2.7", "3.35", "4.2"],
}
# Maps a CONV_BODY identifier to the loader that can parse its C2 weights.
C2_FORMAT_LOADER = Registry()
@C2_FORMAT_LOADER.register("R-50-C4")
@C2_FORMAT_LOADER.register("R-50-C5")
@C2_FORMAT_LOADER.register("R-101-C4")
@C2_FORMAT_LOADER.register("R-101-C5")
@C2_FORMAT_LOADER.register("R-50-FPN")
@C2_FORMAT_LOADER.register("R-50-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-101-FPN")
# Fix: the decorator below was split by a "|" extraction artifact
# ("@C2_FORMAT_LOADER.reg / ister(...)"); reconstructed here.
@C2_FORMAT_LOADER.register("R-101-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-152-FPN")
def load_resnet_c2_format(cfg, f):
    """Load Caffe2 ResNet/FPN weights from *f*.

    Returns ``{"model": state_dict}`` with blob names remapped to this
    project's module names.
    """
    state_dict = _load_c2_pickled_weights(f)
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    # Strip head/FPN suffixes to recover the bare backbone depth key.
    arch = conv_body.replace("-C4", "").replace("-C5", "").replace("-FPN", "")
    arch = arch.replace("-RETINANET", "")
    stages = _C2_STAGE_NAMES[arch]
    state_dict = _rename_weights_for_resnet(state_dict, stages)
    return dict(model=state_dict)
def load_c2_format(cfg, f):
    """Dispatch to the loader registered for cfg's backbone CONV_BODY."""
    return C2_FORMAT_LOADER[cfg.MODEL.BACKBONE.CONV_BODY](cfg, f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.