fangohr/oommf-python | dev/umm-exploration-inheritance.py | Python | bsd-2-clause
class AbstractMicromagneticModell:
def __init__(self, name, Ms):
self.name = name
self.Ms = Ms
self.field = None
self.energies = []
def __str__(self):
return "AbstractMicromagneticModell(name={})".format(self.name)
def relax(self):
self._relax()
#raise NotImplementedError("relax is abstract method")
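        # concrete backends are expected to implement _relax(); see OOMMFC and FIDIMAGC below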
def set_H(self, field):
print("AbstractMicromagneticModell: setting field = {}")
self.field = field
def hysteresis(self, fieldlist):
print("AbstractMicromagneticModell: starting hysteresis")
for field in fieldlist:
self.set_H(field)
self._relax()
class OOMMFC(AbstractMicromagneticModell):
def __init__(self, name, Ms):
AbstractMicromagneticModell.__init__(self, name, Ms)
def __str__(self):
return "OOMMFC(name={}, Ms={})".format(self.name, self.Ms)
def _relax(self):
print("Calling OOMMF to run relax() with H={}".format(self.field))
class FIDIMAGC(AbstractMicromagneticModell):
def __init__(self, name, Ms):
        AbstractMicromagneticModell.__init__(self, name, Ms)
def __str__(self):
return "FIDIMAG(name={}, Ms={})".format(self.name, self.Ms)
def _relax(self):
print("Calling FIDIMAG to run relax() with H={}".format(self.field))
#a = AbstractMicromagneticModell('simulation-name', 10)
#print(a)
#a.hysteresis([10, 20])
o = OOMMFC(name='oommf-simulation', Ms=8e6)
print(o)
o.relax()
f = FIDIMAGC(name='fidimag-simulation', Ms=8e6)
print(f)
f.relax()
#o.relax()
#o.hysteresis([10, 20, 30])
alvaroribas/modeling_TDs | Herschel_mapmaking/scanamorphos/PACS/general_script_L1_PACS.py | Python | mit
### This script fetches level-1 PACS imaging data, using a list generated by the
### archive (in the CSV format), attaches sky coordinates and masks to them
### (by calling the convertL1ToScanam task) and saves them to disk in the correct
### format for later use by Scanamorphos.
### See important instructions below.
#######################################################
### This script is part of the Scanamorphos package.
### HCSS is free software: you can redistribute it and/or modify
### it under the terms of the GNU Lesser General Public License as
### published by the Free Software Foundation, either version 3 of
### the License, or (at your option) any later version.
#######################################################
## Import classes and definitions:
import os
from herschel.pacs.spg.phot import ConvertL1ToScanamTask
#######################################################
## local settings:
dir_root = "/pcdisk/stark/aribas/Desktop/modeling_TDs/remaps_Cha/PACS/scanamorphos/"
path = dir_root +"L1/"
### number of observations:
n_obs = 2
#######################################################
## Do a multiple target search in the archive and use the "save all results as CSV" option.
## --> ascii table 'results.csv' where lines can be edited
## (suppress unwanted observations and correct target names)
## Create the directories contained in the dir_out variables (l. 57)
## before running this script.
#######################################################
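## Expected shape of results_fast.csv (an illustration -- the real file comes
## from the archive's "save all results as CSV" export; the first row is a
## header, column 0 holds the obsids and column 1 the target names):
##   obsid,target
##   1342212345,source_a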
## observations:
table_obs = asciiTableReader(file=dir_root+'results_fast.csv', tableType='CSV', skipRows=1)
list_obsids = table_obs[0].data
list_names = table_obs[1].data
for i_obs in range(n_obs):
##
num_obsid = list_obsids[i_obs]
source = list_names[i_obs]
source = str.lower(str(source))
    dir_out = path+source+"_processed_obsids"
# create directory if it does not exist
if not(os.path.exists(dir_out)):
os.system('mkdir '+dir_out)
##
print ""
print "Downloading obsid " + `num_obsid`
obs = getObservation(num_obsid, useHsa=True, instrument="PACS", verbose=True)
###
frames = obs.level1.refs["HPPAVGR"].product.refs[0].product
convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)
###
frames = obs.level1.refs["HPPAVGB"].product.refs[0].product
convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)
### END OF SCRIPT
#######################################################
andela-ooshodi/django-photo-application | djangophotoapp/photoapp/models.py | Python | gpl-2.0
from time import time
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_delete
from django.dispatch import receiver
import os
def upload_path(instance, filename):
return 'uploads/user_{0}/{1}_{2}'.format(
        instance.owner.id,
str(time()).replace('.', '_'),
filename
)
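# e.g. upload_path(img, 'photo.jpg') -> 'uploads/user_3/1514764800_52_photo.jpg'
# (illustrative owner id and timestamp; the '.' in time() is replaced by '_')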
class UserProfile(models.Model):
user = models.OneToOneField(User)
photo = models.TextField()
class Images(models.Model):
owner = models.ForeignKey(User)
image = models.ImageField(upload_to=upload_path)
image_file_name = models.CharField(max_length=100, null=True)
date_created = models.DateTimeField(
auto_now_add=True, verbose_name='created')
# Function to delete from the file storage
@receiver(post_delete, sender=Images)
def delete_from_file_system(sender, instance, **kwargs):
image_path = instance.image.path
# split the image part
filepath, ext = os.path.splitext(image_path)
# create the filtered image path
new_filepath = filepath + "filtered" + ext
# delete from file directory
if os.path.exists(image_path):
# delete image
os.remove(image_path)
if os.path.exists(new_filepath):
# delete filtered image
os.remove(new_filepath)
senttech/Cura | plugins/CuraProfileReader/__init__.py | Python | agpl-3.0
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import CuraProfileReader
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Cura Profile Reader"),
"author": "Ultima | ker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Provides support for importing Cura profiles."),
"api": 3
},
"profile_reader": [
{
"extension": "curaprofile",
"description": catalog.i18nc("@item:inlistbox", "Cura Profile")
}
]
}
def register(app):
return { "profile_reader": CuraProfileReader.CuraProfileReader() }
Yelp/paasta | paasta_tools/paastaapi/model/kubernetes_container.py | Python | apache-2.0
# coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from paasta_tools.paastaapi.model.task_tail_lines import TaskTailLines
globals()['TaskTailLines'] = TaskTailLines
class KubernetesContainer(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'name': (str,), # noqa: E501
'tail_lines': (TaskTailLines,), # noqa: E501
        }
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
        'tail_lines': 'tail_lines',  # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""KubernetesContainer - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): Name of the container. [optional] # noqa: E501
tail_lines (TaskTailLines): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
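# Usage sketch (attribute names taken from the generated model above;
# values are illustrative):
#   container = KubernetesContainer(name="main")
#   container.name  # -> "main"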
andim/scipy | scipy/linalg/lapack.py | Python | bsd-3-clause
"""
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgegv
dgegv
cgegv
zgegv
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesv
dgesv
cgesv
zgesv
sgetrf
dgetrf
cgetrf
zgetrf
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
chbevd
zhbevd
chbevx
zhbevx
cheev
zheev
cheevd
zheevd
cheevr
zheevr
chegv
zhegv
chegvd
zhegvd
chegvx
zhegvx
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
dlasd4
slasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
sposv
dposv
cposv
zposv
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
crot
zrot
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
cunghr
zunghr
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgtsv
dgtsv
cgtsv
zgtsv
sptsv
dptsv
cptsv
zptsv
slamch
dlamch
sorghr
dorghr
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssyev
dsyev
ssyevd
dsyevd
ssyevr
dsyevr
ssygv
dsygv
ssygvd
dsygvd
ssygvx
dsygvx
slange
dlange
clange
zlange
"""
#
# Author: Pearu Peterson, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_lapack_funcs']
import numpy as _np
from .blas import _get_funcs
# Backward compatibility:
from .blas import find_best_blas_type as find_best_lapack_type
from scipy.linalg import _flapack
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
# Backward compatibility
from scipy._lib._util import DeprecatedImport as _DeprecatedImport
clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
# some convenience alias for complex functions
_lapack_alias = {
'corghr': 'cunghr', 'zorghr': 'zunghr',
'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
'corgqr': 'cungqr', 'zorgqr': 'zungqr',
'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
def get_lapack_funcs(names, arrays=(), dtype=None):
"""Return available LAPACK function objects from names.
Arrays are used to determine the optimal prefix of LAPACK routines.
Parameters
----------
names : str or sequence of str
Name(s) of LAPACK functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of LAPACK
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In LAPACK, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
    types {float32, float64, complex64, complex128} respectively, and
are stored in attribute `typecode` of the returned functions.
"""
return _get_funcs(names, arrays, dtype,
"LAPACK", _flapack, _clapack,
"flapack", "clapack", _lapack_alias)
def _compute_lwork(routine, *args, **kwargs):
"""
Round floating-point lwork returned by lapack to integer.
Several LAPACK routines compute optimal values for LWORK, which
they return in a floating-point variable. However, for large
values of LWORK, single-precision floating point is not sufficient
to hold the exact value --- some LAPACK versions (<= 3.5.0 at
least) truncate the returned integer to single precision and in
some cases this can be smaller than the required value.
"""
lwork, info = routine(*args, **kwargs)
if info != 0:
raise ValueError("Internal work array size computation failed: %d" % (info,))
lwork = lwork.real
if getattr(routine, 'dtype', None) == _np.float32:
# Single-precision routine -- take next fp value to work
# around possible truncation in LAPACK code
lwork = _np.nextafter(_np.float32(lwork), _np.float32(_np.inf))
lwork = int(lwork)
if lwork < 0 or lwork > _np.iinfo(_np.int32).max:
raise ValueError("Too large work array required -- computation cannot "
"be performed with standard 32-bit LAPACK.")
return lwork
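# Usage sketch (``gesdd_lwork`` is one of the *_lwork routines listed above;
# the trailing arguments mirror the corresponding LAPACK workspace query):
#
#   gesdd_lwork, = get_lapack_funcs(('gesdd_lwork',), (a,))
#   lwork = _compute_lwork(gesdd_lwork, m, n)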
plotly/plotly.py | packages/python/plotly/plotly/validators/icicle/outsidetextfont/_color.py | Python | mit
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="icicle.outsidetextfont", **kwargs
):
super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
manuelgomezsuarez/practicasAII | practicasAII/Practica1/practica1.py | Python | gpl-3.0
# encoding: latin1
import urllib2, re
from Tkinter import *
import tkMessageBox
import sqlite3
def extraer_datos():
f = urllib2.urlopen("http://www.us.es/rss/feed/portada")
s = f.read()
l = re.findall(r'<item>\s*<title>(.*)</title>\s*<link>(.*)</link>\s*<description>.*</description>\s*<author>.*</author>\s*(<category>.*</category>)?\s*<guid.*</guid>\s*<pubDate>(.*)</pubDate>\s*</item>', s)
return l
def almacenar_bd():
conn = sqlite3.connect('test.db')
    conn.text_factory = str  # to avoid problems with the character set the DB handles
conn.execute("DRO | P TABLE IF EXISTS NOTICIAS")
conn.execute('''CREATE TABLE NOTICIAS
(ID INTEGER PRIMARY KEY AUTOINCREMENT,
        TITULO TEXT NOT NULL,
LINK TEXT NOT NULL,
FECHA TEXT NOT NULL);''')
l = extraer_datos()
for i in l:
conn.execute("""INSERT INTO NOTICIAS (TITULO, LINK, FECHA) VALUES (?,?,?)""",(i[0],i[1],i[3]))
conn.commit()
cursor = conn.execute("SELECT COUNT(*) FROM NOTICIAS")
tkMessageBox.showinfo( "Base Datos", "Base de datos creada correctamente \nHay " + str(cursor.fetchone()[0]) + " registros")
conn.close()
def listar_bd():
conn = sqlite3.connect('test.db')
conn.text_factory = str
cursor = conn.execute("SELECT TITULO,LINK, FECHA FROM NOTICIAS")
imprimir_etiqueta(cursor)
conn.close()
def imprimir_etiqueta(cursor):
v = Toplevel()
sc = Scrollbar(v)
sc.pack(side=RIGHT, fill=Y)
lb = Listbox(v, width=150, yscrollcommand=sc.set)
for row in cursor:
lb.insert(END,row[0])
lb.insert(END,row[1])
lb.insert(END,row[2])
lb.insert(END,'')
lb.pack(side = LEFT, fill = BOTH)
sc.config(command = lb.yview)
def buscar_bd():
def listar_busqueda(event):
conn = sqlite3.connect('test.db')
conn.text_factory = str
s = "%"+en.get()+"%"
cursor = conn.execute("""SELECT TITULO,LINK,FECHA FROM NOTICIAS WHERE FECHA LIKE ?""",(s,)) # al ser de tipo string, el ? le pone comillas simples
imprimir_etiqueta(cursor)
conn.close()
v = Toplevel()
lb = Label(v, text="Introduzca el mes (Xxx): ")
lb.pack(side = LEFT)
en = Entry(v)
en.bind("<Return>", listar_busqueda)
en.pack(side = LEFT)
def ventana_principal():
top = Tk()
almacenar = Button(top, text="Almacenar", command = almacenar_bd)
almacenar.pack(side = LEFT)
listar = Button(top, text="Listar", command = listar_bd)
listar.pack(side = LEFT)
Buscar = Button(top, text="Buscar", command = buscar_bd)
Buscar.pack(side = LEFT)
top.mainloop()
if __name__ == "__main__":
ventana_principal()
Kaumer/html-minifier | test/test.py | Python | mit
import unittest
from pathlib import Path
from html_minifier import Minifier
from html_minifier import DjangoMinifier
class TestMinify(unittest.TestCase):
ext_min = "_min"
    file_name = "base"
_file = "{0}.html"
location = "html"
def setUp(self):
path = Path(__file__).parent
file_name = ""
names = (self.file_name, self.ext_min)
html_vars = ["html", "html_min"]
for i, name in enumerate(names):
            file_name = ''.join([file_name, name])
_file = self._file.format(file_name)
f = path.joinpath(self.location, _file).open()
setattr(self, html_vars[i], f.read())
f.close()
def test_minifier(self):
mini = Minifier(self.html)
self.assertEqual(mini.minify(), self.html_min)
class TestMinifyDjango(TestMinify):
file_name = "django"
def test_minifier(self):
mini = DjangoMinifier(self.html)
self.assertEqual(mini.minify(), self.html_min)
victordomene/ram-paxos | workloads/workload_B.py | Python | mit
"""
This workload presents a simple interface that can be reused in other workloads.
In summary, it runs several subprocesses using the multiprocessing package,
makes the connections between them, and then starts working.
This particular workload spawns NETWORK_SIZE machines, two of which are
proposers. We can run with either gRPC or RDTP by changing start_vm or
proposer_entrypoint / replicas_entrypoint. Just make sure that the workload
does not start with replicas running gRPC and proposer running RDTP.
"""
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import time
from multiprocessing import Process
from paxos.vm import VM
from paxos.messengers import rpcMessenger
from paxos.receivers import rpcReceiver
from paxos.messengers import rdtpMessenger
from paxos.receivers import rdtpReceiver
from paxos import proposer, acceptor, learner
NETWORK_SIZE = 5
HOST = "localhost"
START_PORT = 6666
def initialize_rdtp_vm(name, use_disk):
return VM(name, rdtpMessenger.rdtpMessenger, rdtpReceiver.rdtpReceiver, use_disk)
def initialize_grpc_vm(name, use_disk):
return VM(name, rpcMessenger.grpcMessenger, rpcReceiver.grpcReceiver, use_disk)
def start_vm(name, network, initialize_vm = initialize_rdtp_vm):
"""
Starts a virtual machine with a given initializer.
Already starts serving.
@param name: the name of the machine that will be started
@param network: a dictionary containing information on the machines in
the network
@param initialize_vm: an initializer for the VM; by default, RDTP
@return: The instance of the RDTP virtual machine
"""
# initialize the virtual machine with my name
    vm = initialize_vm(name, use_disk=True)
# fetch the host/port information from the network for me
host, port = network[name]
# add other machines
    for friend_name, (friend_host, friend_port) in network.iteritems():
# !# should we send it to ourselves?
if friend_name == name:
continue
vm.add_destination(friend_name, friend_host, friend_port)
# start serving
vm.serve(host, port)
return vm
def proposer_entrypoint(name, network):
"""
Thread entrypoint for a proposer.
This must simply call start_rdtp_vm with our name and network.
"""
# start an rdtp VM with our name and start serving
vm = start_vm(name, network)
# sleep a little bit before trying to send proposals
# (cheating for bootstrap)
time.sleep(2)
# decree number and value; these will change
n = 0
v = 5000
while True:
# propose values
vm.propose_to_quorum(n, v)
# update values for next round
n += 1
v -= 1
# give some time before proposing again
time.sleep(1)
def replicas_entrypoint(name, network):
# start an rdtp VM with our name and start serving
vm = start_vm(name, network)
# simply sleep forever, the server will handle the
# necessary requests
try:
while True:
time.sleep(600)
except KeyboardInterrupt:
vm.stop_server()
def main():
"""
    Main routine for this workload; spawn two proposers (M0 and M1) and
    NETWORK_SIZE - 2 replicas.
"""
# a network is a dictionary of names => (host, port)
# we first build a network; then we spawn proposers, and finally
# spawn replicas
network = {}
# initialize the network
for i in xrange(NETWORK_SIZE):
name = "M" + str(i)
network[name] = (HOST, START_PORT + i)
# initialize the proposer process
proposer = Process(target = proposer_entrypoint, args = ("M0", network))
proposer.start()
proposer = Process(target = proposer_entrypoint, args = ("M1", network))
proposer.start()
# initialize all the replicas
for name in network.keys():
# M0 is our proposer; we ignore it
if name == "M0" or name == "M1":
continue
replicas = Process(target = replicas_entrypoint, args = (name, network))
replicas.start()
if __name__ == "__main__":
main()
oihane/odoomrp-utils | crm_claim_extra_ref/model/crm_claim.py | Python | agpl-3.0
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class CrmClaim(models.Model):
_inherit = 'crm.claim'
@api.multi
def _links_get(self):
link_obj = self.env['res.request.link']
return [(r.object, r.name) for r in link_obj.search([])]
@api.one
@api.depends('ref2')
def _generate_ref_model_name2(self):
model_obj = self.env['ir.model']
self.ref_model_name2 = False
if self.ref2:
cond = [('model', '=', str(self.ref2._model))]
model = model_obj.search(cond)
self.ref_model_name2 = model.name
@api.one
@api.depends('ref2')
def _generate_ref_name2(self):
self.ref_name2 = False
if self.ref2:
self.ref_name2 = self.ref2.name_get()[0][1]
@api.one
@api.depends('ref3')
def _generate_ref_model_name3(self):
model_obj = self.env['ir.model']
self.ref_model_name3 = False
if self.ref3:
cond = [('model', '=', str(self.ref3._model))]
model = model_obj.search(cond)
self.ref_model_name3 = model.name
@api.one
@api.depends('ref3')
def _generate_ref_name3(self):
self.ref_name3 = False
if self.ref3:
self.ref_name3 = self.ref3.name_get()[0][1]
ref2 = fields.Reference(string='Reference 2', selection=_links_get)
ref_model_name2 = fields.Char(
        string='Ref. Model 2', compute='_generate_ref_model_name2', store=True)
ref_name2 = fields.Char(
string='Ref. Name 2', compute='_generate_ref_name2', store=True)
ref3 = fields.Reference(string='Reference 3', selection=_links_get)
ref_model_name3 = fields.Char(
        string='Ref. Model 3', compute='_generate_ref_model_name3', store=True)
ref_name3 = fields.Char(
string='Ref. Name 3', compute='_generate_ref_name3', store=True)
ncrocfer/weevely3 | testsuite/test_file_cd.py | Python | gpl-3.0
from testfixtures import log_capture
from testsuite.base_fs import BaseFilesystem
from testsuite import config
from core.sessions import SessionURL
from core import modules
import utils
from core import messages
import subprocess
import os
class FileCd(BaseFilesystem):
def setUp(self):
self.session = SessionURL(
self.url,
self.password,
volatile = True
)
modules.load_modules(self.session)
self.folders, folders_rel = self.populate_folders()
# Change mode of the last folder to 0
self.check_call(
config.cmd_env_chmod_s_s % ('0', self.folders[-1]),
shell=True)
self.run_argv = modules.loaded['file_cd'].run_argv
def tearDown(self):
# Reset mode of the last folder to 777
self.check_call(
config.cmd_env_chmod_s_s % ('777', self.folders[-1]),
shell=True)
for folder in reversed(self.folders):
self.check_call(
config.cmd_env_rmdir_s % (folder),
shell=True)
@log_capture()
def test_cwd(self, log_captured):
# cd [0]
new = self.folders[0]
self.run_argv([ new ])
self.assertEquals(new, self.session['file_cd']['results']['cwd'])
# cd [-1]
new = self.folders[-1]
        self.run_argv([ new ])
self.assertEquals(self.folders[0], self.session['file_cd']['results']['cwd'])
self.assertEqual(
messages.module_file_cd.failed_directory_change_to_s % new,
log_captured.records[-1].msg
)
# new [1]/.././[1]/./
new = self.folders[1]
self.run_argv([ '%s/.././%s/./' % (new, os.path.split(new)[-1]) ])
        self.assertEquals(new, self.session['file_cd']['results']['cwd'])
# new bogus
new = 'bogus'
self.run_argv([ new ])
self.assertEquals(self.folders[1], self.session['file_cd']['results']['cwd'])
self.assertEqual(
messages.module_file_cd.failed_directory_change_to_s % new,
log_captured.records[-1].msg
)
# new [2]/.././[2]/../
new = self.folders[2]
self.run_argv([ '%s/.././////////%s/../' % (new, os.path.split(new)[-1]) ])
self.assertEquals(self.folders[1], self.session['file_cd']['results']['cwd'])
d0ugal/readthedocs.org | readthedocs/builds/migrations/0015_add_privacy.py | Python | mit
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Version.privacy_level'
db.add_column('builds_version', 'privacy_level',
self.gf('django.db.models.fields.CharField')(default='public', max_length=20),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Version.privacy_level'
db.delete_column('builds_version', 'privacy_level')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 6, 898344)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 6, 898075)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.build': {
'Meta': {'ordering': "['-date']", 'object_name': 'Build'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['projects.Project']"}),
'setup': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'finished'", 'max_length': '55'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'html'", 'max_length': '55'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'builds.versionalias': {
'Meta': {'object_name': 'VersionAlias'},
'from_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'largest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['projects.Project']"}),
'to_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.mo |
sebrandon1/neutron | neutron/tests/unit/ipam/test_requests.py | Python | apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_utils import netutils
from oslo_utils import uuidutils
from neutron import context
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron import manager
from neutron.tests import base
from neutron.tests.unit.ipam import fake_driver
FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver'
class IpamSubnetRequestTestCase(base.BaseTestCase):
def setUp(self):
super(IpamSubnetRequestTestCase, self).setUp()
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
def test_subnet_request(self):
pool = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id)
self.assertEqual(self.tenant_id, pool.tenant_id)
self.assertEqual(self.subnet_id, pool.subnet_id)
self.assertIsNone(pool.gateway_ip)
self.assertIsNone(pool.allocation_pools)
def test_subnet_request_gateway(self):
request = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.1')
self.assertEqual('1.2.3.1', str(request.gateway_ip))
def test_subnet_request_bad_gateway(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.')
def test_subnet_request_with_range(self):
allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'),
netaddr.IPRange('1.2.3.7', '1.2.3.9')]
request = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id,
allocation_pools=allocation_pools)
self.assertEqual(allocation_pools, request.allocation_pools)
def test_subnet_request_range_not_list(self):
self.assertRaises(TypeError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
                          allocation_pools=1)
def test_subnet_request_bad_range(self):
self.assertRaises(TypeError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
                          allocation_pools=['1.2.3.4'])
def test_subnet_request_different_versions(self):
pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'),
netaddr.IPRange('::1', '::2')]
self.assertRaises(ValueError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
def test_subnet_request_overlap(self):
pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'),
netaddr.IPRange('0.0.0.8', '0.0.0.10')]
self.assertRaises(ValueError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam_req.AnySubnetRequest(self.tenant_id,
self.subnet_id,
constants.IPv4,
24,
gateway_ip='0.0.0.1')
self.assertEqual(24, request.prefixlen)
def test_subnet_request_bad_prefix_type(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
'A')
def test_subnet_request_bad_prefix(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
33)
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
129)
def test_subnet_request_gateway(self):
request = ipam_req.AnySubnetRequest(self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
gateway_ip='2000::1')
self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip)
def test_subnet_request_allocation_pool_wrong_version(self):
pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')]
self.assertRaises(ipam_exc.IpamValueInvalid,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
allocation_pools=pools)
def test_subnet_request_allocation_pool_not_in_net(self):
pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')]
self.assertRaises(ipam_exc.IpamValueInvalid,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
25,
allocation_pools=pools)
class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam_req.SpecificSubnetRequest(self.tenant_id,
self.subnet_id,
'1.2.3.0/24',
gateway_ip='1.2.3.1')
self.assertEqual(24, request.prefixlen)
self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip)
self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr)
def test_subnet_request_gateway(self):
request = ipam_req.SpecificSubnetRequest(self.tenant_id,
self.subnet_id,
'2001::1',
gateway_ip='2000::1')
self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip)
class TestAddressRequest(base.BaseTestCase):
# This class doesn't test much. At least running through all of the
# constructors may shake out some trivial bugs.
EUI64 = ipam_req.AutomaticAddressRequest.EUI64
def setUp(self):
super(TestAddressRequest, self).setUp()
def test_specific_address_ipv6(self):
request = ipam_req.SpecificAddressRequest('2000::45')
self.assertEqual(netaddr.IPAddress('2000::45'), request.address)
def test_specific_address_ipv4(self):
request = ipam_req.SpecificAddressRequest('1.2.3.32')
self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address)
zjj/trac_hack | contrib/checkwiki.py | Python | bsd-3-clause
#!/usr/bin/python
#
# Check/update default wiki pages from the Trac project website.
#
# Note: This is a development tool used in Trac packaging/QA, not something
# particularly useful for end-users.
#
# Author: Daniel Lundin <daniel@edgewall.com>
import httplib
import re
import sys
import getopt
# Pages to include in distribution
wiki_pages = [
"CamelCase",
"InterMapTxt",
"InterTrac",
"InterWiki",
"PageTemplates",
"RecentChanges",
"TitleIndex",
"TracAccessibility",
"TracAdmin",
"TracBackup",
"TracBrowser",
"TracCgi",
"TracChangeset",
"TracEnvironment",
"TracFastCgi",
"TracFineGrainedPermissions",
"TracGuide",
"TracImport",
"TracIni",
"TracInstall",
"TracInterfaceC | ustomization",
"TracLinks",
"TracLogging",
"TracModPython",
"TracModWSGI",
"TracNavigation",
"TracNotification",
"TracPermissions",
"TracPlugins",
"TracQuery",
"TracReports",
"TracRepositoryAdmin",
"TracRevisionLog",
"TracRoadmap",
"TracRss",
"TracSearch",
"TracStandalone",
"TracSupport",
| "TracSyntaxColoring",
"TracTickets",
"TracTicketsCustomFields",
"TracTimeline",
"TracUnicode",
"TracUpgrade",
"TracWiki",
"TracWorkflow",
"WikiDeletePage",
"WikiFormatting",
"WikiHtml",
"WikiMacros",
"WikiNewPage",
"WikiPageNames",
"WikiProcessors",
"WikiRestructuredText",
"WikiRestructuredTextLinks"
]
def get_page_from_file(prefix, pname):
d = ''
try:
f = open(pname ,'r')
d = f.read()
f.close()
except:
print "Missing page: %s" % pname
return d
def get_page_from_web(prefix, pname):
host = "trac.edgewall.org"
rfile = "/wiki/%s%s?format=txt" % (prefix, pname)
c = httplib.HTTPConnection(host)
c.request("GET", rfile)
print "Getting", rfile
r = c.getresponse()
d = r.read()
if r.status == 200 and d:
f = open(pname, 'w+')
f.write(d)
f.close()
else:
print "Missing or empty page"
c.close()
return d
def check_links(data):
def get_refs(t, refs=[]):
r = "(?P<wikilink>(^|(?<=[^A-Za-z]))[!]?[A-Z][a-z/]+(?:[A-Z][a-z/]+)+)"
m = re.search (r, t)
if not m:
refs.sort()
result = []
orf = None
for rf in refs:
if rf != orf:
result.append(rf)
orf = rf
return result
refs.append(m.group())
return get_refs( t[m.end():], refs)
for p in data.keys():
links = get_refs(data[p], [])
for l in links:
if l not in data.keys():
print "Broken link: %s -> %s" % (p, l)
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "dCp:")
except getopt.GetoptError:
# print help information and exit:
print "%s [-d] [-C] [-p prefix] [PAGE ...]" % sys.argv[0]
print "\t-d -- Download pages from the main project wiki."
print "\t-C -- Don't try to check links (it's broken anyway)"
print "\t-p prefix -- When downloading, prepend 'prefix/' to the page."
sys.exit()
get_page = get_page_from_file
prefix = ''
check = True
for o,a in opts:
if o == '-d':
get_page = get_page_from_web
elif o == '-p':
prefix = a+'/'
elif o == '-C':
check = False
data = {}
for p in args or wiki_pages:
data[p] = get_page(prefix, p)
if check:
check_links(data)
google/grumpy | third_party/pypy/_struct.py | Python | apache-2.0
#
# This module is a pure Python version of pypy.module.struct.
# It is only imported if the vastly faster pypy.module.struct is not
# compiled in. For now we keep this version for reference and
# because pypy.module.struct is not ootype-backend-friendly yet.
#
"""Functions to convert between Python values and C structs.
Python strings are used to hold the data representing the C struct
and also as format strings to describe the layout of data in the C struct.
The optional first format char indicates byte order, size and alignment:
@: native order, size & alignment (default)
=: native order, std. size & alignment
<: little-endian, std. size & alignment
>: big-endian, std. size & alignment
!: same as >
The remaining chars indicate types of args and must match exactly;
these can be preceded by a decimal repeat count:
x: pad byte (no data);
c:char;
b:signed byte;
B:unsigned byte;
h:short;
H:unsigned short;
i:int;
I:unsigned int;
l:long;
L:unsigned long;
f:float;
d:double.
Special cases (preceding decimal count indicates length):
s:string (array of char); p: pascal string (with count byte).
Special case (only available in native format):
P:an integer type that is wide enough to hold a pointer.
Special case (not in native mode unless 'long long' in platform C):
q:long long;
Q:unsigned long long
Whitespace between formats is ignored.
The variable struct.error is an exception raised on errors."""
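# Usage sketch of the format language above (assuming this module mirrors the
# stdlib ``struct`` API; pack/unpack are defined further down in the full file):
#
#   data = pack('>hI', 1, 2)      # big-endian short + unsigned int, 6 bytes
#   values = unpack('>hI', data)  # -> (1, 2)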
import math
import sys
# TODO: XXX Find a way to get information on native sizes and alignments
class StructError(Exception):
pass
error = StructError
bytes = str
def unpack_int(data, index, size, le):
_bytes = [b for b in data[index:index + size]]
if le == 'little':
_bytes.reverse()
number = 0
for b in _bytes:
number = number << 8 | b
return int(number)
def unpack_signed_int(data, index, size, le):
number = unpack_int(data, index, size, le)
max = (1 << (size * 8))
if number > (1 << (size * 8 - 1)) - 1:
number = int(-1 * (max - number))
return number
INFINITY = 1e200 * 1e200
NAN = INFINITY / INFINITY
def unpack_char(data, index, size, le):
return data[index:index + size]
def pack_int(number, size, le):
x = number
res = []
for i in range(size):
res.append(x & 0xff)
x = x >> 8
if le == 'big':
res.reverse()
return ''.join(chr(x) for x in res)
def pack_signed_int(number, size, le):
if not isinstance(number, int):
raise StructError("argument for i,I,l,L,q,Q,h,H must be integer")
if number > (1 << (8 * size - 1)) - 1 or number < -1 * (1 << (8 * size - 1)):
raise OverflowError("Number:%i too large to convert" % number)
return pack_int(number, size, le)
def pack_unsigned_int(number, size, le):
if not isinstance(number, int):
raise StructError("argument for i,I,l,L,q,Q,h,H must be integer")
if number < 0:
raise TypeError("can't convert negative long to unsigned")
if number > (1 << (8 * size)) - 1:
raise OverflowError("Number:%i too large to convert" % number)
return pack_int(number, size, le)
def pack_char(char, size, le):
return str(char)
def isinf(x):
return x != 0.0 and x / 2 == x
def isnan(v):
return v != v * 1.0 or (v == 1.0 and v == 2.0)
def pack_float(x, size, le):
unsigned = float_pack(x, size)
result = []
for i in range(8):
result.append((unsigned >> (i * 8)) & 0xFF)
if le == "big":
result.reverse()
return ''.join(chr(x) for x in result)
def unpack_float(data, index, size, le):
binary = [data[i] for i in range(index, index + 8)]
if le == "big":
binary.reverse()
unsigned = 0
for i in range(8):
# unsigned |= binary[i] << (i * 8)
unsigned |= ord(binary[i]) << (i * 8)
return float_unpack(unsigned, size, le)
def round_to_nearest(x):
"""Python 3 style round: round a float x to the nearest int, but
unlike the builtin Python 2.x round function:
- return an int, not a float
- do round-half-to-even, not round-half-away-from-zero.
    We assume that x is finite and nonnegative; expect wrong results
if you use this for negative x.
"""
int_part = int(x)
frac_part = x - int_part
if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1:
int_part += 1
return int_part
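# e.g. round_to_nearest(2.5) -> 2 but round_to_nearest(3.5) -> 4
# (ties go to the even integer, as the docstring above describes)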
def float_unpack(Q, size, le):
"""Convert a 32-bit or 64-bit integer created
by float_pack into a Python float."""
if size == 8:
MIN_EXP = -1021 # = sys.float_info.min_exp
MAX_EXP = 1024 # = sys.float_info.max_exp
MANT_DIG = 53 # = sys.float_info.mant_dig
BITS = 64
elif size == 4:
MIN_EXP = -125 # C's FLT_MIN_EXP
MAX_EXP = 128 # FLT_MAX_EXP
MANT_DIG = 24 # FLT_MANT_DIG
BITS = 32
else:
raise ValueError("invalid size value")
if Q >> BITS:
raise ValueError("input out of range")
# extract pieces
sign = Q >> BITS - 1
exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1
mant = Q & ((1 << MANT_DIG - 1) - 1)
if exp == MAX_EXP - MIN_EXP + 2:
# nan or infinity
result = float('nan') if mant else float('inf')
elif exp == 0:
# subnormal or zero
result = math.ldexp(float(mant), MIN_EXP - MANT_DIG)
else:
# normal
mant += 1 << MANT_DIG - 1
result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1)
return -result if sign else result
def float_pack(x, size):
"""Convert a Python float x into a 64-bit unsigned integer
with the same byte representation."""
if size == 8:
MIN_EXP = -1021 # = sys.float_info.min_exp
MAX_EXP = 1024 # = sys.float_info.max_exp
MANT_DIG = 53 # = sys.float_info.mant_dig
BITS = 64
elif size == 4:
        MIN_EXP = -125  # C's FLT_MIN_EXP
MAX_EXP = 128 # FLT_MAX_EXP
MANT_DIG = 24 # FLT_MANT_DIG
BITS = 32
else:
raise ValueError("invalid size value")
sign = math.copysign(1.0, x) < 0.0
if math.isinf(x):
mant = 0
exp = MAX_EXP - MIN_EXP + 2
    elif math.isnan(x):
mant = 1 << (MANT_DIG - 2) # other values possible
exp = MAX_EXP - MIN_EXP + 2
elif x == 0.0:
mant = 0
exp = 0
else:
m, e = math.frexp(abs(x)) # abs(x) == m * 2**e
exp = e - (MIN_EXP - 1)
if exp > 0:
# Normal case.
mant = round_to_nearest(m * (1 << MANT_DIG))
mant -= 1 << MANT_DIG - 1
else:
# Subnormal case.
if exp + MANT_DIG - 1 >= 0:
mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1))
else:
mant = 0
exp = 0
# Special case: rounding produced a MANT_DIG-bit mantissa.
assert 0 <= mant <= 1 << MANT_DIG - 1
if mant == 1 << MANT_DIG - 1:
mant = 0
exp += 1
# Raise on overflow (in some circumstances, may want to return
# infinity instead).
if exp >= MAX_EXP - MIN_EXP + 2:
raise OverflowError("float too large to pack in this format")
# check constraints
assert 0 <= mant < 1 << MANT_DIG - 1
assert 0 <= exp <= MAX_EXP - MIN_EXP + 2
assert 0 <= sign <= 1
return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant
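# Round-trip sketch: float_pack(1.5, 8) == 0x3FF8000000000000, the IEEE-754
# bit pattern of 1.5, and float_unpack(float_pack(1.5, 8), 8, 'little')
# returns 1.5 (float_unpack's ``le`` argument is unused here; byte order is
# handled by pack_float/unpack_float above).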
big_endian_format = {
'x': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'b': {'size': 1, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'B': {'size': 1, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'c': {'size': 1, 'alignment': 0, 'pack': pack_char, 'unpack': unpack_char},
's': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'p': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'h': {'size': 2, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'H': {'size': 2, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'i': {'size': 4, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'I': {'size': 4, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'l': {'size': 4, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'L': {'size': 4, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'q': {'size': 8, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'Q': {'size': 8, 'alignment': 0, ' |
dl1ksv/gnuradio | gnuradio-runtime/examples/network/audio_sink.py | Python | gpl-3.0
#!/usr/bin/env python
#
# Copyright 2006,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from argparse import ArgumentParser
import sys
try:
from gnuradio import audio
except ImportError:
sys.stderr.write(
"Failed to import gnuradio.audio. Make sure gr-audio component is installed.\n")
sys.exit(1)
class audio_sink(gr.top_block):
    def __init__(self, host, port, pkt_size, sample_rate, eof):
gr.top_block.__init__(self, "audio_sink")
src = blocks.udp_source(gr.sizeof_float, host, port, pkt_size, eof=eof)
dst = audio.sink(sample_rate)
self.connect(src, dst)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--host", default="0.0.0.0",
help="local host name (domain name or IP address)")
parser.add_argument("--p | ort", type=int, default=65500,
help="port value to listen to for connection")
parser.add_argument("--packet-size", type=int, default=1472,
help="packet size.")
parser.add_argument("-r", "--sample-rate", type=int, default=32000,
help="audio signal sample rate [default=%(default)r]")
parser.add_argument("--no-eof", action="store_true", default=False,
help="don't send EOF on disconnect")
args = parser.parse_args()
# Create an instance of a hierarchical block
top_block = audio_sink(args.host, args.port,
args.packet_size, args.sample_rate,
not args.no_eof)
try:
# Run forever
top_block.run()
except KeyboardInterrupt:
# Ctrl-C exits
pass
privacyidea/privacyidea | privacyidea/lib/error.py | Python | agpl-3.0
# -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
contains Errors and Exceptions
"""
import six
from privacyidea.lib import _
import logging
log = logging.getLogger(__name__)
class ERROR:
SUBSCRIPTION = 101
TOKENADMIN = 301
CONFIGADMIN = 302
POLICY = 303
VALIDATE = 401
REGISTRATION = 402
AUTHENTICATE = 403
AUTHENTICATE_WRONG_CREDENTIALS = 4031
AUTHENTICATE_MISSING_USERNAME = 4032
AUTHENTICATE_AUTH_HEADER = 4033
AUTHENTICATE_DECODING_ERROR = 4304
AUTHENTICATE_TOKEN_EXPIRED = 4305
AUTHENTICATE_MISSING_RIGHT = 4306
CA = 503
RESOURCE_NOT_FOUND = 601
HSM = 707
SELFSERVICE = 807
SERVER = 903
USER = 904
PARAMETER = 905
@six.python_2_unicode_compatible
class privacyIDEAError(Exception):
def __init__(self, description=u"privacyIDEAError!", id=10):
self.id = id
self.message = description
Exception.__init__(self, description)
def getId(self):
return self.id
def getDescription(self):
return self.message
def __str__(self):
pstr = u"ERR%d: %r"
if isinstance(self.message, six.string_types):
pstr = u"ERR%d: %s"
        ### if we have unicode here, we might fail with a conversion error
try:
res = pstr % (self.id, self.message)
except Exception as exx:
res = u"ERR{0:d}: {1!r}".format(self.id, self.message)
return res
def __repr__(self):
ret = '{0!s}(description={1!r}, id={2:d})'.format(type(self).__name__,
self.message, self.id)
return ret
class SubscriptionError(privacyIDEAError):
def __init__(self, description=None, application=None, id=ERROR.SUBSCRIPTION):
self.id = id
self.message = description
self.application = application
privacyIDEAError.__init__(self, description, id=self.id)
def __str__(self):
return self.__repr__()
def __repr__(self):
ret = '{0!s}({1!r}, application={2!s})'.format(type(
self).__name__, self.message, self.application)
return ret
class AuthError(privacyIDEAError):
def __init__(self, description, id=ERROR.AUTHENTICATE, details=None):
self.details = details
privacyIDEAError.__init__(self, description=description, id=id)
class ResourceNotFoundError(privacyIDEAError):
def __init__(self, description, id=ERROR.RESOURCE_NOT_FOUND):
privacyIDEAError.__init__(self, description=description, id=id)
class PolicyError(privacyIDEAError):
def __init__(self, description, id=ERROR.POLICY):
privacyIDEAError.__init__(self, description=description, id=id)
class ValidateError(privacyIDEAError):
def __init__(self, description="validation error!", id=ERROR.VALIDATE):
privacyIDEAError.__init__(self, description=description, id=id)
class RegistrationError(privacyIDEAError):
def __init__(self, description="registraion error!", id=ERROR.REGISTRATION):
privacyIDEAError.__init__(self, description=description, id=id)
class TokenAdminError(privacyIDEAError):
def __init__(self, descrip | tion="token admin error!", id=ERROR.TOKENADMIN):
privacyIDEAError.__i | nit__(self, description=description, id=id)
class ConfigAdminError(privacyIDEAError):
def __init__(self, description="config admin error!", id=ERROR.CONFIGADMIN):
privacyIDEAError.__init__(self, description=description, id=id)
class CAError(privacyIDEAError):
def __init__(self, description="CA error!", id=ERROR.CA):
privacyIDEAError.__init__(self, description=description, id=id)
class UserError(privacyIDEAError):
def __init__(self, description="user error!", id=ERROR.USER):
privacyIDEAError.__init__(self, description=description, id=id)
class ServerError(privacyIDEAError):
def __init__(self, description="server error!", id=ERROR.SERVER):
privacyIDEAError.__init__(self, description=description, id=id)
class HSMException(privacyIDEAError):
def __init__(self, description="hsm error!", id=ERROR.HSM):
privacyIDEAError.__init__(self, description=description, id=id)
class SelfserviceException(privacyIDEAError):
def __init__(self, description="selfservice error!", id=ERROR.SELFSERVICE):
privacyIDEAError.__init__(self, description=description, id=id)
class ParameterError(privacyIDEAError):
USER_OR_SERIAL = _('You either need to provide user or serial')
def __init__(self, description="unspecified parameter error!", id=ERROR.PARAMETER):
privacyIDEAError.__init__(self, description=description, id=id)
|
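A minimal usage sketch for the hierarchy above, assuming the module imports cleanly; it raises one subclass and reads back the numeric id, the description, and the formatted string:

try:
    raise ParameterError(ParameterError.USER_OR_SERIAL)
except privacyIDEAError as err:
    print(err.getId())           # 905, i.e. ERROR.PARAMETER
    print(err.getDescription())  # the USER_OR_SERIAL text
    print(str(err))              # "ERR905: ..."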
knuu/nlp100 | chap03/25.py | Python | mit | 328 | 0.00625 | import re
with open('England.txt') | as f:
data = f.read()
pat = re.compile(r"\{\{基礎情報 (.*?)\n\}\}", re.S)
baseInfo = '\n'.join(pat.findall(data))
print(baseInfo)
pat = re.compile(r"\|(.*?) = (.*)")
Info = pat.findall(baseInfo)
dic = {key: cont for key, cont in Info}
# print(dic)
| |
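A quick self-contained check of the key/value pattern used above, run against a fabricated two-line infobox snippet instead of England.txt:

sample = u"|国名 = イギリス\n|首都 = ロンドン"
pairs = re.findall(r"\|(.*?) = (.*)", sample)
print({key: cont for key, cont in pairs})
# {u'国名': u'イギリス', u'首都': u'ロンドン'}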
tpazderka/pysaml2 | src/saml2/mdbcache.py | Python | bsd-2-clause | 6,873 | 0.000436 | #!/usr/bin/env python
import logging
__author__ = 'rolandh'
from pymongo import Connection
#import cjson
import time
from datetime import datetime
from saml2 import time_util
from saml2.cache import ToOld
from saml2.time_util import TIME_FORMAT
logger = logging.getLogger(__name__)
class Cache(object):
def __init__(self, server=None, debug=0, db=None):
if server:
connection = Connection(server)
else:
connection = Connection()
if db:
self._db = connection[db]
else:
self._db = connection.pysaml2
self._cache = self._db.collection
self.debug = debug
def delete(self, subject_id):
self._cache.remove({"subject_id": subject_id})
def get_identity(self, subject_id, entities=None,
check_not_on_or_after=True):
""" Get all the identity information that has been received and
are still valid about the subject.
:param subject_id: The identifier of the subject
:param entities: The identifiers of the entities whose assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
whose information has timed out.
"""
res = {}
oldees = []
if not entities:
for item in self._cache.find({"subject_id": subject_id}):
try:
info = self._get_info(item, check_not_on_or_after)
except ToOld:
oldees.append(item["entity_id"])
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
else:
for entity_id in entities:
try:
info = self.get(subject_id, entity_id,
check_not_on_or_after)
except ToOld:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees
def _get_info(self, item, check_not_on_or_after=True):
""" Get session information about a subject gotten from a
specified IdP/AA.
:param item: Information stored
:return: The session information as a dictionary
"""
timestamp = item["timestamp"]
if check_not_on_or_after and not time_util.not_on_or_after(timestamp):
raise ToOld()
try:
return item["info"]
except KeyError:
return None
def get(self, subject_id, entity_id, check_not_on_or_after=True):
res = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
if not res:
return {}
else:
return self._get_info(res, check_not_on_or_after)
def set(self, subject_id, entity_id, info, timestamp=0):
""" Stores session information in the cache. Assumes that the subject_id
is unique within the context of the Service Provider.
:param subject_id: The subject identifier
:param entity_id: The identifier of the entity_id/receiver of an
assertion
:param info: The session info, the assertion is part of this
:param timestamp: A time after which the assertion is not valid.
"""
if isinstance(timestamp, datetime) or isinstance(timestamp,
time.struct_time):
timestamp = time.strftime(TIME_FORMAT, timestamp)
doc = {"subject_id": subject_id,
"entity_id": entity_id,
"info": info,
"timestamp": timestamp}
_ = self._cache.insert(doc)
def reset(self, subject_id, entity_id):
""" Scrap the assertions received from a IdP or an AA about a special
subject.
:param subject_id: The subjects identifier
:param entity_id: The identifier of the entity_id of the assertion
:return:
"""
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": {}, "timestamp": 0}})
def entities(self, subject_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param subject_id: The identifier of the subject
:return: A possibly empty list of entity identifiers
"""
try:
return [i["entity_id"] for i in self._cache.find({"subject_id":
subject_id})]
except ValueError:
re | turn []
def receivers(self, subject_id):
""" Another name for entities() just to make it more logic in the IdP
scenario """
return self.entities(subject_id)
def active(self, subject_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False dependi | ng on if the assertion is still
valid or not.
"""
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
try:
return time_util.not_on_or_after(item["timestamp"])
except ToOld:
return False
def subjects(self):
""" Return identifiers for all the subjects that are in the cache.
:return: list of subject identifiers
"""
subj = [i["subject_id"] for i in self._cache.find()]
return list(set(subj))
def update(self, subject_id, entity_id, ava):
""" """
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
info = item["info"]
info["ava"].update(ava)
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": info}})
def valid_to(self, subject_id, entity_id, newtime):
""" """
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"timestamp": newtime}})
def clear(self):
self._cache.remove() |
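A hedged end-to-end sketch of the Cache above. It assumes a reachable local MongoDB, invents the subject and entity identifiers, and formats the timestamp per saml2.time_util.TIME_FORMAT:

cache = Cache(db='pysaml2_demo')
cache.set('subject-1', 'https://idp.example.com',
          {'ava': {'mail': ['user@example.com']}},
          timestamp='2030-01-01T00:00:00Z')
ava, timed_out = cache.get_identity('subject-1')
print(ava)        # {'mail': ['user@example.com']}
print(timed_out)  # [] while the timestamp lies in the future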
youtube/cobalt | third_party/v8/tools/clusterfuzz/js_fuzzer/tools/fuzz_one.py | Python | bsd-3-clause | 1,328 | 0.000753 | #!/usr/bin/env python
# Copyright 2020 the V8 project aut | hors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Helper script to execute a single-processed fuzzing session.
Creates fuzz tests in workdir/output/dir-<dir number>/fuzz-XXX.js.
Expects the <dir number> as single parameter.
"""
import os
import subprocess
import sys
im | port time
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_DIR = os.path.join(BASE_PATH, 'workdir', 'app_dir')
FUZZ_EXE = os.path.join(BASE_PATH, 'workdir', 'fuzzer', 'ochang_js_fuzzer')
INPUT_DIR = os.path.join(BASE_PATH, 'workdir', 'input')
TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')
COUNT = 64
FUZZ = ('FUZZ_MODE=foozzie APP_NAME=d8 APP_DIR=%s %s -o %%s -n %s -i %s > %%s'
% (APP_DIR, FUZZ_EXE, COUNT, INPUT_DIR))
assert(len(sys.argv) > 1)
dir_number = int(sys.argv[1])
assert(dir_number >= 0)
path = os.path.join(TEST_CASES, 'dir-%d' % dir_number)
assert not os.path.exists(path), 'Need fresh workdir for fuzzing'
os.makedirs(path)
start = time.time()
subprocess.check_call(
FUZZ % (path, os.path.join(path, 'out.log')), shell=True)
duration = int(time.time() - start)
with open(os.path.join(path, 'duration.log'), 'w') as f:
f.write(str(duration))
|
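The script takes the directory number as its only argument, so a driver can fan sessions out across processes. A hypothetical wrapper (the script file name is assumed):

import multiprocessing
import subprocess

def run_session(dir_number):
    subprocess.check_call(['python', 'fuzz_one.py', str(dir_number)])

# Populates workdir/output/dir-0 .. dir-3 in parallel.
pool = multiprocessing.Pool(4)
pool.map(run_session, range(4))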
jalavik/inspire-next | inspire/modules/forms/fields/__init__.py | Python | gpl-2.0 | 999 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribu | te it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be use | ful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from invenio_deposit.fields.date import *
from invenio_deposit.fields.wtformsext import *
|
gen1us2k/django-example | config/wsgi.py | Python | mit | 1,453 | 0 | """
WSGI config for django-example project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi pr | ocess. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI | server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
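Following the commented hint above, a minimal hand-rolled WSGI middleware wrapper; the class name and the logging behaviour are invented for illustration:

class PathLoggingMiddleware(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Log each requested path before delegating to Django.
        print(environ.get('PATH_INFO'))
        return self.app(environ, start_response)

# application = PathLoggingMiddleware(application)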
AsherBond/MondocosmOS | grass_trunk/lib/python/ctypes/ctypesgencore/printer/printer.py | Python | agpl-3.0 | 11,921 | 0.011576 | #!/usr/bin/env python
import os, sys, time
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.messages import *
import ctypesgencore.libraryloader # So we can get the path to it
import test # So we can find the path to local files in the printer package
def path_to_local_file(name,known_local_module = test):
basedir=os.path.dirname(known_local_module.__file__)
return os.path.join(basedir,name)
class WrapperPrinter:
def __init__(self,outpath,options,data):
status_message("Writing to %s." % outpath)
self.file=file(outpath,"w")
self.options=options
if self.options.strip_build_path and \
self.options.strip_build_path[-1] != os.path.sep:
self.options.strip_build_path += os.path.sep
self.print_header()
print >>self.file
self.print_preamble()
print >>self.file
self.print_loader()
print >>self.file
self.print_group(self.options.libraries,"libraries",self.print_library)
self.print_group(self.options.modules,"modules",self.print_module)
method_table = {
'function': self.print_function,
'macro': self.print_macro,
'struct': self.print_struct,
'struct-body': self.print_struct_members,
'typedef': self.print_typedef,
'variable': self.print_variable,
'enum': self.print_enum,
'constant': self.print_constant
}
for kind,desc in data.output_order:
if desc.included:
method_table[kind](desc)
print >>self.file
self.print_group(self.options.inserted_files,"inserted files",
self.insert_file)
def print_group(self,list,name,function):
if list:
print >>self.file,"# Begin %s" % name
print >>self.file
for obj in list:
function(obj)
print >>self.file
print >>self.file,"# %d %s" % (len(list),name)
print >>self.file,"# End %s" % name
else: |
print >>self.file,"# No %s" % name
print >>self.file
def srcinfo(self,src):
if src==None:
print >>self.file
else:
filename,lineno = src
if filename in ("<built-in>","<command line>"):
print >>self. | file, "# %s" % filename
else:
if self.options.strip_build_path and \
filename.startswith(self.options.strip_build_path):
filename = filename[len(self.options.strip_build_path):]
print >>self.file, "# %s: %s" % (filename, lineno)
def template_subs(self):
template_subs={
'date': time.ctime(),
'argv': ' '.join([x for x in sys.argv if not x.startswith("--strip-build-path")]),
'name': os.path.basename(self.options.headers[0])
}
for opt,value in self.options.__dict__.iteritems():
if type(value)==str:
template_subs[opt]=value
elif isinstance(value,(list,tuple)):
template_subs[opt]=(os.path.sep).join(value)
else:
template_subs[opt]=repr(value)
return template_subs
def print_header(self):
template_file = None
if self.options.header_template:
path = self.options.header_template
try:
template_file = file(path,"r")
except IOError:
error_message("Cannot load header template from file \"%s\" " \
" - using default template." % path, cls = 'missing-file')
if not template_file:
path = path_to_local_file("defaultheader.py")
template_file = file(path,"r")
template_subs=self.template_subs()
self.file.write(template_file.read() % template_subs)
template_file.close()
def print_preamble(self):
path = path_to_local_file("preamble.py")
print >>self.file, "# Begin preamble"
print >>self.file
preamble_file=file(path,"r")
self.file.write(preamble_file.read())
preamble_file.close()
print >>self.file
print >>self.file, "# End preamble"
def print_loader(self):
print >>self.file, "_libs = {}"
print >>self.file, "_libdirs = %s" % self.options.compile_libdirs
print >>self.file
print >>self.file, "# Begin loader"
print >>self.file
path = path_to_local_file("libraryloader.py",
ctypesgencore.libraryloader)
loader_file=file(path,"r")
self.file.write(loader_file.read())
loader_file.close()
print >>self.file
print >>self.file, "# End loader"
print >>self.file
print >>self.file, "add_library_search_dirs([%s])" % \
", ".join([repr(d) for d in self.options.runtime_libdirs])
def print_library(self,library):
print >>self.file, '_libs["%s"] = load_library("%s")'%(library,library)
def print_module(self,module):
print >>self.file, 'from %s import *' % module
def print_constant(self,constant):
print >>self.file, '%s = %s' % \
(constant.name,constant.value.py_string(False)),
self.srcinfo(constant.src)
def print_typedef(self,typedef):
print >>self.file, '%s = %s' % \
(typedef.name,typedef.ctype.py_string()),
self.srcinfo(typedef.src)
def print_struct(self, struct):
self.srcinfo(struct.src)
base = {'union': 'Union', 'struct': 'Structure'}[struct.variety]
print >>self.file, 'class %s_%s(%s):' % \
(struct.variety, struct.tag, base)
print >>self.file, ' pass'
def print_struct_members(self, struct):
if struct.opaque: return
print >>self.file, '%s_%s.__slots__ = [' % (struct.variety, struct.tag)
for name,ctype in struct.members:
print >>self.file, " '%s'," % name
print >>self.file, ']'
print >>self.file, '%s_%s._fields_ = [' % (struct.variety, struct.tag)
for name,ctype in struct.members:
if isinstance(ctype,CtypesBitfield):
print >>self.file, " ('%s', %s, %s)," % \
(name, ctype.py_string(), ctype.bitfield.py_string(False))
else:
print >>self.file, " ('%s', %s)," % (name, ctype.py_string())
print >>self.file, ']'
def print_enum(self,enum):
print >>self.file, 'enum_%s = c_int' % enum.tag,
self.srcinfo(enum.src)
# Values of enumerator are output as constants.
def print_function(self, function):
if function.variadic:
self.print_variadic_function(function)
else:
self.print_fixed_function(function)
def print_fixed_function(self, function):
self.srcinfo(function.src)
if function.source_library:
print >>self.file, "if hasattr(_libs[%r], %r):" % \
(function.source_library,function.c_name())
print >>self.file, " %s = _libs[%r].%s" % \
(function.py_name(),function.source_library,function.c_name())
print >>self.file, " %s.restype = %s" % \
(function.py_name(),function.restype.py_string())
print >>self.file, " %s.argtypes = [%s]" % (function.py_name(),
', '.join([a.py_string() for a in function.argtypes]))
else:
print >>self.file, "for _lib in _libs.values():"
print >>self.file, " if hasattr(_lib, %r):" % function.c_name()
print >>self.file, " %s = _lib.%s" % (function.py_name(),function.c_name())
print >>self.file, " %s.restype = %s" % (function.py_name(),function.restype.py_string())
print >>self.file, " |
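For orientation, the branches above emit ctypes bindings of roughly this shape. This is illustrative generated output for a hypothetical int add(int, int) in a library loaded as 'demo', not code that belongs in the printer itself:

if hasattr(_libs['demo'], 'add'):
    add = _libs['demo'].add
    add.restype = c_int
    add.argtypes = [c_int, c_int]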
its-lab/MoniTutor | models/0tutordb.py | Python | gpl-3.0 | 7,838 | 0.004721 | from gluon.contrib.appconfig import AppConfig
import uuid
app_conf = AppConfig(reload=True)
DATABASE_NAME = app_conf.take("monitutor_env.database_name")
DATABASE_USER = app_conf.take("monitutor_env.database_user")
DATABASE_PASSWORD = app_conf.take("monitutor_env.database_password")
DATABASE_HOST = app_conf.take("monitutor_env.database_host")
tutordb = DAL("postgres://" + DATABASE_USER + ":" + DATABASE_PASSWORD + "@" + DATABASE_HOST + "/" + DATABASE_NAME)
from gluon.tools import Auth
auth = Auth(tutordb)
auth.settings.extra_fields['auth_user']= [
Field('hmac_secret', length=512, default=lambda:str(uuid.uuid4()).replace("-","")[:16]),
Field('image', type='upload')
]
auth.define_tables(username=True)
if not tutordb.auth_group[1]:
tutordb.auth_group.insert(role="ad | min")
tutordb.define_table('monitutor_scenarios',
Field('scenario_id', type='id'),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('name', typ | e='string', requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True),
Field('description', type='text', required=True),
Field('goal', type='text'),
Field('hidden', type='boolean', default=True),
Field('initiated', type='boolean', default=True))
tutordb.define_table('monitutor_data',
Field('data_id', type='id'),
Field('data', type='upload', required=True),
Field('description', type='text'),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True))
tutordb.define_table('monitutor_scenario_data',
Field('scenario_data_id', type='id'),
Field('scenario_id', 'reference monitutor_scenarios', required=True),
Field('data_id', 'reference monitutor_data', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_data, '%(name)s')))
tutordb.define_table('monitutor_milestones',
Field('milestone_id', type='id'),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True),
Field('description', type='string'))
tutordb.define_table('monitutor_milestone_scenario',
Field('milestone_scenario_id', type='id'),
Field('milestone_id', 'reference monitutor_milestones', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_milestones, '%(name)s')),
Field('scenario_id', 'reference monitutor_scenarios', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_scenarios, '%(name)s')),
Field('sequence_nr', type='integer'),
Field('dependency', 'reference monitutor_milestone_scenario',
requires=IS_IN_DB(tutordb, tutordb.monitutor_scenarios, '%(name)s')),
Field('hidden', type="boolean", default=False))
tutordb.define_table('monitutor_interpreters',
Field('interpreter_id', type='id'),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True),
Field('path', type='string', required=True))
tutordb.define_table('monitutor_programs',
Field('program_id', type='id'),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True),
Field('code', type='text', required=True, requires=IS_LENGTH(655360)),
Field('interpreter_id', 'reference monitutor_interpreters', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_interpreters, '%(name)s')))
tutordb.define_table('monitutor_checks',
Field('check_id', type='id'),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('name', type='string', required=True, requires=[IS_ALPHANUMERIC(), IS_NOT_IN_DB(tutordb,"monitutor_checks.name")]),
Field('display_name', type='string', required=True),
Field('params', type='string'),
Field('program_id', 'reference monitutor_programs', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_programs, '%(name)s')),
Field('hint', type="text"))
tutordb.define_table('monitutor_attachments',
Field('attachment_id', type='id'),
Field('name', type='string', required=True),
Field('producer', type='text', required=True),
Field('filter', type='text'),
Field('requires_status', type='integer'),
Field('check_id', 'reference monitutor_checks', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_checks, '%(name)s')))
tutordb.define_table('monitutor_check_milestone',
Field('check_milestone_id', type='id'),
Field('check_id', 'reference monitutor_checks', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_checks, '%(name)s')),
Field('milestone_id', 'reference monitutor_milestones', required=True,
requires=IS_IN_DB(tutordb, tutordb.monitutor_milestones, '%(name)s')),
Field('flag_invis', type='integer', default=0),
Field('sequence_nr', type='integer'))
tutordb.define_table('monitutor_systems',
Field('system_id', type='id'),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('hostname', type='string', required=True),
Field('ip4_address', type='blob'),
Field('ip6_address', type='blob'),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True),
Field('description', type='string'))
tutordb.define_table('monitutor_types',
Field('type_id', type='id'),
Field('name', type='string', required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type='string', required=True))
if not tutordb(tutordb.monitutor_types.name == "source").select():
tutordb.monitutor_types.insert(name="source", display_name="Source")
tutordb.monitutor_types.insert(name="dest", display_name="Destination")
tutordb.define_table('monitutor_targets',
Field('target_id', type='id'),
Field('system_id', 'reference monitutor_systems',
requires=IS_IN_DB(tutordb, tutordb.monitutor_systems, '%(name)s')),
Field('check_id', 'reference monitutor_checks',
requires=IS_IN_DB(tutordb, tutordb.monitutor_checks, '%(name)s')),
Field('type_id', 'reference monitutor_types',
requires=IS_IN_DB(tutordb, tutordb.monitutor_types, '%(name)s')))
tutordb.define_table('scenario_user',
Field('scenario_user_id', type="id"),
Field('scenario_id', 'reference monitutor_scenarios', required=True),
Field('user_id', 'reference auth_user', required=True),
Field('passed', type="boolean", required=False, default=False))
tutordb.define_table('monitutor_customvars',
Field('customvar_id', type="id", required=True),
Field('uuid', length=64, default=lambda:str(uuid.uuid4())),
Field('name', type="string", required=True, requires=IS_ALPHANUMERIC()),
Field('display_name', type="string", required=True),
Field('value', type="string"))
tutordb.define_table('monitutor_customvar_system', tutordb.monitutor_customvars,
Field('system_id', 'reference monitutor_systems',
requires=IS_IN_DB(tutordb, tutordb.monitutor_systems, '%(name)s')))
tutordb.define_table('monitutor_user_system',
Field('user_system_id', type="id"),
Field('system_id', 'reference monitutor_systems',
requires=IS_IN_DB(tutordb, tutordb.monitutor_systems, '%(name)s')),
Field('user_id', 'reference auth_user'),
Field('hostname', type='string'),
Field('ip4_address', type='blob'),
Field('ip6_address', type='blob'))
tutordb.define_table('monitutor_customvar_user_system', tutordb.monitutor_customvars,
Field('system_id', 'reference monitutor_user_system',
requires=IS_IN_DB(tutordb, tutordb.monitutor_user_system, '%(name)s')))
|
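A hedged query sketch against the tables defined above, using web2py's DAL select syntax; the particular filter is invented:

visible = tutordb(tutordb.monitutor_scenarios.hidden == False).select(
    tutordb.monitutor_scenarios.display_name)
for row in visible:
    print(row.display_name)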
taojy123/GoCMS | gocms/wsgi.py | Python | mit | 1,300 | 0.000769 |
"""
WSGI config for gocms project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
path = os.path.dirname(__file__)
path = os.path.join(path, "..")
os.chdir(path)  # comment this line out when packaging with PyInstaller
sys.path.append(path)
print os.getcwd()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gocms.settings")
# This application object is used by any WSGI serv | er configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldAppl | ication(application)
|
ContributeToScience/participant-booking-app | booking/message/migrations/0001_initial.py | Python | gpl-2.0 | 7,560 | 0.008466 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Message'
db.create_table(u'message_message', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('group_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('sender', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='sent_messages', null=True, to=orm['auth.User'])),
('recipient', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='received_messages', null=True, to=orm['auth.User'])),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('read_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('sender_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('recipient_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('send_email', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'message', ['Message'])
def backwards(self, orm):
# Deleting model 'Message'
db.delete_table(u'message_message')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'message.message': {
'Meta': {'ordering': "['-sent_at', '-id']", 'object_name': 'Message'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
| 'recipient': ('django.db.models.fields.related.Fo | reignKey', [], {'blank': 'True', 'related_name': "'received_messages'", 'null': 'True', 'to': u"orm['auth.User']"}),
'recipient_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_messages'", 'null': 'True', 'to': u"orm['auth.User']"}),
'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['message'] |
gabrielelanaro/solfege | solfege/const.py | Python | gpl-3.0 | 1,656 | 0.012681 | # GNU Solfege - free ear training software
# C | opyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty o | f
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Moved RHYTHMS here because is should be available from a module that does
# not pull in the gtk module.
from __future__ import absolute_import
RHYTHMS = ("c4", "c8 c8", "c16 c16 c16 c16", "c8 c16 c16",
"c16 c16 c8", "c16 c8 c16", "c8. c16", "c16 c8.",
"r4", "r8c8", "r8 c16 c16", "r16 c16 c8", "r16c8c16",
"r16 c16 c16 c16", "r8 r16 c16", "r16 c8.",
"c12 c12 c12", "r12 c12 c12",
"c12 r12 c12", "c12 c12 r12", "r12 r12 c12", "r12 c12 r12",
"c4.", "c4 c8", # 22, 23
"c8 c4", "c8 c8 c8", # 24, 25
"c4 c16 c16", # 26
"c16 c16 c4", # 27
"c8 c8 c16 c16", #28
"c8 c16 c16 c8", #29
"c16 c16 c8 c8", #30
"c8 c16 c16 c16 c16", #31
"c16 c16 c8 c16 c16", #32
"c16 c16 c16 c16 c8", #33
"c16 c16 c16 c16 c16 c16", #34
)
|
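The entries are LilyPond-style duration strings ('c' for notes, 'r' for rests, the digits giving the note value), and the trailing index comments make spot checks easy:

print(RHYTHMS[8])   # 'r4'  - a quarter rest
print(RHYTHMS[22])  # 'c4.' - a dotted quarter note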
nwjs/chromium.src | testing/libfuzzer/zip_sources.py | Python | bsd-3-clause | 1,995 | 0.014035 | #!/usr/bin/python2
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archive all source files that are references in binary debug info.
Invoked by libfuzzer buildbots. Executes dwarfdump to parse debug info.
"""
from __future__ import print_function
import argparse
import os
import re
import subprocess
import zipfile
compile_unit_re = re.compile('.*DW_TAG_compile_unit.*')
at_name_re = re.compile('.*DW_AT_name.*"(.*)".*')
def main():
parser = argparse.ArgumentParser(description="Zip binary sources.")
parser.add_argument('--binary', required=True,
help='binary file to read')
parser.add_argument('--workdir', required=True,
help='working directory to use to resolve relative paths')
parser.add_argument('--srcdir', required=True,
help='sources root directory to calculate zip entry names')
parser.add_argument('--output', required=True,
help='output zip file name')
parser.add_argument('--dwarfdump', required=False,
default='dwarfdump', help='path to dwarfdump utility')
args = parser.parse_args()
# Dump .debug_info section.
out = subprocess.check_output(
[args.dwarfdump, '-i', args.binary])
looking_for_unit = True
compile_units = set()
# Look for DW_AT_name within DW_TAG_compile_un | it
for line in out.splitlines():
if looking_for_unit and compile_unit_re.match(line):
looking_for_unit = False
elif not looking_for_unit:
match = at_name_re.match(line)
if match:
compile_units.add(match.group(1))
looking_for_unit = True
# Zip sources.
with zipfile.ZipFile(args.output, 'w') as z:
for compile_unit in sorted(compile_units):
src_file = os.path.abspath(os.path.join(args.workdir, com | pile_unit))
print(src_file)
z.write(src_file, os.path.relpath(src_file, args.srcdir))
if __name__ == '__main__':
main()
|
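A quick sanity check of the two regexes above against fabricated dwarfdump-style lines (real dwarfdump output varies between versions):

assert compile_unit_re.match('0x0000000b: DW_TAG_compile_unit [1] *')
m = at_name_re.match('  DW_AT_name ( "../../base/foo.cc" )')
assert m and m.group(1) == '../../base/foo.cc'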
richard-fisher/repository | system/base/fontconfig/actions.py | Python | gpl-2.0 | 410 | 0.014634 |
#!/usr/bin/python
from pisi.actionsapi import shelltools, get, autotools, pisitools
def | setup():
autotools.configure ("--prefix=/usr\
--disable-static\
--disable-docs\
--docdir=/usr/share/doc/fontconfig-2.10.2")
def build( | ):
autotools.make ()
def install():
autotools.rawInstall ("DESTDIR=%s" % get.installDIR())
|
Mezgrman/mezgrmanDE | displays/urls.py | Python | agpl-3.0 | 511 | 0.027397 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('displays.views',
url(r'^$', 'index', name = 'index'),
url(r'^(?P<id>\d+)/$', 'display', name = 'display'),
| url(r'^(?P<id>\d+)/settings\.json$', 'ajax_settings', name = 'ajax-settings'),
url(r'^(?P<id>\d+)/bitmap\.json$', 'aja | x_bitmap', name = 'ajax-bitmap'),
url(r'^(?P<id>\d+)/states\.json$', 'ajax_states', name = 'ajax-states'),
url(r'^(?P<id>\d+)/message\.json$', 'ajax_message', name = 'ajax-message'),
)
|
creativcoder/AlgorithmicProblems | codeforces/long_words.py | Python | mit | 227 | 0.057269 | #http://codeforces.com/problemset/problem/71/A
T = int(raw_input())
while T != 0:
wor | d = str(raw_input())
    if len(word) > 10:
        # Keep the first and last letters; the middle collapses to its length.
        print word[0] + str(len(word) - 2) + word[-1]
els | e:
print word
T-=1
|
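A quick check with the classic example for this problem: 'localization' has 12 letters, so it abbreviates to 'l10n':

word = 'localization'
print word[0] + str(len(word) - 2) + word[-1]  # l10n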
bevenky/dev-cms | dev_cms/loader.py | Python | mit | 890 | 0.001124 | # Template loader to retrieve templates from the database
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
from pages.models import Page
from appearance.mode | ls import Template
class DBTemplateLoader(BaseLoader):
is_usable = True
def load_template_source(self, template_name, template_dirs=None):
try:
if template_name.startswith('preview/'):
page = Page.objects.get(preview_url__exact=template_name)
else:
page = Page.objects.get(url__exact=template_name)
return page.content, str(page)
except Page.DoesNotExist:
try:
tmp | l = Template.objects.get(path__exact=template_name)
return tmpl.content, str(tmpl)
except Template.DoesNotExist:
raise TemplateDoesNotExist, template_name
|
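A hedged example of wiring the loader into an old-style (pre-Django-1.8) settings module; the dotted path follows this file's repo location but is an assumption, and the is_usable = True attribute above is what lets Django instantiate it from this tuple:

TEMPLATE_LOADERS = (
    'dev_cms.loader.DBTemplateLoader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)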
wxs/subjective-functions | synthesize.py | Python | mit | 7,775 | 0.013248 | # Copyright 2017, Xavier Snelgrove
import argparse
import os
import sys
import numpy as np
from scipy import ndimage
import gram
from gram import JoinMode
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Synthesize image from texture", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output-width', '-ow', default=512, type=int,
help="Pixel width of generated image")
parser.add_argument('--output-height', '-oh', type=int,
help="Pixel height of generated image. If not specified, equal to output-width.")
parser.add_argument('--octaves', '-o', type=int, default=4,
help="Number of octaves (where 1 means \"Consider only 1 scale\")")
parser.add_argument('--layers', '-l', type=int, nargs='+', default=[2, 7],
help="Which layers to match gram matrices on")
parser.add_argument('--max-iter', '-m', type=int, default=500,
help="Maximum iterations for the L-BFGS-B optimizer")
parser.add_argument("--output-prefix", "-op", default='out',
help="Prefix to append to output directory")
parser.add_argument("--save-every", "-n", default=10, type=int,
help="Save an in-progress optimization image every SAVE_EVERY iterations")
parser.add_argument("--source-scale", "-ss", type=float,
help="How much to scale the source image by")
parser.add_argument("--source-width", "-sw", type=int,
help="Scale source to this width. Mutually exclusive with source-scale")
parser.add_argument("--padding-mode", "-p", type=str, choices = ['valid', 'same'], | default='valid',
help="What | boundary condition to use for convolutions")
parser.add_argument("--join-mode", "-j", type=JoinMode,
choices = list(JoinMode),
default=JoinMode.AVERAGE,
help="How to combine gram matrices when multiple sources given")
parser.add_argument("--count", "-c", type=int, default=1,
help="How many images to generate simultaneously")
parser.add_argument("--mul", type=float, default=1.0, help="Multiply target grams by this amount")
parser.add_argument("--if-weight", type=float, default=1., help="Inter-frame loss weight")
parser.add_argument("--if-shift", type=float, default=5., help="How many pixel-shift should inter-frame loss approximate?")
parser.add_argument("--if-order", type=int, default=2, help="How many frames should we 'tie' together?")
parser.add_argument("--if-distance-type", type=str, choices = ['l2', 'lap1'], default="l2", help="How should we measure the distance between frames?")
parser.add_argument("--if-octaves", type=int, default=1, help="At how many scales should the distance function operate?")
parser.add_argument("--seed", type=str, choices = ['random', 'symmetric'], default='random', help="How to seed the optimization")
parser.add_argument("--data-dir", "-d", type=str, default="model_data", help="Where to find the VGG weight files")
parser.add_argument("--output-dir", type=str, default="outputs", help="Where to save the generated outputs")
parser.add_argument("--tol", type=float, default=1e-9, help="Gradient scale at which to terminate optimization")
parser.add_argument("--source", "-s", required=True, nargs='+',
help="List of file to use as source textures")
args = parser.parse_args()
# Any necessary validation here?
if args.if_octaves > args.octaves:
print("Error: if_octaves must be less than octaves, but %d > %d" % (args.if_octaves, args.octaves))
sys.exit(1)
output_size = (args.output_width, args.output_height if args.output_height is not None else args.output_width)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
output_dir = "{}.L{}.o{}".format(args.output_prefix, ",".join(str(l) for l in args.layers), args.octaves)
output_dir = os.path.join(args.output_dir, output_dir)
if args.source_scale:
output_dir += ".w{:.2}".format(args.source_scale)
if args.source_width:
output_dir += ".w{}".format(args.source_width)
if args.count > 1:
output_dir += ".c{}.ifs{}".format(args.count, args.if_shift)
if args.mul != 1.0:
output_dir += ".m{}".format(args.mul)
if args.join_mode != JoinMode.AVERAGE:
output_dir += ".j{}".format(args.join_mode.value)
if args.if_octaves != 1:
output_dir += ".ifo%d" % args.if_octaves
output_dir += ".{}x{}".format(*output_size)
suffix = 0
base_output_dir = output_dir
while os.path.exists(output_dir):
output_dir = base_output_dir + ".{}".format(suffix)
suffix += 1
if suffix > 100:
print("Hmm, maybe in an infinite loop trying to create output directory")
sys.exit(1)
try:
os.mkdir(output_dir)
except OSError:
print("Hmm, failed to make output directory... race condition?")
sys.exit(1)
# Save the command for good measure
with open(os.path.join(output_dir, "Acommand.txt"), 'w') as f:
f.write(' '.join(sys.argv))
width = args.output_width
height = args.output_height or width
print("About to generate a {}x{} image, matching the Gram matrices for layers {} at {} distinct scales".format(width, height, args.layers, args.octaves))
pyramid_model = gram.make_pyramid_model(args.octaves, args.padding_mode)
pyramid_gram_model = gram.make_pyramid_gram_model(pyramid_model, args.layers, data_dir=args.data_dir)
target_grams = gram.get_gram_matrices_for_images(pyramid_gram_model, args.source,
source_width = args.source_width, source_scale = args.source_scale, join_mode = args.join_mode)
target_grams = [t*args.mul for t in target_grams]
#target_grams = [np.max(t) - t for t in target_grams]
x0 = np.random.randn(args.count, height, width, 3)
if args.seed == 'symmetric':
x0 = x0 + x0[:,::-1, :, :]
x0 = x0 + x0[:, :, ::-1, :]
blur_radius = 30
for i in range(3):
x0[...,i] = blur_radius*50*ndimage.gaussian_filter(x0[...,i], blur_radius)
x0 += np.random.randn(*(x0.shape)) * 2
else:
# Shift the whole thing to be near zero
x0 += 10 - gram.colour_offsets
#x0 = preprocess(load_img('../sources/smokeb768.jpg'))
interframe_distances = []
if args.count > 1:
for im in gram.get_images(args.source, source_scale = args.source_scale, source_width=args.source_width):
interframe_distances.append(gram.interframe_distance(pyramid_model, im,
shift=args.if_shift,
interframe_distance_type = args.if_distance_type,
interframe_octaves = args.if_octaves))
print("Raw interframe distances: ")
print(interframe_distances)
#target_distances = np.mean(interframe_distances, axis=1)
target_distances = interframe_distances[0]
print("Shifting the source images by {} gives a {} interframe distance of approx {}".format(args.if_shift, args.if_distance_type, target_distances))
else:
target_distances=None
gram.synthesize_animation(pyramid_model, pyramid_gram_model, target_grams,
width = width, height = height, frame_count=args.count,
x0 = x0,
interframe_loss_weight=args.if_weight,
interframe_order=args.if_order,
target_interframe_distances = target_distances,
interframe_distance_type = args.if_distance_type,
interframe_octaves = args.if_octaves,
output_directory = output_dir, max_iter=args.max_iter, save_every=args.save_every, tol=args.tol
)
print("DONE: ")
|
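A hypothetical invocation built from the script's own argparse flags; the script file name and the source image path are assumptions:

import subprocess
subprocess.check_call([
    'python', 'synthesize.py',
    '--source', 'sources/pebbles.jpg',
    '--output-width', '512',
    '--octaves', '4',
    '--layers', '2', '7',
])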
ychfan/tensorflow | tensorflow/contrib/nccl/python/ops/nccl_ops_test.py | Python | apache-2.0 | 6,643 | 0.007978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nccl ops. See also the cc test for nccl_communicator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
from tensorflow.contrib import nccl
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.platform import test
def _DeviceTensors(tensors, devices):
res = []
for t, d in zip(tensors, devices):
with ops.device(d):
res.append(array_ops.identity(t))
return res
def _NcclAllReduce(nccl_fun, tensors, devices):
return nccl_fun(_DeviceTensors(tensors, devices))
def _NcclReduce(nccl_fun, tensors, devices):
receiver = np.random.randint(0, len(devices))
with ops.device(devices[receiver]):
return [nccl_fun(_DeviceTensors(tensors, devices))]
def _NcclBroadcast(tensors, devices):
sender = np.random.randint(0, len(devices))
with ops.device(devices[sender]):
tensor = array_ops.identity(tensors[0])
broadcast = nccl.broadcast(tensor)
return _DeviceTensors([broadcast] * len(devices), devices)
class NcclTestCase(test.TestCase):
def _Test(self,
nccl_reduce,
numpy_fn,
device_sets=(['/device:GPU:1', '/device:GPU:2', '/device:GPU:0'],
['/device:GPU:1', '/device:GPU:0'])):
"""Tests that nccl_reduce does the same as reduction with numpy_fn.
Args:
nccl_reduce: A function taking a list of tensors and a list of devices,
and returns a list of reduced tensors and a list of ops to perform the
reduction.
numpy_fn: A function taking two tensors and returning the reduction of the
two.
device_sets: Tuple of virtual devices to run test on.
"""
if not test.is_gpu_available():
return # Test requires access to a GPU
for dtype in [np.float32, np.int32, np.int64, np.float64]:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
with self.test_session(use_gpu=True) as sess:
for devices in device_sets:
shape = (3, 4)
random = (np.random.random_sample(shape) - .5) * 1024
tensors = []
for _ in devices:
tensors.append(random.astype(dtype))
np_ans = tensors[0]
for t in tensors[1:]:
np_ans = numpy_fn(np_ans, t)
reduce_tensors = nccl_reduce(tensors, devices)
self.assertNotEmpty(reduce_tensors)
# Test shape inference.
for r in reduce_tensors:
self.assertEqual(shape, r.get_shape())
result_tensors = [array_ops.identity(t) for t in reduce_tensors]
# Test execution and results.
for t in sess.run(result_tensors):
self.assertAllClose(t, np_ans)
def _TestGradient(self, nccl_reduce, numpy_fn):
"""Tests the gradient of nccl_reduce.
Args:
nccl_reduce: A function taking a list of tensors and a list of devices,
and returns a list of reduced tensors and a list of ops to perform the
reduction.
numpy_fn: A function taking two tensors and returning the gradient of the
reduction of the two.
"""
def _Gradient(tensors, devices):
inputs = [array_ops.placeholder(t.dtype, t.shape) for t in tensors]
reduce_tensors = nccl_reduce(inputs, devices)
losses = _DeviceTensors(tensors, [t.device for t in reduce_tensors])
grads = gradients.gradients(
reduce_tensors, inputs, losses, colocate_gradients_with_ops=True)
return [g for g in grads if g is not None]
self._Test(_Gradient, numpy_fn)
class AllReduceTest(NcclTestCase):
def testAllReduce(self):
self._Test(partial(_NcclAllReduce, nccl.all_sum), lambda x, y: x + y)
self._Test(partial(_NcclAllReduce, nccl.all_prod), lambda x, y: x * y)
self._Test(partial(_NcclAllReduce, nccl.all_min), np.minimum)
self._Test(partial(_NcclAllReduce, nccl.all_max), np.maximum)
def testAllSumGrad(self):
self._TestGradient(
partial(_NcclAllReduce, nccl.all_sum), lambda x, y: x + y)
def testErrors(self):
with self.assertRaisesRegexp(ValueError, 'Device assignment required'):
nccl.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'):
nccl.all_sum([])
class SingleReduceTest(NcclTestCase):
def testSum(self):
self._Test(partial(_NcclReduce, nccl.reduce_sum), lambda x, y: x + y)
def testSumGrad(self):
self._TestGradient(partial(_NcclReduce, nccl.reduce_sum), lambda x, y: x)
class BroadcastTest(NcclTestCase):
def testBroadcast(self):
self._Test(_NcclBroadcast, lambda x, y: x)
def testBroadcastSingleDevice(self):
# Broadcasts on a single device are removed completely during rewrite.
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:GPU:0'],))
def testBroadcastToCpuError(self):
# Broadcasts to CPU is not supported.
with self.assertRaisesRegexp(
errors.NotFoundError,
"No registered '_NcclBroadcastRecv' OpKernel for CPU devices"):
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:CPU:0'],))
class CombinedTest(NcclTestCase):
"""Test all-reduce vs. single-reduce plus broadcast in one session.run."""
def _Combined(self, tensors, devices):
all_reduce_tensors = _NcclAllReduce(nccl.all_sum, tensors, devices)
single_reduce_tensors = _NcclReduce(nccl.reduce_sum, tensors, devices)
broadcast_tensors = _NcclBroadcast(single_reduce_tensors, devices)
return all_reduce_tensors + broadcast_tensors
def testCombined(self):
self._Test(self._Combined, lambda x, y: x + y)
if __name_ | _ == '__main__' | :
test.main()
|
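A minimal sketch of the API under test, assuming at least two visible GPUs; it mirrors the helpers above by pinning each input tensor to its own device:

with ops.device('/device:GPU:0'):
    a = array_ops.ones([3, 4])
with ops.device('/device:GPU:1'):
    b = array_ops.ones([3, 4]) * 2.
summed = nccl.all_sum([a, b])  # one reduced tensor per input device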
cpennington/edx-platform | common/test/acceptance/pages/studio/index.py | Python | agpl-3.0 | 13,539 | 0.002807 | """
Studio Index, home and dashboard pages. These are the starting pages for users.
"""
from bok_choy.page_object import PageObject
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from common.test.acceptance.pages.studio import BASE_URL
from common.test.acceptance.pages.studio.login import LoginPage
from common.test.acceptance.pages.studio.signup import SignupPage
from common.test.acceptance.pages.studio.utils import HelpMixin
class HeaderMixin(object):
"""
Mixin class used for the pressing buttons in the header.
"""
def click_sign_up(self):
"""
Press the Sign Up button in the header.
"""
next_page = SignupPage(self.browser)
self.q(css='.action-signup')[0].click()
return next_page.wait_for_page()
def click_sign_in(self):
"""
Press the Sign In button in the header.
"""
next_page = LoginPage(self.browser)
self.q(css='.action-signin')[0].click()
return next_page.wait_for_page()
class IndexPage(PageObject, HeaderMixin, HelpMixin):
"""
Home page for Studio when not logged in.
"""
url = BASE_URL + "/"
def is_browser_on_page(self):
return self.q(css='.wrapper-text-welcome').visible
class DashboardPage(PageObject, HelpMixin):
"""
Studio Dashboard page with courses.
The user must be logged in to access this page.
"""
url = BASE_URL + "/course/"
def is_browser_on_page(self):
return self.q(css='.content-primary').visible
@property
def course_runs(self):
"""
The list of course run metadata for all displayed courses
Returns an empty list if there are none
"""
return self.q(css='.course-run>.value').text
@property
def has_processing_courses(self):
return self.q(css='.courses-processing').present
def create_rerun(self, course_key):
"""
Clicks the create rerun link of the course specified by course_key
'Re-run course' link doesn't show up until you mouse over that course in the course listing
"""
actions = ActionChains(self.browser)
button_name = self.browser.find_element_by_css_selector('.rerun-button[href$="' + course_key + '"]')
actions.move_to_element(button_name)
actions.click(button_name)
actions.perform()
def click_course_run(self, run):
"""
Clicks on the course with run given by run.
"""
self.q(css='.course-run .value').filter(lambda el: el.text == run)[0].click()
# Clicking on course with run will trigger an ajax event
self.wait_for_ajax()
def scroll_to_course(self, course_key):
"""
Scroll down to the course element
"""
element = '[data-course-key*="{}"]'.format(course_key)
self.scroll_to_element(element)
def has_new_library_button(self):
"""
(bool) is the "New Library" button present?
"""
return self.q(css='.new-library-button').present
def click_new_library(self):
"""
Click on the "New Library" button
"""
self.q(css='.new-library-button').first.click()
self.wait_for_ajax()
def is_new_library_form_visible(self):
"""
Is the new library form visisble?
"""
return self.q(css='.wrapper-create-library').visible
def fill_new_library_form(self, display_name, org, number):
"""
Fill out the form to create a new library.
Must have called click_new_library() first.
"""
field = lambda fn: self.q(css=u'.wrapper-create-library #new-library-{}'.format(fn))
field('name').fill(display_name)
field('org').fill(org)
field('number').fill(number)
def is_new_library_form_valid(self):
"""
Is the new library form ready to submit?
"""
return (
self.q(css='.wrapper-create-library .new-library-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-library .wrap-error.is-shown').present
)
def submit_new_library_form(self):
"""
Submit the new library form.
"""
self.q(css='.wrapper-create-library .new-library-save').click()
@property
def new_course_button(self):
"""
Returns "New Course" button.
"""
return self.q(css='.new-course-button')
def is_new_course_form_visible(self):
"""
Is the new course form visible?
"""
return self.q(css='.wrapper-create-course').visible
def click_new_course_button(self):
"""
Click "New Course" button
"""
self.q(css='.new-course-button').first.click()
self.wait_for_ajax()
def fill_new_course_form(self, display_name, org, number, run):
"""
Fill out the form to create a new course.
"""
field = lambda fn: self.q(css=u'.wrapper-create-course #new-course-{}'.format(fn))
field('name').fill(display_name)
field('org').fill(org)
field('number').fill(number)
field('run').fill(run)
def is_new_course_form_valid(self):
"""
Returns `True` if new course form is valid otherwise `False`.
"""
return (
self.q(css='.wrapper-create-course .new-course-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-course .wrap-error.is-shown').present
)
def submit_new_course_form(self):
"""
Submit the new course form.
"""
self.q(css='.wrapper-create-course .new-course-save').first.click()
self.wait_for_ajax()
@property
def error_notification(self):
"""
Returns error notification element.
"""
return self.q(css='.wrapper-notification-error.is-shown')
@property
def error_notification_message(self):
"""
Returns text of error message.
"""
self.wait_for_element_visibility(
".wrapper-notification-error.is-shown .message", "Error message is visible"
)
return self.error_notification.results[0].find_element_by_css_selector('.message').text
@property
def course_org_field(self):
"""
Returns course organization input.
"""
return self.q(css='.wrapper-create-course #new-course-org')
def select_item_in_autocomplete_widget(self, item_text):
"""
Selects item in autocomplete where text of item matches item_text.
"""
self.wait_for_element_visibility(
".ui-autocomplete .ui-menu-item", "Autocomplete widget is visible"
)
self.q(css='.ui-autocomplete .ui-menu-item a').filter(lambda el: el.text == item_text)[0].click()
def list_courses(self, archived=False):
"""
List all the courses found on the page's list of courses.
"""
# Workaround Selenium/Firefox bug: `.text` property is broken on invisible elements
tab_selector = u'#course-index-tabs .{} a'.format('archived-courses-tab' if archived else 'courses-tab')
self.wait_for_element_presence(tab_selector, "Courses Tab")
self.q(css=tab_selector).click()
div2info = lambda element: {
'name': element.find_element_by_css_selector('.course-title').text,
'org': element.find_element_by_css_selector('.course-org .value').text,
'number': element.find_element_by_css_selector('.course-num .value').text,
'run': element.find_element_by_css_selector('.course-run .value').text,
'url': | element.find_element_by_css_selector('a.course-link').get_attribute('href'),
}
course_list_selector = u' | .{} li.course-item'.format('archived-courses' if archived else 'courses')
return self.q(css=course_list_selector).map(div2info).results
def has_course(self, org, number, run, archived=False):
"""
        Returns `True` if a course with the given org, number, and run exists on the page, otherwise `False`
"""
for c |
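# Hedged usage sketch for the DashboardPage object above. It assumes a bok-choy
# test harness supplies a Selenium `browser` fixture; the course org/number/run
# values are purely illustrative.
def _sketch_create_course(browser):
    dashboard = DashboardPage(browser)
    dashboard.visit()
    dashboard.click_new_course_button()
    dashboard.fill_new_course_form('Demo Course', 'edX', 'DemoX', '2024_T1')
    if dashboard.is_new_course_form_valid():
        dashboard.submit_new_course_form()
    assert dashboard.has_course(org='edX', number='DemoX', run='2024_T1')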
googleapis/python-error-reporting | google/cloud/errorreporting_v1beta1/services/report_errors_service/client.py | Python | apache-2.0 | 22,790 | 0.001624 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.errorreporting_v1beta1.types import report_errors_service
from .transports.base import ReportErrorsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ReportErrorsServiceGrpcTransport
from .transports.grpc_asyncio import ReportErrorsServiceGrpcAsyncIOTransport
class ReportErrorsServiceClientMeta(type):
"""Metaclass for the ReportErrorsService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ReportErrorsServiceTransport]]
_transport_registry["grpc"] = ReportErrorsServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ReportErrorsServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ReportErrorsServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
| Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
| return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ReportErrorsServiceClient(metaclass=ReportErrorsServiceClientMeta):
"""An API for reporting error events."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "clouderrorreporting.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ReportErrorsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ReportErrorsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ReportErrorsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ReportErrorsServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@stati |
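# Hedged usage sketch for the client defined above. The credentials path is an
# assumption; only constructors and path helpers visible in this module are used.
def _sketch_report_errors_client():
    client = ReportErrorsServiceClient.from_service_account_file(
        "/path/to/service-account.json")  # hypothetical key file
    name = ReportErrorsServiceClient.common_project_path("my-project")
    assert ReportErrorsServiceClient.parse_common_project_path(name) == {
        "project": "my-project",
    }
    return client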
dragonfly-science/kokako | kokako/detectors/kiwi.py | Python | gpl-3.0 | 1,133 | 0.006178 | import numpy as np
from pylab import mean, log
from matplotlib import mlab
from kokako.score import Detector
class SimpleKiwi(Detector):
code = 'simple-north-island-brown-kiwi'
description = 'Simple detector for north-island brown kiwi, based on energy between 1600 and 2200 Hz'
version = '0.1.2'
window = 0.032
lower_call_frequency = 1600
upper_call_frequency = 2200
lower_syllable_frequency = 0.5
upper_syllable_frequency = 1.1
def score(self, audio):
nfft = int(self.window*audio.framerate)
audio.calculat | e_specgram(nfft=nfft, noverlap=nfft/2)
freqs = np.where((audio.specgram_freqs >= self.lower_call_frequency)*(audio.specgram_freqs <= self.upper_call_frequency))
spec2 = mlab.specgram(mean(log(audio.specgram[freqs[0],]), 0), NFFT=1024, noverlap=512, Fs=2/self.window)
freqs2 = np.where((spec2[1] >= self.lower_syllable_frequency)*(spec2[1] <= self.upper_syllable_frequency))
max_kiwi = max(np.max(spec2[0][freqs2[0], :], 0))
| mean_kiwi = np.exp(np.mean(np.mean(np.log(spec2[0][freqs2[0], :]), 0)))
return max_kiwi/mean_kiwi
|
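# Hedged usage sketch for the detector above. It assumes Detector subclasses
# take no constructor arguments and that `audio` matches the interface used in
# score(): a `framerate` attribute, a `calculate_specgram(nfft, noverlap)`
# method, and `specgram`/`specgram_freqs` attributes populated by that call.
def _sketch_score_recording(audio):
    detector = SimpleKiwi()
    score = detector.score(audio)
    return score  # larger values indicate stronger kiwi-like periodicity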
iwaseyusuke/ryu | ryu/services/protocols/bgp/base.py | Python | apache-2.0 | 19,278 | 0 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines some base classes related to managing green threads.
"""
from __future__ import absolute_import
import abc
from collections import OrderedDict
import logging
import socket
import time
import traceback
import weakref
import netaddr
import six
from ryu.lib import hub
from ryu.lib import sockopt
from ryu.lib import ip
from ryu.lib.hub import Timeout
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_L2_EVPN
from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC
from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC
from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC
from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC
from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.services.protocols.bgp.utils.circlist import CircularListType
from ryu.services.protocols.bgp.utils.evtlet import LoopingCall
# Logger instance for this module.
LOG = logging.getLogger('bgpspeaker.base')
# Pointer to active/available OrderedDict.
OrderedDict = OrderedDict
# Currently supported address families.
SUPPORTED_GLOBAL_RF = {
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_RTC_UC,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_IPv4_FLOWSPEC,
RF_IPv6_FLOWSPEC,
RF_VPNv4_FLOWSPEC,
RF_VPNv6_FLOWSPEC,
RF_L2VPN_FLOWSPEC,
}
# Various error codes
ACTIVITY_ERROR_CODE = 100
RUNTIME_CONF_ERROR_CODE = 200
BIN_ERROR = 300
NET_CTRL_ERROR_CODE = 400
API_ERROR_CODE = 500
PREFIX_ERROR_CODE = 600
BGP_PROCESSOR_ERROR_CODE = 700
CORE_ERROR_CODE = 800
# Registry of custom exceptions
# Key: code:sub-code
# Value: exception class
_EXCEPTION_REGISTRY = {}
class BGPSException(Exception):
"""Base exception class for all BGPS related exceptions.
"""
CODE = 1
SUB_CODE = 1
DEF_DESC = 'Unknown exception.'
def __init__(self, desc=None):
super(BGPSException, self).__init__()
if not desc:
desc = self.__class__.DEF_DESC
kls = self.__class__
self.message = '%d.%d - %s' % (kls.CODE, kls.SUB_CODE, desc)
def __repr__(self):
kls = self.__class__
return '<%s(desc=%s)>' % (kls, self.message)
def __str__(self, *args, **kwargs):
return self.message
def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
"""Decorator for all exceptions that want to set exception class meta-data.
"""
# Check registry if we already have an exception with same code/sub-code
if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
raise ValueError('BGPSException with code %d and sub-code %d '
'already defined.' % (code, sub_code))
def decorator(subclass):
"""Sets class constants for exception code and sub-code.
        If the given class is a subclass of BGPSException, set its class constants.
"""
if issubclass(subclass, BGPSException):
_EXCEPTION_REGISTRY[(code, sub_code)] = subclass
subclass.CODE = code
subclass.SUB_CODE = sub_code
subclass.DEF_DESC = def_desc
return subclass
return decorator
@add_bgp_error_metadata(code=ACTIVITY_ERROR_CODE,
sub_code=1,
def_desc='Unknown activity exception.')
class ActivityException(BGPSException):
"""Base class for exceptions related to Activity.
"""
pass
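# Hedged sketch: because the decorator above records each (CODE, SUB_CODE)
# pair, a registered class can be recovered from _EXCEPTION_REGISTRY and
# raised with a custom description.
def _sketch_raise_registered_error():
    exc_cls = _EXCEPTION_REGISTRY[(ACTIVITY_ERROR_CODE, 1)]
    assert exc_cls is ActivityException
    try:
        raise exc_cls(desc='illustrative failure')
    except BGPSException as e:
        return str(e)  # '100.1 - illustrative failure'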
@six.add_metaclass(abc.ABCMeta)
class Activity(object):
"""Base class for a thread of execution that provides some custom settings.
Activity is also a container of other activities or threads that it has
    started. Inside an Activity you should always use one of the spawn methods
    to start another activity or greenthread. An Activity also holds pointers
    to sockets that it or its child activities or threads have created.
"""
def __init__(self, name=None):
self._name = name
if self._name is None:
self._name = 'UnknownActivity: ' + str( | time.time())
self._child_thread_map = weakref.WeakValueDictionary()
self._child_activity_map = weakref.WeakValueDictionary()
self._asso_socket_map = weakref.WeakValueDictionary()
self._timers = weakref.WeakValueDictionary()
self._started = False
@property
def name(self):
| return self._name
@property
def started(self):
return self._started
def _validate_activity(self, activity):
"""Checks the validity of the given activity before it can be started.
"""
if not self._started:
raise ActivityException(desc='Tried to spawn a child activity'
' before Activity was started.')
if activity.started:
raise ActivityException(desc='Tried to start an Activity that was '
'already started.')
def _spawn_activity(self, activity, *args, **kwargs):
"""Starts *activity* in a new thread and passes *args* and *kwargs*.
Maintains pointer to this activity and stops *activity* when this
activity is stopped.
"""
self._validate_activity(activity)
# Spawn a new greenthread for given activity
greenthread = hub.spawn(activity.start, *args, **kwargs)
self._child_thread_map[activity.name] = greenthread
self._child_activity_map[activity.name] = activity
return greenthread
def _spawn_activity_after(self, seconds, activity, *args, **kwargs):
self._validate_activity(activity)
# Schedule to spawn a new greenthread after requested delay
greenthread = hub.spawn_after(seconds, activity.start, *args,
**kwargs)
self._child_thread_map[activity.name] = greenthread
self._child_activity_map[activity.name] = activity
return greenthread
def _validate_callable(self, callable_):
if callable_ is None:
raise ActivityException(desc='Callable cannot be None')
if not hasattr(callable_, '__call__'):
raise ActivityException(desc='Currently only supports instances'
' that have __call__ as callable which'
' is missing in given arg.')
if not self._started:
raise ActivityException(desc='Tried to spawn a child thread '
'before this Activity was started.')
def _spawn(self, name, callable_, *args, **kwargs):
self._validate_callable(callable_)
greenthread = hub.spawn(callable_, *args, **kwargs)
self._child_thread_map[name] = greenthread
return greenthread
def _spawn_after(self, name, seconds, callable_, *args, **kwargs):
self._validate_callable(callable_)
greenthread = hub.spawn_after(seconds, callable_, *args, **kwargs)
self._child_thread_map[name] = greenthread
return greenthread
def _create_timer(self, name, func, *arg, **kwarg):
timer = LoopingCall(func, *arg, **kwarg)
self._timers[name] = timer
return timer
@abc.abstractmethod
def _run(self, *args, **kwargs):
"""Main activity of this class.
Can launch other activity/callables here.
Sub-classes should override this method.
"""
raise NotImplementedError()
def start(self, *args, **kwargs):
"""Starts the main activity of this class. |
phaethon/scapy | kamene/layers/can.py | Python | gpl-2.0 | 4,119 | 0.00437 | #! /usr/bin/env python
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Nils Weiss <nils@we155.de>
## This program is published under a GPLv2 lice | nse
"""
CANSocket.
"""
from kamene.packet import *
from kamene.fields impo | rt *
import kamene.sendrecv as sendrecv
from kamene.supersocket import SuperSocket
from kamene.arch.linux import get_last_packet_timestamp
############
## Consts ##
############
CAN_FRAME_SIZE = 16
LINKTYPE_CAN_SOCKETCAN = 227 # From pcap spec
CAN_INV_FILTER = 0x20000000
class CAN(Packet):
name = 'CAN'
fields_desc = [
FlagsField("flags", 0, 3, ["ERR", "RTR", "EFF"]),
XBitField("id", 0, 29),
PadField(FieldLenField('dlc', None, length_of='data', fmt='B'), 4),
PadField(StrLenField('data', '', length_from=lambda pkt: min(pkt.dlc, 8)), 8)
]
def extract_padding(self, p):
return '', p
def pre_dissect(self, s):
        # need to swap the byte order of the first four bytes
return struct.pack('<I12s', *struct.unpack('>I12s', s))
def post_build(self, pkt, pay):
        # need to swap the byte order of the first four bytes
return struct.pack('<I12s', *struct.unpack('>I12s', pkt))+pay
class CANSocket(SuperSocket):
desc = "read/write packets at a given CAN interface using PF_CAN sockets"
def __init__(self, iface=None, receive_own_messages=False, filter=None):
if iface is None:
iface = conf.CANiface
self.ins = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.ins.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_RECV_OWN_MSGS,
struct.pack('i', receive_own_messages))
except Exception as e:
            warning("Could not receive own messages (%s)" % e)
if filter is None:
filter = [{
'can_id': 0,
'can_mask': 0
}]
can_filter_fmt = "={}I".format(2 * len(filter))
filter_data = []
for can_filter in filter:
filter_data.append(can_filter['can_id'])
filter_data.append(can_filter['can_mask'])
self.ins.setsockopt(socket.SOL_CAN_RAW,
socket.CAN_RAW_FILTER,
struct.pack(can_filter_fmt, *filter_data)
)
self.ins.bind((iface,))
self.outs = self.ins
def recv(self, x=CAN_FRAME_SIZE):
# Fetching the Arb ID, DLC and Data
try:
pkt, sa_ll = self.ins.recvfrom(x)
except BlockingIOError:
warning('Captured no data, socket in non-blocking mode.')
return None
except socket.timeout:
warning('Captured no data, socket read timed out.')
return None
except OSError:
# something bad happened (e.g. the interface went down)
warning("Captured no data.")
return None
q = CAN(pkt)
q.time = get_last_packet_timestamp(self.ins)
return q
def sr(self, *args, **kargs):
return sendrecv.sndrcv(self, *args, **kargs)
def sr1(self, *args, **kargs):
a,b = sendrecv.sndrcv(self, *args, **kargs)
if len(a) > 0:
return a[0][1]
else:
return None
def sniff(self, *args, **kargs):
return sendrecv.sniff(opened_socket=self, *args, **kargs)
@conf.commands.register
def srcan(pkt, iface=None, receive_own_messages=False, filter=None, *args, **kargs):
if not "timeout" in kargs:
kargs["timeout"] = -1
s = conf.CANSocket(iface, receive_own_messages, filter)
a, b = s.sr(pkt, *args, **kargs)
s.close()
return a, b
@conf.commands.register
def srcanloop(pkts, *args, **kargs):
"""Send a packet at can layer in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return sendrecv.__sr_loop(srcan, pkts, *args, **kargs)
conf.l2types.register(LINKTYPE_CAN_SOCKETCAN, CAN)
conf.CANiface = "can0"
conf.CANSocket = CANSocket
|
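# Hedged build/dissect round-trip for the CAN layer above. bytes(pkt) is
# assumed to invoke the build path (kamene targets Python 3); note that
# post_build swaps the byte order of the first four bytes as described above.
def _sketch_can_roundtrip():
    pkt = CAN(id=0x123, data=b'\x01\x02')  # dlc is derived from len(data)
    wire = bytes(pkt)
    assert len(wire) == CAN_FRAME_SIZE
    parsed = CAN(wire)
    return parsed.id, parsed.dlc, parsed.data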
asedunov/intellij-community | python/testData/quickFixes/PyMakeMethodStaticQuickFixTest/usage_after.py | Python | apache-2.0 | 72 | 0.013889 | class A:
@staticmethod
def m(x): |
retu | rn x
print A.m(1) |
unioslo/cerebrum | testsuite/tests/test_core/test_utils/test_json.py | Python | gpl-2.0 | 1,860 | 0 | # encoding: utf-8
#
# Copyright 2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for mor | e details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import unicode_literals
import six
from Cerebrum.utils import json
from mx.DateTime im | port DateTime
def test_mxdatetime():
assert json.dumps(
DateTime(2018, 1, 1, 12, 0, 0)) == '"2018-01-01T12:00:00+01:00"'
assert json.dumps(DateTime(2018, 1, 1, 0, 0, 0)) == '"2018-01-01"'
def test_constants(factory):
co = factory.get('Constants')(None)
assert json.dumps(co.entity_account) == (
'{{"__cerebrum_object__": "code", '
'"code": {d}, '
'"str": "{c}", '
'"table": "{t}"}}').format(
c=co.entity_account,
d=int(co.entity_account),
t=co.EntityType._lookup_table)
def test_entity(initial_account, factory):
co = factory.get('Constants')(None)
assert json.dumps(initial_account) == (
'{{"__cerebrum_object__": "entity", '
'"entity_id": {}, '
'"entity_type": {}, '
'"str": "{}"}}'
.format(
initial_account.entity_id,
json.dumps(co.entity_account),
six.text_type(initial_account)))
|
seraphln/wheel | wheel/example.local_settings.py | Python | gpl-3.0 | 1,093 | 0.000915 | # coding: utf-8
"""
Wheel will try to read configur | ations from environment variables
so you don't need this local_settings.py file if you have env vars.
1. You can set as a file
export WHEEL_SETTINGS='/path/to/settings.py'
2. You can set individual values
export WHEEL_MONGODB_DB="wheel_db"
export WHEEL_MONGODB_HOST='localhost'
export WHEEL_MONGODB_PORT='$int 27017'
Or just fill your values in this file and renam | e it to 'local_settings.py'
"""
# MONGO
MONGODB_DB = "wheel_db"
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_USERNAME = None
MONGODB_PASSWORD = None
# Debug and toolbar
DEBUG = True
DEBUG_TOOLBAR_ENABLED = False
# Logger
LOGGER_ENABLED = True
LOGGER_LEVEL = 'DEBUG'
LOGGER_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
LOGGER_DATE_FORMAT = '%d.%m %H:%M:%S'
"""
If you want to have a new theme installed you can use wheelcms tool
$ pip install wheelcms
$ cd wheel
$ wheelcms install_theme material
The above commands will download material design theme to your themes folder,
then just enable it.
DEFAULT_THEME = 'material'
"""
|
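# Hedged sketch of the '$int 27017' convention documented above: one plausible
# way a settings loader could coerce typed values out of environment variables.
# This is illustrative only, not wheel's actual implementation.
import os
def _sketch_read_env(name, default=None):
    raw = os.environ.get(name)
    if raw is None:
        return default
    if raw.startswith('$int '):
        return int(raw[len('$int '):])
    return raw
# e.g. with WHEEL_MONGODB_PORT='$int 27017', this returns the integer 27017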
mstrader/MkidDigitalReadout | DarknessFilters/triggerPhotons.py | Python | gpl-2.0 | 7,378 | 0.013283 | from matplotlib import rcParams, rc
import numpy as np
import sys
from fitFunctions import gaussian
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
import pickle
import smooth
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
def calcThreshold(phase,nSigma=2.5,nSamples=5000):
'''
Calculate the threshold (in phase units) corresponding
to a sigma threshold (note: look at this f'n, seems a bit odd
Appears to define sigma as one-sided lower 95% threshold)
'''
n,bins= np.histogram(phase[:nSamples],bins=100)
n = np.array(n,dtype='float32')/np.sum(n)
tot = np.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = np.sum(n[:i])
med = bins[np.abs(tot-0.5).argmin()]
thresh = bins[np.abs(tot-0.05).argmin()]
threshold = med-nSigma*abs(med-thresh)
return threshold
def sigmaTrigger(data,nSigmaTrig=7.,deadtime=10):
'''
Find photon pulses using a sigma trigger
INPUTS:
data - phase timestream (filtered or raw)
        nSigmaTrig - threshold for photon detection, in units of sigma above the baseline
deadtime - trigger deadtime in ticks (us)
OUTPUTS:
Dictionary with keys:
peakIndices - indices of detected pulses in phase stream
peakHeights - heights of detected pulses (in | same units as input data)
'''
data = np.array(data)
med = np.median(data)
trigMask = data > (med + np.std(data)*nSigmaTrig)
if np.sum(trigMask) > 0:
peakIndices = np.where(trigMask)[0]
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices): |
p = peakIndices[i]
else:
p = peakIndices[-1]
else:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
peakHeights = data[peakIndices]
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
def detectPulses(data,threshold=None,nSigmaThreshold=3.,deadtime=10,nNegDerivChecks=10,negDerivLenience=1,bNegativePulses = True):
#deadtime in ticks (us)
if bNegativePulses:
data = np.array(data)
else:
data = -np.array(data) #flip to negative pulses
if threshold is None:
threshold = np.median(data)-nSigmaThreshold*np.std(data)
derivative = np.diff(data)
peakHeights = []
t = 0
negDeriv = derivative <= 0
posDeriv = np.logical_not(negDeriv)
triggerBooleans = data[nNegDerivChecks:-2] < threshold
negDerivChecksSum = np.zeros(len(negDeriv[0:-nNegDerivChecks-1]))
for i in range(nNegDerivChecks):
negDerivChecksSum += negDeriv[i:i-nNegDerivChecks-1]
peakCondition0 = negDerivChecksSum >= nNegDerivChecks-negDerivLenience
peakCondition1 = np.logical_and(posDeriv[nNegDerivChecks:-1],posDeriv[nNegDerivChecks+1:])
peakCondition01 = np.logical_and(peakCondition0,peakCondition1)
peakBooleans = np.logical_and(triggerBooleans,peakCondition01)
try:
peakIndices = np.where(peakBooleans)[0]+nNegDerivChecks
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
except IndexError:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
if bNegativePulses:
peakHeights = data[peakIndices]
else:
peakHeights = -data[peakIndices] #flip back to positive sign
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
def optimizeTrigCond(data, nPeaks, sigmaThreshList=[3.], nNegDerivChecksList=[10], negDerivLenienceList=[1], bNegativePulses=True):
minSigma = 1000
optSigmaThresh = 0
optNNegDerivChecks = 0
optNegDerivLenience = 0
optPeakDict = {'peakIndices':np.array([]), 'peakHeights':np.array([])}
for sigmaThresh in sigmaThreshList:
for nNegDerivChecks in nNegDerivChecksList:
for negDerivLenience in negDerivLenienceList:
peakDict = detectPulses(data, nSigmaThreshold=sigmaThresh, nNegDerivChecks=nNegDerivChecks, negDerivLenience=negDerivLenience, bNegativePulses=bNegativePulses)
if(len(peakDict['peakIndices'])>=nPeaks):
sigma = np.std(peakDict['peakHeights'])
if(sigma<minSigma):
minSigma = sigma
optSigmaThresh = sigmaThresh
optNNegDerivChecks = nNegDerivChecks
optNegDerivLenience = negDerivLenience
optPeakDict = peakDict
return optSigmaThresh, optNNegDerivChecks, optNegDerivLenience, minSigma, optPeakDict
def findSigmaThresh(data, initSigmaThresh=2., tailSlack=0., isPlot=False):
'''
Finds the optimal photon trigger threshold by cutting out the noise tail
in the pulse height histogram.
INPUTS:
data - filtered phase timestream data (positive pulses)
initSigmaThresh - sigma threshold to use when constructing initial
pulse height histogram
tailSlack - amount (in same units as data) to relax trigger threshold
isPlot - make peak height histograms if true
OUTPUTS:
threshold - trigger threshold in same units as data
sigmaThresh - trigger threshold in units sigma from median
'''
peakdict = sigmaTrigger(data, nSigmaTrig=initSigmaThresh)
peaksHist, peaksHistBins = np.histogram(peakdict['peakHeights'], bins='auto')
if(isPlot):
plt.plot(peaksHistBins[:-1], peaksHist)
plt.title('Unsmoothed Plot')
plt.show()
print 'peaksHistLen:', len(peaksHist)
peaksHist = smooth.smooth(peaksHist,(len(peaksHistBins)/20)*2+1)
print 'peaksHistSmoothLen:', len(peaksHist)
if(isPlot):
plt.plot(peaksHistBins[0:len(peaksHist)], peaksHist)
plt.title('smoothed plot')
plt.show()
    minima = np.ones(len(peaksHist))  # tracks minima locations; element is 1 if a minimum exists at that index
    minimaCount = 1
    # while there are multiple local minima, look for the deepest one
while(np.count_nonzero(minima)>1):
minima = np.logical_and(minima, np.logical_and((peaksHist<=np.roll(peaksHist,minimaCount)),(peaksHist<=np.roll(peaksHist,-minimaCount))))
#print 'minima array:', minima
minima[minimaCount-1]=0
minima[len(minima)-minimaCount]=0 #get rid of boundary effects
minimaCount += 1
thresholdInd = np.where(minima)[0][0]
threshold = peaksHistBins[thresholdInd]-tailSlack
sigmaThresh = (threshold-np.median(data))/np.std(data)
return threshold, sigmaThresh
|
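# Hedged demo of the trigger functions above on synthetic data. The module is
# written in Python 2 style, so the sketch sticks to py2-compatible syntax.
def _sketch_trigger_demo():
    rng = np.random.RandomState(0)
    phase = rng.normal(0., 1., 100000)
    phase[::5000] += 12.  # inject sparse, well-separated positive pulses
    peaks = sigmaTrigger(phase, nSigmaTrig=7., deadtime=10)
    return len(peaks['peakIndices']), peaks['peakHeights'].mean()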
odejesush/tensorflow | tensorflow/python/ops/gradients_test.py | Python | apache-2.0 | 23,474 | 0.011459 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Gra | ph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
| t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat([t4, t5], 0)
t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
|
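# Hedged standalone sketch of the gradients API exercised above, using only
# imports already present in this module.
def _sketch_simple_gradient():
    with ops.Graph().as_default():
        x = constant(3.0)
        y = x * x
        (dy_dx,) = gradients.gradients(y, [x])
        return dy_dx  # symbolic tensor; evaluates to 2*x = 6.0 under a Session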
pierreg/tensorflow | tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py | Python | apache-2.0 | 5,787 | 0.006566 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data in a constant.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data, pin to CPU because rest of pipeline is CPU-only
with tf.device('/cpu:0'):
input_images = tf.constant(data_sets.train.images)
input_labels = tf.constant(data_sets.train.labels)
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=FLAGS. | num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = m | nist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--num_epochs',
type=int,
default=2,
help='Number of epochs to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS = parser.parse_args()
tf.app.run()
|
RennesUsher/crawlAll | crawlAll/settings.py | Python | apache-2.0 | 3,379 | 0.007103 | # -*- coding: utf-8 -*-
# Scrapy settings for crawlAll project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'crawlAll'
SPIDER_MODULES = ['crawlAll.spiders']
NEWSPIDER_MODULE = 'crawlAll.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'crawlAll (+http://www.toutiao.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
CRAWLERA_PRESERVE_DELAY = True
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 32
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
'X-Crawlera-Cookies':'disable'
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'crawlAll.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'crawlAll.useragent.MyUserAgentMiddleware':400,
# 'crawlAll.middlewares.MyCustomDownloaderMiddleware': None
'scrapy_crawlera.CrawleraMiddleware': 300
}
CRAWLERA_ENABLED = True
CRAWLERA_APIKEY = 'd696afc5af424a5e8ef91a41174e7d4c'
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthe | docs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'crawlAll.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = False
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high l | atencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
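# Hedged note: any value in this settings module can be overridden per run with
# Scrapy's standard -s flag; the spider name below is hypothetical.
#
#     scrapy crawl my_spider -s DOWNLOAD_DELAY=1 -s CRAWLERA_ENABLED=False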
rodrigoasmacedo/l10n-brazil | __unported__/l10n_br_account/__openerp__.py | Python | agpl-3.0 | 2,625 | 0.00381 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009-2013 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. | #
# #
#This program is distributed in the hope that it will b | e useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
{
'name': 'Brazilian Localization Account',
'description': 'Brazilian Localization Account',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, OpenERP Brasil',
'website': 'http://openerpbrasil.org',
'version': '7.0',
'depends': [
'l10n_br',
'l10n_br_base',
'l10n_br_product',
'account_fiscal_position_rule',
'web_context_tunnel',
],
'data': [
'l10n_br_account_workflow.xml',
'account_fiscal_position_workflow.xml',
'data/l10n_br_account.fiscal.document.csv',
'data/l10n_br_account_data.xml',
'account_view.xml',
'account_fiscal_position_rule_view.xml',
'account_invoice_view.xml',
'l10n_br_account_view.xml',
'res_partner_view.xml',
'product_view.xml',
'res_company_view.xml',
'security/ir.model.access.csv',
'security/l10n_br_account_security.xml',
],
'demo': [
'demo/account.account.csv',
'demo/account_tax_code.xml',
'demo/account_financial_demo.xml',
'demo/account_fiscal_demo.xml',
'demo/base_demo.xml',
],
'test': [],
'installable': False,
'auto_install': True,
}
|
pdarragh/Viper | viper/interactive/lexer.py | Python | apache-2.0 | 900 | 0 | import viper.lexer as vl
import cmd
class InteractiveLexerException(Exception):
def __init__(self, output: str):
self.output = output
class InteractiveLexer(cmd.Cmd): # pragma: no cover
prompt = 'viper_lex> '
def default(self, line):
lexe | mes = vl.lex_line(line)
print(lexemes)
def do_exit(self, arg):
"""Exit the interactive lexer."""
raise InteractiveLexerException(output='exit')
def do_quit(self, arg):
"""Quit the interactive lexer."""
raise InteractiveLexerException(output='quit')
def cmdloop(self, intro=None):
try:
super().cmdloop(intro=intro)
except vl.Lexer | Error as e:
print(e)
self.cmdloop(intro=intro)
except InteractiveLexerException:
return
except KeyboardInterrupt:
print('\b\bexit')
return
|
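# Hedged entry-point sketch for launching the REPL above; cmdloop() comes from
# the standard-library cmd.Cmd base class, and the intro banner is an assumption.
if __name__ == '__main__':
    InteractiveLexer().cmdloop(intro='Viper lexer REPL (type exit or quit to leave)')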
googleapis/python-redis | google/cloud/redis_v1/types/cloud_redis.py | Python | apache-2.0 | 23,259 | 0.000516 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.redis.v1",
manifest={
"NodeInfo",
"Instance",
"ListInstancesRequest",
"ListInstancesResponse",
"GetInstanceRequest",
"CreateInstanceRequest",
"UpdateInstanceRequest",
"UpgradeInstanceRequest",
"DeleteInstanceRequest",
"GcsSource",
"InputConfig",
"ImportInstanceRequest",
"GcsDestination",
"OutputConfig",
"ExportInstanceRequest",
"FailoverInstanceRequest",
"OperationMetadata",
"LocationMetadata",
"ZoneMetadata",
},
)
class NodeInfo(proto.Message):
r"""Node specific properties.
Attributes:
id (str):
Output only. Node identifying string. e.g.
'node-0', 'node-1'
zone (str):
Output only. Location of the node.
"""
id = proto.Field(proto.STRING, number=1,)
zone = proto.Field(proto.STRING, number=2,)
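# Hedged sketch: proto-plus messages such as NodeInfo are constructed with
# plain keyword arguments; to_json is the standard proto-plus serializer. The
# field values are illustrative.
def _sketch_node_info():
    node = NodeInfo(id="node-0", zone="us-central1-a")
    return NodeInfo.to_json(node)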
class Instance(proto.Message):
r"""A Google Cloud Redis instance.
Attributes:
name (str):
Required. Unique name of the resource in this scope
including project and location using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
Note: Redis instances are managed and addressed at regional
level so location_id here refers to a GCP region; however,
users may choose which specific zone (or collection of zones
for cross-zone instances) an instance should be provisioned
in. Refer to
[location_id][google.cloud.redis.v1.Instance.location_id]
and
[alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id]
fields for more details.
display_name (str):
An arbitrary and optional user-provided name
for the instance.
labels (Sequence[google.cloud.redis_v1.types.Instance.LabelsEntry]):
Resource labels to represent user provided
metadata
location_id (str):
Optional. The zone where the instance will be
provisioned. If not provided, the service will
choose a zone from the specified region for the
instance. For standard tier, additional nodes
will be added across multiple zones for
protection against zonal failures. If specified,
at least one node will be provisioned in this
zone.
alternative_location_id (str):
Optional. If specified, at least one node will be
provisioned in this zone in addition to the zone specified
in location_id. Only applicable to standard tier. If
provided, it must be a different zone from the one provided
in [location_id]. Additional nodes beyond the first 2 will
be placed in zones selected by the service.
redis_version (str):
Optional. The version of Redis software. If not provided,
latest supported version will be used. Currently, the
supported values are:
- ``REDIS_3_2`` for Redis 3.2 compatibility
- ``REDIS_4_0`` for Redis 4.0 compatibility (default)
- ``REDIS_5_0`` for Redis 5.0 compatibility
- ``REDIS_6_X`` for Redis 6.x compatibility
reserved_ip_range (str):
Optional. For DIRECT_PEERING mode, the CIDR range of
internal addresses that are reserved for this instance.
Range must be unique and non-overlapping with existing
subnets in an authorized network. For PRIVATE_SERVICE_ACCESS
mode, the name of one allocated IP address ranges associated
with this private service access connection. If not
provided, the service will choose an unused /29 block, for
example, 10.0.0.0/29 or 192.168.0.0/29. For
READ_REPLICAS_ENABLED the default block size is /28.
host (str):
Output only. Hostname or IP address of the
exposed Redis endpoint used by clients to
connect to the service.
port (int):
Output only. The port number of the exposed
Redis endpoint.
current_location_id (str):
Output only. The current zone where the Redis primary node
is located. In basic tier, this will always be the same as
[location_id]. In standard tier, this can be the zone of any
node in the instance.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the instance was
created.
state (google.cloud.redis_v1.types.Instance.State):
Output only. The current state of this
instance.
status_message (str):
Output only. Additional information about the
current status of this instance, if available.
redis_configs (Sequence[google.cloud.redis_v1.types.Instance.RedisConfigsEntry]):
Optional. Redis configuration parameters, according to
http://redis.io/topics/config. Currently, the only supported
parameters are:
Redis version 3.2 and newer:
- maxmemory-policy
- notify-keyspace-events
Redis version 4.0 and newer:
- activedefrag
- lfu-decay-time
- lfu-log-factor
- maxmemory-gb
| Redis version 5.0 and newer:
- stream-node-max-bytes
- stream-node-max-entries
tier (google.cloud.redis_v1.types.Instance.Tier):
Required. The service tier of the instance.
memory_size_gb (int):
Required. Redis memory size in GiB.
authorized_network (str):
Optional. The full name | of the Google Compute Engine
`network <https://cloud.google.com/vpc/docs/vpc>`__ to which
the instance is connected. If left unspecified, the
``default`` network will be used.
persistence_iam_identity (str):
Output only. Cloud IAM identity used by import / export
operations to transfer data to/from Cloud Storage. Format is
"serviceAccount:<service_account_email>". The value may
change over time for a given instance so should be checked
before each import/export operation.
connect_mode (google.cloud.redis_v1.types.Instance.ConnectMode):
Optional. The network connect mode of the Redis instance. If
not provided, the connect mode defaults to DIRECT_PEERING.
replica_count (int):
Optional. The number of replica nodes. Valid range for
standard tier is [1-5] and defaults to 1. Valid value for
basic tier is 0 and defaults to 0.
nodes (Sequence[google.cloud.redis_v1.types.NodeInfo]):
Output only. Info per node.
read_endpoint (str):
Output only. Hostname or IP address of the
exposed readonly Redis endpoint. Standard tier
only. Targets all healthy replica nodes in
instance. Replication is asynchronous and
replica nodes will exhibit some lag behind the
primary. Write requests must target 'host'.
|
aaiijmrtt/TENSORCHALK | tests/core.py | Python | mit | 374 | 0.010695 | import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '. | .', 'code')))
import core, graphs
graph = graphs.one
print 'import tensorflow as tf'
print core.__fill__(graph, 'add', dict(), 0)[0]
print 'with tf.Session() as sess:'
print '\tsess.run(tf.global_variables_initializer())'
print '\tprint s | ess.run(add, {x: [[1, 2]], y: [[3, 4]]})'
|
JQIamo/artiq | artiq/test/lit/iodelay/loop.py | Python | lgpl-3.0 | 320 | 0.003125 | # RUN: %python -m artiq.compiler.testbench.signature %s >%t
# RUN: OutputCheck %s --file-to-check=%t
# CHECK-L: f: ()->NoneType delay(30 | mu)
def f():
for _ in range(10):
delay_mu(3)
# CHECK-L: g: ( | )->NoneType delay(60 mu)
def g():
for _ in range(10):
for _ in range(2):
delay_mu(3)
|
solashirai/edx-platform | common/lib/xmodule/xmodule/modulestore/django.py | Python | agpl-3.0 | 12,834 | 0.001792 | """
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from __future__ import absolute_import
from importlib import import_module
import gettext
import logging
from pkg_resources import resource_filename
import re
from django.conf import settings
# This configuration must be executed BEFORE any additional Django imports. Otherwise, the imports may fail due to
# Django not being configured properly. This mostly applies to tests.
if not settings.configured:
settings.configure()
from django.core.cache import caches, InvalidCacheBackendError
import django.dispatch
import django.utils
from django.utils.translation import get_language, to_locale
from pymongo import ReadPreference
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.draft_and_published import BranchSettingMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.util.django import get_current_request_hostname
import xblock.reference.plugins
try:
# We may not always have the request_cache module available
from request_cache.middleware import RequestCache
HAS_REQUEST_CACHE = True
except ImportError:
HAS_REQUEST_CACHE = False
# We also may not always have the current request user (crum) module available
try:
from xblock_django.user_service import DjangoXBlockUserService
from crum import get_current_user
HAS_USER_SERVICE = True
except ImportError:
HAS_USER_SERVICE = False
try:
from xblock_django.models import XBlockDisableConfig
except ImportError:
XBlockDisableConfig = None
log = logging.getLogger(__name__)
ASSET_IGNORE_REGEX = getattr(settings, "ASSET_IGNORE_REGEX", r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)")
class SignalHandler(object):
"""
This class is to allow the modulestores to emit signals that can be caught
by other parts of the Django application. If your app needs to do something
every time a course is published (e.g. search indexing), you can listen for
that event and kick off a celery task when it happens.
To listen for a signal, do the following::
from django.dispatch import receiver
from celery.task import task
from xmodule.modulestore.django import modulestore, SignalHandler
@receiver(SignalHandler.course_published)
def listen_for_course_publish(sender, course_key, **kwargs):
do_my_expensive_update.delay(course_key)
@task()
def do_my_expensive_update(course_key):
# ...
Things to note:
1. We receive using the Django Signals mechanism.
2. The sender is going to be the class of the modulestore sending it.
3. The names of your handler function's parameters *must* be "sender" and "course_key".
4. Always have **kwargs in your signal handler, as new things may be added.
5. The thing that listens for the signal lives in process, but should do
| almost no work. Its main job is to kick off the celery task that will
do the actual work.
"""
pre_publish = django.dispatch.Signal(providing_args=["course_key"])
course_published = django.dispatch.Signal(providing_args=["course_key"])
course_deleted = django.dispatch.Signal(providing_args=["course_key"])
library_updated = django.dis | patch.Signal(providing_args=["library_key"])
item_deleted = django.dispatch.Signal(providing_args=["usage_key", "user_id"])
_mapping = {
"pre_publish": pre_publish,
"course_published": course_published,
"course_deleted": course_deleted,
"library_updated": library_updated,
"item_deleted": item_deleted,
}
def __init__(self, modulestore_class):
self.modulestore_class = modulestore_class
def send(self, signal_name, **kwargs):
"""
Send the signal to the receivers.
"""
signal = self._mapping[signal_name]
responses = signal.send_robust(sender=self.modulestore_class, **kwargs)
for receiver, response in responses:
log.info('Sent %s signal to %s with kwargs %s. Response was: %s', signal_name, receiver, kwargs, response)
def load_function(path):
"""
Load a function by name.
Arguments:
path: String of the form 'path.to.module.function'. Strings of the form
'path.to.module:Class.function' are also valid.
Returns:
The imported object 'function'.
"""
if ':' in path:
module_path, _, method_path = path.rpartition(':')
module = import_module(module_path)
class_name, method_name = method_path.split('.')
_class = getattr(module, class_name)
function = getattr(_class, method_name)
else:
module_path, _, name = path.rpartition('.')
function = getattr(import_module(module_path), name)
return function
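# Illustrative resolutions (standard-library targets, not ones used by edX):
#   load_function('os.path.join')                      -> os.path.join
#   load_function('collections:OrderedDict.fromkeys')  -> OrderedDict.fromkeys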
def create_modulestore_instance(
engine,
content_store,
doc_store_config,
options,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
):
"""
This will return a new instance of a modulestore given an engine and options
"""
class_ = load_function(engine)
_options = {}
_options.update(options)
FUNCTION_KEYS = ['render_template']
for key in FUNCTION_KEYS:
if key in _options and isinstance(_options[key], basestring):
_options[key] = load_function(_options[key])
if HAS_REQUEST_CACHE:
request_cache = RequestCache.get_request_cache()
else:
request_cache = None
try:
metadata_inheritance_cache = caches['mongo_metadata_inheritance']
except InvalidCacheBackendError:
metadata_inheritance_cache = caches['default']
if issubclass(class_, MixedModuleStore):
_options['create_modulestore_instance'] = create_modulestore_instance
if issubclass(class_, BranchSettingMixin):
_options['branch_setting_func'] = _get_modulestore_branch_setting
if HAS_USER_SERVICE and not user_service:
xb_user_service = DjangoXBlockUserService(get_current_user())
else:
xb_user_service = None
if 'read_preference' in doc_store_config:
doc_store_config['read_preference'] = getattr(ReadPreference, doc_store_config['read_preference'])
if XBlockDisableConfig and settings.FEATURES.get('ENABLE_DISABLING_XBLOCK_TYPES', False):
disabled_xblock_types = XBlockDisableConfig.disabled_block_types()
else:
disabled_xblock_types = ()
xblock_field_data_wrappers = [load_function(path) for path in settings.XBLOCK_FIELD_DATA_WRAPPERS]
return class_(
contentstore=content_store,
metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
request_cache=request_cache,
xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),
xblock_field_data_wrappers=xblock_field_data_wrappers,
disabled_xblock_types=disabled_xblock_types,
doc_store_config=doc_store_config,
i18n_service=i18n_service or ModuleI18nService(),
fs_service=fs_service or xblock.reference.plugins.FSService(),
user_service=user_service or xb_user_service,
signal_handler=signal_handler or SignalHandler(class_),
**_options
)
# A singleton instance of the Mixed Modulestore
_MIXED_MODULESTORE = None
def modulestore():
"""
Returns the Mixed modulestore
"""
global _MIXED_MODULESTORE # pylint: disable=global-statement
if _MIXED_MODULESTORE is None:
_MIXED_MODULESTORE = create_modulestore_instance(
settings.MODULESTORE['default']['ENGINE'],
contentstore(),
settings.MODULESTORE['default'].get('DOC_STORE_CONFIG', {}),
settings.MODULESTORE['default'].get('OPTIONS', {})
)
if settings.FEATURES.get('CUSTOM_COURSES_EDX'):
# TODO: This import prevents a circular import issue, but is
# symptomatic of a lib having a dependency on code in lms. This
# should be |
ministryofjustice/cla_frontend | cla_frontend/apps/core/testing/test_views.py | Python | mit | 1,532 | 0.003916 | from django.test import SimpleTestCase
class MaintenanceModeTestCase(SimpleTestCase):
def test_maintenance_mode_enabled_home_page(self):
with self.settings(MAINTENANCE_MODE=True):
response = self.client.get("/", follow=True)
self.assertEqual(503, response.status_code)
self.assertIn("This service is down for maintenance", response.content)
self.assertEqual([("http://testserver/maintenance", 302)], response.redirect_chain)
def test_maintenance_mode_enabled_maintenance_page(self):
with self.settings(MAINTENANCE_MODE=True):
response = self.cl | ient.get("/maintenance", follow=False)
self.assertEqual(503, response.status_code)
self.assertIn("This service is down for maintenance", response.content)
def test_maintenance_mode_disabled_home_page(self):
with self.settings(MAINTENANCE_MODE=False):
response = self.client.get("/", follow=True)
self.assertEqual(200, resp | onse.status_code)
self.assertNotIn("This service is down for maintenance", response.content)
def test_maintenance_mode_disabled_maintenance_page(self):
with self.settings(MAINTENANCE_MODE=False):
response = self.client.get("/maintenance", follow=True)
self.assertEqual(200, response.status_code)
self.assertEqual(("http://testserver/", 302), response.redirect_chain[0])
self.assertNotIn("This service is down for maintenance", response.content)
|
pombredanne/parakeet | parakeet/ndtypes/fn_type.py | Python | bsd-3-clause | 1,204 | 0.015781 | from core_types import IncompatibleTypes, ImmutableT
class FnT(ImmutableT):
"""Type of a typed function"""
def __init__(self, input_types, return_type):
self.input_types = tuple(input_types)
self.return_type = return_type
self._hash = hash(self.input_types + (return_type,))
def __str__(sel | f):
input_str = ", ".join(str(t) for t in self | .input_types)
return "(%s)->%s" % (input_str, self.return_type)
def __repr__(self):
return str(self)
def __eq__(self, other):
return other.__class__ is FnT and \
self.return_type == other.return_type and \
len(self.input_types) == len(other.input_types) and \
all(t1 == t2 for (t1, t2) in
zip(self.input_types, other.input_types))
def combine(self, other):
if self == other:
return self
else:
raise IncompatibleTypes(self, other)
def __hash__(self):
return self._hash
_fn_type_cache = {}
def make_fn_type(input_types, return_type):
input_types = tuple(input_types)
key = input_types, return_type
if key in _fn_type_cache:
return _fn_type_cache[key]
else:
t = FnT(input_types, return_type)
_fn_type_cache[key] = t
return t |
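A short sketch of the interning that make_fn_type provides; the type names here are placeholder stand-ins for parakeet's real scalar types:

t1 = make_fn_type(('Int64',), 'Float64')
t2 = make_fn_type(('Int64',), 'Float64')
assert t1 is t2  # equal signatures share a single cached FnT instance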
ChameleonCloud/horizon | openstack_dashboard/dashboards/admin/volumes/views.py | Python | apache-2.0 | 11,089 | 0 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.ap | ache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or | agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
from openstack_dashboard.utils import futurist_utils
from openstack_dashboard.utils import settings as setting_utils
class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
tables.DataTableView):
table_class = volumes_tables.VolumesTable
page_title = _("Volumes")
FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
'encrypted': {_('yes'): True, _('no'): False}}
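    # FILTERS_MAPPING translates the human-readable values typed into the UI
    # filter into the boolean/string forms the Cinder API expects.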
def get_data(self):
default_filters = {'all_tenants': True}
filters = self.get_filters(default_filters.copy())
volumes = []
self.table.needs_filter_first = False
if (setting_utils.get_dict_config('FILTER_DATA_FIRST',
'admin.volumes') and
len(filters) == len(default_filters)):
self.table.needs_filter_first = True
return volumes
volumes = []
attached_instance_ids = []
tenants = []
tenant_dict = {}
instances = []
volume_ids_with_snapshots = []
def _task_get_tenants():
# Gather our tenants to correlate against IDs
try:
tmp_tenants, __ = keystone.tenant_list(self.request)
tenants.extend(tmp_tenants)
tenant_dict.update([(t.id, t) for t in tenants])
except Exception:
msg = _('Unable to retrieve volume project information.')
exceptions.handle(self.request, msg)
def _task_get_instances():
# As long as Nova API does not allow passing attached_instance_ids
# to nova.server_list, this call can be forged to pass anything
# != None
instances.extend(self._get_instances(
search_opts={'all_tenants': True}))
            # In the volumes tab we don't need to know about the
            # instance-image assignment, so it is fixed to an empty value.
for instance in instances:
if hasattr(instance, 'image'):
if isinstance(instance.image, dict):
instance.image['name'] = "-"
def _task_get_volumes_snapshots():
volume_ids_with_snapshots.extend(
self._get_volumes_ids_with_snapshots(
search_opts={'all_tenants': True}
))
def _task_get_volumes():
volumes.extend(self._get_volumes(search_opts=filters))
# update group name for volumes
self._get_groups(volumes, search_opts={'all_tenants': True})
attached_instance_ids.extend(
self._get_attached_instance_ids(volumes))
if 'project' in filters:
futurist_utils.call_functions_parallel(
_task_get_tenants,
_task_get_instances,
_task_get_volumes_snapshots
)
tenant_ids = [t.id for t in tenants
if t.name == filters['project']]
if not tenant_ids:
return []
del filters['project']
for id in tenant_ids:
filters['project_id'] = id
volumes += self._get_volumes(search_opts=filters)
attached_instance_ids = self._get_attached_instance_ids(volumes)
else:
futurist_utils.call_functions_parallel(
_task_get_volumes,
_task_get_tenants,
_task_get_instances,
_task_get_volumes_snapshots
)
self._set_volume_attributes(
volumes, instances, volume_ids_with_snapshots)
for volume in volumes:
tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
tenant = tenant_dict.get(tenant_id, None)
volume.tenant_name = getattr(tenant, "name", None)
return volumes
def get_filters(self, filters):
self.table = self._tables['volumes']
self.handle_server_filter(self.request, table=self.table)
self.update_server_filter_action(self.request, table=self.table)
filters = super(VolumesView, self).get_filters(filters,
self.FILTERS_MAPPING)
return filters
class DetailView(volumes_views.DetailView):
tab_group_class = volumes_tabs.VolumeDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
table = volumes_tables.VolumesTable(self.request)
context["actions"] = table.render_row_actions(context["volume"])
return context
def get_search_opts(self, volume):
search_opts = super(DetailView, self).get_search_opts(volume)
search_opts['all_tenants'] = True
return search_opts
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class ManageVolumeView(forms.ModalFormView):
form_class = volumes_forms.ManageVolume
template_name = 'admin/volumes/manage_volume.html'
form_id = "manage_volume_modal"
submit_label = _("Manage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = reverse_lazy('horizon:admin:volumes:manage')
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume")
def get_context_data(self, **kwargs):
context = super(ManageVolumeView, self).get_context_data(**kwargs)
return context
class UnmanageVolumeView(forms.ModalFormView):
form_class = volumes_forms.UnmanageVolume
template_name = 'admin/volumes/unmanage_volume.html'
form_id = "unmanage_volume_modal"
submit_label = _("Unmanage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:unmanage'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Unmanage Volume")
def get_context_data(self, **kwargs):
context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'host': getattr(volume, "os-vol-host-attr:host")}
class MigrateV |
cloud9ers/gurumate | environment/share/doc/ipython/examples/parallel/iopubwatcher.py | Python | lgpl-3.0 | 2,903 | 0.004823 | """A script for watching all traffic on the IOPub channel (stdout/stderr/pyerr) of engines.
This connects to the default cluster, or you can pass the path to your ipcontroller-client.json
Try running this script, and then running a few jobs that print (and call sys.stdout.flush),
and you will see the print statements as they arrive, notably not waiting for the results
to finish.
You can use the zeromq SUBSCRIBE mechanism to only receive information from specific engines,
and easily filter by message type.
Authors
-------
* MinRK
"""
import os
import sys
import json
import zmq
from IPython.zmq.session import Session
from IPython.parallel.util import disambiguate_url
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.path import get_security_file
def main(connection_file):
"""watch iopub channel, and print messages"""
ctx = zmq.Context.instance()
with open(connection_file) as f:
cfg = json.loads(f.read())
location = cfg['location']
reg_url = cfg['url']
session = Session(key=str_to_bytes(cfg['exec_key']))
query = ctx.socket(zmq.DEALER)
query.connect(disambiguate_url(cfg['url'], location))
session.send(query, "connection_request")
idents,msg = session.recv(query, mode=0)
c = msg['content']
iopub_url = disambiguate_url(c['iopub'], location)
sub = ctx.socket(zmq.SUB)
# This will subscribe to all messages:
sub.setsockopt(zmq.SUBSCRIBE, b'')
    # replace b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout
# 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes
# to everything from engine 1, but there is no way to subscribe to
# just stdout from everyone.
# multiple calls to subscribe will add subscriptions, e.g. to subscribe to
# engine 1's stderr and engine 2's stdout:
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')
sub.connect(iopub_url)
while True:
try:
idents,msg = session.recv(sub, mode=0)
except KeyboardInterrupt:
return
# ident always length 1 here
topic = idents[0]
if msg['msg_type'] == 'stream':
# stdout/stderr
# stream names are in msg['content']['name'], if you want to handle
# them differently
print("%s: %s" % (topic, msg['content']['data']))
elif msg['msg_type'] == 'pyerr':
# Python traceback
c = msg['content']
print(topic + ':')
| for line in c['traceback']:
# indent lines
print(' ' + line)
if __name__ == '__main__':
if len(sys.argv) > 1:
cf = sys.argv[1]
else:
# This gets the security fi | le for the default profile:
cf = get_security_file('ipcontroller-client.json')
main(cf)
|
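To generate traffic for the watcher, a sketch against the same era's IPython.parallel client (the function and engine count are illustrative):

from IPython.parallel import Client

rc = Client()

def noisy(i):
    import sys
    print('hello from task %d' % i)
    sys.stdout.flush()  # flush so the stream message is sent promptly

rc[:].map_async(noisy, range(4))  # stdout arrives on the watcher's SUB socket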
darthbhyrava/pywikibot-local | tests/api_tests.py | Python | mit | 38,915 | 0.000437 | # -*- coding: utf-8 -*-
"""API test module."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import datetime
import types
import pywikibot.data.api as api
import pywikibot.family
import pywikibot.login
import pywikibot.page
import pywikibot.site
from pywikibot.tools import (
MediaWikiVersion,
PY2,
UnicodeType,
)
from tests.aspects import (
unittest,
TestCase,
DefaultSiteTestCase,
DefaultDrySiteTestCase,
)
from tests.utils import allowed_failure, FakeLoginManager, PatchedHttp
if not PY2:
from urllib.parse import unquote_to_bytes
else:
from urllib import unquote_plus as unquote_to_bytes
class TestAPIMWException(DefaultSiteTestCase):
"""Test raising an APIMWException."""
data = {'error': {'code': 'internal_api_error_fake',
'info': 'Fake error message'},
'servedby': 'unittest',
}
def _dummy_request(self, **kwargs):
self.assertIn('body', kwargs)
self.assertIn('uri', kwargs)
self.assertIn('site', kwargs)
if kwargs['body'] is None:
# use uri and remove script path
parameters = kwargs['uri']
prefix = kwargs['site'].scriptpath() + '/api.php?'
self.assertEqual(prefix, parameters[:len(prefix)])
parameters = parameters[len(prefix):]
else:
parameters = kwargs['body']
parameters = parameters.encode('ascii') # it should be bytes anyway
# Extract parameter data from the body, it's ugly but allows us
# to verify that we actually test the right request
parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
keys = [p[0].decode('ascii') for p in parameters]
values = [unquote_to_bytes(p[1]) for p in parameters]
values = [v.decode(kwargs['site'].encoding()) for v in values]
values = [v.replace('+', ' ') for v in values]
values = [set(v.split('|')) for v in values]
parameters = dict(zip(keys, values))
if 'fake' not in parameters:
return False # do an actual request
if self.assert_parameters:
for param, value in self.assert_parameters.items():
self.assertIn(param, parameters)
if value is not None:
if isinstance(value, UnicodeType):
value = value.split('|')
self.assertLessEqual(set(value), parameters[param])
return self.data
def test_API_error(self):
"""Test a static request."""
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True})
with PatchedHttp(api, self.data):
self.assertRaises(api.APIMWException, req.submit)
def test_API_error_encoding_ASCII(self):
"""Test a Page instance as parameter using ASCII chars."""
page = pywikibot.page.Page(self.site, 'ASCII')
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True,
'titles': page})
self.assert_parameters = {'fake': ''}
with PatchedHttp(api, self._dummy_request):
self.assertRaises(api.APIMWException, req.submit)
def test_API_error_encoding_Unicode(self):
"""Test a Page instance as parameter using non-ASCII chars."""
page = pywikibot.page.Page(self.site, 'Ümlä üt')
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True,
'titles': page})
self.assert_parameters = {'fake': ''}
with PatchedHttp(api, self._dummy_request):
self.assertRaises(api.APIMWException, req.submit)
class TestApiFunctions(DefaultSiteTestCase):
"""API Request object test class."""
def testObjectCreation(self):
"""Test api.Request() constructor with implicit site creation."""
req = api.Request(action="test", foo="", bar="test")
self.assertTrue(req)
self.assertEqual(req.site, self.get_site())
class TestDryApiFunctions(DefaultDrySiteTestCase):
"""API Request object test class."""
def testObjectCreation(self):
"""Test api.Request() constructor."""
mysite = self.get_site()
req = api.Request(site=mysite, action="test", foo="", bar="test")
self.assertTrue(req)
self.assertEqual(req.site, mysite)
self.assertIn("foo", req._params)
self.assertEqual(req["bar"], ["test"])
# test item assignment
req["one"] = "1"
self.assertEqual(req._params['one'], ["1"])
# test compliance with dict interface
# req.keys() should contain "action", "foo", "bar", "one"
self.assertEqual(len(req.keys()), 4)
self.assertIn("test", req._encoded_items().values())
for item in req.items():
self.assertEqual(len(item), 2, item)
def test_mixed_mode(self):
"""Test if parameters is used with kwargs."""
req1 = api.Request(site=self.site, action='test', paramete | rs='foo')
self.assertIn('parameters' | , req1._params)
req2 = api.Request(site=self.site, parameters={'action': 'test',
'parameters': 'foo'})
self.assertEqual(req2['parameters'], ['foo'])
self.assertEqual(req1._params, req2._params)
class TestParamInfo(DefaultSiteTestCase):
"""Test ParamInfo."""
def test_init(self):
"""Test common initialization."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
len(pi.preloaded_modules))
self.assertIn('info', pi.query_modules)
self.assertIn('login', pi._action_modules)
def test_init_query_first(self):
"""Test init where it first adds query and then main."""
def patched_generate_submodules(modules):
# Change the query such that query is handled before main
modules = set(modules)
if 'main' in modules:
assert 'query' in modules
modules.discard('main')
modules = list(modules) + ['main']
else:
assert 'query' not in modules
original_generate_submodules(modules)
pi = api.ParamInfo(self.site, set(['query', 'main']))
self.assertEqual(len(pi), 0)
original_generate_submodules = pi._generate_submodules
pi._generate_submodules = patched_generate_submodules
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('query', pi._paraminfo)
def test_init_pageset(self):
"""Test initializing with only the pageset."""
site = self.get_site()
self.assertNotIn('query', api.ParamInfo.init_modules)
pi = api.ParamInfo(site, set(['pageset']))
self.assertNotIn('query', api.ParamInfo.init_modules)
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
self.assertIn('pageset', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
if 'query' in pi.preloaded_modules:
self.assertIn('query', pi._paraminfo)
self.assertEqual(len(pi), 4)
else:
self.assertNotIn('query', pi._paraminfo)
self.assertEqual(len(pi), 3)
self.assertEqual(len(pi),
len(pi.preloaded_modules))
if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
|
safl/chplforpyp-docs | docs/source/examples/func_decl.py | Python | apache-2.0 | 71 | 0 | def | abs(x):
if x < 0:
re | turn -x
else:
return x
|
misscindy/Interview | Graph/19_06_Bipartite.py | Python | cc0-1.0 | 1,033 | 0.001936 | import collections
class Vertex(object):
def __init__(self, v):
self.v = v
self.d = -1
self.neighbors = set()
def add_neighbor(self, v):
self.neighbors.add(v)
def __repr__(self):
return str(self.v) + " " + str(self.d) + " "
def is_bipartite(vertex):
if not vertex:
return False
vertex.d = 0
q = collections.deque()
q.append(vertex)
while q:
cur_node = q.popleft()
print cur_node
for n in cur_node.neighbors:
if n.d == -1:
n.d = cur_node.d + 1
| q.append(n)
elif n.d == cur_node.d:
return False
print vertex.neighbors
return True
if __name__ == "__main__":
#
graph = Vertex( | 0)
for i in range(1, 4):
graph.add_neighbor(Vertex(i))
a = graph.neighbors.pop()
graph.add_neighbor(a)
a.add_neighbor(graph)
print is_bipartite(graph)
print graph.neighbors
for i in graph.neighbors:
print i.neighbors |
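A quick counter-example sketch in the same style: a 3-cycle is an odd cycle, so the BFS colouring above should reject it.

a, b, c = Vertex(0), Vertex(1), Vertex(2)
for u, v in ((a, b), (b, c), (c, a)):
    u.add_neighbor(v)
    v.add_neighbor(u)
print is_bipartite(a)  # expect False: two adjacent vertices share a BFS level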
ghchinoy/tensorflow | tensorflow/python/keras/layers/wrappers_test.py | Python | apache-2.0 | 37,589 | 0.009418 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops.array_ops import concat
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import util as trackable_util
class _RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, constant_size, **kwargs):
self.units = units
self.state_size = units
self.constant_size = constant_size
super(_RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(self.constant_size, self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units, 'constant_size': self.constant_size}
base_config = super(_RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
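# How a cell like this is driven (a sketch; the shapes are illustrative):
#   cell = _RNNCellWithConstants(32, constant_size=3)
#   layer = keras.layers.RNN(cell)
#   y = layer(x, constants=c)   # x: (batch, time, dim), c: (batch, 3)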
class TimeDistributedTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
# test config
model.get_config()
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
def test_timedistributed_static_batch_size(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
def test_timedistributed_invalid_init(self):
x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegexp(
ValueError,
'Please initialize `TimeDistributed` layer with a `Layer` instance.'):
keras.layers.TimeDistributed(x)
def test_timedistributed_conv2d(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Conv2D(5, (2, 2), padding='same'),
input_shape=(2, 4, 4, 3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_timedistributed_stacked(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 3)),
epochs=1,
batch_size=10)
def test_regularizers(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2, kernel_regularizer='l1'),
input_shape=(3, 4)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
self.assertEqual(len(model.losses), 1)
def test_TimeDistributed_batchnorm(self):
with self.cached_session():
# test that wrapped BN updates still work.
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
| keras.layers.BatchNormalization(center=True, scale=True),
name='bn',
input_shape=(10, 2)))
model.compile(optimizer='rmsprop', loss='mse')
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model | .train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Verify input_map has one mapping from inputs to reshaped inputs.
self.assertEqual(len(td._input_map.keys()), 1)
def test_TimeDistributed_trainable(self):
# test layers that need learning_phase to be set
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
_ = layer(x)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.trainable_weights), 2)
layer.trainable = False
assert not layer.updates
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
with self.cached_session():
# test with unspecified shape and Embeddings with mask_zero
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
keras.layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None))) # N by t_1 by t_2 by 6
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(7, return_sequences=True)))
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(8, return_sequences=False)))
model.add(keras.layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
|
msarfati/InstaCommander | instacommander/tests/fixtures.py | Python | apache-2.0 | 657 | 0.003044 | # -*- coding: utf-8 - | *-
import os
MODULE_PATH = os.path.split(os.path.realpath(__file__))[0]
def typical_fixtures():
# models.User.add_system_users | ()
# typical_users()
pass
def typical_picture():
return open(os.path.join(MODULE_PATH, "data/led-hallway.jpg"), 'rb')
# def typical_users():
# models.User.register(
# email='joe',
# name='Joe MacMillan',
# password='aaa',
# confirmed=True,
# roles=["User"],
# )
# models.User.register(
# email='cameron',
# name='Cameron Howe',
# password='aaa',
# confirmed=True,
# roles=["User"],
# )
# # |
duozhilin/Blog | blogs/migrations/0009_topic_owner.py | Python | unlicense | 685 | 0.00146 | # -*- coding: utf-8 -*-
# Generated by Django | 1.11.5 on 2017-09-30 08:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blogs', '0008_remove_post_author'),
]
operations = [
migrations.AddField(
model_na | me='topic',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
pythonindia/junction | tests/utils.py | Python | mit | 1,964 | 0.001018 | # -*- coding: utf-8 -*-
import functools
from django.conf import settings
from django.db.models import signals
def signals_switch():
pre_save = signals.pre_save.receivers
post_save = signals.post_save.receivers
def disconnect():
signal | s.pre_save.receivers = []
signals.post_save.receivers = | []
def reconnect():
signals.pre_save.receivers = pre_save
signals.post_save.receivers = post_save
return disconnect, reconnect
disconnect_signals, reconnect_signals = signals_switch()
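# Usage sketch: silence model signal handlers around bulk fixture creation.
#   disconnect_signals()
#   try:
#       ...  # create many objects without firing pre/post_save receivers
#   finally:
#       reconnect_signals()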
def set_settings(**new_settings):
"""Decorator for set django settings that will be only available during the
wrapped-function execution.
For example:
@set_settings(FOO='bar')
def myfunc():
...
@set_settings(FOO='bar')
class TestCase:
...
"""
def decorator(testcase):
if type(testcase) is type:
namespace = {"OVERRIDE_SETTINGS": new_settings, "ORIGINAL_SETTINGS": {}}
wrapper = type(testcase.__name__, (SettingsTestCase, testcase), namespace)
else:
@functools.wraps(testcase)
def wrapper(*args, **kwargs):
old_settings = override_settings(new_settings)
try:
testcase(*args, **kwargs)
finally:
override_settings(old_settings)
return wrapper
return decorator
def override_settings(new_settings):
old_settings = {}
for name, new_value in list(new_settings.items()):
old_settings[name] = getattr(settings, name, None)
setattr(settings, name, new_value)
return old_settings
class SettingsTestCase(object):
@classmethod
def setup_class(cls):
cls.ORIGINAL_SETTINGS = override_settings(cls.OVERRIDE_SETTINGS)
@classmethod
def teardown_class(cls):
override_settings(cls.ORIGINAL_SETTINGS)
cls.OVERRIDE_SETTINGS.clear()
|
nicole-a-tesla/meetup.pizza | pizzaplace/admin.py | Python | mit | 99 | 0.010101 | from django.contrib import admin
from .models import PizzaPlace
admin.site.regis | ter(P | izzaPlace)
|
ContributeToScience/participant-booking-app | booking/core/ip2geo/__init__.py | Python | gpl-2.0 | 10,628 | 0.001976 | import math
import mmap
import gzip
import os
import codecs
import pytz
import const
from util import ip2long
from timezone import time_zone_by_country_and_region
MMAP_CACHE = const.MMAP_CACHE
MEMORY_CACHE = const.MEMORY_CACHE
STANDARD = const.STANDARD
class GeoIPError(Exception):
pass
class GeoIPMetaclass(type):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instances'):
cls._instances = {}
if len(args) > 0:
filename = args[0]
elif 'filename' in kwargs:
filename = kwargs['filename']
if not filename in cls._instances:
cls._instances[filename] = type.__new__(cls, *args, **kwargs)
return cls._instances[filename]
GeoIPBase = GeoIPMetaclass('GeoIPBase', (object,), {})
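# Construction sketch (the database path is illustrative; any GeoIP City
# .dat file works):
#   gi = GeoIP('/usr/share/GeoIP/GeoLiteCity.dat', MEMORY_CACHE)
#   rec = gi._get_record(ip2long('8.8.8.8'))  # city/region/lat/long dict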
class GeoIP(GeoIPBase):
def __init__(self, filename, flags=0):
"""
Initialize the class.
@param filename: path to a geoip database. If MEMORY_CACHE is used,
the file can be gzipped.
@type filename: str
@param flags: flags that affect how the database is processed.
Currently the only supported flags are STANDARD (the default),
MEMORY_CACHE (preload the whole file into memory), and
MMAP_CACHE (access the file via mmap).
@type flags: int
"""
self._filename = filename
self._flags = flags
if self._flags & const.MMAP_CACHE:
with open(filename, 'rb') as f:
self._filehandle = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
elif self._flags & const.MEMORY_CACHE:
try:
self._filehandle = gzip.open(filename, 'rb')
self._memoryBuffer = self._filehandle.read()
except IOError:
self._filehandle = codecs.open(filename, 'rb', 'latin_1')
self._memoryBuffer = self._filehandle.read()
else:
self._filehandle = codecs.open(filename, 'rb', 'latin_1')
self._setup_segments()
def _setup_segments(self):
"""
Parses the database file to determine what kind of database is being used and setup
segment sizes and start points that will be used by the seek*() methods later.
"""
self._databaseType = const.CITY_EDITION_REV1
self._recordLength = const.STANDARD_RECORD_LENGTH
filepos = self._filehandle.tell()
self._filehandle.seek(-3, os.SEEK_END)
for i in range(const.STRUCTURE_INFO_MAX_SIZE):
delim = self._filehandle.read(3)
if delim == unichr(255) * 3:
self._databaseType = ord(self._filehandle.read(1))
if self._databaseType in (const.CITY_EDITION_REV0,
const.CITY_EDITION_REV1):
self._databaseSegments = 0
buf = self._filehandle.read(const.SEGMENT_RECORD_LENGTH)
for j in range(const.SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
break
else:
self._filehandle.seek(-4, os.SEEK_CUR)
self._filehandle.seek(filepos, os.SEEK_SET)
def _seek_country(self, ipnum):
"""
Using the record length and appropriate start points, seek to the
country that corresponds to the converted IP address integer.
@param ipnum: result of ip2long conversion
@type ipnum: int
@return: offset of start of record
@rtype: int
"""
offset = 0
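        # Each tree node stores two child pointers of _recordLength bytes
        # each, little-endian; one bit of the IP is consumed per level,
        # most significant bit first.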
for depth in range(31, -1, -1):
if self._flags & const.MEMORY_CACHE:
startIndex = 2 * self._recordLength * offset
length = 2 * self._recordLength
endIndex = startIndex + length
buf = self._memoryBuffer[startIndex:endIndex]
else:
self._filehandle.seek(2 * self._recordLength * offset, os.SEEK_SET)
buf = self._filehandle.read(2 * self._recordLength)
x = [0, 0]
for i in range(2):
for j in range(self._recordLength):
x[i] += ord(buf[self._recordLength * i + j]) << (j * 8)
if ipnum & (1 << depth):
if x[1] >= self._databaseSegments:
return x[1]
offset = x[1]
else:
if x[0] >= self._databaseSegments:
return x[0]
offset = x[0]
raise Exception('Error traversing database - perhaps it is corrupt?')
def _get_record(self, ipnum):
"""
Populate location dict for converted IP.
@param ipnum: converted IP address
@type ipnum: int
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_cod | e, region_name, time_zone
@rtype: dict
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
| return None
record_pointer = seek_country + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
record_buf = self._filehandle.read(const.FULL_RECORD_LENGTH)
record = {}
record_buf_pos = 0
char = ord(record_buf[record_buf_pos])
record['country_code'] = const.COUNTRY_CODES[char]
record['country_code3'] = const.COUNTRY_CODES3[char]
record['country_name'] = const.COUNTRY_NAMES[char]
record_buf_pos += 1
str_length = 0
# get region
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['region_name'] = record_buf[record_buf_pos:record_buf_pos + str_length]
record_buf_pos += str_length + 1
str_length = 0
# get city
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['city'] = record_buf[record_buf_pos:record_buf_pos + str_length]
else:
record['city'] = ''
record_buf_pos += str_length + 1
str_length = 0
# get the postal code
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['postal_code'] = record_buf[record_buf_pos:record_buf_pos + str_length]
else:
record['postal_code'] = None
record_buf_pos += str_length + 1
str_length = 0
latitude = 0
longitude = 0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
latitude += (char << (j * 8))
record['latitude'] = (latitude / 10000.0) - 180.0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
longitude += (char << (j * 8))
record['longitude'] = (longitude / 10000.0) - 180.0
if self._databaseType == const.CITY_EDITION_REV1:
dmaarea_combo = 0
if record['country_code'] == 'US':
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
dmaarea_combo += (char << (j * 8))
record['dma_code'] = int(math.floor(dmaarea_combo / 1000))
record['area_code'] = dmaarea_combo % 1000
else:
record['dma_code'] = 0
record['area_code'] = 0
if 'dma_code' in record and record['dma_code'] in const.DMA_MAP:
record['metro_code'] = const.DMA_MAP[record['dma_code']]
else:
record['metro_code'] = ''
|
gonesurfing/Quisk_rpi_remote | softrock/conf_rx_tx_ensemble.py | Python | gpl-2.0 | 2,177 | 0.012402 | # This is a sample quisk_conf.py configuration file for a SoftRock Rx/Tx Ensemble or
# other SoftRock that has both transmit and receive capability. You need two sound
# cards, a high quality card to capture radio samples and play microphone sound; and
# a lower quality card to play radio sound and capture the microphone.
# Please do not change this sample file.
# Instead copy it to your own config file and make changes there.
# See quisk_conf_defaults.py for more information.
from softrock import hardware_usb as quisk_hardware
from softrock import widgets_tx as quisk_widgets
# In Linux, ALSA soundcards have these names. The "hw" devices are the raw
# hardware devices | , and should be used for soundcard capture.
#name_of_s | ound_capt = "hw:0"
#name_of_sound_capt = "hw:1"
#name_of_sound_capt = "plughw"
#name_of_sound_capt = "plughw:1"
#name_of_sound_capt = "default"
# Pulseaudio support added by Philip G. Lee. Many thanks!
# For PulseAudio support, use the name "pulse" and connect the streams
# to your hardware devices using a program like pavucontrol
#name_of_sound_capt = "pulse"
softrock_model = "RxTxEnsemble"
#si570_direct_control = True
#si570_i2c_address = 0x70
# If you have a SoftRock with a key jack, and you want to monitor the hardware key state for
# CW operation, enter a key poll time in milliseconds and a semi-break-in hang time in seconds.
# Do not press the PTT button. CW has its own timer to control transmit.
#key_poll_msec = 5
#key_hang_time = 0.7
# Radio samples and audio:
#sample_rate = 96000 # ADC hardware sample rate
#name_of_sound_capt = "hw:0" # Name of soundcard capture device for radio samples.
playback_rate = 48000 # radio sound playback rate
#name_of_sound_play = "hw:1" # Name of soundcard playback device for radio audio. Must be 48 ksps.
# Microphone:
#microphone_name = name_of_sound_play # Name of microphone capture device
#name_of_mic_play = name_of_sound_capt # Name of play device if CW or mic I/Q is sent to a sound card
mic_playback_rate = sample_rate # Playback rate for microphone
#mic_out_volume = 0.6 # Transmit sound output volume (after all processing) as a fraction 0.0 to 1.0
|
johnowhitaker/bobibabber | sklearn/dummy.py | Python | mit | 11,519 | 0 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.validation import safe_asarray
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
          class.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant: int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
`classes_` : array or list of array of shape = [n_classes]
Class labels for each output.
`n_classes_` : array or list of array of shape = [n_classes]
Number of label for each output.
`class_prior_` : array or list of array of shape = [n_classes]
Probability of each class for each output.
`n_outputs_` : int,
Number of outputs.
`outputs_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
s | elf.random_state = random_state
self.constant = constant
def fit(self, X, y):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
| Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant"):
raise ValueError("Unknown strategy type.")
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.classes_ = []
self.n_classes_ = []
self.class_prior_ = []
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
for k in xrange(self.n_outputs_):
classes, y_k = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
self.n_classes_.append(classes.shape[0])
self.class_prior_.append(np.bincount(y_k) / float(y_k.shape[0]))
# Checking in case of constant strategy if the constant provided
# by the user is in y.
if self.strategy == "constant":
if constant[k] not in self.classes_[k]:
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
y = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ret = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
elif self.strategy == "stratified":
ret = proba[k].argmax(axis=1)
elif self.strategy == "uniform":
ret = rs.randint(n_classes_[k], size=n_samples)
elif self.strategy == "constant":
ret = np.ones(n_samples, dtype=int) * (
np.where(classes_[k] == constant[k]))
y.append(classes_[k][ret])
y = np.vstack(y).T
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
eli |
punalpatel/st2 | st2actions/tests/unit/test_rescheduler.py | Python | apache-2.0 | 4,718 | 0.004663 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
import six
from st2tests import config as test_config
test_config.parse_args()
from oslo_config import cfg
from st2actions.notifier import scheduler
from st2common.constants import action as action_constants
from st2common.models.api.action import ActionAPI, RunnerTypeAPI
from st2common.models.db.ac | tion import LiveActionDB
from st2common.persistence.action import Action, LiveAction
from st2common.persistence.runner import RunnerType
from st2common.services import executions
from st2common.transport.liveacti | on import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2common.util import date as date_utils
from st2tests import DbTestCase, fixturesloader
from tests.unit.base import MockLiveActionPublisher
from st2tests.mocks.runner import MockActionRunner
TEST_FIXTURES = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
]
}
PACK = 'generic'
LOADER = fixturesloader.FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
NON_EMPTY_RESULT = 'non-empty'
RUN_RESULT = (action_constants.LIVEACTION_STATUS_SUCCEEDED, NON_EMPTY_RESULT, None)
@mock.patch.object(
MockActionRunner, 'run',
mock.MagicMock(return_value=RUN_RESULT))
@mock.patch.object(
CUDPublisher, 'publish_update',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_update))
@mock.patch.object(
LiveActionPublisher, 'publish_state',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_state))
class SchedulerTest(DbTestCase):
@classmethod
def setUpClass(cls):
super(SchedulerTest, cls).setUpClass()
for _, fixture in six.iteritems(FIXTURES['runners']):
instance = RunnerTypeAPI(**fixture)
RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))
for _, fixture in six.iteritems(FIXTURES['actions']):
instance = ActionAPI(**fixture)
Action.add_or_update(ActionAPI.to_model(instance))
def test_delayed_executions_recovery(self):
# Create a live action that's already delayed pass the allowed timeout.
dt_now = date_utils.get_datetime_utc_now()
dt_delta = datetime.timedelta(seconds=cfg.CONF.scheduler.delayed_execution_recovery)
dt_timeout = dt_now - dt_delta
liveaction = LiveActionDB(action='wolfpack.action-1',
parameters={'actionstr': 'foo'},
start_timestamp=dt_timeout,
status=action_constants.LIVEACTION_STATUS_DELAYED)
liveaction = LiveAction.add_or_update(liveaction, publish=False)
executions.create_execution_object(liveaction, publish=False)
# Run the rescheduling routine.
scheduler.recover_delayed_executions()
# The live action is expected to complete.
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
def test_delayed_executions_recovery_before_timeout(self):
# Create a live action that's delayed but has not passed the timeout.
liveaction = LiveActionDB(action='wolfpack.action-1',
parameters={'actionstr': 'foo'},
start_timestamp=date_utils.get_datetime_utc_now(),
status=action_constants.LIVEACTION_STATUS_DELAYED)
liveaction = LiveAction.add_or_update(liveaction, publish=False)
executions.create_execution_object(liveaction, publish=False)
# Run the rescheduling routine.
scheduler.recover_delayed_executions()
# The live action is expected to stay "delayed".
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_DELAYED)
|
archiechen/miami | tests/models_tests.py | Python | mit | 1,133 | 0.010591 |
import unittest
import os
os.environ['MIAMI_ENV'] = 'test'
import simplejson as json
from miami.models import Team, User, Task
class ModelsTest(unittest.TestCase):
def test_team_toJSON(self):
team = Team('Log')
team.id = 1
        self.assertEquals({'id': 1, 'name': 'Log', 'color': '2a33d8'}, team.toJSON())
def test_user_toJSON(self):
user = User('Mike')
self.assertEquals({'name': 'Mike', 'gravater': '91f376c4b36912e5075b6170d312eab5'}, user.toJSON())
def test_task_toJSON(self):
team = Team('Log')
team.id = 1
task = Task('title1', 'detail', status='DONE', price=1, estimate=4, team=team)
        task.id = 1
task.owner = User('Mike')
self.assertEquals({'id': 1, 'title': 'title1', 'detail': 'detail', 'status': 'DONE', 'price': 1, 'estimate': 4,'priority': 100,'time_slots': [], 'consuming': '0','created_time': 'just now', 'last_updated': 'just now', 'team': {
'name': 'Log', 'color': '2a33d8', 'id':1}, 'owner': {'name': 'Mike', 'gravater': '91f376c4b36912e5075b6170d312eab5'}, 'partner': {}}, task.toJSON())
|
abhattad4/Digi-Menu | tests/schema/tests.py | Python | bsd-3-clause | 61,911 | 0.001179 | import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.fields import (
BigIntegerField, BinaryField, BooleanField, CharField, DateTimeField,
IntegerField, PositiveIntegerField, SlugField, TextField,
)
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import TransactionTestCase, skipIfDBFeature
from .fields import CustomManyToManyField, InheritedManyToManyField
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak,
BookWithLongName, BookWithO2O, BookWithSlug, Note, Tag, TagIndexed,
TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, Note, Tag,
TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
with connection.cursor() as cursor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names(cursor)
for model in itertools.chain(SchemaTests.models, self.local_models):
# Remove any M2M tables first
for field in model._meta.local_many_to_many:
with atomic():
tbl = field.rel.through._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
# Then remove the main tables
with atomic():
tbl = model._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag)
        new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk_db_constraint(self):
"Tests that the db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Check that initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
# Check that BookWeak doesn't have an FK constraint
constraints = self.get_constraints(BookWeak._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.fail("FK constraint for author_id found")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() |
forcaeluz/easy-fat | feeding/migrations/0006_auto_20170701_2013.py | Python | gpl-3.0 | 1,270 | 0.002362 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-01 20:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('flocks', '0015_auto_20170624_1312'),
('feeding', '0005_auto_20170625_1129'),
]
operations = [
migrations.CreateModel(
name='FeedingPeriodForFlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateField()),
('end_date', models.DateField(null=True)),
('feed_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feeding.FeedType')),
                ('flock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flocks.Flock')),
],
),
migrations.RemoveField(
model_name='feedingperiodforroom',
name='feed_type',
),
migrations.RemoveField(
model_name='feedingperiodforroom',
name='room',
),
migrations.DeleteModel(
name='FeedingPeriodForRoom',
),
]
|
akosel/incubator-airflow | tests/models.py | Python | apache-2.0 | 100,341 | 0.000897 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import pendulum
import unittest
import time
import six
import re
import urllib
import textwrap
import inspect
from airflow import configuration, models, settings, AirflowException
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import DAG, TaskInstance as TI
from airflow.models import State as ST
from airflow.models import DagModel, DagRun, DagStat
from airflow.models import clear_task_instances
from airflow.models import XCom
from airflow.models import Connection
from airflow.jobs import LocalTaskJob
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.weight_rule import WeightRule
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from mock import patch, ANY
from parameterized import parameterized
from tempfile import mkdtemp, NamedTemporaryFile
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, that the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, that the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
with dag:
with dag:
op7 = DummyOperator(task_id='op7')
op8 = DummyOperator(task_id='op8')
op8.dag = dag2
self.assertEqual(op7.dag, dag)
self.assertEqual(op8.dag, dag2)
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
        self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
        with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
def test_dag_none_default_args_start_date(self):
"""
Tests if a start_date of None in default_args
works.
"""
dag = DAG('DAG', default_args={'start_date': None})
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_task_priority_weight_total(self):
width = 5
depth = 5
weight = 5
pattern = re.compile('stage(\\d*).(\\d*)')
# Fully connected parallel tasks. i.e. every task at each parallel
# stage is dependent on every task in the previous stage.
# Default weight should be calculated using downstream descendants
with DAG('dag', start_date=DEFAULT_DATE,
|
kyokley/MediaConverter | tests/functional/test_path.py | Python | mit | 800 | 0.00125 | import unittest
import tempfile
import shutil
import os
from path import Path
class TestGetLocalPathsFunctional(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
        self.path = Path('localpath', 'remotepath')
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_path_does_not_exist(self):
filepath = os.path.join(self.temp_dir, 'test_file')
expected = set()
actual = self.path._buildLocalPaths([filepath])
self.assertEqual(expected, actual)
def test_paths_exist(self):
files = set([tempfile.mkstemp(dir=self.temp_dir)[1]
for i in xrange(3)])
        expected = files
actual = self.path._buildLocalPaths([self.temp_dir])
self.assertEqual(expected, actual)
|
ifwe/tasr | test/pyunit/test_all.py | Python | apache-2.0 | 1,248 | 0 | '''
Created on Apr 8, 2014
@author: cmills
'''
from unittest import TestLoader, TestSuite, TextTestRunner
from test_tasr import TestTASR
from test_app_topic import TestTASRTopicApp
from test_app_core import TestTASRCoreApp
from test_app_subject import TestTASRSubjectApp
from test_client_legacy_methods import TestTASRLegacyClientMethods
from test_client_legacy_object import TestTASRLegacyClientObject
from test_client_methods import TestTASRClientMethods
from test_client_object import TestTASRClientObject
from test_registered_schema import TestRegisteredAvroSchema
if __name__ == "__main__":
    # Accumulate every case into a single suite so all of them actually run;
    # plain reassignment of SUITE would discard all but the last test case.
    SUITE = TestSuite()
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASR))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRTopicApp))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRCoreApp))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRSubjectApp))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRClientMethods))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRClientObject))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRLegacyClientMethods))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestTASRLegacyClientObject))
    SUITE.addTests(TestLoader().loadTestsFromTestCase(TestRegisteredAvroSchema))
TextTestRunner(verbosity=2).run(SUITE)
|
rohitranjan1991/home-assistant | homeassistant/components/overkiz/cover_entities/__init__.py | Python | mit | 61 | 0 | """ | Cover entities for the Overkiz (by Somfy) integration."""
| |
dcsch/pyif | pyif/parser.py | Python | mit | 3,790 | 0.006596 |
from . import action  # needed below by ensure_noun_token_in_scope()
from . import glk
from . import message
from .debug import log
#from . import *
NOUN_TOKEN = 1
HELD_TOKEN = 2
MULTI_TOKEN = 3
MULTIHELD_TOKEN = 4
MULTIEXCEPT_TOKEN = 5
MULTIINSIDE_TOKEN = 6
TOPIC_TOKEN = 7
CREATURE_TOKEN = 8
def tokenise_string(string):
"Transform the string into a list of tokens"
tokens = []
start = 0
for i in range(len(string)):
if string[i] == " ":
if start < i:
tokens.append(string[start:i])
start = i + 1
if len(string) > 0 and start <= i:
tokens.append(string[start:i + 1])
return tokens
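# Behaviour sketch (example not in the original module):
#   tokenise_string("take brass lamp") -> ["take", "brass", "lamp"]
#   tokenise_string("take  lamp ")     -> ["take", "lamp"]  (runs of spaces yield no empty tokens)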
class Parser:
"""Inputs commands from the user and execute | s them.
At the moment this is very simple -- just enough to get us going.
"""
def __init__(self, story, grammar):
self.story = story
self.grammar = grammar
def read_input(self):
"""
Parser strategy:
* Break input into tokens
* Match the initial token with a verb definition in the grammar
"""
glk.put_string("\n>")
line = glk.get_string()
tokens = tokenise_string(line)
if len(tokens) == 0:
            glk.put_string(message.PARDON)
return True
# Make the actor always the player for the moment
self.story.actor = self.story.player
# Find the Verb that handles this
verb = self.grammar.find_verb_matching_token(tokens[0])
if verb:
log("matched: " + tokens[0])
a, noun_tokens_and_types = verb.find_action_matching_tokens(tokens[1:])
if a:
log("ACTION: %s, MATCHED NOUNS: %s" % (a[1], noun_tokens_and_types))
matched_nouns = []
for noun_token, noun_type in noun_tokens_and_types:
n = self.ensure_noun_token_in_scope(noun_token, noun_type)
if not n:
glk.put_string(message.CANT_SEE_A % noun_token)
return True
matched_nouns.append(n)
self.story.action = a[1]
self.story.nouns = matched_nouns
# Substitute nouns if we have them (e.g. directions)
if a[2]:
self.story.nouns = a[2]
# Execute the action
a[1](self.story)
return not self.story.has_quit
else:
log("NO ACTION MATCH")
glk.put_string(message.UNDERSTAND_AS_FAR % verb.verb_tokens[0])
return True
glk.put_string(message.NOT_A_VERB)
return True
def ensure_noun_token_in_scope(self, noun_token, noun_type):
room = self.story.player.room()
n = None
# If the noun type is 'HELD_TOKEN', are we holding the noun?
if noun_type == HELD_TOKEN:
n = self.story.actor.find(noun_token)
            # We're not holding it, so can we see it to do an implicit take?
if not n:
n = room.find(noun_token)
if n:
self.story.nouns = [n]
glk.put_string(message.FIRST_TAKING % (n.article, n.name))
ks = self.story.keep_silent
self.story.keep_silent = True
action.take(self.story)
self.story.keep_silent = ks
n = self.story.actor.find(noun_token)
elif noun_type == NOUN_TOKEN:
n = room.find(noun_token)
if n:
log("matched noun: " + n.name)
return n
return None
|
materials-commons/materialscommons.org | backend/scripts/demo-project/build_project.py | Python | mit | 1,400 | 0.005 | #!/usr/bin/python
import argparse
from os import path as os_path
import demo_project as demo
import traceback
def set_host_url_arg():
parser.add_argument('--host', required=True,
help='the url for the Materials Commons server')
def set_datapath_arg():
parser.add_argument('--datapath', required=True,
help='the path to the directory containing the files used by the build')
def set_apikey_arg():
    parser.add_argument('--apikey', required=True, help='apikey for the user building the demo project')
parser = argparse.ArgumentParser(description='Build Demo Project.')
set_host_url_arg()
set_datapath_arg()
set_apikey_arg()
args = parser.parse_args()
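# Hypothetical invocation (host, datapath and apikey values are placeholders):
#   python build_project.py --host https://mc.example.org --datapath ./demo_data --apikey 0123abcd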
host = args.host
path = os_path.abspath(args.datapath)
key = args.apikey
# log_messages
# print "Running script to build demo project: "
# print " host = " + host + ", "
# print " key = " + key + ", "
# print " path = " + path
try:
builder = demo.DemoProject(host, path, key)
# a basic get request that makes no changes; will fail if there is a problem with the host or key
flag = builder.does_project_exist()
project = builder.build_project()
if flag:
print "Refreshed project with name = " + project.name
else:
print "Built project with name = " + project.name
except Exception as err:
traceback.print_exc()
print 'Error: ', err
|
qedsoftware/commcare-hq | custom/ewsghana/views.py | Python | bsd-3-clause | 11,616 | 0.002497 | import json
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.forms.formsets import formset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.http.response import Http404
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST, require_GET
from django.views.generic.base import RedirectView
from corehq.apps.commtrack.models import StockState
from corehq.apps.commtrack.views import BaseCommTrackManageView
from corehq.apps.consumption.shortcuts import get_default_monthly_consumption, \
set_default_consumption_for_supply_point
from corehq.apps.domain.decorators import (
login_and_domain_required,
)
from corehq.apps.domain.views import BaseDomainView
from corehq.apps.locations.permissions import locations_access_required, user_can_edit_any_location
from corehq.apps.products.models import Product
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import WebUser
from custom.common import ALL_OPTION
from custom.ewsghana.forms import InputStockForm, EWSUserSettings
from custom.ewsghana.handlers.web_submission_handler import WebSubmissionHandler
from custom.ewsghana.models import FacilityInCharge, EWSExtension
from custom.ewsghana.reports.specific_reports.dashboard_report import DashboardReport
from custom.ewsghana.reports.specific_reports.stock_status_report import StockoutsProduct, StockStatus
from custom.ewsghana.reports.stock_levels_report import InventoryManagementData
from custom.ewsghana.utils import make_url, has_input_stock_permissions, calculate_last_period, Msg
from custom.ilsgateway.views import GlobalStats
from dimagi.utils.dates import force_to_datetime
from dimagi.utils.web import json_handler, json_response
class EWSGlobalStats(GlobalStats):
template_name = "ewsghana/global_stats.html"
show_supply_point_types = True
root_name = 'Country'
class InputStockView(BaseDomainView):
section_name = 'Input stock data'
section_url = ""
template_name = 'ewsghana/input_stock.html'
@method_decorator(login_and_domain_required)
def dispatch(self, request, *args, **kwargs):
couch_user = self.request.couch_user
site_code = kwargs['site_code']
try:
sql_location = SQLLocation.objects.get(site_code=site_code, domain=self.domain)
if not has_input_stock_permissions(couch_user, sql_location, self.domain):
raise PermissionDenied()
except SQLLocation.DoesNotExist:
raise Http404()
return super(InputStockView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
InputStockFormSet = formset_factory(InputStockForm)
formset = InputStockFormSet(request.POST)
if formset.is_valid():
try:
sql_location = SQLLocation.objects.get(site_code=kwargs['site_code'], domain=self.domain)
except SQLLocation.DoesNotExist:
raise Http404()
text = ''
for form in formset:
product = Product.get(docid=form.cleaned_data['product_id'])
if form.cleaned_data['stock_on_hand'] is not None:
text += '{} {}.{} '.format(
product.code, form.cleaned_data['stock_on_hand'], form.cleaned_data['receipts'] or 0
)
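                    # Message sketch with made-up values: code "mc", SOH 20,
                    # receipts 5 yield "mc 20.5 ", i.e. the
                    # "<code> <soh>.<receipts>" stock-report syntax.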
amount = form.cleaned_data['default_consumption']
if amount is not None:
set_default_consumption_for_supply_point(
self.domain, product.get_id, sql_location.supply_point_id, amount
)
if text:
WebSubmissionHandler(self.request.couch_user, self.domain, Msg(text), sql_location).handle()
url = make_url(
StockStatus,
self.domain,
'?location_id=%s&filter_by_program=%s&startdate='
'&enddate=&report_type=&filter_by_product=%s',
(sql_location.location_id, ALL_OPTION, ALL_OPTION)
)
return HttpResponseRedirect(url)
context = self.get_context_data(**kwargs)
context['formset'] = formset
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super(InputStockView, self).get_context_data(**kwargs)
try:
sql_location = SQLLocation.objects.get(domain=self.domain, site_code=kwargs.get('site_code'))
except SQLLocation.DoesNotExist:
raise Http404()
InputStockFormSet = formset_factory(InputStockForm, extra=0)
initial_data = []
for product in sql_location.products.order_by('name'):
try:
stock_state = StockState.objects.get(
case_id=sql_location.supply_point_id,
product_id=product.product_id
)
stock_on_hand = stock_state.stock_on_hand
monthly_consumption = stock_state.get_monthly_consumption()
except StockState.DoesNotExist:
stock_on_hand = 0
monthly_consumption = 0
initial_data.append(
{
'product_id': product.product_id,
'product': product.name,
'stock_on_hand': int(stock_on_hand),
'monthly_consumption': round(monthly_consumption) if monthly_consumption else 0,
'default_consumption': get_default_monthly_consumption(
self.domain,
product.product_id,
sql_location.location_type.name,
sql_location.supply_point_id
),
'units': product.units
}
)
context['formset'] = InputStockFormSet(initial=initial_data)
return context
class EWSUserExtensionView(BaseCommTrackManageView):
template_name = 'ewsghana/user_extension.html'
@property
def page_context(self):
page_context = super(EWSUserExtensionView, self).page_context
user_id = self.kwargs['user_id']
try:
extension = EWSExtension.objects.get(domain=self.domain, user_id=user_id)
sms_notifications = extension.sms_notifications
facility = extension.location_id
except EWSExtension.DoesNotExist:
sms_notifications = None
facility = None
page_context['form'] = EWSUserSettings(user_id=user_id, domain=self.domain, initial={
'sms_notifications': sms_notifications, 'facility': facility
})
page_context['couch_user'] = self.web_user
return page_context
@property
def web_user(self):
return WebUser.get(docid=self.kwargs['user_id'])
def post(self, request, *args, **kwargs):
form = EWSUserSettings(request.POST, user_id=kwargs['user_id'], domain=self.domain)
if form.is_valid():
form.save(self.web_user, self.domain)
messages.add_message(request, messages.SUCCESS, 'Settings updated successfully!')
return self.get(request, *args, **kwargs)
@require_GET
def inventory_management(request, domain):
inventory_management_ds = InventoryManagementData(
config=dict(
program=None, products=None, domain=domain,
startdate=force_to_datetime(request.GET.get('startdate')),
enddate=force_to_datetime(request.GET.get('enddate')), location_id=request.GET.get('location_id'),
custom_date=True
)
)
return HttpResponse(
json.dumps(inventory_management_ds.charts[0].data, default=json_handler),
content_type='application/json'
)
@require_GET
def stockouts_product(request, domain):
stockout_graph = StockoutsProduct(
config=dict(
program=None, products=None, domain=domain,
startdate=force_to_datetime(request.GET.get('startdate')),
enddate=force_to_datetime(request.GET.get('enddate')), location_id |
laurentb/weboob | modules/airparif/pages.py | Python | lgpl-3.0 | 3,898 | 0.000257 | # -*- coding: utf-8 -*-
# Copyright(C) 2019 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.browser.pages import JsonPage
from weboob.browser.elements import ItemElement, DictElement, method
from weboob.browser.filters.standard import (
Env, Format, Regexp, DateTime, CleanDecimal, Lower, Map,
)
from weboob.browser.filters.json import Dict
from weboob.capabilities.address import GeoCoordinates, PostalAddress
from weboob.capabilities.gauge import Gauge, GaugeSensor, GaugeMeasure
SENSOR_NAMES = {
'PM25': 'PM 2.5',
'PM10': 'PM 10',
'O3': 'O₃',
'NO3': 'NO₃',
'NO2': 'NO₂',
}
class AllPage(JsonPage):
@method
class iter_gauges(DictElement):
def find_elements(self):
return self.el.values()
class item(ItemElement):
klass = Gauge
def condition(self):
# sometimes the "date" field (which contains the hour) is empty
# and no measure is present in it, so we discard it
return bool(self.el['date'])
def parse(self, el):
for k in el:
self.env[k] = el[k]
self.env['city'] = Regexp(Dict('commune'), r'^(\D+)')(self)
obj_id = Dict('nom_court_sit')
obj_name = Dict('isit_long')
obj_city = Env('city')
obj_object = 'Pollution'
obj__searching = Lower(Format(
'%s %s %s %s',
Dict('isit_long'),
Dict('commune'),
Dict('ninsee'),
Dict('adresse'),
))
class obj_sensors(DictElement):
def find_elements(self):
return [dict(zip(('key', 'value'), tup)) for tup in self.el['indices'].items()]
class item(ItemElement):
klass = GaugeSensor
obj_name = Map(Dict('key'), SENSOR_NAMES)
obj_gaugeid = Env('nom_court_sit')
obj_id = Format('%s.%s', obj_gaugeid, Dict('key'))
obj_unit = 'µg/m³'
class obj_lastvalue(ItemElement):
klass = GaugeMeasure
obj_date = DateTime(
Format(
'%s %s',
Env('min_donnees'),
Env('date'), # "date" contains the time...
)
)
obj_level = CleanDecimal(Dict('value'))
class obj_geo(ItemElement):
klass = GeoCoordinates
obj_latitude = CleanDecimal(Env('latitude'))
obj_longitude = CleanDecimal(Env('longitude'))
class obj_location(ItemElement):
klass = PostalAddress
obj_street = Env('adresse')
obj_postal_code = Env('ninsee')
obj_city = Env('city')
obj_region = 'Ile-de-France'
obj_country = 'France'
|
tianrui/FlappyClone | test_dtree.py | Python | mit | 440 | 0.034091 | import numpy as np
import pdb
#from dtree import *
import model
def main():
# detree = DTree(np.zeros(3), [1., 2., 3.])
#
# inputs = [0.2, 0.6, 0.1]
    # print detree.infer(inputs)
# detree.feedback(inputs, 1)
# pdb.set_trace()
# cuts = detree.save()
    # print cuts
# detree.feedback(inputs, 0)
# print detree.infer(inputs)
testmod = model.Model(12)
testmod.train()
if __name__ == '__main__':
main()
|
mikenawrocki/rtslib-fb | rtslib/root.py | Python | apache-2.0 | 10,297 | 0.002137 | '''
Implements the RTSRoot class.
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc
Copyright (c) 2011-2014 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
import stat
import json
from .node import CFSNode
from .target import Target
from .fabric import FabricModule
from .tcm import so_mapping, StorageObject
from .utils import RTSLibError, modprobe, mount_configfs
from .utils import dict_remove, set_attributes
default_save_file = "/etc/target/saveconfig.json"
class RTSRoot(CFSNode):
'''
This is an interface to the root of the configFS object tree.
    It allows one to start browsing Target and StorageObjects,
    as well as helper methods to return arbitrary objects from the
configFS tree.
>>> import rtslib.root as root
>>> rtsroot = root.RTSRoot()
>>> rtsroot.path
'/sys/kernel/config/target'
>>> rtsroot.exists
True
>>> rtsroot.targets # doctest: +ELLIPSIS
[...]
>>> rtsroot.tpgs # doctest: +ELLIPSIS
[...]
>>> rtsroot.storage_objects # doctest: +ELLIPSIS
[...]
>>> rtsroot.network_portals # doctest: +ELLIPSIS
[...]
'''
# RTSRoot private stuff
def __init__(self):
'''
Instantiate an RTSRoot object. Basically checks for configfs setup and
base kernel modules (tcm)
'''
super(RTSRoot, self).__init__()
modprobe('configfs')
mount_configfs()
modprobe('target_core_mod')
self._create_in_cfs_ine('any')
def _list_targets(self):
self._check_self()
for fabric_module in self.fabric_modules:
for target in fabric_module.targets:
yield target
def _list_storage_objects(self):
self._check_self()
for so in StorageObject.all():
yield so
def _list_tpgs(self):
self._check_self()
for t in self.targets:
for tpg in t.tpgs:
yield tpg
def _list_node_acls(self):
self._check_self()
for t in self.tpgs:
for node_acl in t.node_acls:
yield node_acl
def _list_node_acl_groups(self):
self._check_self()
for t in self.tpgs:
for nag in t.node_acl_groups:
yield nag
def _list_mapped_luns(self):
self._check_self()
for na in self.node_acls:
for mlun in na.mapped_luns:
yield mlun
def _list_mapped_lun_groups(self):
self._check_self()
for nag in self.node_acl_groups:
for mlg in nag.mapped_lun_groups:
yield mlg
def _list_network_portals(self):
self._check_self()
for t in self.tpgs:
for p in t.network_portals:
yield p
def _list_luns(self):
self._check_self()
for t in self.tpgs:
for lun in t.luns:
yield lun
def _list_sessions(self):
self._check_self()
for na in self.node_acls:
if na.session:
yield na.session
def _list_fabric_modules(self):
self._check_self()
for mod in FabricModule.all():
yield mod
def __str__(self):
return "rtslib"
# RTSRoot public stuff
def dump(self):
'''
Returns a dict representing the complete state of the target
config, suitable for serialization/deserialization, and then
handing to restore().
'''
d = super(RTSRoot, self).dump()
d['storage_objects'] = [so.dump() for so in self.storage_objects]
d['targets'] = [t.dump() for t in self.targets]
d['fabric_modules'] = [f.dump() for f in self.fabric_modules
if f.has_feature("discovery_auth")
if f.discovery_enable_auth]
return d
def clear_existing(self, confirm=False):
'''
Remove entire current configuration.
'''
if not confirm:
raise RTSLibError("As a precaution, confirm=True needs to be set")
# Targets depend on storage objects, delete them first.
for t in self.targets:
t.delete()
for fm in (f for f in self.fabric_modules if f.has_feature("discovery_auth")):
fm.clear_discovery_auth_settings()
for so in self.storage_objects:
so.delete()
def restore(self, config, clear_existing=False, abort_on_error=False):
'''
Takes a dict generated by dump() and reconfigures the target to match.
Returns list of non-fatal errors that were encountered.
Will refuse to restore over an existing configuration unless clear_existing
is True.
'''
if clear_existing:
self.clear_existing(confirm=True)
elif any(self.storage_objects) or any(self.targets):
raise RTSLibError("storageobjects or targets present, not restoring")
errors = []
if abort_on_error:
def err_func(err_str):
raise RTSLibError(err_str)
else:
def err_func(err_str):
errors.append(err_str + ", skipped")
for index, so in enumerate(config.get('storage_objects', [])):
if 'name' not in so:
err_func("'name' not defined in storage object %d" % index)
continue
try:
so_cls = so_mapping[so['plugin']]
except KeyError:
err_func("'plugin' not defined or invalid in storageobject %s" % so['name'])
continue
kwargs = so.copy()
dict_remove(kwargs, ('exists', 'attributes', 'plugin', 'buffered_mode'))
try:
so_obj = so_cls(**kwargs)
except Exception as e:
err_func("Could not create StorageObject %s: %s" % (so['name'], e))
continue
# Custom err func to include block name
def so_err_func(x):
return err_func("Storage Object %s/%s: %s" % (so['plugin'], so['name'], x))
set_attributes(so_obj, so.get('attributes', {}), so_err_func)
# Don't need to create fabric modules
for index, fm in enumerate(config.get('fabric_modules', [])):
if 'name' not in fm:
err_func("'name' not defined in fabricmodule %d" % index)
continue
for fm_obj in self.fabric_modules:
if fm['name'] == fm_obj.name:
fm_obj.setup(fm, err_func)
break
for index, t in enumerate(config.get('targets', [])):
if 'wwn' not in t:
err_func("'wwn' not defined in target %d" % index)
continue
if 'fabric' not in t:
err_func("target %s missing 'fabric' field" % t['wwn'])
continue
if t['fabric'] not in (f.name for f in self.fabric_modules):
err_func("Unknown fabric '%s'" % t['fabric'])
continue
fm_obj = FabricModule(t['fabric'])
# Instantiate target
Target.setup(fm_obj, t, err_func)
return errors
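    # Hypothetical round trip (sketch): config = RTSRoot().dump() followed by
    # RTSRoot().restore(config, clear_existing=True) rebuilds the saved state;
    # restore() returns the list of non-fatal error strings (empty on success).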
def save_to_file(self, save_file=None):
'''
Write the configuration in json format to a file.
Save file defaults to '/etc/targets/saveconfig.json'.
'''
if not save_file:
save_file = default_save_file
with open(save_file+".temp", "w+") as f:
os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)
f.write(json.dumps(self.dump(), sort_keys=True, indent=2))
f.write("\n" |
Venturi/oldcms | env/lib/python2.7/site-packages/sortedm2m/forms.py | Python | apache-2.0 | 4,668 | 0.001714 | # -*- coding: utf-8 -*-
import django
import sys
from itertools import chain
from django import forms
from django.conf import settings
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe
if sys.version_info[0] < 3:
iteritems = lambda d: iter(d.iteritems())
string_types = basestring,
str_ = unicode
else:
iteritems = lambda d: iter(d.items())
string_types = str,
str_ = str
STATIC_URL = getattr(settings, 'STATIC_URL', settings.MEDIA_URL)
class SortedCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
class Media:
js = (
STATIC_URL + 'sortedm2m/widget.js',
STATIC_URL + 'sortedm2m/jquery-ui.js',
)
css = {'screen': (
STATIC_URL + 'sortedm2m/widget.css',
)}
    def build_attrs(self, attrs=None, **kwargs):
attrs = super(SortedCheckboxSelectMultiple, self).\
build_attrs(attrs, **kwargs)
classes = attrs.setdefault('class', '').split()
classes.append('sortedm2m')
attrs['class'] = ' '.join(classes)
return attrs
    def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
# Normalize to strings
str_values = [force_text(v) for v in value]
selected = []
unselected = []
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = ' for="%s"' % conditional_escape(final_attrs['id'])
else:
label_for = ''
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_text(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_text(option_label))
item = {'label_for': label_for, 'rendered_cb': rendered_cb, 'option_label': option_label, 'option_value': option_value}
if option_value in str_values:
selected.append(item)
else:
unselected.append(item)
# re-order `selected` array according str_values which is a set of `option_value`s in the order they should be shown on screen
ordered = []
for value in str_values:
for select in selected:
if value == select['option_value']:
ordered.append(select)
selected = ordered
html = render_to_string(
'sortedm2m/sorted_checkbox_select_multiple_widget.html',
{'selected': selected, 'unselected': unselected})
return mark_safe(html)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if isinstance(value, string_types):
return [v for v in value.split(',') if v]
return value
if django.VERSION < (1, 7):
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_text(value) for value in initial]
data_set = [force_text(value) for value in data]
return data_set != initial_set
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
widget = SortedCheckboxSelectMultiple
def clean(self, value):
queryset = super(SortedMultipleChoiceField, self).clean(value)
if value is None or not isinstance(queryset, QuerySet):
return queryset
object_list = dict((
(str_(key), value)
for key, value in iteritems(queryset.in_bulk(value))))
return [object_list[str_(pk)] for pk in value]
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_text(value) for value in self.prepare_value(initial)]
data_set = [force_text(value) for value in data]
return data_set != initial_set
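# Hypothetical usage sketch -- `Photo` is an assumed model, not part of this
# module:
#   class GalleryForm(forms.Form):
#       photos = SortedMultipleChoiceField(queryset=Photo.objects.all())
# clean() then returns the chosen objects in the order they were submitted,
# not in queryset order.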
|
GrandmasterShadowMorgue/KerfuffleOfTheDandelions | pronounce.py | Python | mit | 662 | 0.04236 | #
# pronounce.py
# Record pronunciations for word entries
#
# Jonatan H Sundqvist
# May 18 2015
#
# TODO -
# -
# SPEC -
# -
import mozart
import sqlite3
import queue
import tkinter as tk
class Pronounce(object):
'''
Docstring goes here
'''
def __init__(self):
'''
Docstring goes here
'''
#
self.size = (700, 420)
#
self.window = tk.Tk() #
self.window.title('Pronounce') #
        self.window.geometry('{width}x{height}'.format(width=self.size[0], height=self.size[1]))
def run(self):
return self.window.mainloop()
def main():
app = Pronounce()
    app.run()
if __name__ == '__main__':
main() |
ggravlingen/pytradfri | pytradfri/mood.py | Python | mit | 400 | 0 | """Represent a mood on the gateway."""
from .const import ROOT_MOODS
from .resource import ApiResource
class Mood(ApiResource):
def __init__(self, raw, parent):
        super().__init__(raw)
self._parent = parent
    @property
def path(self):
return [ROOT_MOODS, self._parent, self.id]
def __repr__(self):
return "<Mood {} {}>".format(self._parent, self.name)
|
MyRobotLab/myrobotlab | src/main/resources/resource/Servo/Servo.py | Python | apache-2.0 | 2,843 | 0.006331 | #########################################
# Servo.py
# categories: servo
# more info @: http://myrobotlab.org/service/Servo
#########################################
# uncomment for virtual hardware
Platform.setVirtual(True)
# Every settings like limits / port number / controller are saved after initial use
# so you can share them between differents script
servoPin01 = 4
servoPin02 = 5
# port = "/dev/ttyUSB0"
port = "COM15"
# create a servo controller and a servo
arduino = Runtime.start("arduino","Arduino")
servo01 = Runtime.start("servo01","Servo")
servo02 = Runtime.start("servo02","Servo")
# initialize arduino
# linux or macos -> arduino.connect("/dev/ttyUSB0")
print("connecting arduino to serial port")
arduino.connect(port)
# set limits
print("setting min max limits of servo")
# servo01.setMinMax(0, 180)
servo01.map(0, 180, 0, 180)
# set rest position
servo01.setRest(90)
# attach servo
print("attaching servo with pins to controller")
servo01.attach(arduino.getName(), servoPin01)
servo02.attach(arduino.getName(), servoPin02)
# auto disable - this enables (starts pwm) before a movement
# and disables (stops pwm) after a movement
servo01.setAutoDisable(True)
# servo02.setAutoDisable(False)
# speed changes
print("speed changes")
servo01.setSpeed(20) ## Low speed
servo01.moveToBlocking(90) # moveToBlocking will wait for finished move
servo01.setSpeed(50) ## medium speed
servo01.moveToBlocking(180) # moveToBlocking will wait for finished move
servo01.unsetSpeed() ## max speed ( no more speed conytol )
servo01.moveTo(0) # we cannot use moveToBlocking if servo speed is set to -1 ( max ) !!
sleep(2)
# fast sweep 10 seconds
print("fast sweep")
servo01.setSpeed(300)
servo01.sweep(0, 180);
sleep(10)
servo01.stop()
# print info
print("servo position :{}".format(servo01.getPos()))
print("servo pin :{}".format(servo01.getPin()))
print("servo re | st position :{}".format(servo01.getRest()))
print("servo speed :{}".format(servo01.getSpeed()))
print("servo is inverted :{}".format(s | ervo01.isInverted()))
print("servo min :{}".format(servo01.getMin()))
print("servo max :{}".format(servo01.getMax()))
# sync servo02 with servo01
# now servo2 will be a slave to servo01
print("syncing servo02 with servo01")
servo02.sync(servo01)
servo01.moveTo(10)
sleep(0.5)
servo01.moveTo(179)
sleep(0.5)
servo01.moveTo(10)
sleep(0.5)
# writing position in us
servo01.writeMicroseconds(1875)
print("servo position :{}".format(servo01.getPos())) # check if correct ?
# moving to rest position
print("servo01 rest")
servo01.rest()
sleep(2)
# turn off power explicitly (only needed when setAutoDisable(False) is used)
print("turn of servos pwm")
servo01.disable()
servo02.disable()
# detaching servo01 from controller
# TODO - make arduino.detach() detach all services
print("detaching servos from controller")
servo01.detach()
servo02.detach() |
UMN-Hydro/GSFLOW_pre-processor | python_scripts/MODFLOW_scripts/print_MODFLOW_inputs_res_NWT.py | Python | gpl-3.0 | 3,667 | 0.011181 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 17 22:06:52 2017
Based on: print_MODFLOW_inputs_res_NWT.m
@author: gcng
"""
# print_MODFLOW_inputs
import numpy as np
import MODFLOW_NWT_lib as mf # functions to write individual MODFLOW files
import os # os functions
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read('settings.ini')
LOCAL_DIR = parser.get('settings', 'local_dir')
GSFLOW_DIR = LOCAL_DIR + "/GSFLOW"
# - directories
sw_2005_NWT = 2 # 1 for MODFLOW-2005; 2 for MODFLOW-NWT algorithm (both can be
                # carried out with MODFLOW-NWT code)
fl_BoundConstH = 0 # 1 for const head at high elev boundary, needed for numerical
# convergence for AGU2016 poster. Maybe resolved with MODFLOW-NWT?
if sw_2005_NWT == 1:
# MODFLOW input files
GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_2005/'
# MODFLOW output files
GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_2005/'
elif sw_2005_NWT == 2:
# MODFLOW input files
GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_NWT/'
    # MODFLOW output files
GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_NWT/'
infile_pre = 'test2lay_py';
NLAY = 2;
DZ = [100, 50] # [NLAYx1] [m] ***testing
# DZ = [350, 100] # [NLAYx1] [m] ***testing
# length of transient stress period (follows 1-day steady-state period) [d]
# perlen_tr = 365; # [d], ok if too long
# perlen_tr = 365*5 + ceil(365*5/4); # [d], includes leap years; ok if too long (I think, but maybe run time is longer?)
perlen_tr = 365*30 + np.ceil(365*30/4) # [d], includes leap years; ok if too long (I think, but maybe run time is longer?)
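# Arithmetic check (sketch): 365*30 = 10950 days plus ceil(10950/4) = 2738
# extra days gives perlen_tr = 13688 -- deliberately generous padding, since
# the comment above notes it is ok if too long.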
GIS_indir = GSFLOW_DIR + '/DataToReadIn/GIS/';
# use restart file as initial cond (empty string to not use restart file)
fil_res_in = '' # empty string to not use restart file
#fil_res_in = '/home/gcng/workspace/ProjectFiles/AndesWaterResources/GSFLOW/outputs/MODFLOW/test2lay_melt_30yr.out'  # empty string to not use restart file
# for various files: ba6, dis, uzf, lpf
surfz_fil = GIS_indir + 'topo.asc'
# surfz_fil = GIS_indir + 'SRTM_new_20161208.asc'
# for various files: ba6, uzf
mask_fil = GIS_indir + 'basinmask_dischargept.asc'
# for sfr
reach_fil = GIS_indir + 'reach_data.txt'
segment_fil_all = [GIS_indir + 'segment_data_4A_INFORMATION_Man.csv',
GIS_indir + 'segment_data_4B_UPSTREAM_Man.csv',
GIS_indir + 'segment_data_4C_DOWNSTREAM_Man.csv']
# create MODFLOW input directory if it does not exist:
if not os.path.isdir(GSFLOW_indir):
os.makedirs(GSFLOW_indir)
# while we're at it, create MODFLOW output file if it does not exist:
if not os.path.isdir(GSFLOW_outdir):
os.makedirs(GSFLOW_outdir)
##
mf.write_dis_MOD2_f(GSFLOW_indir, infile_pre, surfz_fil, NLAY, DZ, perlen_tr);
mf.write_ba6_MOD3_2(GSFLOW_indir, infile_pre, mask_fil, fl_BoundConstH); # list this below write_dis_MOD2_f
# flow algorithm
if sw_2005_NWT == 1:
mf.write_lpf_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY);
elif sw_2005_NWT == 2:
# MODFLOW-NWT files
mf.write_upw_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY);
mf.NWT_write_file(GSFLOW_indir, infile_pre);
# unsat zone and streamflow input files
mf.make_uzf3_f_2(GSFLOW_indir, infile_pre, surfz_fil, mask_fil);
mf.make_sfr2_f_Mannings(GSFLOW_indir, infile_pre, reach_fil, segment_fil_all); # list this below write_dis_MOD2_f
# Write PCG file (only used for MODFLOW-2005, but this function also creates OC file)
mf.write_OC_PCG_MOD_f(GSFLOW_indir, infile_pre, perlen_tr);
# Write namefile
mf.write_nam_MOD_f2_NWT(GSFLOW_indir, GSFLOW_outdir, infile_pre, fil_res_in, sw_2005_NWT);
|
Pavaka/Pygorithms | input_checkers/TP_input_checker.py | Python | gpl-2.0 | 1,080 | 0 | def check_input_data(costs, vector_a, vector_b):
if not _is_list(vector_a):
raise VectorANotListError
    if not _is_list(vector_b):
raise VectorBNotListError
if not _is_list(costs):
raise CostsNotListError
if 0 in (len(costs), len(vector_a), len(vector_b)):
raise EmptyListError
_check_all_values_positive_integers(costs)
_check_all_values_positive_integers(vector_a)
_check_all_values_positive_integers(vector_b)
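# Example (sketch, not in the original module):
#   check_input_data([1, 2, 3, 4], [5, 6], [7, 8])  # passes silently
#   check_input_data([], [1], [1])                  # raises EmptyListError
# Note costs must be a flat list here: a nested list such as [[1, 2], [3, 4]]
# raises ListContainsNaN from the positive-integer check.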
class EmptyListError(Exception):
pass
class VectorANotListError(Exception):
pass
class VectorBNotListError(Exception):
pass
class CostsNotListError(Exception):
pass
class ListContainsNaN(Exception):
pass
class NegativeValueError(Exception):
pass
def _is_list(item):
if isinstance(item, (list, tuple)):
return True
else:
return False
def _check_all_values_positive_integers(values):
for value in values:
if not isinstance(value, int):
raise ListContainsNaN
if value <= 0:
raise NegativeValueError
|
jakemathai/computer-vision | label_detect.py | Python | apache-2.0 | 437 | 0.029748 | #Pass in a photo url to the script and it performs label detection (top 5 labels) on the photo.
import sys
import argparse
import io
from google.cloud import vision
#capture the argument passed in to script
url=sys.argv[1]
client = vision.Client()
#pass command line url into uri
image = client.image(source_uri=url)
labels = image.detect_labels(limit=5)
for i in range(0, 5):
    x = labels[i].description
    y = labels[i].score
    print x
    print y
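# Hypothetical invocation (URL is a placeholder):
#   python label_detect.py https://example.com/photo.jpg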
|
atareao/nautilus-imgur-uploader | src/imgurpython/imgur/models/comment.py | Python | gpl-3.0 | 342 | 0 | class Comment(object):
# See do | cumentation at https://api.imgur.com/ for a | vailable fields
def __init__(self, *initial_data, **kwargs):
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
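# Example (sketch, not part of the Imgur API itself):
#   c = Comment({'id': 1, 'comment': 'hi'}, author='alice')
# gives c.id == 1, c.comment == 'hi' and c.author == 'alice'.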
|
ROS-PSE/arni | arni_countermeasure/tests/test_storage.py | Python | bsd-2-clause | 5,630 | 0 | #!/usr/bin/env python
import unittest
from arni_countermeasure.rated_statistic_storage import *
from rosgraph_msgs.msg import Clock
from arni_countermeasure.outcome import *
import rospy
import time
import arni_countermeasure.helper as helper
PKG = "arni_countermeasure"
class TestStorage(unittest.TestCase):
pub = None
@classmethod
def setUpClass(TestStorage):
rospy.set_param("/use_sim_time", "true")
TestStorage.pub = rospy.Publisher('clock', Clock, queue_size=10)
rospy.init_node("test storage", anonymous=True)
TestStorage.set_time(10)
def setUp(self):
TestStorage.set_time(10)
def test_add_old(self):
"""Test if adding an too old statistic will not be saved."""
TestStorage.set_timeout(10)
TestStorage.set_time(100)
store = RatedStatisticStorage()
store._RatedStatisticStorage__add_single_outcome(
"n!node1", "cpu", Outcome.HIGH, rospy.Time(90))
self.assertEqual(
store.get_outcome("n!node1", "cpu"), Outcome.UNKNOWN)
"""Test if an statistic thats just not to old will be saved."""
TestStorage.set_timeout(10)
TestStorage.set_time(100)
store = RatedStatisticStorage()
store._RatedStatisticStorage__add_single_outcome(
"n!node2", "cpu", Outcome.HIGH, rospy.Time(91))
self.assertEqual(
store.get_outcome("n!node2", "cpu"), Outcome.HIGH)
def test_remove_through_timeout(self):
"""Test that an statistic is not present after timeout."""
TestStorage.set_timeout(20)
TestStorage.set_time(100)
store = RatedStatisticStorage()
store._RatedStatisticStorage__add_single_outcome(
"n!node3", "cpu", Outcome.HIGH, rospy.Time(100))
self.assertEqual(
            store.get_outcome("n!node3", "cpu"), Outcome.HIGH)
        TestStorage.set_time(120)
self.assertEqual(
store.get_outcome("n!node3", "cpu"), Outcome.UNKNOWN)
def test_add_new_than_old(self):
"""Test adding a statistic and then another one of the same
type but older."""
TestStorage.set_timeout(20)
TestStorage.set_time(100)
store = RatedStatisticStorage()
store._RatedStatisticStorage__add_single_outcome(
"n!node4", "cpu", Outcome.HIGH, rospy.Time(100))
store._RatedStatisticStorage__add_single_outcome(
"n!node4", "cpu", Outcome.LOW, rospy.Time(90))
self.assertEqual(
store.get_outcome("n!node4", "cpu"), Outcome.HIGH)
def test_callback_single_entry(self):
"""Test callback with a statistic type holding only one entry."""
TestStorage.set_timeout(20)
TestStorage.set_time(100)
store = RatedStatisticStorage()
entity_c = TestStorage._gen_entity(
"ram_usage_mean", ["20"], [chr(Outcome.HIGH)])
msg = TestStorage._gen_msg("n!node", 100, [entity_c])
store.callback_rated_statistic(msg)
self.assertEqual(
store.get_outcome("n!node", "ram_usage_mean"), Outcome.HIGH)
def test_callback_multiple_entries(self):
"""Test callback with a statistic type holding multiple entries."""
TestStorage.set_timeout(20)
TestStorage.set_time(100)
store = RatedStatisticStorage()
entity_c = TestStorage._gen_entity(
"ram_usage_mean", ["20", "40"],
[chr(Outcome.HIGH), chr(Outcome.LOW)])
msg = TestStorage._gen_msg("n!node", 100, [entity_c])
store.callback_rated_statistic(msg)
self.assertEqual(
store.get_outcome("n!node", "ram_usage_mean_0"), Outcome.HIGH)
self.assertEqual(
store.get_outcome("n!node", "ram_usage_mean_1"), Outcome.LOW)
def test_callback_multiple_rated_entities(self):
"""Test a callback with multiple entities."""
TestStorage.set_timeout(20)
TestStorage.set_time(100)
store = RatedStatisticStorage()
entity_c = TestStorage._gen_entity(
"ram_usage_mean", ["20"], [chr(Outcome.HIGH)])
entity_c2 = TestStorage._gen_entity(
"ram_usage_max", ["80"], [chr(Outcome.LOW)])
msg = TestStorage._gen_msg("n!node", 100, [entity_c, entity_c2])
store.callback_rated_statistic(msg)
self.assertEqual(
store.get_outcome("n!node", "ram_usage_mean"), Outcome.HIGH)
self.assertEqual(
store.get_outcome("n!node", "ram_usage_max"), Outcome.LOW)
@classmethod
def _gen_entity(TestStorage, statistic_type, value, outcome):
msgEntity = RatedStatisticsEntity()
msgEntity.statistic_type = statistic_type
msgEntity.actual_value = value
msgEntity.state = outcome
return msgEntity
@classmethod
def _gen_msg(TestStorage, seuid, time_arrive, entity_container):
msg = RatedStatistics()
msg.seuid = seuid
msg.window_start = rospy.Time(time_arrive - 1)
msg.window_stop = rospy.Time(time_arrive)
msg.rated_statistics_entity = entity_container
return msg
@classmethod
def set_timeout(TestStorage, timeout):
rospy.set_param(helper.ARNI_CTM_CFG_NS + "storage_timeout", timeout)
@classmethod
def set_time(TestStorage, value):
wanted_time = rospy.Time(value)
c = Clock()
c.clock = wanted_time
while rospy.Time.now() != wanted_time:
TestStorage.pub.publish(c)
time.sleep(0.01)
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, 'test_storage', TestStorage)
|
xiangcai/todother | module/user.py | Python | lgpl-3.0 | 344 | 0.008721 | import os
import sys
impor | t logging
class UserEntity(object):
def __init__(self, | user_id):
self.user_id = user_id
#self.prefs = {"locale": "zh_CN"}
self.prefs = {}
def load(self, entity):
self.nickname = entity.nickname
self.prefs['locale'] = entity.language
self.email = entity.email
|
lipschultz/diabicus | src/numeric_tools.py | Python | gpl-3.0 | 8,003 | 0.003124 | """
Diabicus: A calculator that plays music, lights up, and displays facts.
Copyright (C) 2016 Michael Lipschultz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import os
import re
from functools import reduce
from .compute import ComputationError
def is_int(val):
""" Returns True if val is an int or a float with 0 fractional part """
return isinstance(val, int) or (isinstance(val, float) and val % 1 == 0)
def is_rational(val):
"""
Returns True if val is an int or float and not irrational.
Determining irrationality is done through the is_irrational method.
"""
return isinstance(val, (int, float)) and not is_irrational(val)
def is_irrational(val):
"""
Returns True if val is irrational.
Irrationality is determined by whether val is transcendental (as
determined by is_transcendental) or sqrt(2) or golden ratio.
"""
return is_transcendental(val) or val in {2**.5, GOLDEN_RATIO}
def is_transcendental(val):
""" Returns True if val is transcendental (i.e. pi or e). """
return val in (math.pi, math.e)
def is_real(val):
""" Returns True if val is int or float. """
return isinstance(val, (int, float))
def is_complex(val):
""" Returns True if val is complex. """
return isinstance(val, complex)
def is_surreal(val):
""" Returns True if val is surreal (currently always returns False). """
return False
def is_number(val):
""" Returns True if val is int, float, or complex. """
return isinstance(val, (int, float, complex))
def is_error(val):
""" Returns True if val is a ComputationError. """
return isinstance(val, ComputationError)
GOLDEN_RATIO = (1 + 5**0.5) / 2
GRAHAMS_NUMBER = False
I = complex(0, 1)
PI_DIGITS = (3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2,
6, 4, 3, 3, 8, 3, 2, 7, 9, 5, 0, 2, 8, 8, 4, 1, 9, 7, 1, 6, 9, 3,
9, 9, 3, 7, 5, 1, 0, 5, 8, 2, 0, 9, 7, 4, 9, 4, 4, 5, 9, 2, 3, 0,
7, 8, 1, 6, 4, 0, 6, 2, 8, 6, 2, 0, 8, 9, 9, 8, 6, 2, 8, 0, 3, 4,
8, 2, 5, 3, 4, 2, 1, 1, 7, 0, 6, 7, 9, 8, 2, 1, 4
)
PRIME_NUMBERS = []
def __load_primes():
"""
Loads a comma-delimited list of prime numbers into PRIME_NUMBERS.
Prime numbers are loaded from the file prime_numbers.csv in the same
location as this python file and stores them into the global
variable PRIME_NUMBERS.
"""
global PRIME_NUMBERS
path = os.path.dirname(__file__)
with open(os.path.join(path, 'prime_numbers.csv')) as fin:
PRIME_NUMBERS = [int(v) for v in fin.read().split(',')]
__load_primes()
def is_prime(number):
""" Returns True if number is a prime number. """
return is_int(number) and number > 1 and int(number) in PRIME_NUMBERS
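# Illustrative usage (not part of the original module); assumes the small
# primes are present in prime_numbers.csv as loaded above:
#   is_prime(7) -> True
#   is_prime(8.0) -> False (integral, but composite)
#   is_prime(7.5) -> False (not an integer)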
FACTORS_ALL = 'all'
FACTORS_PROPER = 'proper'
FACTORS_PRIME = 'prime'
def factors(num, form=FACTORS_PROPER):
"""
Return a list of factors for the provided number.
If form is FACTORS_PRIME, then the list will only contain the prime
factors of num. The product of the values in the list will always
return num. That is, if the number is a product of more than one of
the same prime (e.g. 12 = 2*2*3), then the list will contain those
duplicates (e.g. [2, 2, 3] in the example).
If form is FACTORS_ALL, then the list will contain all positive
integers that exactly divide num. For example, with num=12, the
    list returned contains 1, 2, 3, 4, 6 and 12 (in no particular order).
If form is FACTORS_PROPER (default), then the list will be the same
as FACTORS_ALL, except the list will not include num. So, for
    num=12, the returned list would contain 1, 2, 3, 4 and 6.
If num is not an integer (as determined by is_int) greater than 1,
return empty list.
"""
if not is_int(num) or num < 2:
return []
if form == FACTORS_PRIME:
primes = []
i = 2
while num % i == 0:
primes.append(i)
num /= i
i = 3
while num > 1:
while num % i == 0:
primes.append(i)
num /= i
i += 2
return primes
else:
all_factors = reduce(list.__add__,
([i, num//i] for i in range(1, int(num**0.5) + 1) if num % i == 0)
)
if form == FACTORS_PROPER:
all_factors.remove(num)
return all_factors
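# Illustrative usage (not part of the original module):
#   factors(12, FACTORS_PRIME) -> [2, 2, 3] (their product recovers 12)
#   sorted(factors(12, FACTORS_ALL)) -> [1, 2, 3, 4, 6, 12]
#   sorted(factors(12)) -> [1, 2, 3, 4, 6] (proper factors exclude 12)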
FIBONACCI_NUMBERS = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233,
| 377, 610, 987, 1597, 2584, | 4181, 6765, 10946, 17711,
28657, 46368, 75025, 121393, 196418, 317811, 514229,
832040, 1346269
]
LUCAS_NUMBERS = (2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123, 199, 322, 521, 843,
1364, 2207, 3571, 5778, 9349, 15127, 24476, 39603, 64079,
103682, 167761, 271443, 439204, 710647, 1149851, 1860498,
3010349, 4870847, 7881196, 12752043, 20633239, 33385282
)
def is_subsequence_of(needle, haystack):
"""
Returns True if needle occurs as a consecutive subsequence in haystack.
Both needle and haystack must be ordered containers. The values in
needle must appear in haystack in the order they appear in needle
and must be consecutive in haystack.
For example, with needle=[1,2,3] and haystack=[1,1,2,3,4], the
function returns True since needle starts at index 1 in haystack.
With needle=[1,2,4] and haystack=[1,1,2,3,4], the function returns
False since, although the values do appear in haystack in the
correct order, they are not consecutive.
An empty needle will always return False.
"""
if len(needle) == 0:
return False
for offset in (i for i, x in enumerate(haystack) if x == needle[0]):
if offset + len(needle) > len(haystack):
return False
matches = [needle[i] == haystack[offset+i] for i in range(1, len(needle))]
if len(matches) == len(needle)-1 and all(matches):
return True
return False
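# Illustrative usage (not part of the original module):
#   is_subsequence_of([1, 2, 3], [1, 1, 2, 3, 4]) -> True (starts at index 1)
#   is_subsequence_of([1, 2, 4], [1, 1, 2, 3, 4]) -> False (not consecutive)
#   is_subsequence_of((1, 4, 1), PI_DIGITS) -> True (pi starts 3, 1, 4, 1, ...)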
def is_close(num1, num2, threshold=1e-5, method='raw'):
"""
Returns True if num1 is within threshold of num2.
If method is 'raw', then the closeness is determined by the absolute
value of the difference between num1 and num2.
If method is 'pct', then the absolute value of percent difference is
calculated and used.
num1 and num2 can be iterable. If one is iterable, then as long as
one value in the iterable object is close to the other number, the
function returns True. If both are iterable, then as long as one
value in num1 is close to one value in num2, the function returns
True.
"""
if isinstance(num1, ComputationError) or isinstance(num2, ComputationError):
return False
elif hasattr(num1, '__iter__'):
return any(is_close(n, num2, threshold) for n in num1)
elif hasattr(num2, '__iter__'):
return any(is_close(num1, n, threshold) for n in num2)
elif ((isinstance(num1, complex) or isinstance(num2, complex))
and not isinstance(num1, type(num2))):
return False
else:
if method == 'pct':
if num1 == num2 and num1 == 0:
return True
else:
return abs(num1-num2) / max([abs(v) for v in (num1, num2) if v != 0]) < threshold
else:
return abs(num1-num2) < threshold
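# Illustrative usage (not part of the original module):
#   is_close(0.1 + 0.2, 0.3) -> True (raw |difference| ~ 5.6e-17)
#   is_close(100, 101, threshold=0.02, method='pct') -> True (~0.99% apart)
#   is_close([1, 5, 9], 5.000001) -> True (any element being close suffices)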
|
foobacca/django-cms | cms/test_utils/testcases.py | Python | bsd-3-clause | 12,223 | 0.001964 | # -*- coding: utf-8 -*-
from cms.models import Page
from cms.test_utils.util.context_managers import (UserLoginContext,
SettingsOverride)
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.test import testcases
from django.test.client import RequestFactory
from django.utils.translation import activate
from menus.menu_pool import menu_pool
from urlparse import urljoin
import sys
import urllib
import warnings
from cms.utils.permissions import set_current_user
URL_CMS_PAGE = "/en/admin/cms/page/"
URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/")
URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/")
URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s"
URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/")
URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE, "add-plugin/")
URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE, "edit-plugin/")
URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE, "move-plugin/")
URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE, "delete-plugin/")
URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/")
URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/")
URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/")
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.itervalues():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
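# Illustrative sketch (not part of the original file): a typical way to use
# _collectWarnings in a test -- run a callable, capture every warning it
# emits, then filter by category. The helper name is hypothetical.
def _example_collect_deprecations(func, *args, **kwargs):
    observed = []
    result = _collectWarnings(observed.append, func, *args, **kwargs)
    return result, [w for w in observed if w.category is DeprecationWarning]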
class CMSTestCase(testcases.TestCase):
counter = 1
def _fixture_setup(self):
super(CMSTestCase, self)._fixture_setup()
self.create_fixtures()
activate("en")
def create_fixtures(self):
pass
def _post_teardown(self):
# Needed to clean the menu keys cache, see menu.menu_pool.clear()
menu_pool.clear()
super(CMSTestCase, self)._post_teardown()
set_current_user(None)
def login_user_context(self, user):
return UserLoginContext(self, user)
def get_superuser(self):
try:
admin = User.objects.get(username="admin")
except User.DoesNotExist:
admin = User(username="admin", is_staff=True, is_active=True, is_superuser=True)
admin.set_password("admin")
admin.save()
return admin
def get_staff_user_with_no_permissions(self):
"""
Used in security tests
"""
staff = User(username="staff", is_staff=True, is_active=True)
staff.set_password("staff")
staff.save()
return staff
def get_new_page_data(self, parent_id=''):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'parent': parent_id,
'site': 1,
}
        # required only if the user has can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
self.counter = self.counter + 1
return page_data
def get_new_page_data_dbfields(self, parent=None, site=None,
language=None,
template='nav_playground.html',):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0] if not language else language,
'template': template,
'parent': parent if parent else None,
'site': site if site else Site.objects.get_current(),
}
self.counter = self.counter + 1
return page_data
def get_pagedata_from_dbfields(self, page_data):
"""Converts data created by get_new_page_data_dbfields to data
created from get_new_page_data so you can switch between test cases
in api.create_page and client.post"""
page_data['site'] = page_data['site'].id
page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
        # required only if the user has can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
p | age_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
return page_data
def print_page_structure(self, qs):
"""Just a helper to see the page struct.
"""
for page in qs.order | _by('tree_id', 'lft'):
ident = " " * page.level
print "%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page,
page.pk, page.lft, page.rght, page.tree_id)
def print_node_structure(self, nodes, *extra):
def _rec(nodes, level=0):
ident = level * ' '
for node in nodes:
raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra]
attrs = ', '.join(['%s: %r' % data for data in raw_attrs])
print "%s%s: %s" % (ident, node.title, attrs)
_rec(node.children, level + 1)
_rec(nodes)
def assertObjectExist(self, qs, **filter):
try:
return qs.get(**filter)
except ObjectDoesNotExist:
pass
raise self.failureException, "ObjectDoesNotExist raised for filter %s" % filter
def assertObjectDoesNotExist(self, qs, **filter):
try:
qs.get(**filter)
except ObjectDoesNotExist:
return
raise self.failureException, "ObjectDoesNotExist not raised for filter %s" % filter
def copy_page(self, page, target_page):
from cms.utils.page import get_available_slug
data = {
'position': 'last-child',
'target': target_page.pk,
'site': 1,
'copy_permissions': 'on',
'copy_moderation': 'on',
}
response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data)
self.assertEquals(response.status_code, 200)
# Altered to reflect the new django-js jsonified response messages
self.assertEquals(response.content, '{"status": 200, "content": "ok"}')
title = page.title_set.all()[0]
copied_slug = get_available_slug(title)
copied_page = s |
lgiordani/slack_hangman | tests/test_guess_manager.py | Python | mit | 5,541 | 0.000361 | from main import GuessManager
def test_init_uppercase():
g = GuessManager('SOMEWORD')
assert g.word == 'SOMEWORD'
assert g.mask == [False]*8
def test_init_mask():
mask = [True, False, True, False, True, False, True, False]
g = GuessManager('SOMEWORD', mask=mask)
assert g.word == 'SOMEWORD'
assert g.mask == mask
def test_init_mword():
g = GuessManager('ABC', mask=[False, True, False])
assert g._mword == [('A', False), ('B', True), ('C', False)]
def test_init_lowercase():
g = GuessManager('someword')
assert g.word == 'SOMEWORD'
assert g.mask == [False]*8
def test_init_guessed_letters():
g = GuessManager('someword')
assert g.guessed_letters == set()
def test_init_tried_letters():
g = GuessManager('someword')
assert g.tried_letters == set()
def test_init_hidden_letters():
g = GuessManager('someword')
assert g.hidden_letters == set(['S', 'O', 'M', 'E', 'W', 'R', 'D'])
def test_init_hidden_letters_checks_mask():
g = GuessManager('some', mask=[True, False, False, False])
assert g.hidden_letters == set(['O', 'M', 'E'])
def test_len():
g = GuessManager('someword')
assert g.len == 8
def test_len_checks_mask():
g = GuessManager('a b', mask=[False, True, False])
assert g.len == 2
def test_guessed():
g = GuessManager('a b', mask=[False, True, False])
assert g.guessed == 0
def test_missing():
g = GuessManager('someword')
assert g.missing == 8
def test_missing_checks_mask():
g = GuessManager('a b', mask=[False, True, False])
assert g.missing == 2
def test_status():
g = GuessManager('someword')
assert g.status == [None] * 8
def test_status_check_mask():
g = GuessManager('some', [True, False, True, False])
assert g.status == ['S', None, 'M', None]
def test_guess_letter():
g = GuessManager('someword')
res = g.guess('m')
assert g.guessed_letters == set(['M'])
assert g.tried_letters == set(['M'])
assert g.guessed == 1
assert res == 1
assert g.missing == 7
assert g.status == [None, None, 'M', None, None, None, None, None]
def test_guess_letter_with_mask():
g = GuessManager('a bc', mask=[False, True, False, False])
res = g.guess('a')
assert g.guessed_letters == set(['A'])
assert g.tried_letters == set(['A'])
assert g.guessed == 1
assert res == 1
assert g.missing == 2
assert g.status == ['A', ' ', None, None]
def test_guess_more_than_one_letter():
g = GuessManager('someword')
res = g.guess('o')
assert g.guessed_letters == set(['O'])
assert g.tried_letters == set(['O'])
assert g.guessed == 2
assert res == 2
assert g.missing == 6
assert g.status == [None, 'O', None, None, None, 'O', None, None]
def test_guess_multiple_calls_same_letter():
g = GuessManager('someword')
res = g.guess('o')
res = g.guess('o')
assert g.guessed_letters == set(['O'])
assert g.tried_letters == set(['O'])
assert g.guessed == 2
assert res == 0
assert g.missing == 6
assert g.status == [None, 'O', None, None, None, 'O', None, None]
def test_guess_multiple_calls_different_letters():
g = GuessManager('someword')
g.guess('o')
g.guess('m')
assert g.guessed_letters == set(['O', 'M'])
assert g.tried_letters == set(['O', 'M'])
assert g.guessed == 3
assert g.missing == 5
assert g.status == [None, 'O', 'M', None, None, 'O', None, None]
def test_wrong_guess():
g = GuessManager('someword')
res = g.guess('x')
assert g.guessed_letters == set()
assert g.tried_letters == set(['X'])
assert g.guessed == 0
assert res == 0
assert g.missing == 8
assert g.status == [None, None, None, None, None, None, None, None]
def test_guess_word_successful():
g = GuessManager('someword')
res = g.guess_word('someword')
assert g.guessed_letters == set(['S', 'O', 'M', 'E', 'W', 'O', 'R', 'D'])
assert g.tried_letters == set()
assert g.guessed == 8
assert res == 8
assert g.missing == 0
assert g.status == list('someword'.upper())
def test_guess_word_checks_mask():
g = GuessManager('a (19)', mask=[False, True, True, True, True, True])
res = g.guess_word('a (19)')
assert g.guessed_letters == set(['A'])
assert g.tried_letters == set()
assert g.guessed == 1
assert res == 1
assert g.missing == 0
assert g.status == list('a (19)'.upper())
def test_guess_word_successful_after_guessed_letters():
g = GuessManager('someword')
g.guess('s' | )
g.guess('o')
res = g.guess_word('someword')
assert g.guessed_letters == set(['S', 'O', 'M', 'E', 'W', 'O', 'R', 'D'])
assert g.tried_letters == set(['S', 'O'])
assert g.guessed == 8
assert res == 5
assert g.missing == 0
assert g.status == list('someword'.upper())
def test_guess_word_unsuccessful():
g = GuessManager('someword')
res = g.guess_word('s | ameward')
assert g.guessed_letters == set()
assert g.tried_letters == set()
assert res == 0
assert g.missing == 8
assert g.status == [None, None, None, None, None, None, None, None]
def test_guess_word_unsuccessful_after_guessed_letters():
g = GuessManager('someword')
g.guess('s')
g.guess('o')
res = g.guess_word('somelord')
assert g.guessed_letters == set(['S', 'O'])
assert g.tried_letters == set(['S', 'O'])
assert res == 0
assert g.missing == 5
assert g.status == ['S', 'O', None, None, None, 'O', None, None]
|
e2crawfo/dps | dps/datasets/load/emnist.py | Python | apache-2.0 | 11,445 | 0.001922 | import shutil
import numpy as np
import dill
import gzip
import os
import subprocess
import struct
from array import array
import warnings
from dps import cfg
from dps.utils import image_to_string, cd, resize_image
# This link seems not to work anymore...
# emnist_url = 'https://cloudstor.aarnet.edu.au/plus/index.php/s/54h3OuGJhFLwAlQ/download'
emnist_url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
template = 'emnist-byclass-{}-{}-idx{}-ubyte.gz'
emnist_gz_names = [
template.format('test', 'images', 3),
template.format('test', 'labels', 1),
template.format('train', 'images', 3),
template.format('train', 'labels', 1)
]
def emnist_classes():
return (
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
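# emnist_classes() returns the 62 EMNIST "byclass" labels in order:
# '0'-'9', then 'A'-'Z', then 'a'-'z'.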
emnist_filenames = [c + ".pklz" for c in emnist_classes()]
def _validate_emnist(path):
if not os.path.isdir(path):
return False
return set(os.listdir(path)) == set(emnist_filenames)
def _download_emnist(data_dir):
"""
Download the emnist data. Result is that a directory called "emnist_raw"
is created inside `data_dir` which contains 4 files.
Parameters
----------
    data_dir: str
Path to directory where files should be stored.
"""
emnist_raw_dir = os.path.join(data_dir, "emnist_raw")
os.makedirs(emnist_raw_dir, exist_ok=True)
with cd(emnist_raw_dir):
if not os.path.exists('gzip.zip'):
print("Downloading...")
command = "wget --output-document=gzip.zip {}".format(emnist_url).split()
subprocess.run(command, check=True)
else:
print("Found existing copy of gzip.zip, not downloading.")
print("Extracting...")
for fname in emnist_gz_names:
if not os.path.exists(fname):
subprocess.run('unzip gzip.zip gzip/{}'.format(fname), shell=True, check=True)
shutil.move('gzip/{}'.format(fname), '.')
else:
print("{} already exists, skipping extraction.".format(fname))
try:
shutil.rmtree('gzip')
except FileNotFoundError:
pass
return emnist_raw_dir
def _emnist_load_helper(path_img, path_lbl):
with gzip.open(path_lbl, 'rb') as file:
magic, size = struct.unpack(">II", file.read(8))
if magic != 2049:
            raise ValueError('Magic number mismatch, expected 2049, '
                             'got {}'.format(magic))
labels = array("B", file.read())
with gzip.open(path_img, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
            raise ValueError('Magic number mismatch, expected 2051, '
                             'got {}'.format(magic))
image_data = array("B", file.read())
images = np.zeros((size, rows * cols), dtype=np.uint8)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
return np.array(images, dtype=np.uint8), np.array(labels, dtype=np.uint8)
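# Illustrative usage (not part of the original module): pair an IDX image
# file with its label file, e.g. using the gzip names defined above:
#   images, labels = _emnist_load_helper(emnist_gz_names[0], emnist_gz_names[1])
#   images.shape == (n_images, 28 * 28); labels.shape == (n_images,)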
def maybe_convert_emnist_shape(path, shape):
""" Create a version of emnist on disk that is reshaped to the desired shape.
Images a | re stored on disk as uint8.
"""
if shape == (28, 28):
return
shape_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
if os.path.isdir(shape_dir):
return
emnist_dir = os.path.join(path, 'emnist')
print("Converting (28, 28) EMNIST dataset to {}...".format(shape))
try:
shutil.rmtree(shape_dir)
except FileNotFoundError:
pass
os.mak | edirs(shape_dir, exist_ok=False)
classes = ''.join(
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
for i, cls in enumerate(sorted(classes)):
with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
_x = dill.load(f)
new_x = []
        for img in _x:  # resize every image in the class, not just a sample
img = resize_image(img, shape, preserve_range=True)
new_x.append(img)
print(cls)
print(image_to_string(_x[0]))
_x = np.array(new_x, dtype=_x.dtype)
print(image_to_string(_x[0]))
path_i = os.path.join(shape_dir, cls + '.pklz')
with gzip.open(path_i, 'wb') as f:
dill.dump(_x, f, protocol=dill.HIGHEST_PROTOCOL)
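# Illustrative usage (not part of the original module): resize the stored
# 28x28 images once per target shape, e.g.
#   maybe_convert_emnist_shape(data_dir, (14, 14))
# creates <data_dir>/emnist_14_by_14/<class>.pklz beside the 28x28 originals.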
def maybe_download_emnist(data_dir, quiet=0, shape=None):
"""
Download emnist data if it hasn't already been downloaded. Do some
post-processing to put it in a more useful format. End result is a directory
called `emnist-byclass` which contains a separate pklz file for each emnist
class.
Pixel values of stored images are uint8 values up to 255.
Images for each class are put into a numpy array with shape (n_images_in_class, 28, 28).
This numpy array is pickled and stored in a zip file with name <class char>.pklz.
Parameters
----------
data_dir: str
Directory where files should be stored.
"""
emnist_dir = os.path.join(data_dir, 'emnist')
if _validate_emnist(emnist_dir):
print("EMNIST data seems to be present already.")
else:
print("EMNIST data not found, downloading and processing...")
try:
shutil.rmtree(emnist_dir)
except FileNotFoundError:
pass
raw_dir = _download_emnist(data_dir)
with cd(raw_dir):
images, labels = _emnist_load_helper(emnist_gz_names[0], emnist_gz_names[1])
images1, labels1 = _emnist_load_helper(emnist_gz_names[2], emnist_gz_names[3])
with cd(data_dir):
os.makedirs('emnist', exist_ok=False)
print("Processing...")
with cd('emnist'):
x = np.concatenate((images, images1), 0)
y = np.concatenate((labels, labels1), 0)
# Give images the right orientation so that plt.imshow(x[0]) just works.
x = np.moveaxis(x.reshape(-1, 28, 28), 1, 2)
for i in sorted(set(y.flatten())):
keep = y == i
x_i = x[keep.flatten(), :]
if i >= 36:
char = chr(i-36+ord('a'))
elif i >= 10:
char = chr(i-10+ord('A'))
else:
char = str(i)
if quiet >= 2:
pass
elif quiet == 1:
print(char)
elif quiet <= 0:
print(char)
print(image_to_string(x_i[0, ...]))
file_i = char + '.pklz'
with gzip.open(file_i, 'wb') as f:
dill.dump(x_i, f, protocol=dill.HIGHEST_PROTOCOL)
if shape is not None:
maybe_convert_emnist_shape(data_dir, shape)
def load_emnist(
classes, balance=False, include_blank=False,
shape=None, n_examples=None, example_range=None, show=False, path=None):
""" Load emnist data from disk by class.
Elements of `classes` pick out which emnist classes to load, but different labels
end up getting returned because most classifiers require that the labels
be in range(len(classes)). We return a dictionary `class_map` which maps from
elements of `classes` down to range(len(classes)).
Pixel values of returned images are integers in the range 0-255, but stored as float32.
Returned X array has shape (n_images,) + shape.
Parameters
----------
path: str
Path to data directory, assumed to contain a sub-directory called `emnist`.
classes: list of character from the set (0-9, A-Z, a-z)
Each character is the name of a class to load.
balance: bool
If True, will ensure that all classes are balanced by removing elements
        from classes that are larger than the minimum-size class.
include_blank: bool
If True, include |
bleachbit/bleachbit | tests/TestWindows.py | Python | gpl-3.0 | 19,770 | 0.000304 | # vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2021 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Windows
"""
from tests import common
from bleachbit.FileUtilities import extended_path, extended_path_undo
from bleachbit.Windows import *
from bleachbit import logger
import os
import platform
import shutil
import sys
import tempfile
import unittest
import mock
from decimal import Decimal
if 'win32' == sys.platform:
import winreg
from win32com.shell import shell
def put_files_into_recycle_bin():
"""Put a file and a folder into the recycle bin"""
# make a file and move it to the recycle bin
tests = ('regular', 'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test in tests:
(fd, filename) = tempfile.mkstemp(
prefix='bleachbit-recycle-file', suffix=test)
os.close(fd)
move_to_recycle_bin(filename)
# make a folder and move it to the recycle bin
dirname = tempfile.mkdtemp(prefix='bleachbit-recycle-folder')
common.touch_file(os.path.join(dirname, 'file'))
move_to_recycle_bin(dirname)
@common.skipUnlessWindows
class WindowsTestCase(common.BleachbitTestCase):
"""Test case for module Windows"""
def skipUnlessAdmin(self):
if not shell.IsUserAnAdmin():
self.skipTest('requires administrator privileges')
def test_get_recycle_bin(self):
"""Unit test for get_recycle_bin"""
for f in get_recycle_bin():
self.assertExists(extended_path(f))
@common.skipUnlessDestructive
def test_get_recycle_bin_destructive(self):
"""Unit test the destructive part of get_recycle_bin"""
put_files_into_recycle_bin()
# clear recycle bin
counter = 0
for f in get_recycle_bin():
counter += 1
FileUtilities.delete(f)
self.assertGreaterEqual(counter, 3, 'deleted %d' % counter)
# now it should be empty
for _f in get_recycle_bin():
self.fail('recycle bin should be empty, but it is not')
def _test_link_helper(self, mklink_option, clear_recycle_bin):
"""Helper function for testing for links with is_junction() and
get_recycle_bin()
It gets called four times for the combinations of the two
parameters. It's called by four unit tests for accounting
purposes. In other words, we don't want to count a test as
skipped if part of it succeeded.
mklink /j = directory junction
directory junction does not require administrator privileges
        mklink /d = directory symbolic link
requires administrator privileges
"""
if mklink_option == '/d':
self.skipUnlessAdmin()
# make a normal directory with a file in it
target_dir = os.path.join(self.tempdir, 'target_dir')
os.mkdir(target_dir)
self.assertExists(target_dir)
self.assertFalse(is_junction(target_dir))
from random import randint
canary_fn = os.path.join(
target_dir, 'do_not_delete%d' % randint(1000, 9999))
common.touch_file(canary_fn)
self.assertExists(canary_fn)
self.assertFalse(is_junction(canary_fn))
# make a normal directory to hold a link
container_dir = os.path.join(self.tempdir, 'container_dir')
os.mkdir(container_dir)
self.assertExists(container_dir)
self.assertFalse(is_junction(container_dir))
# create the link
link_pathname = os.path.join(container_dir, 'link')
args = ('cmd', '/c', 'mklink', mklink_option,
link_pathname, target_dir)
from bleachbit.General import run_external
(rc, stdout, stderr) = run_external(args)
self.assertEqual(rc, 0, stderr)
self.assertExists(link_pathname)
self.assertTrue(is_junction(link_pathname))
# put the link in the recycle bin
move_to_recycle_bin(container_dir)
def cleanup_dirs():
shutil.rmtree(container_dir, True)
self.assertNotExists(container_dir)
shutil.rmtree(target_dir, True)
if not clear_recycle_bin:
cleanup_dirs()
return
# clear the recycle bin
for f in get_recycle_bin():
FileUtilities.delete(f, shred=False)
# verify the canary is still there
self.assertExists(canary_fn)
# clean up
cleanup_dirs()
def test_link_junction_no_clear(self):
"""Unit test for directory junctions without clearing recycle bin"""
self._test_link_helper('/j', False)
def test_link_junction_clear(self):
"""Unit test for directory junctions with clearing recycle bin"""
self._test_link_helper('/j', True)
def test_link_symlink_no_clear(self):
"""Unit test for directory symlink without clearing recycle bin"""
self._test_link_helper('/d', False)
def test_link_symlink_clear(self):
"""Unit test for directory symlink with clearing recycl | e bin"""
self._test_link_helper('/d', True)
def test_delete_locked_file(self):
"""Unit test for delete_locked_file"""
tests = ('regular', 'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test | in tests:
f = tempfile.NamedTemporaryFile(
prefix='bleachbit-delete-locked-file', suffix=test,
delete=False)
pathname = f.name
f.close()
import time
time.sleep(5) # avoid race condition
self.assertExists(pathname)
logger.debug('delete_locked_file(%s) ' % pathname)
if not shell.IsUserAnAdmin():
with self.assertRaises(WindowsError):
delete_locked_file(pathname)
else:
try:
delete_locked_file(pathname)
except WindowsError:
logger.exception(
'delete_locked_file() threw an error, which may be a false positive')
self.assertExists(pathname)
logger.info('reboot Windows and check the three files are deleted')
def test_delete_registry_key(self):
"""Unit test for delete_registry_key"""
# (return value, key, really_delete)
tests = ((False, 'HKCU\\Software\\BleachBit\\DoesNotExist', False, ),
(False, 'HKCU\\Software\\BleachBit\\DoesNotExist', True, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', False, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', True, ), )
# create a nested key
key = 'Software\\BleachBit\\DeleteThisKey'
subkey = key + '\\AndThisKey'
hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, subkey)
hkey.Close()
# test
for test in tests:
rc = test[0]
key = test[1]
really_delete = test[2]
return_value = delete_registry_key(key, really_delete)
self.assertEqual(rc, return_value)
if really_delete:
self.assertFalse(detect_registry_key(key))
# Test Unicode key. In BleachBit 0.7.3 this scenario would lead to
# the error (bug 537109)
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position
# 11: ordinal not in range(128)
key = r'Software\\BleachBit\\DeleteThisKey'
hkey = winreg.CreateKey(
winreg.HKEY_CURRENT_USE |
eduNEXT/edunext-platform | import_shims/lms/instructor_task/tests/test_base.py | Python | agpl-3.0 | 416 | 0.009615 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn | _deprecated_import('instructor_task.tests.test_base', 'lms.djangoapps.in | structor_task.tests.test_base')
from lms.djangoapps.instructor_task.tests.test_base import *
|
Yasumoto/commons | src/python/twitter/common/__init__.py | Python | apache-2.0 | 957 | 0 | # ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in | the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unle | ss required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
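# Declare a pkg_resources-style namespace package so the 'twitter.common'
# namespace can be shared across separately installed distributions.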
__import__('pkg_resources').declare_namespace(__name__)
|
Letractively/spiff | src/pkg.py | Python | gpl-2.0 | 7,448 | 0.005505 | import sys, os, os.path
import MySQLdb, SpiffGuard, config, shutil
from sqlalchemy import *
from SpiffIntegrator import PackageManager, \
version_is_greater, \
InvalidDescriptor
from ConfigParser import RawConfigParser
from services import ExtensionApi
from FooLib import OptionParser
from services import PageDB
from services import Session
from traceback import print_exc
from tempfile import mkdtemp
from objects import SpiffPackage
from pywsgi import DummyRequest
actions = ('check_dependencies',
'create',
'install',
'list',
'remove',
'show',
'test',
'update')
def usage():
print "Spiff %s" % config.__version__
print "Copyright (C) 2007 by Samuel Abels <http://debain.org>."
print "Syntax: python pkg.py [options] action package [package ...]"
print " action: Any of the following:"
print " ", "\n ".join(actions)
print " package: File or directory that contains the package. You may"
print " specify multiple files at the same time."
print " options: The following list of options are supported:"
print " --version Prints the version number."
print " -h, --help Prints this help."
# Define default options.
default_options = [
('version', None, False),
('help', 'h', False)
]
# Parse options.
try:
options, args = OptionParser.parse_options(sys.argv, default_options)
except:
usage()
sys.exit(1)
# Show the help, if requested.
if options['help']:
usage()
sys.exit()
# Show the version number, if requested.
if options['version']:
print "Spiff %s" % config.__version__
sys.exit()
# Get package names.
try:
action = args.pop(0)
packages = args
assert action == 'list' or len(packages) > 0
except:
usage()
sys.exit(1)
# Check syntax.
if action not in actions:
usage()
print "Unknown action %s" % action
sys.exit(1)
# Check whether the given package files exist.
if action not in ('create', 'remove', 'list'):
for package in packages:
if not os.path.exists(package):
print "No such file: %s" % pa | ckage
sys.exit(1)
# Read config.
if not os.path.exists(config.cfg_file):
print "Please configure Spiff before using this tool."
sys.exit(1)
config.cfg.read(config.cfg_file)
dbn = config.cfg.get('database', 'dbn')
# Connect to MySQL and set up Spiff Guard.
db = create_engine(dbn)
guard = SpiffGuard.DB(db)
# Set up an environment for the package manager.
page_db = PageDB(guard)
page = page_db.get('default')
request = DummyRequest()
api | = ExtensionApi(object,
guard = guard,
page_db = page_db,
request = request)
# Init the package manager.
pm = PackageManager(guard, api, package = SpiffPackage)
pm.set_package_dir(config.package_dir)
def pkg_check_dependencies(pm, package):
print "Checking dependencies of %s..." % package.get_name(),
error = False
for dependency in package.get_dependency_list():
if pm.get_package_from_descriptor(dependency) is None:
print "\nUnmet dependency:", dependency
error = True
if error:
return False
print "done."
return True
def pkg_create(pm, directory):
if os.path.exists(directory):
print "%s: file already exists" % directory
return False
print "Creating an empty package in %s." % directory
package = SpiffPackage(os.path.basename(directory))
package.set_author('Unknown Author')
package.set_author_email('unknown@unknown.com')
package.set_version('0.0.1')
package._add_listener('spiff:page_open')
package._add_dependency('spiff', 'runtime')
package._add_dependency('spiff', 'installtime')
pm.create_package(directory, package)
# Insert additional files.
open(os.path.join(directory, 'index.phtml'), 'w').close()
return True
def pkg_install(pm, package):
if not pkg_check_dependencies(pm, package):
return False
installed = pm.get_package_from_descriptor(package.get_handle())
if installed is not None:
old_version = installed.get_version()
new_version = package.get_version()
print "Package is already installed!"
print "Installed: %s, new: %s." % (old_version, new_version)
if version_is_greater(installed.get_version(), package.get_version()):
print "Installed version is newer, downgrade aborted."
return False
return pkg_update(pm, package)
print "Installing new package %s." % package.get_name()
try:
pm.install_package(package)
except Exception, e:
print "Installation failed:", e
return False
return True
def pkg_remove(pm, descriptor):
try:
package = pm.get_package_from_descriptor(descriptor)
except InvalidDescriptor, e:
print e
return False
if package is None:
print "Package %s not found in database" % descriptor
return False
depends = pm.get_package_list(depends = package)
if len(depends) > 0:
depend = ['%s=%s' % (d.get_handle(), d.get_version()) for d in depends]
descr = '%s=%s' % (package.get_handle(), package.get_version())
print "Can't remove %s, because the following packages use it:" % descr
print '\n'.join(depend)
return False
handle = package.get_handle()
version = package.get_version()
print "Removing package %s %s as requested." % (handle, version)
pm.remove_package(package)
return True
def pkg_show(pm, package):
package.dump()
return True
def pkg_list(pm, descriptor = None):
packages = pm.get_package_list()
for package in packages:
try:
if descriptor is None or package.matches(descriptor):
print package
except InvalidDescriptor, e:
print e
return False
return True
def pkg_test(pm, package):
print "Testing %s..." % package.get_name()
# Set up.
tmpdir = mkdtemp('')
pm.set_package_dir(tmpdir)
# Test.
print "Installing in %s" % tmpdir
try:
pm.test_package(package)
except:
print 'Error: Test failed!'
shutil.rmtree(tmpdir)
raise
# Done.
print 'Test successfully completed!'
shutil.rmtree(tmpdir)
return True
def pkg_update(pm, package):
if not pkg_check_dependencies(pm, package):
return False
installed = pm.get_package_from_descriptor(package.get_handle())
if installed is None:
descr = '%s=%s' % (installed.get_handle(), installed.get_version())
print "Package %s not installed, so not updating." % descr
return False
print "Replacing package as requested."
#FIXME
print "!! Update functionality is not yet implemented..."
return True
if action == 'list' and len(packages) == 0:
pkg_list(pm)
for filename in packages:
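    # Dispatch to the matching pkg_<action> handler defined above; 'create',
    # 'remove' and 'list' take a name/descriptor, the others a parsed package.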
if action in ('create', 'remove', 'list'):
result = locals()['pkg_' + action](pm, filename)
else:
package = pm.read_package(filename)
result = locals()['pkg_' + action](pm, package)
if result is False:
sys.exit(1)
|