| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
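Each row stores one source file split into prefix, middle, and suffix text fields (a fill-in-the-middle style layout). A minimal sketch, assuming a row is available as a plain Python dict with those keys, of stitching the pieces back into the full file text:

def reassemble(row):
    # Concatenate the three text fields back into the original file contents.
    return row["prefix"] + row["middle"] + row["suffix"]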
embali/aiowing
|
aiowing/apps/admin/tests/test_admin.py
|
Python
|
mit
| 2,851
| 0
|
from aiohttp import ClientSession
from aiowing import settings
async def test_unauthenticated_records(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.get(test_app.router['admin_records'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
async def test_unauthenticated_login(test_app, test_client):
cli = await test_client(tes
|
t_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
assert resp.headers.get('Location') == \
test_app.router['admin_records'].url()
await resp.release()
async def test_unauthenticated_logout(test_app, test_client):
cli = await test_client(test_app)
resp
|
= await cli.get(test_app.router['admin_logout'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
async def test_authenticated_records(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_records'].url(),
allow_redirects=False)
assert resp.status == 200
await resp.release()
async def test_authenticated_login(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_login'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == \
test_app.router['admin_records'].url()
await resp.release()
async def test_authenticated_logout(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_logout'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
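# A minimal helper sketch (hypothetical, not defined in this module) that the three
# authenticated tests above could share for the repeated login POST, assuming the same
# test_app/test_client fixtures and settings module:
async def _login(cli, test_app):
    resp = await cli.post(test_app.router['admin_login'].url(),
                          data={'email': settings.SUPERUSER_EMAIL,
                                'password': settings.SUPERUSER_PASSWORD},
                          allow_redirects=False)
    await resp.release()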
|
mmgen/mmgen
|
scripts/tx-v2-to-v3.py
|
Python
|
gpl-3.0
| 962
| 0.015593
|
#!/usr/bin/env python3
# Convert MMGen 'v2' transaction file (amounts as BTCAmt())
# to MMGen 'v3' (amounts as strings)
# v3 tx files were introduced with MMGen version 0.9.7
import sys,os
repo_root = os.path.split(os.path.abspath(os.path.dirname(sys.argv[0])))[0]
sys.path = [repo_root] + sys.path
from mmgen.common import *
opts_data = {
'text': {
'desc'
|
: "Convert MMGen transaction file from v2 format to v3 format",
'usage': "<tx file>",
'options': """
-h, --help Print this help message
-d, --outdir=d Output files to directory 'd' instead of working dir
-q, --quiet Write (and overwrite) files without prompting
-S, --stdout Write data to
|
STDOUT instead of file
"""
}
}
cmd_args = opts.init(opts_data)
import asyncio
from mmgen.tx import CompletedTX
if len(cmd_args) != 1:
opts.usage()
tx = asyncio.run(CompletedTX(cmd_args[0],quiet_open=True))
tx.file.write(ask_tty=False,ask_overwrite=not opt.quiet,ask_write=not opt.quiet)
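# Example invocation (per the usage/options above; -S writes the converted data to STDOUT):
#   python3 scripts/tx-v2-to-v3.py -S <tx file>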
|
wojcech/agilentpyvisa
|
agilentpyvisa/B1500/tester.py
|
Python
|
agpl-3.0
| 39,893
| 0.008623
|
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
import visa
from itertools import cycle, starmap, compress
import pandas as pd
import numpy as np
from collections import OrderedDict
from .force import *
from .force import (
DCForce,
StaircaseSweep,
PulsedSpot,
SPGU)
from .enums import *
from .measurement import *
from .measurement import (
MeasureSpot,
MeasureStaircaseSweep,
MeasurePulsedSpot,
)
from .setup import *
from .helpers import *
from .SMUs import *
from .dummy import DummyTester
from .loggers import exception_logger,write_logger, query_logger
class B1500():
def __init__(self, tester, auto_init=True, default_check_err=True):
self.__test_addr = tester
self._device=None
self.tests = OrderedDict()
self.__last_channel_setups={}
self.__last_channel_measurements={}
self.slots_installed={}
self._DIO_control_mode={}
self.sub_channels = []
self.__filter_all = None
self.__ADC={}
self.__TSC = None
self.__channels={}
self._recording = False
self.__HIGHSPEED_ADC={"number":None,"mode":None}
self.default_check_err=default_check_err
self.programs={}
self.__format = None
self.__outputMode = None
self.last_program=None
self.__no_store=("*RST","DIAG?","*TST?","CA","AB","RCV","WZ?","ST","END",
"SCR","VAR","LST?","CORRSER?","SER?","SIM?","SPM?",
"SPPER?","ERMOD?","ERSSP?","ERRX?","ERR?","EMG?",
"*LRN?","*OPC?","UNT?","WNU?","*SRE?","*STB?",)
if auto_init:
self.init()
def close(self):
if self._device:
self._device.close()
self._device=None
self.__keep_open = False
if self.__rm:
self.__rm.close()
self.__rm=None
def __del__(self):
self.close()
def init(self):
""" Resets the connected tester, then checks all installed modules,
querying their types and the available subchannels. It also
stores the available input and measure_ranges in the slots_installed dict,
with the slot number as key. sub channels is a list containing
all available channels"""
self.open()
self._reset()
self.slots_installed = self.__discover_slots()
self.sub_channels = []
for s,mod in self.slots_installed.items():
self.sub_channels.extend(mod.channels)
self.__channels = {i:self.slots_installed[self.__channel_to_slot(i)] for i in self.sub_channels}
self.enable_SMUSPGU()
self._check_err()
self.close()
def open(self, keep_open=False):
if not self._device:
try:
self.__rm = visa.ResourceManager()
self._device = self.__rm.open_resource(self.__test_addr)
self.__keep_open =True
except OSError as e:
exception_logger.warn("Could not find VISA driver, setting _device to std_out")
self.__rm.close()
self._device.close()
self.__rm = None
self._device = DummyTester()
def
|
diagnostics(self, item):
""" from the manual:
|
- before using DiagnosticItem.trigger_IO , connect a BNC cable between the Ext Trig In and
Out connectors.
- After executing DiagnosticItem.high_voltage_LED confirm the status of LED. Then enter the AB
command
If the LED does not blink, the B1500 must be repaired.
- Before executing DiagnosticItem.digital_IO, disconnect any cable from the digital I/O port.
- Before executing interlock_open or interlock_closed , open and close the
interlock circuit respectively
"""
return self.query(format_command("DIAG?", item))
def query(self, msg, delay=None,check_error=False):
""" Writes the msg to the Tester, reads output buffer after delay and
logs both to the query logger. Optionally checks for errors afterwards."""
query_logger.info(msg)
retval=[]
if self._recording and any([x in msg for x in self.__no_store]):
self.programs[self.last_program]["config_nostore"].append(msg)
exception_logger.warn("Skipped query '{}' since not allowed while recording".format(msg))
else:
self.open()
try:
retval = self._device.query(msg, delay=delay)
query_logger.info(str(retval)+"\n")
err =self._check_err()
if err[:2]!="+0":
exception_logger.warn(err)
exception_logger.warn(msg)
finally:
if not self.__keep_open:
self.close()
return retval
def write(self, msg, check_error=False):
""" Writes the msg to the Tester and logs it in the write
logger. Optionally checks for errors afterwards."""
write_logger.info(msg)
try:
if self._recording and any([x in msg for x in self.__no_store]):
self.programs[self.last_program]["config_nostore"].append(msg)
exception_logger.warn("Skipped write '{}' since not allowed while recording".format(msg))
else:
self.open()
retval = self._device.write(msg)
write_logger.info(str(retval)+"\n")
if check_error or self.default_check_err:
err =self._check_err()
if err[:2]!="+0":
exception_logger.warn(err)
exception_logger.warn(msg)
finally:
if not self.__keep_open:
self.close()
return retval
def read(self, check_error=False, timeout="default" ):
""" Reads out the current output buffer and logs it to the query logger
optionally checking for errors"""
retval=None
self.open()
old_timeout = self._device.timeout
if timeout != "default":
self._device.timeout = timeout
try:
if "ascii" in repr(self.__format):
retval = self._device.read()
elif "binary4" in repr(self.__format):
retval = self._device.read_raw()
elif "binary8" in repr(self.__format):
retval = self._device.read_raw()
else:
raise ValueError("Unknown format {0}".format(self.__format))
if check_error:
exception_logger.info(self._check_err())
finally:
if not self.__keep_open:
self.close()
return retval
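# Hypothetical usage sketch (the VISA address below is an assumption, not taken from this file):
#   b1500 = B1500("GPIB0::17::INSTR")   # auto_init=True discovers installed slots and channels
#   print(b1500.query("*IDN?"))         # query() opens the session, writes, reads back and logs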
def measure(self, test_tuple, force_wait=False, autoread=False):
""" Checks the channels defined in the test tuple and performs the
measurement the Setup represents. Only one type of measurement is possible at a time;
otherwise an exception is raised."""
channels = test_tuple.channels
exc = None
data = None
num_meas = len([c for c in channels if c.measurement])
XE_measurement=any([c.measurement and c.measurement.mode in(
MeasureModes.spot,
MeasureModes.staircase_sweep,
MeasureModes.sampling,
MeasureModes.multi_channel_sweep,
MeasureModes.CV_sweep_dc_bias,
MeasureModes.multichannel_pulsed_spot,
MeasureModes.multichannel_pulsed_sweep,
MeasureModes.pulsed_spot,
MeasureModes.pulsed_sweep,
MeasureModes.staircase_sweep_pulsed_bias,
MeasureModes.quasi_pulsed_spot,
) for c in channels])
spgu_channels = [x.number for x in channels if x.spgu]
SPGU =any(spgu_channels)
search = any([x.binarysearch or x.linearsearch for x in channels])
if [XE_measurement, SPGU,search].count(True)>1:
raise ValueErro
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/src/Python27Packages/py_modelica/py_modelica/modelica_simulation_tools/tool_base.py
|
Python
|
mit
| 12,494
| 0.002401
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import datetime
import logging
from py_modelica.exception_classes import ModelicaInstantiationError
from abc import ABCMeta, abstractmethod
class ToolBase:
__metaclass__ = ABCMeta
tool_name = ''
tool_version = ''
tool_version_nbr = ''
model_config = None
date_time = ''
## instance variables
tool_path = '' # path to the bin folder of the tool
model_file_name = '' # file that needs to be loaded
model_name = '' # name of the model in the loaded packages
msl_version = '' # version of Modelica Standard Library
mos_file_name = '' # modelica script files for compiling the model
result_mat = '' # contains the latest simulation results
base_result_mat = '' # contains the expected simulation results
working_dir = '' # contains the temporary files and executables
root_dir = ''
mo_dir = '' # contains the modelica file, (package or model)
output_dir = '' # relative or absolute
variable_filter = [] # list of names of variables to save/load to/from mat-file
experiment = {} # dictionary with StartTime, StopTime, Tolerance,
# NumberOfIntervals, Interval and Algorithm.
model_is_compiled = False # flag for telling if the model was compiled
model_did_simulate = False # flag for telling if the model has been simulated
lib_package_paths = [] # pa
|
ths to additional packages
lib_package_names = [] # names of additional packages
max_simulation_time = 43200 # (=12h) time threshold before simulation is aborted
## Variables with execution statistics
compilation_time = -1
translation_time = -1
make_time = -1
simu
|
lation_time = -1
total_time = -1
def _initialize(self,
model_config):
"""
Creates a new instance of a modelica simulation.
dictionary : model_config
Mandatory Keys : 'model_name' (str), 'model_file_name' (str)
Optional Keys : 'MSL_version' (str), 'variable_filter' ([str]),
'result_file' (str), 'experiment' ({str})
"""
print ' --- ===== See debug.log for error/debug messages ===== --- \n'
print ' in {0}'.format(os.getcwd())
# create a logger, (will only be written to if no other logger defined 'higher' up)
logging.basicConfig(filename="debug.log",
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger()
# always use highest level of debugging
log.setLevel(logging.DEBUG)
log.debug(" --- ==== ******************************* ==== ---")
log.info(" --- ==== ******* New Run Started ******* ==== ---")
self.date_time = '{0}'.format(datetime.datetime.today())
log.debug(" --- ==== * {0} ** ==== ---".format(self.date_time))
log.debug(" --- ==== ******************************* ==== ---")
log.debug("Entered _initialize")
log.info("tool_name : {0}".format(self.tool_name))
log.info("tool_path : {0}".format(self.tool_path))
self.root_dir = os.getcwd()
self.model_config = model_config
# Mandatory keys in dictionary
try:
model_file_name = self.model_config['model_file_name']
if model_file_name == "":
self.model_file_name = ""
log.info("No model_file name given, assumes model is in Modelica Standard Library")
else:
self.model_file_name = os.path.normpath(os.path.join(os.getcwd(), model_file_name))
self.mo_dir = os.path.dirname(self.model_file_name)
log.info("mo_dir : {}".format(self.mo_dir))
log.info("model_file_name : {0}".format(self.model_file_name))
model_name = self.model_config['model_name']
if model_name == "":
base_name = os.path.basename(model_file_name)
self.model_name = os.path.splitext(base_name)[0]
log.info("No model_name given, uses model_file_name without .mo")
else:
self.model_name = model_name
log.info("model_name : {0}".format(self.model_name))
except KeyError as err:
raise ModelicaInstantiationError("Mandatory key missing in model_config : {0}".format(err.message))
# optional keys in dictionary
if 'MSL_version' in model_config:
self.msl_version = self.model_config['MSL_version']
else:
self.msl_version = "3.2"
log.info("msl_version : {0}".format(self.msl_version))
if 'experiment' in model_config:
self.experiment = dict(
StartTime=model_config['experiment']['StartTime'],
StopTime=model_config['experiment']['StopTime'],
NumberOfIntervals=model_config['experiment']['NumberOfIntervals'],
Tolerance=model_config['experiment']['
|
espressopp/espressopp
|
src/interaction/StillingerWeberTripleTerm.py
|
Python
|
gpl-3.0
| 8,110
| 0.006289
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************************
espressopp.interaction.StillingerWeberTripleTerm
************************************************
This class provides methods to compute forces and energies of
the Stillinger Weber Triple Term potential.
if :math:`d_{12} >= r_{c_1}` or :math:`d_{32} >= r_{c_2}`
.. math::
U = 0.0
else
.. math::
U = \varepsilon \lambda \exp\left(\frac{\sigma \gamma_1}{|r_{12}| - \sigma r_{c_1}}
+ \frac{\sigma \gamma_2}{|r_{32}| - \sigma r_{c_2}}\right)
\left(\frac{r_{12} \cdot r_{32}}{|r_{12}| \cdot |r_{32}|}
- \cos(\theta_0)\right)^2
.. function:: espressopp.interaction.StillingerWeberTripleTerm(gamma, theta0, lmbd, epsilon, sigma, cutoff)
:param gamma: (default: 0.0)
:param theta0: (default: 0.0)
:param lmbd: (default: 0.0)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param cutoff: (default: infinity)
:type gamma: real
:type theta0: real
:type lmbd: real
:type epsilon: real
:type sigma: real
:type cutoff:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm(system, vl3)
:param system:
:param vl3:
:type system:
:type vl3:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.getPotential(type1, type2, type3)
:param type1:
:param type2:
:param type3:
:type type1:
:type type2:
:type type3:
:rtype:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.getVerletListTriple()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.setPotential(type1, type2, type3, potential)
:param type1:
:param type2:
:param type3:
:param potential:
:type type1:
:type type2:
:type type3:
:type potential:
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm(system, ftl, potential)
:param system:
:param ftl:
:param potential:
:type system:
:type ftl:
:type potential:
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm.getFixedTripleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm.setPotential(type1, type2, type3, potential)
:param type1:
:param type2:
:param type3:
:param potential:
:type type1:
:type type2:
:type type3:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_StillingerWeberTripleTerm, \
interaction_VerletListStillingerWeberTripleTerm, \
interaction_FixedTripleListStillingerWeberTripleTerm
class StillingerWeberTripleTermLocal(AngularPotentialLocal, interaction_StillingerWeberTripleTerm):
def __init__(self, gamma1=0.0, gamma2=0.0, theta0=0.0, lmbd=0.0,
epsilon=1.0, sigma1=1.0, sigma2=1.0, cutoff1=infinity, cutoff2=infinity):
"""Initialize the local StillingerWeberTripleTerm object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_StillingerWeberTripleTerm, gamma1, gamma2,
theta0, lmbd, epsilon, sigma1, sigma2, cutoff1, cutoff2)
def __init__(self, gamma=0.0, theta0=0.0, lmbd=0.0, epsilon=1.0, sigma=1.0, cutoff=infinity):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_StillingerWeberTripleTerm, gamma, gamma,
theta0, lmbd, epsilon, sigma, sigma, cutoff, cutoff)
class VerletListStillingerWeberTripleTermLocal(InteractionLocal, interaction_VerletListStillingerWeberTripleTerm):
def __init__(self, system, vl3):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListStillingerWeberTripleTerm, system, vl3)
de
|
f setPotential(self, type1, type2, type3, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugrou
|
p():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getPotential(self, type1, type2, type3):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2, type3)
def getVerletListTriple(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletListTriple(self)
class FixedTripleListStillingerWeberTripleTermLocal(InteractionLocal, interaction_FixedTripleListStillingerWeberTripleTerm):
def __init__(self, system, ftl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleListStillingerWeberTripleTerm, system, ftl, potential)
def setPotential(self, type1, type2, type3, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getFixedTripleList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
class StillingerWeberTripleTerm(AngularPotential):
'The StillingerWeberTripleTerm potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.StillingerWeberTripleTermLocal',
pmiproperty = [ 'gamma1', 'gamma2', 'theta0',
'lambda', 'epsilon', 'sigma1',
'sigma2', 'cutoff1', 'cutoff2']
)
class VerletListStillingerWeberTripleTerm(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListStillingerWeberTripleTermLocal',
pmicall = ['setPotential', 'getPotential','getVerletListTriple']
)
class FixedTripleListStillingerWeberTripleTerm(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListStillingerWeberTripleTermLocal',
pmicall = ['setPotential','getFixedTripleList']
)
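# Usage sketch based on the documented API above (system and verlet-list-triple construction
# omitted; parameter values are illustrative, not taken from this file):
#   pot = espressopp.interaction.StillingerWeberTripleTerm(
#             gamma=1.2, theta0=0.0, lmbd=21.0, epsilon=2.17, sigma=2.095, cutoff=3.77)
#   interaction = espressopp.interaction.VerletListStillingerWeberTripleTerm(system, vl3)
#   interaction.setPotential(type1=0, type2=0, type3=0, potential=pot)
#   system.addInteraction(interaction)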
|
gbin/err-backend-discord
|
discordb.py
|
Python
|
gpl-3.0
| 24,430
| 0.00176
|
import asyncio
import logging
import sys
import re
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from discord.utils import find
from errbot.backends.base import (
Person,
Message,
Room,
RoomOccupant,
Presence,
ONLINE,
OFFLINE,
AWAY,
DND,
RoomError,
)
from errbot.core import ErrBot
log = logging.getLogger(__name__)
try:
import discord
except ImportError:
log.exception("Could not start the Discord back-end")
log.fatal(
"You need to install the Discord API in order to use the Discord backend.\n"
"You can do `pip install -r requirements.txt` to install it"
)
sys.exit(1)
# Discord message size limit.
DISCORD_MESSAGE_SIZE_LIMIT = 2000
COLOURS = {
"red": 0xFF0000,
"green": 0x008000,
"yellow": 0xFFA500,
"blue": 0x0000FF,
"white": 0xFFFFFF,
"cyan": 0x00FFFF,
} # Discord doesn't know its colours
class DiscordSender(ABC, discord.abc.Snowflake):
@abstractmethod
async def send(self, content: str = None, embed: discord.Embed = None):
raise NotImplementedError
@abstractmethod
def get_discord_object(self) -> discord.abc.Messageable:
raise NotImplementedError
class DiscordPerson(Person, DiscordSender):
@classmethod
def username_and_discriminator_to_userid(cls, username: str, discriminator: str) -> str:
return find(
lambda m: m.name == username and m.discriminator == discriminator,
DiscordBackend.client.get_all_members(),
)
def __init__(self, user_id: str):
"""
@user_id: _must_ be a string representation of a Discord Snowflake (an integer).
"""
if not re.match(r"[0-9]+", str(user_id)):
raise ValueError(f"Invalid Discord user id {type(user_id)} {user_id}.")
self._user_id = user_id
def get_discord_object(self) -> discord.abc.Messageable:
return self.discord_user()
@property
def created_at(self):
return discord.utils.snowflake_time(self.id)
@property
def person(self) -> str:
return str(self)
@property
def id(self) -> str:
return self._user_id
def discord_user(self) -> discord.User:
return DiscordBackend.client.get_user(self._user_id)
@property
def username(self) -> str:
"""Convert a Discord user ID to their user name"""
user = self.discord_user()
if user is None:
log.error(f"Cannot find user with ID {self._user_id}")
return f"<{self._user_id}>"
return user.name
nick = username
@property
def client(self) -> None:
return None
@property
def fullname(self) -> Optional[str]:
usr = self.discord_user()
if usr is None:
raise ValueError("Discord user is not defined.")
return f"{usr.name}#{usr.discriminator}"
@property
def aclattr(self) -> str:
return self.fullname
async def send(
self,
content: str = None,
tts: bool = False,
embed: discord.Embed = None,
file: discord.File = None,
files: List[discord.File] = None,
delete_after: float = None,
nonce: int = None,
allowed_mentions: discord.AllowedMentions = None,
reference: Union[discord.Message, discord.MessageReference] = None,
mention_author: Optional[bool] = None,
):
await self.discord_user().send(
content=content,
tts=tts,
embed=embed,
file=file,
files=files,
delete_after=delete_after,
nonce=nonce,
allowed_mentions=allowed_mentions,
reference=reference,
mention_author=mention_author,
)
def __eq__(self, other):
return isinstance(other, DiscordPerson) and other.aclattr == self.aclattr
def __str__(self):
return f"{self.fullname}"
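# Illustrative only (the snowflake id below is made up): DiscordPerson wraps a user id string
# and resolves the underlying discord.User lazily via DiscordBackend.client.
#   person = DiscordPerson("123456789012345678")
#   person.username, person.fullname, person.aclattr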
class DiscordRoom(Room, DiscordSender):
"""
DiscordRoom objects can be in two states:
1. They exist and we have a channel_id of that room
2. They don't currently exist and we have a channel name and guild
"""
@classmethod
def from_id(cls, channel_id):
channel = DiscordBackend.client.get_channel(channel_id)
if channel is None:
raise ValueError(f"Channel id:{channel_id} doesn't exist!")
return cls(channel.name, channel.guild.id)
def __init__(self, channel_name: str, guild_id: str):
"""
Allows specifying an existing room (via name + guild or via id) or allows the
creation of a future room by specifying a name and guild to create the channel in.
:param channel_name:
:param guild_id:
"""
if DiscordBackend.client.get_guild(guild_id) is None:
raise ValueError(f"Can't find guild id {guild_id} to init DiscordRoom")
self._guild_id = guild_id
self._channel_name = channel_name
self._channel_id = self.channel_name_to_id() # Can be None if channel doesn't exist
def get_discord_object(self):
return self.discord_channel()
def channel_name_to_id(self):
"""
Channel names are non-unique across Discord. Hence we require a guild id to uniquely
identify a room id.
:return: ID of the room
"""
matching = [
channel
for channel in DiscordBackend.client.get_all_channels()
if self._channel_name == channel.name
and channel.guild.id == self._guild_id
and isinstance(channel, discord.TextChannel)
]
if len(matching) == 0:
return None
if len(matching) > 1:
log.warning(
"Multiple matching channels for channel"
f"name {self._channel_name} in guild id {self._guild_id}"
)
return matching[0].id
@property
def created_at(self):
return discord.utils.snowflake_time(self.id)
def invite(self, *args) -> None:
if not self.exist
|
s:
raise RuntimeError("Can't invite to a non-existent channel")
for identifier in args:
if not isinstance(identifier, DiscordPerson):
|
raise RuntimeError("Can't invite non Discord Users")
asyncio.run_coroutine_threadsafe(
self.discord_channel().set_permissions(
identifier.discord_user(), read_messages=True
),
loop=DiscordBackend.client.loop,
)
@property
def joined(self) -> bool:
log.error("Not implemented")
return True
def leave(self, reason: str = None) -> None:
"""
Can't just leave a room
:param reason:
:return:
"""
log.error("Not implemented")
async def create_room(self):
guild = DiscordBackend.client.get_guild(self._guild_id)
channel = await guild.create_text_channel(self._channel_name)
log.info(f"Created channel {self._channel_name} in guild {guild.name}")
self._channel_id = channel.id
def create(self) -> None:
if self.exists:
log.warning(f"Tried to create {self._channel_name} which already exists.")
raise RoomError("Room exists")
asyncio.run_coroutine_threadsafe(
self.create_room(), loop=DiscordBackend.client.loop
).result(timeout=5)
def destroy(self) -> None:
if not self.exists:
log.warning(f"Tried to destroy {self._channel_name} which doesn't exist.")
raise RoomError("Room doesn't exist")
asyncio.run_coroutine_threadsafe(
self.discord_channel().delete(reason="Bot deletion command"),
loop=DiscordBackend.client.loop,
).result(timeout=5)
def join(self, username: str = None, password: str = None) -> None:
"""
All public channels are already joined. Only private channels can be joined and we
need an invite for that.
:param username:
:param password:
:return:
"""
|
SDHM/vitess
|
test/schema.py
|
Python
|
bsd-3-clause
| 9,880
| 0.008502
|
#!/usr/bin/env python
import logging
import unittest
import os
import environment
import utils
import tablet
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
# shard_2 tablets are not used by all tests and not included by default.
tablets = [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup,
shard_1_master, shard_1_replica1]
tablets_shard2 = [shard_2_master, shard_2_replica1]
test_keyspace = 'test_keyspace'
db_name = 'vt_' + test_keyspace
def setUpModule():
try:
environment.topo_server().setup()
_init_mysql(tablets)
utils.run_vtctl(['CreateKeyspace', test_keyspace])
shard_0_master.init_tablet( 'master', test_keyspace, '0')
shard_0_replica1.init_tablet('replica', test_keyspace, '0')
shard_0_replica2.init_tablet('replica', test_keyspace, '0')
shard_0_rdonly.init_tablet( 'rdonly', test_keyspace, '0')
shard_0_backup.init_tablet( 'backup', test_keyspace, '0')
shard_1_master.init_tablet( 'master', test_keyspace, '1')
shard_1_replica1.init_tablet('replica', test_keyspace, '1')
utils.run_vtctl(['RebuildKeyspaceGraph', test_keyspace], auto_log=True)
# run checks now before we start the tablets
utils.validate_topology()
utils.Vtctld().start()
# create databases, start the tablets
for t i
|
n tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
|
# wait for the tablets to start
shard_0_master.wait_for_vttablet_state('SERVING')
shard_0_replica1.wait_for_vttablet_state('SERVING')
shard_0_replica2.wait_for_vttablet_state('SERVING')
shard_0_rdonly.wait_for_vttablet_state('SERVING')
shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
shard_1_master.wait_for_vttablet_state('SERVING')
shard_1_replica1.wait_for_vttablet_state('SERVING')
# make sure all replication is good
for t in tablets:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', test_keyspace+'/0',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', test_keyspace+'/1',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace])
# check after all tablets are here and replication is fixed
utils.validate_topology(ping_tablets=True)
except Exception as setup_exception:
try:
tearDownModule()
except Exception as e:
logging.exception("Tearing down a failed setUpModule() failed: %s", e)
raise setup_exception
def _init_mysql(tablets):
setup_procs = []
for t in tablets:
setup_procs.append(t.init_mysql())
utils.wait_procs(setup_procs)
def tearDownModule():
if utils.options.skip_teardown:
return
tablet.kill_tablets(tablets)
teardown_procs = []
for t in tablets:
teardown_procs.append(t.teardown_mysql())
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in tablets:
t.remove_tree()
class TestSchema(unittest.TestCase):
def setUp(self):
for t in tablets:
t.create_db(db_name)
def tearDown(self):
# This test assumes that it can reset the tablets by simply cleaning their
# databases without restarting the tablets.
for t in tablets:
t.clean_dbs()
# Tablets from shard 2 are always started during the test. Shut them down now.
if shard_2_master in tablets:
for t in tablets_shard2:
t.scrap(force=True, skip_rebuild=True)
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
t.kill_vttablet()
tablets.remove(t)
utils.run_vtctl(['DeleteShard', 'test_keyspace/2'], auto_log=True)
def _check_tables(self, tablet, expectedCount):
tables = tablet.mquery(db_name, 'show tables')
self.assertEqual(len(tables), expectedCount,
'Unexpected table count on %s (not %u): got tables: %s' %
(tablet.tablet_alias, expectedCount, str(tables)))
def _check_db_not_created(self, tablet):
# Broadly catch all exceptions, since the exception being raised is internal to MySQL.
# We're strictly checking the error message though, so should be fine.
with self.assertRaisesRegexp(Exception, '(1049, "Unknown database \'%s\'")' % db_name):
tablet.mquery(db_name, 'show tables')
def _apply_schema(self, keyspace, sql):
out, _ = utils.run_vtctl(['ApplySchema',
'-sql='+sql,
keyspace],
trap_output=True,
raise_on_error=True)
return out
def _get_schema(self, tablet_alias):
out, _ = utils.run_vtctl(['GetSchema',
tablet_alias],
trap_output=True,
raise_on_error=True)
return out
def _create_test_table_sql(self, table):
return 'CREATE TABLE %s ( \
`id` BIGINT(20) not NULL, \
`msg` varchar(64), \
PRIMARY KEY (`id`) \
) ENGINE=InnoDB' % table
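# For example, _create_test_table_sql('vt_select_test01') renders (whitespace condensed) as:
#   CREATE TABLE vt_select_test01 ( `id` BIGINT(20) not NULL, `msg` varchar(64),
#     PRIMARY KEY (`id`) ) ENGINE=InnoDB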
def _alter_test_table_sql(self, table, index_column_name):
return 'ALTER TABLE %s \
ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST, \
DROP PRIMARY KEY, \
ADD PRIMARY KEY (new_id), \
ADD INDEX idx_column(%s) \
' % (table, index_column_name)
def _apply_initial_schema(self):
schema_changes = ';'.join([
self._create_test_table_sql('vt_select_test01'),
self._create_test_table_sql('vt_select_test02'),
self._create_test_table_sql('vt_select_test03'),
self._create_test_table_sql('vt_select_test04')])
# apply schema changes to the test keyspace
self._apply_schema(test_keyspace, schema_changes)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# get schema for each shard
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
def test_schema_changes(self):
self._apply_initial_schema()
self._apply_schema(test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
# test schema changes
os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace))
input_path = os.path.join(utils.vtctld.schema_change_dir, test_keyspace, "input")
os.makedirs(input_path)
sql_path = os.path.join(input_path, "create_test_table_x.sql")
with open(sql_path, 'w') as handler:
handler.write("create table test_table_x (id int)")
timeout = 10
# wait until this sql file has been consumed by autoschema
while os.path.isfile(sql_path):
timeout = utils.wait_step('waiting for vtctld to pick up schema changes',
timeout,
sleep_time=0.2)
# check number of tables
self._check_tables(shard_0_master, 5)
self._check_tables(shard_1_master, 5)
def _setUp_tablets_shard_2(self):
try:
_init_mysql(tablets_shard2)
finally:
# Include shard2 tablets for tearDown.
tablets.extend(tablets_shard2)
shard_2_master.init_tablet( 'master', 'test_keyspace', '2')
shard_2_replica1.init_tablet('replica', 'test_keyspace', '2')
# We intentionally don't want to create a db on these tablets.
shard_2_master.start_vttablet(wait_for_state=None)
shard_2_replica1.start_vttable
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/mission/quest_item/shared_slooni_jong_q1_needed.py
|
Python
|
mit
| 477
| 0.046122
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
|
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_slooni_jong_q1_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_tals_n","slooni_jong_q1_needed")
#### BEGIN MODIFICATIONS ####
#### END MODI
|
FICATIONS ####
return result
|
openeventdata/Focus_Locality_Extraction
|
Focus_Locality/Sentence_Embedding_Approach/Testing/SIFpreprocessing_test.py
|
Python
|
mit
| 5,830
| 0.014923
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
|
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy
|
as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
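# i.e. the SIF ("smooth inverse frequency") weight a / (a + p(w)), where
# p(w) = vocabFreq[x[i]] / summ is the word's relative corpus frequency.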
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
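# Illustrative call (word2vec_Dictionary is assumed to map utf-8 encoded words to
# 300-dimensional vectors, e.g. loaded from a pre-trained embedding file; it is not
# constructed in this module):
#   emb = main(0.001, 'en', ['First sentence .', 'Second sentence .'], word2vec_Dictionary)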
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
|
hatbot-team/hatbot
|
statistics/__init__.py
|
Python
|
mit
| 144
| 0
|
__author__ =
|
'moskupols'
__all__ = ['Statistics', 'BlackList', 'console_statistic']
from .statistics import *
from . import console_statisti
|
c
|
blomquisg/heat
|
heat/common/utils.py
|
Python
|
apache-2.0
| 10,575
| 0.000851
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of t
|
he National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/license
|
s/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
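# e.g. {'name': 'img', 'properties': {'os': 'linux'}} ->
#      {'x-image-meta-name': u'img', 'x-image-meta-property-os': u'linux'}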
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
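# e.g. bool_from_string('On') -> True, bool_from_string('0') -> False,
#      bool_from_string(1) -> True, bool_from_string(2) -> False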
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, widt
|
pancho-villa/Phuey
|
phuey/light_cli.py
|
Python
|
mit
| 1,447
| 0.002764
|
from sys import stdout
import argparse
import json
import logging
from .phuey import Bridge, Light
logger = logging.getLogger()
def command_interpreter(command):
python_dict = {}
commands = command.split(',')
for c in commands:
k, v = c.split('=')
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
elif v.isdigit() is True:
v = int(v)
python_dict[k] = v
return json.dumps(python_dict)
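# e.g. command_interpreter("on=true,bri=128") -> '{"on": true, "bri": 128}'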
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--bridge', '-b', metavar="BRIDGEIPADDRESS")
arg_parser.add_argument('--user', '-u', metavar="USERNAME")
arg_parser.add_argument('--light', '-l', metavar="LIGHTID")
arg_parser.add_argument('--command', '-c', metavar="COMMAND")
args = arg_parser.parse_args()
bridge_ip = args.bridge
user = args.user
lid = args.light
command = command_interpreter(args.command)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stdout)
ch.setLevel(logging.DEBUG)
fmt = '%(name)s - %(asctime)s - %(modul
|
e)s-%(funcName)s/%(lineno)d - %(message)s'
formatter = logging.Formatter(fmt)
ch.setFormatter(formatter)
logger.addHandler(ch)
|
light = Light(bridge_ip, user, lid, 'my light')
logger.debug(command)
light.state = json.loads(command)
|
SciTools/iris
|
lib/iris/tests/unit/experimental/ugrid/mesh/test_Mesh__from_coords.py
|
Python
|
lgpl-3.0
| 9,316
| 0.000107
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :meth:`iris.experimental.ugrid.mesh.Mesh.from_coords`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.experimental.ugrid import logger
from iris.experimental.ugrid.mesh import Connectivity, Mesh
from iris.tests.stock import simple_2d_w_multidim_coords
class Test1Dim(tests.IrisTest):
def setUp(self):
self.lon = DimCoord(
points=[0.5, 1.5, 2.5],
bounds=[[0, 1], [1, 2], [2, 3]],
standard_name="longitude",
long_name="edge longitudes",
var_name="lon",
units="degrees",
attributes={"test": 1},
)
# Should be fine with either a DimCoord or an AuxCoord.
self.lat = AuxCoord(
points=[0.5, 2.5, 1.5],
bounds=[[0, 1], [2, 3], [1, 2]],
standard_name="latitude",
long_name="edge_latitudes",
var_name="lat",
units="degrees",
attributes={"test": 1},
)
def create(self):
return Mesh.from_coords(self.lon, self.lat)
def test_dimensionality(self):
mesh = self.create()
self.assertEqual(1, mesh.topology_dimension)
self.assertArrayEqual(
[0, 1, 1, 2, 2, 3], mesh.node_coords.node_x.points
)
self.assertArrayEqual(
[0, 1, 2, 3, 1, 2], mesh.node_coords.node_y.points
)
self.assertArrayEqual([0.5, 1.5, 2.5], mesh.edge_coords.edge_x.points)
self.assertArrayEqual([0.5, 2.5, 1.5], mesh.edge_coords.edge_y.points)
self.assertIsNone(getattr(mesh, "face_coords", None))
for conn_name in Connectivity.UGRID_CF_ROLES:
conn = getattr(mesh, conn_name, None)
if conn_name == "edge_node_connectivity":
self.assertArrayEqual([[0, 1], [2, 3], [4, 5]], conn.indices)
else:
self.assertIsNone(conn)
def test_node_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.node_coords.node_x),
(self.lat, mesh.node_coords.node_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_centre_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.edge_coords.edge_x),
(self.lat, mesh.edge_coords.edge_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_mesh_metadata(self):
# Inappropriate to guess these values from the input coords.
mesh = self.create()
for attr in (
"standard_name",
"long_name",
"var_name",
):
self.assertIsNone(getattr(mesh, attr))
self.assertTrue(mesh.units.is_unknown())
self.assertDictEqual({}, mesh.attributes)
def test_lazy(self):
self.lon = AuxCoord.from_coord(self.lon)
self.lon = self.lon.copy(
self.lon.lazy_points(), self.lon.lazy_bounds()
)
self.lat = self.lat.copy(
self.lat.lazy_points(), self.lat.lazy_bounds()
)
mesh = self.create()
for coord in list(mesh.all_coords):
if coord is not None:
self.assertTrue(coord.has_lazy_points())
for conn in list(mesh.all_connectivities):
if conn is not None:
self.assertTrue(conn.has_lazy_indices())
def test_coord_shape_mismatch(self):
lat_orig = self.lat.copy(self.lat.points, self.lat.bounds)
self.lat = lat_orig.copy(
poi
|
nts=lat_orig.points, bounds=np.tile(lat_orig.bounds, 2)
)
with self.assertRaisesRegex(
ValueError, "bounds shapes are not identical"
):
_ = self.create()
self.lat = lat_orig.copy(
points=lat_orig.points[-1], bounds=lat_orig.bounds[-1]
)
with self.assertRaisesRegex(
Val
|
ueError, "points shapes are not identical"
):
_ = self.create()
def test_reorder(self):
# Swap the coords.
self.lat, self.lon = self.lon, self.lat
mesh = self.create()
# Confirm that the coords have been swapped back to the 'correct' order.
self.assertEqual("longitude", mesh.node_coords.node_x.standard_name)
self.assertEqual("latitude", mesh.node_coords.node_y.standard_name)
def test_non_xy(self):
for coord in self.lon, self.lat:
coord.standard_name = None
lon_name, lat_name = [
coord.long_name for coord in (self.lon, self.lat)
]
# Swap the coords.
self.lat, self.lon = self.lon, self.lat
with self.assertLogs(logger, "INFO", "Unable to find 'X' and 'Y'"):
mesh = self.create()
# Confirm that the coords have not been swapped back.
self.assertEqual(lat_name, mesh.node_coords.node_x.long_name)
self.assertEqual(lon_name, mesh.node_coords.node_y.long_name)
class Test2Dim(Test1Dim):
def setUp(self):
super().setUp()
self.lon.bounds = [[0, 0.5, 1], [1, 1.5, 2], [2, 2.5, 3]]
self.lon.long_name = "triangle longitudes"
self.lat.bounds = [[0, 1, 0], [2, 3, 2], [1, 2, 1]]
self.lat.long_name = "triangle latitudes"
def test_dimensionality(self):
mesh = self.create()
self.assertEqual(2, mesh.topology_dimension)
self.assertArrayEqual(
[0, 0.5, 1, 1, 1.5, 2, 2, 2.5, 3], mesh.node_coords.node_x.points
)
self.assertArrayEqual(
[0, 1, 0, 2, 3, 2, 1, 2, 1], mesh.node_coords.node_y.points
)
self.assertIsNone(mesh.edge_coords.edge_x)
self.assertIsNone(mesh.edge_coords.edge_y)
self.assertArrayEqual([0.5, 1.5, 2.5], mesh.face_coords.face_x.points)
self.assertArrayEqual([0.5, 2.5, 1.5], mesh.face_coords.face_y.points)
for conn_name in Connectivity.UGRID_CF_ROLES:
conn = getattr(mesh, conn_name, None)
if conn_name == "face_node_connectivity":
self.assertArrayEqual(
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], conn.indices
)
else:
self.assertIsNone(conn)
def test_centre_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.face_coords.face_x),
(self.lat, mesh.face_coords.face_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_mixed_shapes(self):
self.lon = AuxCoord.from_coord(self.lon)
lon_bounds = np.array([[0, 0, 1, 1], [1, 1, 2, 2], [2, 3, 2.5, 999]])
self.lon.bounds = np.ma.masked_equal(lon_bounds, 999)
lat_bounds = np.array([[0, 1, 1, 0], [1, 2, 2, 1], [2, 2, 3, 999]])
self.lat.bounds = np.ma.masked_equal(lat_bounds, 999)
mesh = self.create()
self.assertArrayEqual(
mesh.face_node_connectivity.location_lengths(), [4, 4, 3]
)
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/security/get_policy_info.py | Python | gpl-2.0 | 1,166 | 0.023156
import win32security,win32file,win32api,ntsecuritycon,win32con
policy_handle = win32security.GetPolicyHandle('rupole',win32security.POLICY_ALL_ACCESS)
## mod_nbr, mod_time = win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyModificationInformation)
## print mod_nbr, mod_time
domain_name,dns_domain_name, dns_forest_name, domain_guid, domain_sid = \
win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyDnsDomainInformation)
print domain_name, dns_domain_name, dns_forest_name, domain_guid, domain_sid
event_audit_info=win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyAuditEventsInformation)
print event_audit_info
domain_name,sid =win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyPrimaryDomainInformation)
print domain_name, sid
domain_name,sid =win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyAccountDomainInformation)
print domain_name, sid
server_role = win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyLsaServerRoleInformation)
print 'server role: ',server_role
win32security.LsaClose(policy_handle)
kahliloppenheimer/Naive-Bayes-Classifier | corpus.py | Python | mit | 3,727 | 0.00161
# -*- mode: Python; coding: utf-8 -*-
"""For the purposes of classification, a corpus is defined as a collection
of labeled documents. Such documents might actually represent words, images,
etc.; to the classifier they are merely instances with features."""
from abc import ABCMeta, abstractmethod
from csv import reader as csv_reader
from glob import glob
from os.path import basename, dirname, split, splitext
from document import Document
class Corpus(object):
"""An abstract collection of documents."""
__metaclass__ = ABCMeta
    def __init__(self, datafiles, document_class=Document):
self.documents = []
self.datafiles = glob(datafiles)
for datafile in self.datafiles:
self.load(datafile, document_class)
    # Act as a mutable container for documents.
def __len__(self): return len(self.documents)
def __iter__(self): return iter(self.documents)
def __getitem__(self, key): return self.documents[key]
def __setitem__(self, key, value): self.documents[key] = value
def __delitem__(self, key): del self.documents[key]
@abstractmethod
def load(self, datafile, document_class):
"""Make labeled document instances for the data in a file."""
pass
class PlainTextFiles(Corpus):
"""A corpus contained in a collection of plain-text files."""
def load(self, datafile, document_class):
"""Make a document from a plain-text datafile. The document is labeled
using the last component of the datafile's directory."""
label = split(dirname(datafile))[-1]
with open(datafile, "r") as file:
data = file.read()
self.documents.append(document_class(data, label, datafile))
class PlainTextLines(Corpus):
"""A corpus in which each document is a line in a datafile."""
def load(self, datafile, document_class):
"""Make a document from each line of a plain text datafile.
The document is labeled using the datafile name, sans directory
and extension."""
label = splitext(basename(datafile))[0]
with open(datafile, "r") as file:
for line in file:
data = line.strip()
self.documents.append(document_class(data, label, datafile))
class NamesCorpus(PlainTextLines):
"""A collection of names, labeled by gender. See names/README for
copyright and license."""
def __init__(self, datafiles="names/*.txt", document_class=Document):
super(NamesCorpus, self).__init__(datafiles, document_class)
class CSVCorpus(Corpus):
"""A corpus encoded as a comma-separated-value (CSV) file."""
def load(self, datafile, document_class, encoding="utf-8"):
"""Make a document from each row of a CSV datafile.
Assumes data, label ordering and UTF-8 encoding."""
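        # Note: Python 2's csv.reader yields rows of byte strings, so the
        # small wrapper below decodes every cell to unicode using the
        # requested encoding before the row is used.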
def unicode_csv_reader(csvfile, *args, **kwargs):
for row in csv_reader(csvfile, *args, **kwargs):
yield [unicode(cell, encoding) for cell in row]
with open(datafile, "r") as file:
for data, label in unicode_csv_reader(file):
label = label.strip().upper() # canonicalize label
self.documents.append(document_class(data, label, datafile))
class BlogsCorpus(CSVCorpus):
"""A collection of blog posts, labeled by author gender. See the paper
"Improving Gender Classification of Blog Authors" by Mukherjee and Liu
<http://www.cs.uic.edu/~liub/publications/EMNLP-2010-blog-gender.pdf>
for details and some impressive results."""
def __init__(self, datafiles="blog-gender-dataset.csv",
document_class=Document):
super(BlogsCorpus, self).__init__(datafiles, document_class)
MostlyOpen/odoo_addons | myo_summary/models/annotation.py | Python | agpl-3.0 | 1,419 | 0
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Summary(models.Model):
_inherit = 'myo.summary'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_summary_annotation_rel',
'summary_id',
'annotation_id',
'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
summary_ids = fields.Many2many(
'myo.summary',
'myo_summary_annotation_rel',
'annotation_id',
'summary_id',
'Summaries'
)
bretttegart/treadmill | lib/python/treadmill/cli/admin/ldap/cell.py | Python | apache-2.0 | 6,420 | 0
"""Implementation of treadmill admin ldap CLI cell plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import click
from ldap3.core import exceptions as ldap_exceptions
from treadmill import admin
from treadmill import cli
from treadmill import context
from treadmill import yamlwrapper as yaml
def init():
"""Configures cell CLI group"""
# Disable too many branches warning.
#
# pylint: disable=R0912
formatter = cli.make_formatter('cell')
@click.group()
@cli.admin.ON_EXCEPTIONS
def cell():
"""Manage cell configuration"""
pass
@cell.command()
@click.option('-v', '--version', help='Version.')
@click.option('-r', '--root', help='Distro root.')
@click.option('-l', '--location', help='Cell location.')
@click.option('-u', '--username', help='Cell proid account.')
@click.option('--archive-server', help='Archive server.')
@click.option('--archive-username', help='Archive username.')
@click.option('--ssq-namespace', help='SSQ namespace.')
@click.option('-d', '--data', help='Cell specific data in YAML',
              type=click.Path(exists=True, readable=True))
@click.option('--status', help='Cell status')
@click.option('-m', '--manifest', help='Load cell from manifest file.',
type=click.Path(exists=True, readable=True))
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def configure(cell, version, root, location, username, archive_server,
              archive_username, ssq_namespace, data, status, manifest):
"""Create, get or modify cell configuration"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
attrs = {}
if manifest:
with io.open(manifest, 'rb') as fd:
attrs = yaml.load(stream=fd)
if version:
attrs['version'] = version
if root:
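            # A root of '-' appears to act as a sentinel that clears the
            # stored value (it is written back as None below).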
if root == '-':
root = None
attrs['root'] = root
if location:
attrs['location'] = location
if username:
attrs['username'] = username
if archive_server:
attrs['archive-server'] = archive_server
        if archive_username:
attrs['archive-username'] = archive_username
if ssq_namespace:
attrs['ssq-namespace'] = ssq_namespace
if status:
attrs['status'] = status
if data:
with io.open(data, 'rb') as fd:
attrs['data'] = yaml.load(stream=fd)
if attrs:
try:
admin_cell.create(cell, attrs)
except ldap_exceptions.LDAPEntryAlreadyExistsResult:
admin_cell.update(cell, attrs)
try:
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command()
@click.option('--idx', help='Master index.',
type=click.Choice(['1', '2', '3', '4', '5']),
required=True)
@click.option('--hostname', help='Master hostname.',
required=True)
@click.option('--client-port', help='Zookeeper client port.',
type=int,
required=True)
@click.option('--kafka-client-port', help='Kafka client port.',
type=int,
required=False)
@click.option('--jmx-port', help='Zookeeper jmx port.',
type=int,
required=True)
@click.option('--followers-port', help='Zookeeper followers port.',
type=int,
required=True)
@click.option('--election-port', help='Zookeeper election port.',
type=int,
required=True)
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def insert(cell, idx, hostname, client_port, jmx_port, followers_port,
election_port, kafka_client_port):
"""Add master server to a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
data = {
'idx': int(idx),
'hostname': hostname,
'zk-client-port': client_port,
'zk-jmx-port': jmx_port,
'zk-followers-port': followers_port,
'zk-election-port': election_port,
}
if kafka_client_port is not None:
data['kafka-client-port'] = kafka_client_port
attrs = {
'masters': [data]
}
try:
admin_cell.update(cell, attrs)
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command()
@click.option('--idx', help='Master index.',
type=click.Choice(['1', '2', '3']),
required=True)
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def remove(cell, idx):
"""Remove master server from a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
attrs = {
'masters': [{
'idx': int(idx),
'hostname': None,
'zk-client-port': None,
'zk-jmx-port': None,
'zk-followers-port': None,
'zk-election-port': None,
}]
}
try:
admin_cell.remove(cell, attrs)
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command(name='list')
@cli.admin.ON_EXCEPTIONS
def _list():
"""Displays master servers"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
cells = admin_cell.list({})
cli.out(formatter(cells))
@cell.command()
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def delete(cell):
"""Delete a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
try:
admin_cell.delete(cell)
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
del delete
del _list
del configure
del insert
del remove
return cell
wh-acmer/minixalpha-acm | LeetCode/Python/binary_tree_postorder_traversal_iter.py | Python | mit | 324 | 0.006173
#!/usr/bin/env python
#coding: utf-8
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
    # @return a list of integers
def postorderTraversal(self, root):
pass
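        # The stub above is left unimplemented in the source file. A typical
        # iterative post-order traversal (an illustrative sketch, not part of
        # the original) could look like:
        #
        #     result, stack = [], [root] if root else []
        #     while stack:
        #         node = stack.pop()
        #         result.insert(0, node.val)
        #         if node.left:
        #             stack.append(node.left)
        #         if node.right:
        #             stack.append(node.right)
        #     return result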
BryceLohr/authentic | authentic2/attribute_aggregator/user_profile.py | Python | agpl-3.0 | 4,952 | 0.002827
'''
VERIDIC - Towards a centralized access control system
Copyright (C) 2011 Mikael Ates
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from authentic2.attribute_aggregator.core import get_profile_field_name_from_definition, \
get_definition_from_profile_field_name
logger = logging.getLogger(__name__)
SOURCE_NAME = 'USER_PROFILE'
def get_attributes(user, definitions=None, source=None, auth_source=False, **kwargs):
'''
    Return attributes dictionary
    Dictionary format:
attributes = dict()
data_from_source = list()
a1 = dict()
a1['oid'] = definition_name
Or
a1['definition'] = definition_name
definition may be the definition name like 'gn'
or an alias like 'givenName'
Or
a1['name'] = attribute_name_in_ns
a1['namespace'] = ns_name
a1['values'] = list_of_values
data_from_source.append(a1)
...
data_from_source.append(a2)
attributes[source_name] = data_from_source
First attempt on 'definition' key.
    Else, definition is searched by 'name' and 'namespace' keys.
'''
from models import AttributeSource
try:
AttributeSource.objects.get(name=SOURCE_NAME)
except:
logger.debug('get_attributes: \
Profile source not configured')
return None
if source and source.name != SOURCE_NAME:
logger.debug('get_attributes: '
'The required source %s is not user profile' % source)
return None
attributes = dict()
data = []
try:
field_names = set()
user_profile_fields = getattr(user, 'USER_PROFILE', [])
if not user_profile_fields:
user_profile_fields = user._meta.get_all_field_names()
for field in user_profile_fields:
if isinstance(field, (tuple, list)):
field_names.add(field[0])
else:
field_names.add(field)
fields = []
if definitions:
for definition in definitions:
logger.debug('get_attributes: looking for %s' % definition)
field_name = get_profile_field_name_from_definition(definition)
if not field_name:
'''
Profile model may be extended without modifying the
mapping file if the attribute name is the same as the
definition
'''
logger.debug('get_attributes: '
'Field name will be the definition')
field_name = definition
if field_name in field_names:
fields.append((field_name, definition))
else:
logger.debug('get_attributes: Field not found in profile')
else:
fields = [(field_name,
get_definition_from_profile_field_name(field_name)) \
for field_name \
in field_names \
if get_definition_from_profile_field_name(field_name)]
for field_name, definition in fields:
logger.debug('get_attributes: found field %s' % (field_name,))
value = getattr(user, field_name, None)
if value:
if callable(value):
value = value()
logger.debug('get_attributes: found value %s' % value)
attr = {}
attr['definition'] = definition
if not isinstance(value, basestring) and hasattr(value,
'__iter__'):
attr['values'] = map(unicode, value)
else:
attr['values'] = [unicode(value)]
data.append(attr)
else:
                    logger.debug('get_attributes: no value found')
except (SiteProfileNotAvailable, ObjectDoesNotExist):
logger.debug('get_attributes: No user profile')
return None
attributes[SOURCE_NAME] = data
return attributes
ndparker/tdi | docs/examples/loading2.py | Python | apache-2.0 | 623 | 0.004815
#!/usr/bin/env python
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
# BEGIN INCLUDE
import tempfile
from tdi import html
file_1 = tempfile.NamedTemporaryFile()
try:
file_2 = tempfile.NamedTemporaryFile()
try:
file_1.write("""<html lang="en"><body tdi:overlay="huh">yay.</body></html>""")
file_1.flush()
        file_2.write("""<html><body tdi:overlay="huh">file 2!</body></html>""")
file_2.flush()
        template = html.from_files([file_1.name, file_2.name])
finally:
file_2.close()
finally:
file_1.close()
template.render()
molmod/yaff | yaff/sampling/test/test_harmonic.py | Python | gpl-3.0 | 4,266 | 0.000469
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
from __future__ import print_function
import numpy as np
from yaff import *
from yaff.sampling.test.common import get_ff_water32, get_ff_water, get_ff_bks
def test_hessian_partial_water32():
ff = get_ff_water32()
select = [1, 2, 3, 14, 15, 16]
hessian = estimate_cart_hessian(ff, select=select)
assert hessian.shape == (18, 18)
def test_hessian_full_water():
ff = get_ff_water()
hessian = estimate_cart_hessian(ff)
assert hessian.shape == (9, 9)
evals = np.linalg.eigvalsh(hessian)
print(evals)
assert sum(abs(evals) < 1e-10) == 3
def test_hessian_full_x2():
K, d = np.random.uniform(1.0, 2.0, 2)
system = System(
numbers=np.array([1, 1]),
pos=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, d]]),
ffatypes=['H', 'H'],
bonds=np.array([[0, 1]]),
)
part = ForcePartValence(system)
part.add_term(Harmonic(K, d, Bond(0, 1)))
ff = ForceField(system, [part])
hessian = estimate_cart_hessian(ff)
evals = np.linalg.eigvalsh(hessian)
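    # For a single harmonic bond the only stiff mode is the bond stretch: the
    # projected Hessian [[K, -K], [-K, K]] has eigenvalues 0 and 2*K, and the
    # remaining translational/rotational modes have zero curvature at r = d.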
assert abs(evals[:-1]).max() < 1e-5
assert abs(evals[-1] - 2*K) < 1e-5
def test_elastic_water32():
ff = get_ff_water32()
elastic = estimate_elastic(ff, do_frozen=True)
assert elastic.shape == (6, 6)
def test_bulk_elastic_bks():
ff = get_ff_bks(smooth_ei=True, reci_ei='ignore')
system = ff.system
lcs = np.array([
[1, 1, 0],
[0, 0, 1],
])
system.align_cell(lcs)
ff.update_rvecs(system.cell.rvecs)
opt = QNOptimizer(FullCellDOF(ff, gpos_rms=1e-6, grvecs_rms=1e-6))
opt.run()
rvecs0 = system.cell.rvecs.copy()
vol0 = system.cell.volume
pos0 = system.pos.copy()
e0 = ff.compute()
elastic = estimate_elastic(ff)
assert abs(pos0 - system.pos).max() < 1e-10
    assert abs(rvecs0 - system.cell.rvecs).max() < 1e-10
assert abs(vol0 - system.cell.volume) < 1e-10
assert elastic.shape == (6, 6)
# Make estimates of the same matrix elements with a simplistic approach
eps = 1e-3
from nose.plugins.skip import SkipTest
raise SkipTest('Double check elastic constant implementation')
# A) stretch in the Z direction
deform = np.array([1, 1, 1-eps])
rvecs1 = rvecs0*deform
pos1 = pos0*deform
ff.update_rvecs(rvecs1)
ff.update_pos(pos1)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e1 = ff.compute()
deform = np.array([1, 1, 1+eps])
rvecs2 = rvecs0*deform
pos2 = pos0*deform
ff.update_rvecs(rvecs2)
ff.update_pos(pos2)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e2 = ff.compute()
C = (e1 + e2 - 2*e0)/(eps**2)/vol0
assert abs(C - elastic[2,2]) < C*0.02
# B) stretch in the X direction
deform = np.array([1-eps, 1, 1])
rvecs1 = rvecs0*deform
pos1 = pos0*deform
ff.update_rvecs(rvecs1)
ff.update_pos(pos1)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e1 = ff.compute()
deform = np.array([1+eps, 1, 1])
rvecs2 = rvecs0*deform
pos2 = pos0*deform
ff.update_rvecs(rvecs2)
ff.update_pos(pos2)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e2 = ff.compute()
C = (e1 + e2 - 2*e0)/(eps**2)/vol0
assert abs(C - elastic[0,0]) < C*0.02
nrego/westpa | src/oldtools/aframe/data_reader.py | Python | gpl-3.0 | 22,795 | 0.011099
# Copyright (C) 2013 Matthew C. Zwier and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division; __metaclass__=type
import logging, warnings
log = logging.getLogger(__name__)
import itertools, re
from itertools import imap
import numpy, h5py
import west, westpa
from oldtools.aframe import AnalysisMixin
from west import Segment
from oldtools.miscfn import parse_int_list
class WESTDataReaderMixin(AnalysisMixin):
'''A mixin for analysis requiring access to the HDF5 files generated during a WEST run.'''
def __init__(self):
super(WESTDataReaderMixin,self).__init__()
self.data_manager = None
self.west_h5name = None
# Whether pcoord caching is active
self.__cache_pcoords = False
# Cached items
self.__c_summary = None
self.__c_iter_groups = dict()
self.__c_seg_id_ranges = dict()
self.__c_seg_indices = dict()
self.__c_wtg_parent_arrays = dict()
self.__c_parent_arrays = dict()
self.__c_pcoord_arrays = dict()
self.__c_pcoord_datasets = dict()
def add_args(self, parser, upcall = True):
if upcall:
try:
upcall = super(WESTDataReaderMixin,self).add_args
except AttributeError:
pass
else:
upcall(parser)
group = parser.add_argument_group('WEST input data options')
group.add_argument('-W', '--west-data', dest='west_h5name', metavar='WEST_H5FILE',
help='''Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in west.cfg).''')
def process_args(self, args, upcall = True):
if args.west_h5name:
self.west_h5name = args.west_h5name
else:
westpa.rc.config.require(['west','data','west_data_file'])
self.west_h5name = westpa.rc.config.get_path(['west','data','west_data_file'])
westpa.rc.pstatus("Using WEST data from '{}'".format(self.west_h5name))
self.data_manager = westpa.rc.get_data_manager()
self.data_manager.backing_file = self.west_h5name
self.data_manager.open_backing(mode='r')
if upcall:
try:
upfunc = super(WESTDataReaderMixin,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def clear_run_cache(self):
del self.__c_summary
        del self.__c_iter_groups, self.__c_seg_id_ranges, self.__c_seg_indices, self.__c_parent_arrays, self.__c_wtg_parent_arrays
del self.__c_pcoord_arrays, self.__c_pcoord_datasets
self.__c_summary = None
self.__c_iter_groups = dict()
self.__c_seg_id_ranges = dict()
self.__c_seg_indices = dict()
self.__c_parent_arrays = dict()
self.__c_wtg_parent_arrays = dict()
self.__c_pcoord_arrays = dict()
self.__c_pcoord_datasets = dict()
@property
def cache_pcoords(self):
'''Whether or not to cache progress coordinate data. While caching this data
can significantly speed up some analysis operations, this requires
copious RAM.
Setting this to False when it was formerly True will release any cached data.
'''
return self.__cache_pcoords
@cache_pcoords.setter
def cache_pcoords(self, cache):
self.__cache_pcoords = cache
if not cache:
del self.__c_pcoord_arrays
self.__c_pcoord_arrays = dict()
def get_summary_table(self):
if self.__c_summary is None:
self.__c_summary = self.data_manager.we_h5file['/summary'][...]
return self.__c_summary
def get_iter_group(self, n_iter):
'''Return the HDF5 group corresponding to ``n_iter``'''
try:
return self.__c_iter_groups[n_iter]
except KeyError:
iter_group = self.data_manager.get_iter_group(n_iter)
return iter_group
def get_segments(self, n_iter, include_pcoords = True):
'''Return all segments present in iteration n_iter'''
return self.get_segments_by_id(n_iter, self.get_seg_ids(n_iter, None), include_pcoords)
def get_segments_by_id(self, n_iter, seg_ids, include_pcoords = True):
'''Get segments from the data manager, employing caching where possible'''
if len(seg_ids) == 0: return []
seg_index = self.get_seg_index(n_iter)
all_wtg_parent_ids = self.get_wtg_parent_array(n_iter)
segments = []
if include_pcoords:
pcoords = self.get_pcoords(n_iter, seg_ids)
for (isegid, seg_id) in enumerate(seg_ids):
row = seg_index[seg_id]
parents_offset = row['wtg_offset']
n_parents = row['wtg_n_parents']
segment = Segment(seg_id = seg_id,
n_iter = n_iter,
status = row['status'],
endpoint_type = row['endpoint_type'],
walltime = row['walltime'],
cputime = row['cputime'],
weight = row['weight'],
)
if include_pcoords:
segment.pcoord = pcoords[isegid]
parent_ids = all_wtg_parent_ids[parents_offset:parents_offset+n_parents]
segment.wtg_parent_ids = {long(parent_id) for parent_id in parent_ids}
segment.parent_id = long(parent_ids[0])
segments.append(segment)
return segments
def get_children(self, segment, include_pcoords=True):
parents = self.get_parent_array(segment.n_iter+1)
seg_ids = self.get_seg_ids(segment.n_iter+1, parents == segment.seg_id)
return self.get_segments_by_id(segment.n_iter+1, seg_ids, include_pcoords)
def get_seg_index(self, n_iter):
try:
return self.__c_seg_indices[n_iter]
except KeyError:
seg_index = self.__c_seg_indices[n_iter] = self.get_iter_group(n_iter)['seg_index'][...]
return seg_index
def get_wtg_parent_array(self, n_iter):
try:
return self.__c_wtg_parent_arrays[n_iter]
except KeyError:
parent_array = self.__c_wtg_parent_arrays[n_iter] = self.get_iter_group(n_iter)['wtgraph'][...]
return parent_array
def get_parent_array(self, n_iter):
try:
return self.__c_parent_arrays[n_iter]
except KeyError:
parent_array = self.get_seg_index(n_iter)['parent_id']
self.__c_parent_arrays[n_iter] = parent_array
return parent_array
def get_pcoord_array(self, n_iter):
try:
return self.__c_pcoord_arrays[n_iter]
except KeyError:
pcoords = self.__c_pcoord_arrays[n_iter] = self.get_iter_group(n_iter)['pcoord'][...]
return pcoords
def get_pcoord_dataset(self, n_iter):
try:
return self.__c_pcoord_datasets[n_iter]
except KeyError:
pcoord_ds = self.__c_pcoord_datasets[n_iter] = self.get_iter_group(n_iter)['pcoord']
return pcoord_ds
def get_pcoords(self, n_iter, seg_ids):
if self.__cache_pcoords:
pcarray = sel
alistairlow/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/weibull_impl.py | Python | apache-2.0 | 5,615 | 0.003384
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weibull bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Weibull",
]
class Weibull(bijector.Bijector):
"""Compute `Y = g(X) = 1 - exp((-X / scale) ** concentration), X >= 0`.
  This bijector maps inputs from `[0, inf]` to `[0, 1]`. The inverse of the
  bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a
random variable with the
[Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution):
```none
Y ~ Weibull(scale, concentration)
  pdf(y; scale, concentration, y >= 0) = (concentration / scale) * (
    y / scale) ** (concentration - 1) * exp(
      -(y / scale) ** concentration)
```
"""
def __init__(self,
scale=1.,
concentration=1.,
event_ndims=0,
validate_args=False,
name="weibull"):
"""Instantiates the `Weibull` bijector.
Args:
scale: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `concentration`.
This is `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`.
concentration: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `scale`.
This is `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`.
event_ndims: Python scalar indicating the number of dimensions associated
with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[scale, concentration]):
self._scale = ops.convert_to_tensor(scale, name="scale")
self._concentration = ops.convert_to_tensor(
concentration, name="concentration")
check_ops.assert_same_float_dtype([self._scale, self._concentration])
if validate_args:
        self._scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._scale,
message="Argument scale was not positive")
], self._scale)
self._concentration = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._concentration,
message="Argument concentration was not positive")
], self._concentration)
super(Weibull, self).__init__(
        event_ndims=event_ndims,
validate_args=validate_args,
name=name)
@property
def scale(self):
"""The `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._scale
@property
def concentration(self):
"""The `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._concentration
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
return -math_ops.expm1(-((x / self.scale) ** self.concentration))
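  # Note: -expm1(-z) equals 1 - exp(-z), computed in a numerically stable way,
  # so _forward above is exactly Y = 1 - exp(-(x / scale) ** concentration)
  # from the class docstring.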
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return self.scale * (-math_ops.log1p(-y)) ** (1 / self.concentration)
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
event_dims = self._event_dims_tensor(y)
return math_ops.reduce_sum(
-math_ops.log1p(-y) +
(1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) +
math_ops.log(self.scale / self.concentration),
axis=event_dims)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
event_dims = self._event_dims_tensor(x)
return math_ops.reduce_sum(
-(x / self.scale) ** self.concentration +
(self.concentration - 1) * math_ops.log(x) +
math_ops.log(self.concentration) +
-self.concentration * math_ops.log(self.scale),
axis=event_dims)
def _maybe_assert_valid_x(self, x):
if not self.validate_args:
return x
is_valid = check_ops.assert_non_negative(
x,
message="Forward transformation input must be at least {}.".format(0))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_positive = check_ops.assert_non_negative(
y, message="Inverse transformation input must be greater than 0.")
less_than_one = check_ops.assert_less_equal(
y, constant_op.constant(1., y.dtype),
message="Inverse transformation input must be less than or equal to 1.")
return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
becxer/pytrain | test_pytrain/test_HMM/__init__.py | Python | mit | 23 | 0
from test_HMM import *
AdrianGaudebert/socorro-crashstats | crashstats/auth/urls.py | Python | mpl-2.0 | 385 | 0
from django.conf.urls.defaults import patterns, url, include
from . import views
urlpatterns = patterns(
'',
url(r'^browserid/mozilla/$', views.mozilla_browserid_verify,
name='mozilla_browserid_verify'),
url(r'^browserid/$', include('django_browserid.urls')),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'},
name='logout'),
)
qnorsten/svtplay-dl | lib/svtplay_dl/error.py | Python | mit | 1,137 | 0.002639
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
class UIException(Exception):
pass
class ServiceError(Exception):
pass
class NoRequestedProtocols(UIException):
"""
    This exception is thrown when the service provides streams,
but not using any accepted protocol (as decided by
options.stream_prio).
"""
def __init__(self, requested, found):
"""
        The constructor takes two mandatory parameters, requested
and found. Both should be lists. requested is the protocols
we want and found is the protocols that can be used to
access the stream.
"""
self.requested = requested
self.found = found
super(NoRequestedProtocols, self).__init__(
"None of the provided protocols (%s) are in "
"the current list of accepted protocols (%s)" % (
self.found, self.requested
)
)
def __repr__(self):
return "NoRequestedProtocols(requested=%s, found=%s)" % (
self.requested, self.found)
suutari-ai/shoop | shuup_tests/api/test_admin.py | Python | agpl-3.0 | 1,366 | 0.000732
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shuup import configuration
from shuup.api.admin_module.views.permissions import APIPermissionView
from shuup.api.permissions import make_permission_config_key, PermissionLevel
from shuup.core import cache
from shuup.core.api.users import UserViewSet
from shuup.testing.factories import get_default_shop
from shuup.testing.utils import apply_request_middleware
def setup_function(fn):
cache.clear()
@pytest.mark.django_db
def test_consolidate_objects(rf):
get_default_shop()
# just visit to make sure GET is ok
request = apply_request_middleware(rf.get("/"))
response = APIPermissionView.as_view()(request)
assert response.status_code == 200
perm_key = make_permission_config_key(UserViewSet())
assert configuration.get(None, perm_key) is None
# now post the form to see what happens
request = apply_request_middleware(rf.post("/", {perm_key: PermissionLevel.ADMIN}))
response = APIPermissionView.as_view()(request)
assert response.status_code == 302 # good
assert int(configuration.get(None, perm_key)) == PermissionLevel.ADMIN
RobbieRain/he | test.py | Python | apache-2.0 | 7 | 0.142857
zhangyu
benjyw/pants | src/python/pants/option/option_value_container_test.py | Python | apache-2.0 | 3,316 | 0.000905
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
class OptionValueContainerTest(unittest.TestCase):
def test_unknown_values(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.HARDCODED, 1)
o = ob.build()
self.assertEqual(1, o.foo)
with self.assertRaises(AttributeError):
o.bar
def test_value_ranking(self) -> None:
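        # The checks below exercise the rank precedence FLAG > ENVIRONMENT >
        # CONFIG > HARDCODED: a lower-ranked assignment never displaces a
        # higher-ranked value.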
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 11)
o = ob.build()
self.assertEqual(11, o.foo)
self.assertEqual(Rank.CONFIG, o.get_rank("foo"))
ob.foo = RankedValue(Rank.HARDCODED, 22)
o = ob.build()
self.assertEqual(11, o.foo)
self.assertEqual(Rank.CONFIG, o.get_rank("foo"))
ob.foo = RankedValue(Rank.ENVIRONMENT, 33)
o = ob.build()
self.assertEqual(33, o.foo)
self.assertEqual(Rank.ENVIRONMENT, o.get_rank("foo"))
ob.foo = RankedValue(Rank.FLAG, 44)
o = ob.build()
self.assertEqual(44, o.foo)
self.assertEqual(Rank.FLAG, o.get_rank("foo"))
def test_is_flagged(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.NONE, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.CONFIG, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.ENVIRONMENT, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.FLAG, 11)
self.assertTrue(ob.build().is_flagged("foo"))
def test_indexing(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 1)
o = ob.build()
self.assertEqual(1, o["foo"])
self.assertEqual(1, o.get("foo"))
        self.assertEqual(1, o.get("foo", 2))
self.assertIsNone(o.get("unknown"))
self.assertEqual(2, o.get("unknown", 2))
with self.assertRaises(AttributeError):
o["bar"]
def test_iterator(self) -> None:
ob = OptionValueContainerBuilder()
ob.a = RankedValue(Rank.FLAG, 3)
ob.b = RankedValue(Rank.FLAG, 2)
ob.c = RankedValue(Rank.FLAG, 1)
o = ob.build()
names = list(iter(o))
self.assertListEqual(["a", "b", "c"], names)
def test_copy(self) -> None:
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.FLAG, 1)
ob.bar = RankedValue(Rank.FLAG, {"a": 111})
p = ob.build()
z = ob.build()
# Verify that the result is in fact a copy.
self.assertEqual(1, p.foo) # Has original attribute.
ob.baz = RankedValue(Rank.FLAG, 42)
self.assertFalse(hasattr(p, "baz")) # Does not have attribute added after the copy.
        # Verify that it's a shallow copy by modifying a referent in p and reading it in z.
p.bar["b"] = 222
self.assertEqual({"a": 111, "b": 222}, z.bar)
mozilla/relman-auto-nag | auto_nag/scripts/workflow/p1_no_assignee.py | Python | bsd-3-clause | 3,281 | 0.00061
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from libmozdata import utils as lmdutils
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.escalation import Escalation, NoActivityDays
from auto_nag.nag_me import Nag
from auto_nag.round_robin import RoundRobin
class P1NoAssignee(BzCleaner, Nag):
def __init__(self):
super(P1NoAssignee, self).__init__()
self.escalation = Escalation(
self.people,
data=utils.get_config(self.name(), "escalation"),
skiplist=utils.get_config("workflow", "supervisor_skiplist", []),
)
self.round_robin = RoundRobin.get_instance()
self.components_skiplist = utils.get_config("workflow", "components_skiplist")
def description(self):
return "P1 Bugs, no assignee and no activity for few days"
def nag_template(self):
return self.template()
def get_extra_for_template(self):
return {"ndays": self.ndays}
def get_extra_for_nag_template(self):
return self.get_extra_for_template()
def get_extra_for_needinfo_template(self):
return self.get_extra_for_template()
def ignore_meta(self):
return True
def has_last_comment_time(self):
return True
def has_product_component(self):
return True
def columns(self):
return ["component", "id", "summary", "last_comment"]
def handle_bug(self, bug, data):
# check if the product::component is in the list
if utils.check_product_component(self.components_skiplist, bug):
return None
return bug
def get_mail_to_auto_ni(self, bug):
# For now, disable the needinfo
return None
# Avoid to ni everyday...
if self.has_bot_set_ni(bug):
return None
mail, nick = self.round_robin.get(bug, self.date)
if mail and nick:
return {"mail": mail, "nickname": nick}
return None
def set_people_to_nag(self, bug, buginfo):
priority = "high"
if not self.filter_bug(priority):
return None
owners = self.round_robin.get(bug, self.date, only_one=False, has_nick=False)
real_owner = bug["triage_owner"]
self.add_triage_owner(owners, real_owner=real_owner)
if not self.add(owners, buginfo, priority=priority):
self.add_no_manager(buginfo["id"])
return bug
def get_bz_params(self, date):
self.ndays = NoActivityDays(self.name()).get(
(utils.get_next_release_date() - self.nag_date).days
)
self.date = lmdutils.get_date_ymd(date)
fields = ["tri
|
age_owner", "flags"]
params = {
"bug_type": "defect",
"include_fields": fields,
"resolution": "---",
"f1": "priority",
"o1": "equals",
"v1": "P1",
"f2": "days_elapsed",
"o2": "greaterthaneq",
"v2": self.ndays,
}
utils.get_empty_assignees(params)
return params
if __name__ == "__main__":
P1NoAssignee().run()
VictorLowther/swift | swift/common/direct_client.py | Python | apache-2.0 | 18,186 | 0.001045
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import socket
from httplib import HTTPException
from time import time
from urllib import quote as _quote
from eventlet import sleep, Timeout
from swift.common.bufferedhttp import http_connect
from swiftclient import ClientException, json_loads
from swift.common.utils import normalize_timestamp
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
def quote(value, safe='/'):
if isinstance(value, unicode):
value = value.encode('utf8')
return _quote(value, safe)
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of containers) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/' + account
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
        qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Account server %s:%s direct GET %s gave status %s' % (node['ip'],
node['port'], repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of objects) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/%s/%s' % (account, container)
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
                'Container server %s:%s direct GET %s gave status %s' % (node['ip'],
node['port'], repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers={}):
path = '/%s/%s' % (account, container)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object nam
Fazer56/Assignment3 | charitysite/volunteer/urls.py | Python | mit | 352 | 0.028409
#local urls.py file
from django.conf.urls import url, include
from . import views
urlpatterns = [
#url(r'^', views.appView.postLocation, name = 'postLocation'),
url(r'^volunteer/', views.appView.member, name = 'member'),
#url(r'^(?P<member_id>[0-9]+)/$', views.appView.detail, name = 'detail'),
#url(r'^(?P<>))
]
WhittKinley/aima-python | submissions/Sery/vacuum2.py | Python | mit | 1,432 | 0.002095
import agents as ag
def HW2Agent() -> object:
"An agent that keeps track of what locations are clean or dirty."
oldPercepts = [('None', 'Clean')]
oldActions = ['NoOp']
actionScores = [{
'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
'NoOp': -100,
}]
level = 0
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
level = len(actionScores) - 1
bump, status = percept
lastBump, lastStatus = oldPercepts[-1]
lastAction = oldActions[-1]
if status == 'Dirty':
action = 'Suck'
actionScores[level][lastAction] += 2
else:
if bump == 'Bump':
actionScores[level][lastAction] -= 10
else:
if lastAction == 'Up' or lastAction == 'Down':
actionScores.append({
                        'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
})
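            # Greedy selection: pick the action with the best learned score at
            # this level; scores are rewarded above when dirt is found and
            # penalized when the last move caused a bump.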
highest = -80
for actionType, score in actionScores[level].items():
if score > highest:
highest = score
action = actionType
print(actionScores)
oldPercepts.append(percept)
oldActions.append(action)
return action
return ag.Agent(program)
poobalan-arumugam/stateproto | src/extensions/lang/python/qhsm/testsamplehsm1.py | Python | bsd-2-clause | 1,506 | 0.003984
import qhsm
from qhsm import QSignals, QEvent
# generated by PythonGenerator version 0.1
class TestSample1(qhsm.QHsm):
def initialiseStateMachine(self):
self.initialiseState(self.s_StateX)
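    # Convention in this generated HSM: a state handler returns None once it
    # has consumed the event and returns its parent state's handler otherwise,
    # so unhandled events bubble up towards _TopState.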
def s_StateX(self, ev):
if ev.QSignal == QSignals.Entry:
self.enterStateX()
elif ev.QSignal == QSignals.Exit:
self.exitStateX()
elif ev.QSignal == QSignals.Init:
self.initialiseState(self.s_State0)
else:
return self._TopState
return None
def s_State0(self, ev):
if ev.QSignal == "Bye":
pass
self.transitionTo(self.s_State1)
elif ev.QSignal == "Hello":
if self.Ok(ev):
self.sayHello3()
self.transitionTo(self.s_State0)
else:
self.sayHello1()
self.transitionTo(self.s_State1)
elif ev.QSignal == QSignals.Entry:
self.enterState0()
elif ev.QSignal == QSignals.Exit:
self.exitState0()
else:
return self.s_StateX
return None
def s_State1(self, ev):
if ev.QSignal == "Hello":
self.sayHello2()
self.transitionTo(self.s_State0)
        elif ev.QSignal == QSignals.Entry:
self.enterState1()
elif ev.QSignal == QSignals.Exit:
self.exitState1()
else:
return self._TopState
return None
#end of TestSample1
pass
spnow/grr | lib/rdfvalues/basic_test.py | Python | apache-2.0 | 6,320 | 0.005697
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""Basic rdfvalue tests."""
import time
from grr.lib import rdfvalue
from grr.lib.rdfvalues import test_base
class RDFBytesTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFBytes
def GenerateSample(self, number=0):
return rdfvalue.RDFBytes("\x00hello%s\x01" % number)
class RDFStringTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFString
def GenerateSample(self, number=0):
return rdfvalue.RDFString(u"Grüezi %s" % number)
class RDFIntegerTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFInteger
def GenerateSample(self, number=0):
return rdfvalue.RDFInteger(number)
class DurationTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.Duration
def GenerateSample(self, number=5):
return rdfvalue.Duration("%ds" % number)
def testStringRepresentationIsTransitive(self):
t = rdfvalue.Duration("5m")
self.assertEqual(t.seconds, 300)
self.assertEqual(t, rdfvalue.Duration(300))
self.assertEqual(str(t), "5m")
class ByteSizeTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.ByteSize
def GenerateSample(self, number=5):
return rdfvalue.ByteSize("%sKib" % number)
def testParsing(self):
for string, expected in [("100gb", 100 * 1000**3),
("10kib", 10*1024),
("2.5kb", 2500)]:
self.assertEqual(expected, rdfvalue.ByteSize(string))
class RDFURNTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFURN
def GenerateSample(self, number=0):
return rdfvalue.RDFURN("aff4:/C.12342%s/fs/os/" % number)
def testRDFURN(self):
"""Test RDFURN handling."""
# Make a url object
str_url = "aff4:/hunts/W:AAAAAAAA/Results"
url = rdfvalue.RDFURN(str_url, age=1)
self.assertEqual(url.age, 1)
self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results")
self.assertEqual(url._urn.netloc, "")
self.assertEqual(url._urn.scheme, "aff4")
# Test the Add() function
url = url.Add("some", age=2).Add("path", age=3)
self.assertEqual(url.age, 3)
    self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results/some/path")
self.assertEqual(url._urn.netloc, "")
self.assertEqual(url._urn.scheme, "aff4")
# Test that we can handle urns with a '?' and do not interpret them as
# a delimiter between url and parameter list.
str_url = "aff4:/C.0000000000000000/fs/os/c/regex.*?]&[+{}--"
url = rdfvalue.RDFURN(str_url, age=1)
    self.assertEqual(url.Path(), str_url[5:])
def testInitialization(self):
"""Check that we can initialize from common initializers."""
# Empty Initializer not allowed.
self.assertRaises(ValueError, self.rdfvalue_class)
# Initialize from another instance.
sample = self.GenerateSample("aff4:/")
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
def testSerialization(self, sample=None):
sample = self.GenerateSample("aff4:/")
super(RDFURNTest, self).testSerialization(sample=sample)
class RDFDatetimeTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFDatetime
def GenerateSample(self, number=0):
result = self.rdfvalue_class()
result.ParseFromHumanReadable("2011/11/%02d" % (number+1))
return result
def testTimeZoneConversions(self):
time_string = "2011-11-01 10:23:00"
# Human readable strings are assumed to always be in UTC
# timezone. Initialize from the human readable string.
date1 = rdfvalue.RDFDatetime().ParseFromHumanReadable(time_string)
self.assertEqual(int(date1), 1320142980000000)
self.assertEqual(
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(int(date1) / 1e6)),
time_string)
# We always stringify the date in UTC timezone.
self.assertEqual(str(date1), time_string)
def testInitFromEmptyString(self):
orig_time = time.time
time.time = lambda: 1000
try:
# Init from an empty string should generate a DateTime object with a zero
# time.
date = rdfvalue.RDFDatetime("")
self.assertEqual(int(date), 0)
self.assertEqual(int(date.Now()), int(1000 * 1e6))
finally:
time.time = orig_time
def testAddNumber(self):
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date + 60), 1e9 + 60e6)
self.assertEqual(int(date + 1000.23), 1e9 + 1000230e3)
self.assertEqual(int(date + (-10)), 1e9 - 10e6)
def testSubNumber(self):
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date - 60), 1e9 - 60e6)
self.assertEqual(int(date - (-1000.23)), 1e9 + 1000230e3)
self.assertEqual(int(date - 1e12), 1e9 - 1e18)
def testAddDuration(self):
duration = rdfvalue.Duration("12h")
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date + duration), 1e9 + 12 * 3600e6)
duration = rdfvalue.Duration("-60s")
self.assertEqual(int(date + duration), 1e9 - 60e6)
def testSubDuration(self):
duration = rdfvalue.Duration("5m")
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date - duration), 1e9 - 5 * 60e6)
duration = rdfvalue.Duration("-60s")
self.assertEqual(int(date - duration), 1e9 + 60e6)
duration = rdfvalue.Duration("1w")
self.assertEqual(int(date - duration), 1e9 - 7 * 24 * 3600e6)
class RDFDatetimeSecondsTest(RDFDatetimeTest):
rdfvalue_class = rdfvalue.RDFDatetimeSeconds
class HashDigestTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.HashDigest
def GenerateSample(self, number=0):
return rdfvalue.HashDigest("\xca\x97\x81\x12\xca\x1b\xbd\xca\xfa\xc21\xb3"
"\x9a#\xdcM\xa7\x86\xef\xf8\x14|Nr\xb9\x80w\x85"
"\xaf\xeeH\xbb%s" % number)
def testEqNeq(self):
binary_digest = ("\xca\x97\x81\x12\xca\x1b\xbd\xca\xfa\xc21\xb3"
"\x9a#\xdcM\xa7\x86\xef\xf8\x14|Nr\xb9\x80w\x85"
"\xaf\xeeH\xbb")
sample = rdfvalue.HashDigest(binary_digest)
hex_digest = ("ca978112ca1bbdcafac231b39a23dc4da786eff81"
"47c4e72b9807785afee48bb")
self.assertEqual(sample, hex_digest)
self.assertEqual(sample, binary_digest)
self.assertNotEqual(sample, "\xaa\xbb")
self.assertNotEqual(sample, "deadbeef")
|
openstack/nomad
|
cyborg/objects/attach_handle.py
|
Python
|
apache-2.0
| 3,799
| 0
|
# Copyright 2019 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_versionedobjects import base as object_base
from cyborg.db import api as dbapi
from cyborg.objects import base
from cyborg.objects import fields as object_fields
LOG = logging.getLogger(__name__)
ATTACH_TYPE = ["PCI", "MDEV"]
@base.CyborgObjectRegistry.register
class AttachHandle(base.CyborgObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = dbapi.get_instance()
fields = {
'id': object_fields.IntegerField(nullable=False),
'uuid': object_fields.UUIDField(nullable=False),
'deployable_id': object_fields.IntegerField(nullable=False),
'cpid_id': object_fields.IntegerField(nullable=False),
'attach_type': object_fields.EnumField(valid_values=ATTACH_TYPE,
nullable=False),
# attach_info should be JSON here.
'attach_info': object_fields.StringField(nullable=False),
'in_use': object_fields.BooleanField(nullable=False)
}
def create(self, context):
"""Create a AttachHandle record in the DB."""
self.in_use = False
values = self.obj_get_changes()
db_ah = self.dbapi.attach_handle_create(context, values)
self._from_db_object(self, db_ah)
@classmethod
def get(cls, context, uuid):
"""Find a DB AttachHandle and return an Obj AttachHandle."""
db_ah = cls.dbapi.attach_handle_get_by_uuid(context, uuid)
obj_ah = cls._from_db_object(cls(context), db_ah)
return obj_ah
@classmethod
def get_by_id(cls, context, id):
"""Find a DB AttachHandle by ID and return an Obj AttachHandle."""
db_ah = cls.dbapi.attach_handle_get_by_id(context, id)
obj_ah = cls._from_db_object(cls(context), db_ah)
return obj_ah
@classmethod
def list(cls, context, filters={}):
"""Return a list of AttachHandle objects."""
if filters:
sort_dir = filters.pop('sort_dir', 'desc')
sort_key = filters.pop('sort_key', 'create_at')
limit = filters.pop('limit', None)
marker = filters.pop('marker_obj', None)
db_ahs = cls.dbapi.attach_handle_get_by_filters(context, filters,
sort_dir=sort_dir,
sort_key=sort_key,
limit=limit,
                                                             marker=marker)
else:
db_ahs = cls.dbapi.attach_handle_list(context)
obj_ah_list = cls._from_db_object_list(db_ahs, context)
return obj_ah_list
def save(self, context):
"""Update an AttachHandle record in the DB"""
updates = self.obj_get_changes()
db_ahs = self.dbapi.attach_handle_update(context, self.uuid, updates)
self._from_db_object(self, db_ahs)
def destroy(self, context):
"""Delete a AttachHandle from the DB."""
self.dbapi.attach_handle_delete(context, self.uuid)
self.obj_reset_changes()
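# Illustrative usage sketch (not part of the original module); the context and
# filter values below are assumptions for demonstration only:
#
#     filters = {'in_use': False, 'sort_key': 'created_at',
#                'sort_dir': 'asc', 'limit': 10}
#     for ah in AttachHandle.list(context, filters):
#         LOG.debug("handle %s attaches deployable %s", ah.uuid, ah.deployable_id)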
|
sameerparekh/pants
|
src/python/pants/util/fileutil.py
|
Python
|
apache-2.0
| 961
| 0.006243
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)
import os
import shutil
from pants.util.contextutil import temporary_file
def atomic_copy(src, dst):
"""Copy the file src to dst, overwriting dst atomically."""
with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
shutil.copyfile(src, tmp_dst.name)
os.rename(tmp_dst.name, dst)
def create_size_estimators():
def line_count(filename):
with open(filename, 'rb') as fh:
return sum(1 for line in fh)
return {
'linecount': lambda srcs: sum(line_count(src) for src in srcs),
'filecount': lambda srcs: len(srcs),
'filesize': lambda srcs: sum(os.path.getsize(src) for src in srcs),
'nosize': lambda srcs: 0,
}
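# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical:
#
#     atomic_copy('/tmp/report.tmp', '/tmp/report.txt')  # dst is replaced atomically
#     estimators = create_size_estimators()
#     estimators['linecount'](['a.py', 'b.py'])          # total line count of both files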
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_ddos_protection_plans_operations.py
|
Python
|
mit
| 30,431
| 0.005159
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
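# Illustrative call pattern (not part of the generated code); the client setup
# and resource names are assumptions:
#
#     poller = await client.ddos_protection_plans.begin_delete(
#         resource_group_name="my-rg",
#         ddos_protection_plan_name="my-plan")
#     await poller.result()   # block until the long-running delete completes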
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.networ
|
Tethik/faktura
|
migrations/versions/74af9cceeeaf_.py
|
Python
|
mit
| 634
| 0.004732
|
"""empty message
Revision ID: 74af9cceeeaf
Revises: 6e7b88dc4544
Create Date: 2017-07-30 20:47:07.982489
"""
# revision identifiers, used by Alembic.
revision = '74af9cceeeaf'
down_revision = '6e7b88dc4544'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customer', sa.Column('vat_number', sa.String(length=100), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('customer', 'vat_number')
# ### end Alembic commands ###
|
Crobisaur/HyperSpec
|
Python/loadData2Hdf.py
|
Python
|
gpl-3.0
| 5,400
| 0.010741
|
__author__ = "Christo Robison"
import numpy as np
from scipy import signal
from scipy import misc
import h5py
from PIL import Image
import os
import collections
import matplotlib.pyplot as plt
import convertBsqMulti as bsq
import png
'''This program reads in BSQ datacubes into an HDF file'''
def loadBSQ(path = '/home/crob/HyperSpec_Data/WBC v ALL/WBC25', debug=False):
d31 = []
d31_norm = []
d25 = []
d25_norm = []
l25 = []
l = []
l3 = []
lam = []
for root, dirs, files in os.walk(path):
print(dirs)
for name in sorted(files): #os walk iterates arbitrarily, sort fixes it
print(name)
if name.endswith(".png"):
# Import label image
im = np.array(Image.open(os.path.join(root,name)),'f')
print np.shape(im)
im = im[:,:,0:3] # > 250
# generate a mask for 3x3 conv layer (probably not needed)
#conv3bw = signal.convolve2d(bw, np.ones([22,22],dtype=np.int), mode='valid') >= 464
print(np.shape(im))
#p = open(name+'_22sqMask.png','wb')
#w = png.Writer(255)
#bw = np.flipud(bw)
im = np.flipud(im)
#l3.append(np.reshape(conv3bw, ))
#l.append(np.reshape(bw, 138659))
l.append(im)
print(np.shape(im))
print("Name = " + name)
if name.endswith(".bsq"):
bs = bsq.readbsq(os.path.join(root,name))
print(np.shape(bs[0]))
print(len(bs[1]))
#separate bsq files by prism
if len(bs[1]) == 31:
print('BSQ is size 31')
print(len(bs[1]))
lam = bs[1]
#d31.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 4298429))
d31.append(bs[0].astype(np.float32))
d31_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
if len(bs[1]) == 25:
print('BSQ is size 25')
print(len(bs[1]))
lam = bs[1]
d25.append(bs[0].astype(np.float32))
d25_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
#d25.append(np.reshape(bs[0],[138659,25]).astype(np.float32))
# old don't use #d25.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 3466475))
out = collections.namedtuple('examples', ['data31', 'data31_norm', 'data25', 'data25_norm', 'labels', 'lambdas'])
o = out(data31=np.dstack(d31),data31_norm=np.dstack(d31_norm), data25=d25, data25_norm=d25_norm, labels=np.dstack(l), lambdas=lam) #np.vstack(d25), labels=np.hstack(l)
return o
def convLabels(labelImg, numBands):
'''
takes a MxNx3 numpy array and creates binary labels based on predefined classes
background = 0
red = 1 WBC
green = 2 RBC
pink = 3 nuclear material
yellow = 4 ignore
'''
#b = np.uint8(numBands / 31)
# print(b / 31)
tempRed = labelImg[:,:,0] == 255
tempGreen = labelImg[:,:,1] == 255
tempBlue = labelImg[:,:,2] == 255
tempYellow = np.logical_and(tempRed, tempGreen)
tempPink = np.logical_and(tempRed, tempBlue)
temp = np.zeros(np.shape(tempRed))
temp[tempRed] = 1
temp[tempGreen] = 2
temp[tempPink] = 3
temp[tempYellow] = 4
print(temp)
print(tempRed, tempGreen, tempBlue, tempYellow, tempPink)
return temp
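# Quick illustration (not part of the original file): a pure red pixel maps to
# class 1 (WBC) and a red+blue (pink) pixel to class 3 (nuclear material):
#
#     demo = np.zeros((1, 2, 3))
#     demo[0, 0] = [255, 0, 0]       # red  -> 1
#     demo[0, 1] = [255, 0, 255]     # pink -> 3
#     convLabels(demo, None)         # -> array([[1., 3.]])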
def convert_labels(labels,n_classes, debug = False):
for j in range(n_classes):
temp = labels == j
temp = temp.astype(int)
if j > 0:
conv_labels = np.append(conv_labels, temp)
print(temp[:])
else:
conv_labels = temp
print(np.shape(conv_labels))
conv_labels = np.reshape(conv_labels, [len(labels), n_classes], order='F')
if debug: print(np.shape(conv_labels))
if debug:
f = h5py.File("/home/crob/HyperSpec/Python/BSQ_whole.h5", "w")
f.create_dataset('bin_labels', data=conv_labels)
f.close()
return conv_labels
def getClassMean(data, classNum):
kee = np.equal(data['label'],classNum)
out = np.mean(data['data']*kee,axis=0)
return out
def getAverages(data, numClasses):
out = []
for i in range(numClasses):
a = getClassMean(data, i)
out.append(a)
return out
if __name__ == '__main__':
#A = loadBSQ()
path = '/home/crob/-_PreSortedData_Train_-' #oldpath=/HyperSpec_Data/WBC v ALL/WBC25
s = loadBSQ(path)
print(np.shape(s.data25))
f = h5py.File("HYPER_SPEC_TRAIN_RED.h5", "w")
f.create_dataset('data', data=s.data31, chunks=(443, 313, 1))
f.create_dataset('norm_data', data=s.data31_norm, chunks=(443,313,1))
f.create_dataset('labels', data=s.labels)
f.create_dataset('bands', data=s.lambdas)
g = np.shape(s.data31)
b = np.uint16(g[2] / 31) #issue with overflow if more than 256 samples. derp.
lab = np.reshape(s.labels, [443, 313, 3, b], 'f')
numExamples = np.shape(lab)
a = []
for j in range(np.uint16(numExamples[3])):
a.append(convLabels(lab[:, :, :, j], None))
f.create_dataset('classLabels', data=np.dstack(a))
#p = convert_labels(s.labels,2)
#f.create_dataset('bin_labels', data=p)
f.close()
|
adelinastanciu/game-of-life
|
game.py
|
Python
|
gpl-2.0
| 5,982
| 0.010532
|
import pygame
import sys
WINDOW_TITLE = "Game Of Life"
# Define some colors
BLACK = ( 0, 0, 0)
PURPLE = ( 22, 20, 48)
WHITE = (255, 255, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
BLUE = ( 67, 66, 88)
# This sets the width and height of each grid location
width = 20
height = 20
# This sets the margin between each cell
margin = 5
NR_ROWS = 20
NR_COLS = 20
SCREEN_WIDTH = 255
SCREEN_HEIGHT = 255
def add_padding(nr_rows, nr_cols, grid):
new_grid=create_grid(nr_rows+2, nr_cols+2)
for row in range(nr_rows):
for column in range(nr_cols):
new_grid[row][column]=grid[row][column]
return new_grid
def get_number_neighbours_cell(nr_rows, nr_cols, grid, row, column):
nr_neighbours = 0
if (grid[row][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row-1][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row-1][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if(grid[row-1][column] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column] != 0):
nr_neighbours = nr_neighbours + 1
return nr_neighbours
def next_generation_value(nr_rows, nr_cols, grid, row, column):
nr_neighbours = get_number_neighbours_cell(nr_rows, nr_cols, grid, row, column)
if (nr_neighbours < 2):
return 0
if (grid[row][column] == 1 and (nr_neighbours == 2 or nr_neighbours == 3)):
return 1
if (grid[row][column] == 0 and (nr_neighbours == 3)):
return 1
if (nr_neighbours > 3):
return 0
return 0
def next_generation(nr_rows, nr_cols, grid):
next_grid = create_grid(nr_rows, nr_cols)
for row in range(nr_rows):
for column in range(nr_cols):
value = next_generation_value(nr_rows, nr_cols, grid, row, column)
next_grid[row][column] = value
return next_grid
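# Worked example (not part of the original file): a vertical three-cell
# "blinker" becomes horizontal after one generation:
#
#     g = create_grid(5, 5)
#     g[1][2] = g[2][2] = g[3][2] = 1
#     nxt = next_generation(5, 5, add_padding(5, 5, g))
#     # nxt[2][1] == nxt[2][2] == nxt[2][3] == 1, the cells above and below die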
def reset(nr_rows, nr_cols, grid):
for row in range(nr_rows):
for column in range(nr_cols):
grid[row][column] = 0
return grid
def select_cell():
# User clicks the mouse. Get the position
pos = pygame.mouse.get_pos()
# Change the x/y screen coordinates to grid coordinates
column = pos[0] // (width + margin)
row = pos[1] // (height + margin)
# Set that location to zero
grid[row][column] = 1
print("Click ", pos, "Grid coordinates: ", row, column)
return grid
def random_configuration():
pass
def process_events(nr_rows, nr_cols, grid, done):
next_grid = None
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
elif event.type == pygame.MOUSEBUTTONDOWN:
grid = select_cell()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
print("Reset")
grid = reset(nr_rows, nr_cols, grid)
elif event.key == pygame.K_n:
print "Next generation"
grid = add_padding(nr_rows, nr_cols, grid)
next_grid = next_generation(nr_rows, nr_cols, grid)
elif event.key == pygame.K_c:
print "Random configuration"
random_configuration()
elif event.key == pygame.K_ESCAPE:
print "Exit"
sys.exit(0)
return (grid, next_grid, done)
def draw_grid(nr_rows, nr_cols, grid, screen, width, height, margin):
# Draw the grid
for row in range(nr_rows):
for column in range(nr_cols):
color = BLACK
if grid[row][column] == 1:
color = BLUE
pygame.draw.rect(screen,
color,
[(margin+width)*column+margin,
(margin+height)*row+margin,
width,
height])
def create_grid(nr_rows, nr_cols):
# Create a 2 dimensional array. A two dimensional
# array is simply a list of lists.
grid = []
for row in range(nr_rows):
# Add an empty array that will hold each cell in this row
grid.append([])
for col in range(nr_cols):
grid[row].append(0) # Append a cell
return grid
if __name__ == '__main__':
grid = create_grid(NR_ROWS, NR_COLS)
# Initialize pygame
pygame.init()
# Set the height and width of the screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
# Set title of screen
pygame.display.set_caption(WINDOW_TITLE)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while done == False:
(grid, next_grid, done) = process_events(NR_ROWS, NR_COLS, grid, done)
# Set the screen background
screen.fill(PURPLE)
if next_grid is not None:
grid = next_grid
draw_grid(NR_ROWS, NR_COLS, grid, screen, width, height, margin)
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang' on exit.
pygame.quit()
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/application_gateway_probe.py
|
Python
|
mit
| 3,438
| 0.000582
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: Protocol. Possible values are: 'Http' and 'Https'.
Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2016_12_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
:param timeout: the probe timeout in seconds. Probe marked as failed if
valid response is not received with this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
UnhealthyThreshold. Acceptable values are from 1 second to 20.
:type unhealthy_threshold: int
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayProbe, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.host = kwargs.get('host', None)
self.path = kwargs.get('path', None)
self.interval = kwargs.get('interval', None)
self.timeout = kwargs.get('timeout', None)
self.unhealthy_threshold = kwargs.get('unhealthy_threshold', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
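    # Example construction (illustrative only; the values are assumptions):
    #
    #     probe = ApplicationGatewayProbe(
    #         name='health-probe', protocol='Http', host='10.0.0.4', path='/healthz',
    #         interval=30, timeout=30, unhealthy_threshold=3)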
|
FBTUG/DevZone
|
ai/demoCamera/plant_detection/DB.py
|
Python
|
mit
| 16,293
| 0
|
#!/usr/bin/env python
"""DB for Plant Detection.
For Plant Detection.
"""
import os
import json
import base64
import requests
import numpy as np
from plant_detection import CeleryPy
from plant_detection import ENV
class DB(object):
"""Known and detected plant data for Plant Detection."""
def __init__(self):
"""Set initial attributes."""
self.plants = {'known': [], 'save': [],
'remove': [], 'safe_remove': []}
self.object_count = None
self.pixel_locations = []
self.coordinate_locations = []
self.calibration_pixel_locations = []
self.dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
self.plants_file = "plant-detection_plants.json"
self.tmp_dir = None
self.weeder_destrut_r = 50
self.test_coordinates = [600, 400, 0]
self.coordinates = None
self.app = False
self.errors = {}
@staticmethod
def _api_info(api):
"""API requests setup."""
api_info = {}
if api == 'app':
try:
api_info['token'] = os.environ['API_TOKEN']
except KeyError:
api_info['token'] = 'x.{}.x'.format(
'eyJpc3MiOiAiLy9zdGFnaW5nLmZhcm1ib3QuaW86NDQzIn0')
try:
encoded_payload = api_info['token'].split('.')[1]
encoded_payload += '=' * (4 - len(encoded_payload) % 4)
json_payload = base64.b64decode(
encoded_payload).decode('utf-8')
server = json.loads(json_payload)['iss']
except: # noqa pylint:disable=W0702
server = '//my.farmbot.io:443'
api_info['url'] = 'http{}:{}/api/'.format(
's' if ':443' in server else '', server)
elif api == 'farmware':
try:
api_info['token'] = os.environ['FARMWARE_TOKEN']
except KeyError:
api_info['token'] = 'NA'
try:
os.environ['FARMWARE_URL']
except KeyError:
api_info['url'] = 'NA'
else:
api_info['url'] = CeleryPy.farmware_api_url()
api_info['headers'] = {
'Authorization': 'Bearer {}'.format(api_info['token']),
'content-type': "application/json"}
return api_info
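    # Illustration (not part of the original file): the middle segment of the API
    # token is a base64-encoded JSON payload; _api_info() decodes it and reads the
    # 'iss' field to pick the server, roughly:
    #
    #     json.loads(base64.b64decode(encoded_payload))['iss']  # e.g. '//my.farmbot.io:443'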
def api_get(self, endpoint):
"""GET from an API endpoint."""
api = self._api_info('app')
response = requests.get(api['url'] + endpoint, headers=api['headers'])
self.api_response_error_collector(response)
self.api_response_error_printer()
return response
def api_response_error_collector(self, response):
"""Catch and log errors from API requests."""
self.errors = {} # reset
if response.status_code != 200:
try:
self.errors[str(response.status_code)] += 1
except KeyError:
self.errors[str(response.status_code)] = 1
def api_response_error_printer(self):
"""Print API response error output."""
error_string = ''
for key, value in self.errors.items():
error_string += '{} {} errors '.format(value, key)
print(error_string)
def _download_image_from_url(self, img_filename, url):
response = requests.get(url, stream=True)
self.api_response_error_collector(response)
self.api_response_error_printer()
if response.status_code == 200:
with open(img_filename, 'wb') as img_file:
for chunk in response:
img_file.write(chunk)
def _get_bot_state(self):
api = self._api_info('farmware')
response = requests.get(api['url'] + 'bot/state',
headers=api['headers'])
self.api_response_error_collector(response)
self.api_response_error_printer()
if response.status_code == 200:
return response.json()
def get_image(self, image_id):
"""Download an image from the FarmBot Web App API."""
response = self.api_get('images/' + str(image_id))
if response.status_code == 200:
image_json = response.json()
image_url = image_json['attachment_url']
try:
testfilename = self.dir + 'test_write.try_to_write'
testfile = open(testfilename, "w")
testfile.close()
os.remove(testfilename)
except IOError:
directory = '/tmp/'
else:
directory = self.dir
image_filename = directory + str(image_id) + '.jpg'
self._download_image_from_url(image_filename, image_url)
self.coordinates = list([int(image_json['meta']['x']),
int(image_json['meta']['y']),
int(image_json['meta']['z'])])
return image_filename
else:
return None
def _get_raw_coordinate_values(self, redis=None):
temp = []
legacy = int(os.getenv('FARMBOT_OS_VERSION', '0.0.0')[0]) < 6
if legacy:
for axis in ['x', 'y', 'z']:
temp.append(ENV.redis_load('location_data.position.' + axis,
other_redis=redis))
else:
state = self._get_bot_state()
for axis in ['x', 'y', 'z']:
try:
value = state['location_data']['position'][str(axis)]
except KeyError:
value = None
temp.append(value)
return temp
def getcoordinates(self, test_coordinates=False, redis=None):
"""Get machine coordinates from bot."""
location = None
raw_values = self._get_raw_coordinate_values(redis)
if all(axis_value is not None for axis_value in raw_values):
try:
location = [int(coordinate) for coordinate in raw_values]
except ValueError:
pass
if test_coordinates:
self.coordinates = self.test_coordinates # testing coordinates
elif location is None and not self.app:
self.coordinates = self.test_coordinates # testing coordinates
else:
self.coordinates = location # current bot coordinates
def save_plants(self):
"""Save plant detection plants to file.
'known', 'remove', 'safe_remove', and 'save'
"""
if self.tmp_dir is None:
json_dir = self.dir
else:
json_dir = self.tmp_dir
try:
with open(json_dir + self.plants_file, 'w') as plant_file:
json.dump(self.plants, plant_file)
except IOError:
self.tmp_dir = "/tmp/"
self.save_plants()
def load_plants_from_file(self):
"""Load plants from file."""
try:
with open(self.dir + self.plants_file, 'r') as plant_file:
self.plants = json.load(plant_file)
except IOError:
pass
def load_plants_from_web_app(self):
"""Download known plants from the FarmBot Web App API."""
response = self.api_get('points')
app_points = response.json()
if response.status_code == 200:
plants = []
for point in app_points:
if point['pointer_type'] == 'Plant':
plants.append({
'x': point['x'],
'y': point['y'],
'radius': point['radius']})
self.plants['known'] = plants
def identify_plant(self, plant_x, plant_y, known):
"""Identify a provided plant based on its location.
Args:
known: [x, y, r] array of known plants
plant_x, plant_y: x and y coordinates of plant to identify
Coordinate is:
within a known plant area: a plant to 'save' (it's the known plant)
within a known plant safe zone: a 'safe_remove' weed
outside a known plant area or safe zone
|
cmulliss/turtles-doing-things
|
stars_etc/turtleHouse.py
|
Python
|
cc0-1.0
| 351
| 0.025641
|
from turtle import *
mode('logo')
shape('turtle')
speed(5)
color('red', 'blue')
#draws rectangle
for i in range (4):
fd(200)
rt(90)
#draws roof
penup()
goto(0,200)
pendown()
rt(60)
goto(100,300)
rt(60)
goto(200,200)
penup()
#resizes turtle and changes his fill colour
shapesize(5,5)
color('red', 'orange')
goto(100,100)
rt(240)
width(200)
|
blazbratanic/protobuf
|
python/google/protobuf/internal/reflection_test.py
|
Python
|
bsd-3-clause
| 119,310
| 0.002984
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import copy
import gc
import operator
import struct
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
"""Decodes a stream of values from a string.
Once upon a time we actually had a class called decoder.Decoder. Then we
got rid of it during a redesign that made decoding much, much faster overall.
But a couple tests in this file used it to check that the serialized form of
a message was correct. So, this class implements just the methods that were
used by said tests, so that we don't have to rewrite the tests.
"""
def __init__(self, bytes):
self._bytes = bytes
self._pos = 0
def ReadVarint(self):
result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
return result
ReadInt32 = ReadVarint
ReadInt64 = ReadVarint
ReadUInt32 = ReadVarint
ReadUInt64 = ReadVarint
def ReadSInt64(self):
return wire_format.ZigZagDecode(self.ReadVarint())
ReadSInt32 = ReadSInt64
def ReadFieldNumberAndWireType(self):
return wire_format.UnpackTag(self.ReadVarint())
def ReadFloat(self):
result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
self._pos += 4
return result
def ReadDouble(self):
result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
self._pos += 8
return result
def EndOfStream(self):
return self._pos == len(self._bytes)
class ReflectionTest(basetest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRai
|
Monstrofil/wowp_free_camera
|
install/scripts/client/WeatherManager.py
|
Python
|
apache-2.0
| 1,201
| 0.004163
|
#Embedded file name: scripts/client/WeatherManager.py
import BigWorld
import db.DBLogic
from debug_utils import *
def InitWeather():
arenaData = db.DBLogic.g_instance.getArenaData(BigWorld.player().arenaType)
LOG_DEBUG("WeatherManager:InitWeather() '%s': %s, %s" % (arenaData.geometry, arenaData.weatherWindSpeed, arenaData.weatherWindGustiness))
try:
BigWorld.weather().windAverage(arenaData.weatherWindSpeed[0], arenaData.weatherWindSpeed[1])
BigWorld.weather().windGustiness(arenaData.weatherWindGustiness)
except ValueError:
pass
except EnvironmentError:
pass
def load_mods():
import ResMgr, os, glob
print 'Mod loader, Monstrofil'
res = ResMgr.openSection('../paths.xml')
sb = res['Paths']
vals = sb.values()[0:2]
for vl in vals:
mp = vl.asString + '/scripts/client/mods/*.pyc'
for fp in glob.iglob(mp):
_, hn = os.path.split(fp)
zn, _ = hn.split('.')
if zn != '__init__':
print 'executing: ' + zn
try:
exec 'import mods.' + zn
except Exception as err:
print err
load_mods()
|
pbmanis/acq4
|
acq4/pyqtgraph/tests/test_ref_cycles.py
|
Python
|
mit
| 2,523
| 0.010305
|
"""
Test for unwanted reference cycles
"""
import pyqtgraph as pg
import numpy as np
import gc, weakref
import six
import pytest
app = pg.mkQApp()
skipreason = ('unclear why test is failing on python 3. skipping until someone '
'has time to fix it. Or pyside is being used. This test is '
'failing on pyside for an unknown reason too.')
def assert_alldead(refs):
for ref in refs:
assert ref() is None
def qObjectTree(root):
"""Return root and its entire tree of qobject children"""
childs = [root]
for ch in pg.QtCore.QObject.children(root):
childs += qObjectTree(ch)
return childs
def mkrefs(*objs):
"""Return a list of weakrefs to each object in *objs.
QObject instances are expanded to include all child objects.
"""
allObjs = {}
for obj in objs:
if isinstance(obj, pg.QtCore.QObject):
obj = qObjectTree(obj)
else:
obj = [obj]
for o in obj:
allObjs[id(o)] = o
return map(weakref.ref, allObjs.values())
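# Illustrative sketch (not part of the test module): because mkrefs() expands
# QObject trees, a single widget argument yields weakrefs to the widget and all
# of its child QObjects; once the objects go out of scope and gc runs, every
# ref() should return None:
#
#     refs = mkrefs(pg.PlotWidget())
#     gc.collect()
#     assert_alldead(refs)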
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_PlotWidget():
def mkobjs(*args, **kwds):
w = pg.PlotWidget(*args, **kwds)
data = pg.np.array([1,5,2,4,3])
c = w.plot(data, name='stuff')
w.addLegend()
# test that connections do not keep objects alive
w.plotItem.vb.sigRangeChanged.connect(mkrefs)
app.focusChanged.connect(w.plotItem.vb.invertY)
# return weakrefs to a bunch of objects that should die when the scope exits.
return mkrefs(w, c, data, w.plotItem, w.plotItem.vb, w.plotItem.getMenu(), w.plotItem.getAxis('left'))
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_ImageView():
def mkobjs():
iv = pg.ImageView()
data = np.zeros((10,10,5))
iv.setImage(data)
return mkrefs(iv, iv.imageItem, iv.view, iv.ui.histogram, data)
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_GraphicsWindow():
def mkobjs():
w = pg.GraphicsWindow()
p1 = w.addPlot()
v1 = w.addViewBox()
return mkrefs(w, p1, v1)
for i in range(5):
assert_alldead(mkobjs())
if __name__ == '__main__':
ot = test_PlotItem()
|
cedi4155476/musicmanager
|
music_manager/load.py
|
Python
|
mit
| 547
| 0.003656
|
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Ui_load import Ui_Load
class Loading(QDialog, Ui_Load):
"""
little loading screen
"""
def __init__(self, maximum, parent=None):
"""
Constructor
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.progressBar.setMaximum(maximum)
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
def set_loading(self, progress):
self.progressBar.setValue(progress)
|
koepked/onramp
|
modules/hpl/bin/onramp_run.py
|
Python
|
bsd-3-clause
| 653
| 0.003063
|
#!/usr/bin/env python
#
# Curriculum Module Run Script
# - Run once per run of the module by a user
# - Run inside job submission. So in an allocation.
# - onramp_run_params.cfg file is available in current working directory
#
import os
import sys
from subprocess import call
from configobj import ConfigObj
#
# Read the configobj values
#
# This will always be the name of the file, so fine to hardcode here
conf_file = "onramp_runparams.cfg"
# Already validated the file in our onramp_preprocess.py script - no need to do it again
config = ConfigObj(conf_file)
#
# Run my program
#
os.chdir('src')
#
# TODO
#
# Exit 0 if all is ok
sys.exit(0)
|
tensorflow/model-optimization
|
tensorflow_model_optimization/python/examples/clustering/keras/imdb/imdb_rnn.py
|
Python
|
apache-2.0
| 2,554
| 0.000392
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a SimpleRNN on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF+LogReg.
"""
from __future__ import print_function
import tensorflow.keras as keras
import tensorflow.keras.preprocessing.sequence as sequence
from tensorflow_model_optimization.python.core.clustering.keras import cluster
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
max_features = 20000
maxlen = 100 # cut texts after this number of words
batch_size = 32
print("Loading data...")
(x_train,
y_train), (x_test,
y_test) = keras.datasets.imdb.load_data(num_words=max_features)
print(len(x_train), "train sequences")
print(len(x_test), "test sequences")
print("Pad sequences (samples x time)")
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)
print("Build model...")
model = keras.models.Sequential()
model.add(keras.layers.Embedding(max_features, 128, input_length=maxlen))
model.add(keras.layers.SimpleRNN(128))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("sigmoid"))
model = cluster.cluster_weights(
model,
number_of_clusters=16,
cluster_centroids_init=cluster_config.CentroidInitialization.KMEANS_PLUS_PLUS,
)
model.compile(loss="binar
|
y_crossentropy",
optimizer="adam",
metrics=["accuracy"])
print("Train...")
model.fit(x_train, y_train, batch_size=batch_size, epochs=3,
validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size)
print("Test score:", score)
print("Test accuracy:", acc)
|
smkr/pyclipse
|
plugins/org.python.pydev.jython/jysrc/assist_regex_based_proposal.py
|
Python
|
epl-1.0
| 8,719
| 0.007455
|
"""Quick Assistant: Regex based proposals.
This module combines AssistProposal, regexes and string formatting to
provide a way of swiftly coding your own custom Quick Assistant proposals.
These proposals are ready for instantiation and registering with
assist_proposal.register_proposal(): AssignToAttributeOfSelf,
AssignEmptyDictToVarIfNone, AssignEmptyDictToVarIfNone and
AssignAttributeOfSelfToVarIfNone. Using these as examples it should be
straightforward to code your own regex driven Quick Assistant proposals.
"""
__author__ = """Joel Hedlund <joel.hedlund at gmail.com>"""
__version__ = "1.0.0"
__copyright__ = '''Available under the same conditions as PyDev.
See PyDev license for details.
http://pydev.sourceforge.net
'''
import re
from org.python.pydev.core.docutils import PySelection #@UnresolvedImport
from org.python.pydev.editor.actions import PyAction #@UnresolvedImport
import assist_proposal
# For older python versions.
True, False = 1,0
class RegexBasedAssistProposal(assist_proposal.AssistProposal):
"""Base class for regex driven Quick Assist proposals.
More docs available in base class source.
New class data members
======================
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$'): <regex>
Must .match() current line for .isValid() to return true. Any named
groups will be available in self.vars.
template = "%(initial)sprint 'Hello World!'": <str>
This will replace what's currently on the line on .apply(). May use
string formatters with names from self.vars.
base_vars = {}: <dict <str>:<str>>
Used to initiallize self.vars.
New instance data members
=========================
vars = <dict <str>:<str>>
Variables used with self.template to produce the code that replaces
the current line. This wil
|
l contain values from self.base_vars, all
named groups in self.regex, as well with these two additional ones:
'indent': the static indentation string
'newline': the line delimiter string
selection, current_line, editor, offset:
Same as the corresponding args to .isValid().
"""
template = ""
base_vars = {}
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$')
def isValid(self, selection, current_line, editor, offset):
"""Is this proposal applicable to this line of code?
If current_line .match():es against self.regex then we will store
a lot of information on the match and environment, and return True.
Otherwise return False.
IN:
pyselection: <PySelection>
The current selection. Highly useful.
current_line: <str>
The text on the current line.
editor: <PyEdit>
The current editor.
offset: <int>
The current position in the editor.
OUT:
Boolean. Is the proposal applicable in the current situation?
"""
m = self.regex.match(current_line)
if not m:
return False
self.vars = {'indent': editor.getIndentPrefs().getIndentationString()}
self.vars.update(self.base_vars)
self.vars.update(m.groupdict())
self.selection = selection
self.current_line = current_line
self.editor = editor
self.offset = offset
return True
def apply(self, document):
"""Replace the current line with the populated template.
IN:
document: <IDocument>
The edited document.
OUT:
None.
"""
self.vars['newline'] = PyAction.getDelimiter(document)
sNewCode = self.template % self.vars
# Move to insert point:
iStartLineOffset = self.selection.getLineOffset()
iEndLineOffset = iStartLineOffset + len(self.current_line)
self.editor.setSelection(iEndLineOffset, 0)
self.selection = PySelection(self.editor)
# Replace the old code with the new assignment expression:
self.selection.replaceLineContentsToSelection(sNewCode)
#mark the value so that the user can change it
selection = PySelection(self.editor)
absoluteCursorOffset = selection.getAbsoluteCursorOffset()
val = self.vars['value']
self.editor.selectAndReveal(absoluteCursorOffset-len(val),len(val))
class AssignToAttributeOfSelf(RegexBasedAssistProposal):
"""Assign variable to attribute of self.
Effect
======
Generates code that assigns a variable to attribute of self with the
same name.
Valid when
==========
When the current line contains exactly one alphanumeric word. No check
is performed to see if the word is defined or valid in any other way.
Use case
========
It's often a good idea to use the same names in args, variables and
data members. This keeps the terminology consistent. This way
customer_id should always contain a customer id, and any other
variants are misspellings that probably will lead to bugs. This
proposal helps you do this by assigning variables to data members with
the same name.
"""
description = "Assign to attribute of self"
tag = "ASSIGN_VARIABLE_TO_ATTRIBUTE_OF_SELF"
regex = re.compile(r'^(?P<initial> {8}\s*)(?P<name>\w+)\s*$')
template = "%(initial)sself.%(name)s = %(name)s"
class AssignDefaultToVarIfNone(RegexBasedAssistProposal):
"""Assign default value to variable if None.
This is a base class intended for subclassing.
Effect
======
Generates code that tests if a variable is none, and if so, assigns a
default value to it.
Valid when
==========
When the current line contains exactly one alphanumeric word. No check
is performed to see if the word is defined or valid in any other way.
Use case
========
It's generally a bad idea to use mutable objects as default values to
methods and functions. The common way around it is to use None as the
default value, check the arg in the fuction body, and then assign
the desired mutable to it. This proposal does the check/assignment for
you. You only need to type the arg name where you want the check, and
then activate the Quick Assistant.
"""
description = "Assign default value to var if None"
tag = "ASSIGN_DEFAULT_VALUE_TO_VARIABLE_IF_NONE"
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$')
template = ("%(initial)sif %(name)s is None:%(newline)s"
"%(initial)s%(indent)s%(name)s = %(value)s")
base_vars = {'value': "[]"}
class AssignValueToVarIfNone(AssignDefaultToVarIfNone):
"""Assign value to variable if None."""
description = "Assign value to var if None"
tag = "ASSIGN_VALUE_TO_VARIABLE_IF_NONE"
class AssignEmptyListToVarIfNone(AssignDefaultToVarIfNone):
"""Assign empty list to variable if None."""
description = "Assign empty list to var if None"
tag = "ASSIGN_EMPTY_LIST_TO_VARIABLE_IF_NONE"
class AssignEmptyDictToVarIfNone(AssignEmptyListToVarIfNone):
"""Assign empty dictionary to variable if None."""
description = "Assign empty dict to var if None"
tag = "ASSIGN_EMPTY_DICT_TO_VARIABLE_IF_NONE"
base_vars = {'value': "dict()"}
class AssignAttributeOfSelfToVarIfNone(AssignDefaultToVarIfNone):
"""Assign an attribute of self with same name to variable if None.
Valid when
==========
When the current line contains exactly one alphanumeric word indented
by more than 8 spaces. This script does not check if the word is
defined or valid in any other way.
Use case
========
If a method does something using a data member, but just as well could do
the same thing using an argument, it's generally a good idea to let the
implementation reflect that. This makes the code more flexible. This is
usually done like so:
--------------------------
class MyClass:
def func(arg = None):
if arg is None:
arg = self.a
|
wikimedia/thumbor-exif-optimizer
|
wikimedia_thumbor_exif_optimizer/__init__.py
|
Python
|
mit
| 3,157
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def optimize(self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
if (self.tinyrgb_path):
output = subprocess.check_output([
self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
]
# Strip all EXIF fields except the ones we want to
# explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
output = open(output_file, 'w')
subprocess.call(command, stdout=output)
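# Roughly equivalent command line for the call above (illustrative; the field
# names depend on the EXIF_FIELDS_TO_KEEP setting, e.g. ['icc_profile', 'Orientation']):
#
#     exiftool input.jpg -all= -tagsFromFile @ -icc_profile -Orientation -m -o -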
|
nawarian/PHPBot
|
ext/pyautogui/bin/setup.py
|
Python
|
mit
| 320
| 0.003125
|
from distutils.core import setup
import py2exe
setup(
console=[
'../src/mouseup.py',
'../src/mousedown.py',
'../src/mouseclick.py',
'../src/mousemove.py',
'../src/keyboarddown.py',
'../src/keyboardkey.py',
'../src/keyboardtype.py',
'../src/keyboarddown.py',
'../src/keyboardup.py',
])
|
Stibbons/Squirrel
|
backend/squirrel/common/unittest.py
|
Python
|
gpl-3.0
| 4,675
| 0.002781
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import six
import sys
from functools import wraps
from twisted.internet import defer
from twisted.trial import unittest
log = logging.getLogger(__name__)
class TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
self.disableLogToStdout()
def assertNotEmpty(self, collec):
self.assertNotEqual(len(collec), 0, msg="Collection unexpectedly empty")
@defer.inlineCallbacks
def assertInlineCallbacksRaises(self, exceptionClass, deferred, *args, **kwargs):
yield self.assertFailure(deferred(*args, **kwargs), exceptionClass)
def assertLengthEquals(self, collection, length):
        self.assertEqual(len(collection), length, msg="Invalid length. Expected: {}. Got: {}"
.format(length, len(collection)))
def enableLogging(self, level=logging.DEBUG):
self.rootLogger = logging.getLogger()
self.oldLoggingLevel = self.rootLogger.getEffectiveLevel()
        self.rootLogger.setLevel(level)
self.streamHandler = logging.StreamHandler(sys.stdout)
self.streamHandler.setLevel(level)
# Complete format <date> - <module name> - <level> - <message>:
# '%(asctime)s - %(name)-40s - %(levelname)-7s - %(message)s'
formatter = logging.Formatter('%(name)-45s - %(levelname)-7s - %(message)s')
self.streamHandler.setFormatter(formatter)
self.rootLogger.addHandler(self.streamHandler)
# Simply write an empty string, in order to be sure the first line starts at the
# beginning of the line
sys.stdout.write("\n")
def disableLogging(self):
self.rootLogger.removeHandler(self.streamHandler)
self.rootLogger.setLevel(self.oldLoggingLevel)
@classmethod
def verboseLogging(cls, level=logging.DEBUG):
# Overwrite XTestCase.verboseLogging with deferred support
'''
        I enable full logging for the decorated function or method. This is extremely useful
        for enabling full logging for a single test case while debugging, without displaying
        it during normal execution.
Simply comment in or out the decorator in order to enable or disable the log display
Example:
.. code-block:: python
class TestClass(TxTestCase):
@TxTestCase.verboseLogging()
def testNormalTest(self):
...
@TxTestCase.verboseLogging()
@defer.inlineCallbacks
def testInlineCallbacksTest(self):
...
@TxTestCase.verboseLogging()
@patch("patch.this.object")
@defer.inlineCallbacks
def testWithPatchAndInlineCallbacksTest(self):
...
'''
def decorator(func):
@wraps(func)
def impl(*args, **kwargs):
# In order to reuse the enableLogging, we need a self to store some values in it,
# but we are in a classmethod (verboseLogging is a method decorator). I don't want
# to store the value in the class object, so I create a temporary object named self,
# used in order to execute the enableLogging method.
self = TestCase()
TestCase.enableLogging(self)
log.info("Log to stdout enabled")
try:
res = func(*args, **kwargs)
finally:
log.info("Log to stdout disabled")
TestCase.disableLogging(self)
return res
return impl
return decorator
def disableLogToStdout(self):
'''
I disable the output of the loggings to the console by filtering all logs out.
'''
root = logging.getLogger()
root.setLevel(logging.CRITICAL + 1)
#############################
# Python 3 compatibility
# See: http://pythonhosted.org/six/#unittest-assertions
    def assertCountEqual(self, actual, expected, msg=None):
        return six.assertCountEqual(self, actual, expected, msg=msg)
    def assertRaisesRegex(self, exception, regexp, callable, *args, **kwds):
        return six.assertRaisesRegex(self, exception, regexp, callable, *args, **kwds)
    def assertRegex(self, text, regex, msg=None):
        return six.assertRegex(self, text, regex, msg=msg)
#############################
|
bird-house/PyWPS
|
tests/processes/__init__.py
|
Python
|
mit
| 2,519
| 0.001191
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
from pywps import Process
from pywps.inout import LiteralInput, LiteralOutput
from pywps.inout.literaltypes import ValuesReference
class SimpleProcess(Process):
identifier = "simpleprocess"
def __init__(self):
self.add_input(LiteralInput())
class UltimateQuestion(Process):
def __init__(self):
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
title='Ultimate Question',
outputs=[LiteralOutput('outvalue', 'Output Value', data_type='string')])
@staticmethod
def _handler(request, response):
response.outputs['outvalue'].data = '42'
return response
class Greeter(Process):
def __init__(self):
super(Greeter, self).__init__(
self.greeter,
identifier='greeter',
title='Greeter',
inputs=[LiteralInput('name', 'Input name', data_type='string')],
outputs=[LiteralOutput('message', 'Output message', data_type='string')]
)
@staticmethod
def greeter(request, response):
name = request.inputs['name'][0].data
        assert isinstance(name, str)  # the 'name' input is declared with data_type='string'
response.outputs['message'].data = "Hello {}!".format(name)
return response
class InOut(Process):
def __init__(self):
super(InOut, self).__init__(
self.inout,
identifier='inout',
            title='In and Out',
inputs=[
LiteralInput('string', 'String', data_type='string'),
LiteralInput('time', 'Time', data_type='time',
default='12:00:00'),
LiteralInput('ref_value', 'Referenced Value', data_type='string',
allowed_values=ValuesReference(reference="https://en.wikipedia.org/w/api.php?action=opensearch&search=scotland&format=json"), # noqa
default='Scotland',),
],
outputs=[
LiteralOutput('string', 'Output', data_type='string')
]
)
@staticmethod
def inout(request, response):
a_string = request.inputs['string'][0].data
        response.outputs['string'].data = "{}".format(a_string)
return response
|
googleapis/python-pubsub
|
google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
|
Python
|
apache-2.0
| 9,379
| 0.001279
|
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import logging
import random
import threading
import time
import typing
from typing import Dict, Iterable, Optional, Union
try:
from collections.abc import KeysView
KeysView[None] # KeysView is only subscriptable in Python 3.9+
except TypeError:
# Deprecated since Python 3.9, thus only use as a fallback in older Python versions
from typing import KeysView
from google.cloud.pubsub_v1.subscriber._protocol import requests
if typing.TYPE_CHECKING: # pragma: NO COVER
from google.cloud.pubsub_v1.subscriber._protocol.streaming_pull_manager import (
StreamingPullManager,
)
_LOGGER = logging.getLogger(__name__)
_LEASE_WORKER_NAME = "Thread-LeaseMaintainer"
class _LeasedMessage(typing.NamedTuple):
sent_time: float
"""The local time when ACK ID was initially leased in seconds since the epoch."""
size: int
ordering_key: Optional[str]
class Leaser(object):
def __init__(self, manager: "StreamingPullManager"):
self._thread: Optional[threading.Thread] = None
self._manager = manager
# a lock used for start/stop operations, protecting the _thread attribute
self._operational_lock = threading.Lock()
# A lock ensuring that add/remove operations are atomic and cannot be
# intertwined. Protects the _leased_messages and _bytes attributes.
self._add_remove_lock = threading.Lock()
# Dict of ack_id -> _LeasedMessage
self._leased_messages: Dict[str, _LeasedMessage] = {}
self._bytes = 0
"""The total number of bytes consumed by leased messages."""
self._stop_event = threading.Event()
@property
def message_count(self) -> int:
"""The number of leased messages."""
return len(self._leased_messages)
@property
def ack_ids(self) -> KeysView[str]:
"""The ack IDs of all leased messages."""
return self._leased_messages.keys()
@property
def bytes(self) -> int:
"""The total size, in bytes, of all le
|
ased messages."""
return self._bytes
def add(self, items: Iterable[requests.LeaseRequest]) -> None:
"""Add messages to be managed by the leaser."""
with self._add_remove_lock:
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
                if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
sent_time=float("inf"),
size=item.byte_size,
ordering_key=item.ordering_key,
)
self._bytes += item.byte_size
else:
_LOGGER.debug("Message %s is already lease managed", item.ack_id)
def start_lease_expiry_timer(self, ack_ids: Iterable[str]) -> None:
"""Start the lease expiry timer for `items`.
Args:
items: Sequence of ack-ids for which to start lease expiry timers.
"""
with self._add_remove_lock:
for ack_id in ack_ids:
lease_info = self._leased_messages.get(ack_id)
# Lease info might not exist for this ack_id because it has already
# been removed by remove().
if lease_info:
self._leased_messages[ack_id] = lease_info._replace(
sent_time=time.time()
)
def remove(
self,
items: Iterable[
Union[requests.AckRequest, requests.DropRequest, requests.NackRequest]
],
) -> None:
"""Remove messages from lease management."""
with self._add_remove_lock:
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0
def maintain_leases(self) -> None:
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
# Also update the deadline currently used if enough new ACK data has been
# gathered since the last deadline update.
deadline = self._manager._obtain_ack_deadline(maybe_update=True)
_LOGGER.debug("The current deadline value is %d seconds.", deadline)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are beyond the max lease time. This ensures
# that in the event of a badly behaving actor, we can drop messages
# and allow the Pub/Sub server to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size, item.ordering_key)
for ack_id, item in leased_messages.items()
if item.sent_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
assert self._manager.dispatcher is not None
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a modack request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
assert self._manager.dispatcher is not None
ack_id_gen = (ack_id for ack_id in ack_ids)
self._manager._send_lease_modacks(ack_id_gen, deadline)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http
|
nawawi/wkhtmltopdf
|
webkit/Source/ThirdParty/gyp/pylib/gyp/MSVSUserFile.py
|
Python
|
lgpl-3.0
| 6,250
| 0.0056
|
#!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import common
import os
import re
import socket # for gethostname
import xml.dom
import xml_fix
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.doc = None
def Create(self, name):
"""Creates the
|
user file document.
Args:
name: Name of the user file.
"""
self.name = name
# Create XML doc
xml_impl = xml.dom.getDOMImplementation()
self.doc = xml_impl.createDocument(None, 'VisualStudioUserFile', None)
# Add attributes to root element
self.n_root = self.doc.documentElement
self.n_root.setAttribute('Version', self.version.ProjectVersion())
self.n_root.setAttribute('Name', self.name)
# Add configurations section
self.n_configs = self.doc.createElement('Configurations')
self.n_root.appendChild(self.n_configs)
def _AddConfigToNode(self, parent, config_type, config_name):
"""Adds a configuration to the parent node.
Args:
parent: Destination node.
config_type: Type of configuration node.
config_name: Configuration name.
"""
# Add configuration node and its attributes
n_config = self.doc.createElement(config_type)
n_config.setAttribute('Name', config_name)
parent.appendChild(n_config)
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self._AddConfigToNode(self.n_configs, 'Configuration', name)
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      environment: dictionary of environment variables to set. (optional)
      working_directory: working directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
n_cmd = self.doc.createElement('DebugSettings')
abs_command = _FindCommandInPath(command[0])
n_cmd.setAttribute('Command', abs_command)
n_cmd.setAttribute('WorkingDirectory', working_directory)
n_cmd.setAttribute('CommandArguments', " ".join(command[1:]))
n_cmd.setAttribute('RemoteMachine', socket.gethostname())
if environment and isinstance(environment, dict):
n_cmd.setAttribute('Environment',
" ".join(['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]))
else:
n_cmd.setAttribute('Environment', '')
n_cmd.setAttribute('EnvironmentMerge', 'true')
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
n_cmd.setAttribute('Attach', 'false')
n_cmd.setAttribute('DebuggerType', '3') # 'auto' debugger
n_cmd.setAttribute('Remote', '1')
n_cmd.setAttribute('RemoteCommand', '')
n_cmd.setAttribute('HttpUrl', '')
n_cmd.setAttribute('PDBPath', '')
n_cmd.setAttribute('SQLDebugging', '')
n_cmd.setAttribute('DebuggerFlavor', '0')
n_cmd.setAttribute('MPIRunCommand', '')
n_cmd.setAttribute('MPIRunArguments', '')
n_cmd.setAttribute('MPIRunWorkingDirectory', '')
n_cmd.setAttribute('ApplicationCommand', '')
n_cmd.setAttribute('ApplicationArguments', '')
n_cmd.setAttribute('ShimCommand', '')
n_cmd.setAttribute('MPIAcceptMode', '')
n_cmd.setAttribute('MPIAcceptFilter', '')
# Find the config, and add it if it doesn't exist.
found = False
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
found = True
if not found:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
config.appendChild(n_cmd)
break
def Write(self, writer=common.WriteOnDiff):
"""Writes the user file."""
f = writer(self.user_file_path)
self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n')
f.close()
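# Illustrative sketch (invented paths and values, not part of GYP): typical use of
# the Writer class defined above.
#   user_file = Writer('example.vcproj.user', version)
#   user_file.Create('example')
#   user_file.AddDebugSettings('Debug|Win32',
#                              ['python', 'run_tests.py', '--verbose'],
#                              environment={'PYTHONPATH': 'third_party'},
#                              working_directory='$(ProjectDir)')
#   user_file.Write()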
#------------------------------------------------------------------------------
|
google/jax
|
jax/experimental/jax2tf/examples/tf_js/quickdraw/quickdraw.py
|
Python
|
apache-2.0
| 6,160
| 0.011364
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app # type: ignore
from absl import flags
import os # type: ignore
import time
from typing import Callable
import flax # type: ignore
from flax import linen as nn
from flax.training import common_utils # type: ignore
import jax # type: ignore
from jax import lax
from jax import numpy as jnp
from jax.experimental.jax2tf.examples import saved_model_lib # type: ignore
import numpy as np # type: ignore
import tensorflow as tf # type: ignore
from tensorflowjs.converters import convert_tf_saved_model # type: ignore
from jax.config import config # type: ignore
config.config_with_absl()
import utils
flags.DEFINE_boolean("run_eval_on_train", False,
("Also run eval on the train set after each epoch. This "
"slows down training considerably."))
flags.DEFINE_integer("num_epochs", 5,
("Number of epochs to train for."))
flags.DEFINE_integer("num_classes", 100, "Number of classification classes.")
flags.register_validator("num_classes",
lambda value: value >= 1 and value <= 100,
message="--num_classes must be in range [1, 100]")
FLAGS = flags.FLAGS
# The code below is an adaptation for Flax from the work published here:
# https://blog.tensorflow.org/2018/07/train-model-in-tfkeras-with-colab-and-run-in-browser-tensorflowjs.html
class QuickDrawModule(nn.Module):
@nn.compact
def __call__(self, x):
x = nn.Conv(features=16, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=32, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=128)(x)
x = nn.relu(x)
x = nn.Dense(features=FLAGS.num_classes)(x)
x = nn.softmax(x)
return x
def predict(params, inputs):
"""A functional interface to the trained Module."""
return QuickDrawModule().apply({'params': params}, inputs)
def categorical_cross_entropy_loss(logits, labels):
onehot_labels = common_utils.onehot(labels, logits.shape[-1])
return jnp.mean(-jnp.sum(onehot_labels * jnp.log(logits), axis=1))
def update(optimizer, inputs, labels):
def loss_fn(params):
logits = predict(params, inputs)
return categorical_cross_entropy_loss(logits, labels)
grad = jax.grad(loss_fn)(optimizer.target)
optimizer = optimizer.apply_gradient(grad)
return optimizer
def accuracy(predict: Callable, params, dataset):
def top_k_classes(x, k):
bcast_idxs = jnp.broadcast_to(np.arange(x.shape[-1]), x.shape)
sorted_vals, sorted_idxs = lax.sort_key_val(x, bcast_idxs)
topk_idxs = (
lax.slice_in_dim(sorted_idxs, -k, sorted_idxs.shape[-1], axis=-1))
return topk_idxs
def _per_batch(inputs, labels):
logits = predict(params, inputs)
predicted_classes = top_k_classes(logits, 1)
predicted_classes = predicted_classes.reshape((predicted_classes.shape[0],))
return jnp.mean(predicted_classes == labels)
batched = [_per_batch(inputs, labels) for inputs, labels in dataset]
return jnp.mean(jnp.stack(batched))
def train_one_epoch(optimizer, train_ds):
for inputs, labels in train_ds:
optimizer = jax.jit(update)(optimizer, inputs, labels)
return optimizer
def init_model():
rng = jax.random.PRNGKey(0)
init_shape = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = QuickDrawModule().init(rng, init_shape)["params"]
optimizer = flax.optim.Adam(
learning_rate=0.001, beta1=0.9, beta2=0.999).create(initial_params)
return optimizer, initial_params
def train(train_ds, test_ds, classes):
optimizer, params = init_model()
for epoch in range(1, FLAGS.num_epochs+1):
start_time = time.time()
optimizer = train_one_epoch(optimizer, train_ds)
if FLAGS.run_eval_on_train:
train_acc = accuracy(predict, optimizer.target, train_ds)
print("Training set accuracy {}".format(train_acc))
test_acc = accuracy(predict, optimizer.target, test_ds)
print("Test set accuracy {}".format(test_acc))
epoch_time = time.time() - start_time
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
return optimizer.target
def main(*args):
base_model_path = "/tmp/jax2tf/tf_js_quickdraw"
dataset_path = os.path.join(base_model_path, "data")
num_classes = FLAGS.num_classes
classes = utils.download_dataset(dataset_path, num_classes)
assert len(classes) == num_classes, classes
print(f"Classes are: {classes}")
print("Loading dataset into memory...")
train_ds, test_ds = utils.load_classes(dataset_path, classes)
print(f"Starting training for {FLAGS.num_epochs} epochs...")
flax_params = train(train_ds, test_ds, classes)
model_dir = os.path.join(base_model_path, "saved_models")
  # the model must be converted with with_gradient set to True to be able to
# convert the saved model to TF.js, as "PreventGradient" is not supported
  saved_model_lib.convert_and_save_model(predict, flax_params, model_dir,
input_signatures=[tf.TensorSpec([1, 28, 28, 1])],
with_gradient=True, compile_model=False,
enable_xla=False)
conversion_dir = os.path.join(base_model_path, 'tfjs_models')
convert_tf_saved_model(model_dir, conversion_dir)
if __name__ == "__main__":
app.run(main)
|
cogu/autosar
|
autosar/signal.py
|
Python
|
mit
| 1,012
| 0.024704
|
from autosar.base import splitRef
from autosar.element import Element
import sys
class SystemSignal(Element):
def __init__(self,name,dataTypeRef,initValueRef,length,desc=None,parent=None):
super().__init__(name,parent)
self.dataTypeRef=dataTypeRef
self.initValueRef=initValueRef
self.length=length
self.desc=desc
self.parent=parent
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name,
'dataTypeRef': self.dataTypeRef,
'initValueRef': self.initValueRef,
'length': self.length
}
if self.desc is not None: data['desc']=self.desc
return data
class SystemSignalGroup(Element):
def __init__(self, name, systemSignalRefs=None,parent=None):
super().__init__(name,parent)
      if isinstance(systemSignalRefs,list):
self.systemSignalRefs=systemSignalRefs
else:
self.systemSignalRefs=[]
|
glenpp/OrviboS20
|
setup.py
|
Python
|
gpl-2.0
| 1,055
| 0.020853
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "S20Control",
version = "0.1",
author = "Glen Pitt-Pladdy / Guy Sheffer",
author_email = "glenpp@users.noreply.github.com",
description = ("Python management utility for Orvibo S20 WiFi Plug"),
license = "GNU",
keywords = "s20 orvibo orvibos20",
url = "https://github.com/glenpp/OrviboS20",
    packages=['S20control'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Beta",
"Topic :: Utilities",
"License :: GNU License",
],
entry_points = {
'console_scripts': [
'S20control = S20control.S20control:main',
],
},
)
|
silveregg/moto
|
moto/ecs/models.py
|
Python
|
apache-2.0
| 23,919
| 0.002843
|
from __future__ import unicode_literals
import uuid
from random import randint, random
from moto.core import BaseBackend
from moto.ec2 import ec2_backends
from copy import copy
class BaseObject(object):
def camelCase(self, key):
words = []
for i, word in enumerate(key.split('_')):
if i > 0:
words.append(word.title())
else:
words.append(word)
return ''.join(words)
def gen_response_object(self):
response_object = copy(self.__dict__)
for key, value in response_object.items():
if '_' in key:
response_object[self.camelCase(key)] = value
del response_object[key]
return response_object
@property
def response_object(self):
return self.gen_response_object()
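# Illustrative sketch (hypothetical resource, not part of moto): BaseObject renders
# snake_case attribute names as camelCase keys in the response dictionary.
class _ExampleResource(BaseObject):
    def __init__(self):
        self.pending_tasks_count = 0
# _ExampleResource().response_object == {'pendingTasksCount': 0}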
class Cluster(BaseObject):
def __init__(self, cluster_name):
self.active_services_count = 0
self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(cluster_name)
self.name = cluster_name
self.pending_tasks_count = 0
self.registered_container_instances_count = 0
self.running_tasks_count = 0
self.status = 'ACTIVE'
@property
def physical_resource_id(self):
return self.name
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['clusterArn'] = self.arn
response_object['clusterName'] = self.name
del response_object['arn'], response_object['name']
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random name if necessary
cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if original_resource.name != properties['ClusterName']:
ecs_backend = ecs_backends[region_name]
ecs_backend.delete_cluster(original_resource.arn)
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random name if necessary
cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
)
else:
# no-op when nothing changed between old and new resources
return original_resource
class TaskDefinition(BaseObject):
def __init__(self, family, revision, container_definitions, volumes=None):
self.family = family
self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision)
self.container_definitions = container_definitions
if volumes is None:
self.volumes = []
else:
            self.volumes = volumes
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['taskDefinitionArn'] = response_object['arn']
del response_object['arn']
return response_object
@classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
container_definitions = properties['ContainerDefinitions']
volumes = properties['Volumes']
ecs_backend = ecs_backends[region_name]
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
container_definitions = properties['ContainerDefinitions']
volumes = properties['Volumes']
if (original_resource.family != family or
original_resource.container_definitions != container_definitions or
original_resource.volumes != volumes
# currently TaskRoleArn isn't stored at TaskDefinition instances
):
ecs_backend = ecs_backends[region_name]
ecs_backend.deregister_task_definition(original_resource.arn)
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes)
else:
# no-op when nothing changed between old and new resources
return original_resource
class Task(BaseObject):
def __init__(self, cluster, task_definition, container_instance_arn, overrides={}, started_by=''):
self.cluster_arn = cluster.arn
self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(str(uuid.uuid1()))
self.container_instance_arn = container_instance_arn
self.last_status = 'RUNNING'
self.desired_status = 'RUNNING'
self.task_definition_arn = task_definition.arn
self.overrides = overrides
self.containers = []
self.started_by = started_by
self.stopped_reason = ''
@property
def response_object(self):
response_object = self.gen_response_object()
return response_object
class Service(BaseObject):
def __init__(self, cluster, service_name, task_definition, desired_count):
self.cluster_arn = cluster.arn
self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(service_name)
self.name = service_name
self.status = 'ACTIVE'
self.running_count = 0
self.task_definition = task_definition.arn
self.desired_count = desired_count
self.events = []
self.load_balancers = []
self.pending_count = 0
@property
def physical_resource_id(self):
return self.arn
@property
def response_object(self):
response_object = self.gen_response_object()
del response_object['name'], response_object['arn']
response_object['serviceName'] = self.name
response_object['serviceArn'] = self.arn
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster = properties['Cluster'].name
else:
cluster = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
service_name = '{0}Service{1}'.format(cluster, int(random() * 10 ** 6))
desired_count = properties['DesiredCount']
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, task_definition, desired_count)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster_name = properties['Cluster'].name
else:
cluster_name = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
desired_count = properties['D
|
letsencrypt/letsencrypt
|
certbot-dns-nsone/certbot_dns_nsone/__init__.py
|
Python
|
apache-2.0
| 3,339
| 0.000599
|
"""
The `~certbot_dns_nsone.dns_nsone` plugin automates the process of completing
a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and subsequently
removing, TXT records using the NS1 API.
.. note::
The plugin is not installed by default. It can be installed by heading to
`certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and
selecting the Wildcard tab.
Named Arguments
---------------
======================================== =====================================
``--dns-nsone-credentials``              NS1 credentials_ INI file.
(Required)
``--dns-nsone-propagation-seconds`` The number of seconds to wait for DNS
to propagate before asking the ACME
server to verify the DNS record.
(Default: 30)
======================================== =====================================
Credentials
-----------
Use of this plugin requires a configuration file containing NS1 API credentials,
obtained from your NS1
`account page <https://my.nsone.net/#/account/settings>`_.
.. code-block:: ini
:name: credentials.ini
:caption: Example credentials file:
# NS1 API credentials used by Certbot
dns_nsone_api_key = MDAwMDAwMDAwMDAwMDAw
The path to this file can be provided interactively or using the
``--dns-nsone-credentials`` command-line argument. Certbot records the path
to this file for use during renewal, but does not store the file's contents.
.. caution::
You should protect these API credentials as you would the password to your
NS1 account. Users who can read this file can use these credentials to issue
arbitrary API calls on your behalf. Users who can cause Certbot to run using
these credentials can complete a ``dns-01`` challenge to acquire new
certificates or revoke existing certificates for associated domains, even if
those domains aren't being managed by this server.
Certbot will emit a warning if it detects that the credentials file can be
accessed by other users on your system. The warning reads "Unsafe permissions
on credentials configuration file", followed by the path to the credentials
file. This warning will be emitted each time Certbot uses the credentials file,
including for renewal, and cannot be silenced except by addressing the issue
(e.g., by using a command like ``chmod 600`` to restrict access to the file).
Examples
--------
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
-d example.com
.. code-block:: bash
:caption: To acquire a single certificate for both ``example.com`` and
``www.example.com``
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
-d example.com \\
-d www.example.com
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``, waiting 60 seconds
for DNS propagation
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
--dns-nsone-propagation-seconds 60 \\
-d example.com
"""
|
unicefuganda/uSurvey
|
survey/forms/form_helper.py
|
Python
|
bsd-3-clause
| 1,258
| 0.00159
|
__author__ = 'anthony <>'
from collections import OrderedDict
from django import forms
class FormOrderMixin(object):
def order_fields(self, field_order):
"""
Rearranges the fields according to field_order.
        field_order is a list of field names specifying the order. Fields not
        included in the list are appended in the default order for backward
compatibility with subclasses not overriding field_order. If field_order
is None, all fields are kept in the order defined in the class.
Unknown fields in field_order are ignored to allow disabling fields in
form subclasses without redefining ordering.
"""
if field_order is None:
return
fields = OrderedDict()
for key in field_order:
try:
fields[key] = self.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(self.fields) # add remaining fields in original order
self.fields = fields
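# Illustrative sketch (hypothetical form, not part of uSurvey): a form opts in to
# reordering by mixing in FormOrderMixin and calling order_fields() after init.
class _ExampleContactForm(FormOrderMixin, forms.Form):
    email = forms.EmailField()
    name = forms.CharField()
    def __init__(self, *args, **kwargs):
        super(_ExampleContactForm, self).__init__(*args, **kwargs)
        # 'name' moves to the front, the unknown 'phone' entry is ignored, and
        # 'email' is appended afterwards in its originally declared order.
        self.order_fields(['name', 'phone'])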
def get_form_field_no_validation(fieldname):
class FieldNoValidation(fieldname):
def clean(self, value):
return value
return FieldNoValidation
class Icons(object):
icons = {}
|
schumilo/vUSBf
|
usbscapy.py
|
Python
|
gpl-2.0
| 17,727
| 0.011677
|
"""
vUSBf: A KVM/QEMU based USB-fuzzing framework.
Copyright (C) 2015 Sergej Schumilo, OpenSource Security Ralf Spenneberg
This file is part of vUSBf.
See the file LICENSE for copying permission.
"""
__author__ = 'Sergej Schumilo'
from scapy.all import *
#####################################
####### SCAPY EXTENSION STUFF #######
#####################################
# XLEShortField
class XLEShortField(LEShortField, XShortField):
def i2repr(self, pkt, x):
return XShortField.i2repr(self, pkt, x)
# XLEIntField
class XLEIntField(LEIntField, XIntField):
def i2repr(self, pkt, x):
return XIntField.i2repr(self, pkt, x)
####################################
####### REDIR SPECIFIC STUFF #######
####################################
usbredir_type_enum = { # CONTROL PACKETS
0: "hello",
1: "device_connect",
2: "device_disconnect",
3: "reset",
4: "interface_info",
5: "ep_info",
6: "set_configuration",
7: "get_configuration",
8: "configuration_status",
9: "set_alt_setting",
10: "get_alt_setting",
11: "alt_setting_status",
12: "start_iso_stream",
13: "stop_iso_stream",
14: "iso_stream_status",
15: "start_interrupt_receiving",
16: "stop_interrupt_receiving",
17: "interrupt_receiving_status",
18: "alloc_bulk_streams",
19: "free_bulk_streams",
20: "bulk_streams_status",
21: "cancel_data_packet",
22: "filter_reject",
23: "filter_filter",
24: "device_disconnect_ack", # DATA PACKETS
100: "data_control_packet",
101: "data_bulk_packet",
102: "data_iso_packet",
103: "data_interrupt_packet"}
# DO NOT FUZZ THE FOLLOWING REDIR-SPECIFIC PACKETS! FUZZING THEM WILL CAUSE A QEMU CRASH!
class usbredirheader(Packet):
name = "UsbredirPacket"
fields_desc = [LEIntEnumField("Htype", -1, usbredir_type_enum),
LEIntField("HLength", 0),
LEIntField("Hid", -1)]
# Redir Packet No. 0 (redir hello)
class hello_redir_header(Packet):
name = "Hello_Packet"
    fields_desc = [StrLenField("version", "", length_from=lambda pkt: 64),  # StrLenField("caps", "", length_from=4)]
LEIntField("capabilites", 1)]
class hello_redir_header_host(Packet):
name = "Hello_Packet_Host"
    fields_desc = [StrLenField("version", "", length_from=lambda pkt: 56)]
# Redir Packet No. 1 (redir connect)
class connect_redir_header(Packet):
name = "Connect_Packet"
fields_desc = [ByteField("speed", 0),
XByteField("device_class", 0),
XByteField("device_subclass", 0),
XByteField("device_protocol", 0),
XLEShortField("vendor_id", 0),
XLEShortField("product_id", 0),
XLEShortField("device_version_bcd", 0)]
# Redir Packet No. 4 (interface info) [SIZE 132 BYTES]
class if_info_redir_header(Packet):
name = "Interface Info Packet"
fields_desc = [LEIntField("interface_count", None),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_class", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_subclass", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_protocol", None, ByteField("Value", 0), length_from=lambda p: 32)]
# Redir Packet No. 5 (endpoint info) [SIZE 160 BYTES]
class ep_info_redir_header(Packet):
name = "Endpoint Info Packet"
fields_desc = [FieldListField("ep_type", None, ByteEnumField("type_value", 0, {0: "type_control",
1: "type_iso",
2: "type interrupt",
255: "type invalid", })
, length_from=lambda p: 32),
FieldListField("interval", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("max_packet_size", None, XLEShortField("Value", 0), length_from=lambda p: 32 * 2)]
# Redir Packet No. 100 (data control) [SIZE 10 BYTES]
class data_control_redir_header(Packet):
name = "Data_Control_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("request", 0),
ByteField("requesttype", 0),
ByteField("status", 0),
XLEShortField("value", 0),
LEShortField("index", 0),
LEShortField("length", 0)]
# Redir Packet No. 101 (data bulk) [SIZE 8 BYTES]
class data_bulk_redir_header(Packet):
name = "Data_Bulk_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", None),
LEIntField("stream_id", None),
LEShortField("length_high", None)]
# Redir Packet No. 102 (data iso) [SIZE 4 BYTES]
class data_iso_redir_header(Packet):
name = "Data_Iso_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
# Redir Packet No. 103 (data interrupt) [SIZE 4 BYTES]
class data_interrupt_redir_header(Packet):
name = "Data_Interrupt_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
redir_specific_type = [[0, hello_redir_header],
[1, connect_redir_header],
[100, data_control_redir_header],
[101, data_bulk_redir_header],
[102, data_iso_redir_header],
[103, data_interrupt_redir_header]]
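# Illustrative sketch (invented field values, not part of vUSBf): composing a redir
# control-data packet with scapy layering, e.g.
#     pkt = usbredirheader(Htype=100, HLength=10, Hid=1) / data_control_redir_header(endpoint=0x80)
#     raw = str(pkt)  # Python 2 scapy serializes the layered packet to a byte string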
##################################
####### USB SPECIFIC STUFF #######
####### ENUMERATION PHASE  #######
##################################
# USB Header (URB - replaced by usbredirheader)
class usb_header(Packet):
name = "USB_Packet"
fields_desc = [XLongField("id", 0xffff88003720d540),
ByteField("type", 43),
ByteField("transfer type", 2),
ByteField("endpoint", 80),
ByteField("device", 0),
LEShortField("bus_id", 0),
ByteField("device_setup_request", 0),
ByteField("data_present", 0),
LELongField("urb_sec
|
", 0),
LEIntField("urb_usec", 0),
LEIntField("urb_status", 0),
LEIntField("urb_length", 0),
LEIntField("data_length", 0
|
)]
# Generic USB Descriptor Header
class usb_generic_descriptor_header(Packet):
name = "USB_GENERIC_DESCRIPTOR_HEADER"
fields_desc = [ByteField("bLength", 0),
XByteField("bDescriptorType", 0x1)]
# USB Device Descriptor Packet (DescriptorType 0x01)
class usb_device_descriptor(Packet):
name = "USB_Device_Descriptor"
fields_desc = [ByteField("bLength", 18),
XByteField("bDescriptorType", 0x01),
XLEShortField("bcdUSB", 0x0),
XByteField("bDeviceClass", 0x1),
ByteField("bDeviceSubClass", 0),
ByteField("bDeviceProtocol", 0),
ByteField("bMaxPacketSize", 0),
XLEShort
|
jeremy-c/unusualbusiness
|
unusualbusiness/utils/models.py
|
Python
|
bsd-3-clause
| 7,002
| 0.003999
|
from django.db.models import Model, CharField, URLField
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import blocks
from wagtail.wagtailembeds.blocks import EmbedBlock
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.whitelist import attribute_rule, check_url, allow_without_attributes
class RenderInlineMixin(object):
def __init__(self):
pass
def render_inline(self):
template = get_template(self.ajax_template)
return template.render({
'self': self
})
class RelatedHowToMixin(object):
def __init__(self):
pass
def related_how_tos(self):
return [related_how_to_page.how_to_page
for related_how_to_page
in self.how_to_page.select_related().all()]
def related_how_to_theory_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_theory_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.theory_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_theory_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_theory_articles
def related_how_to_story_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_story_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.story_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_story_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_story_articles
def related_how_to_news_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_news_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.news_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_news_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_news_articles
def related_how_to_events(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_events = []
for related_how_to in related_how_tos:
how_to_events = related_how_to.event_page_list()
related_events = self.related_how_to_pages(how_to_events, self_idx)
related_how_to_events.append(related_events)
return related_how_to_events
def upcoming_related_event(self, related_how_tos=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
how_to_event_lists = [how_to_page.event_page_list()
for how_to_page
in related_how_tos]
event_pages = []
for how_to_event_list in how_to_event_lists:
if len(how_to_event_list) > 0:
for how_to_event in how_to_event_list:
if how_to_event and how_to_event.is_upcoming:
event_pages.append(how_to_event)
if len(event_pages) > 0:
return sorted(event_pages, key=lambda event: event.start_date)[0]
return event_pages
@staticmethod
def related_how_to_pages(how_to_pages, self_idx=None):
previous_page_idx = 0
next_article_idx = 1
if self_idx:
for idx, page in enumerate(how_to_pages):
            if page.id == self_idx:
self_idx = idx
previous_page_idx = self_idx - 1
next_article_idx = self_idx + 1
previous_page = None
next_page = None
if 0 <= previous_page_idx < len(how_to_pages):
previous_page = how_to_pages[previous_page_idx]
if 0 <= next_article_idx < len(how_to_pages):
next_page = how_to_pages[next_article_idx]
return (previous_page, next_page)
# Blocks
class PullQuoteBlock(blocks.StructBlock):
pull_quote = blocks.TextBlock(verbose_name=_('Pull quote'),
required=True,
rows=2)
attribution = blocks.CharBlock(verbose_name=_('Quote attribution to'),
help_text=_('The name of the person or organization that '
'the quote can be attributed to quote'),
required=False)
link = blocks.URLBlock(verbose_name=_('Link'),
help_text=_("Click quote to go to link."),
required=False)
class Meta:
template = 'articles/blocks/pullquote.html'
icon = 'openquote'
label = 'Pull Quote'
class FeaturedImageBlock(blocks.StructBlock):
image = ImageChooserBlock(required=True)
class Meta:
icon='image'
label=_('Image')
template='articles/blocks/featured_image.html'
help_text=_('The featured image is shown in the list-view and detail-view')
class FeaturedVideoBlock(blocks.StructBlock):
video = EmbedBlock(required=True)
class Meta:
icon='media'
label=_('Video')
template='articles/blocks/featured_video.html'
        help_text=_('The featured video is only shown in the detail-view, make sure to also select a featured image')
class FeaturedAudioBlock(blocks.StructBlock):
    audio = EmbedBlock(required=True)
class Meta:
icon='media'
label=_('Audio')
template='articles/blocks/featured_audio.html'
        help_text=_('The featured audio is only shown in the detail-view, make sure to also select a featured image')
class PageFormat:
TEXT = ('text', _('Story'))
THEORY = ('theory', _('Theory'))
VIDEO = ('video', _('Video'))
AUDIO = ('audio', _('Audio'))
IMAGES = ('images', _('Photo report'))
EVENT = ('event', _('Activity'))
ORGANIZATION = ('organization', _('Practitioner'))
LINK = ('link', _('Link'))
DOCUMENT = ('document', _('Document'))
ALL = (
TEXT,
THEORY,
VIDEO,
AUDIO,
IMAGES,
EVENT,
ORGANIZATION,
LINK,
DOCUMENT
)
def __init__(self):
pass
|
dofeldsc/vivo_uos
|
my_pump/pump/vivopump.py
|
Python
|
gpl-3.0
| 47,667
| 0.003168
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
vivopump -- module of helper functions for the pump
"""
import sys
import csv
import string
import random
import logging
__author__ = "Michael Conlon"
__copyright__ = "Copyright (c) 2016 Michael Conlon"
__license__ = "New BSD license"
__version__ = "0.8.7"
logger = logging.getLogger(__name__)
class DefNotFoundException(Exception):
"""
    Raise this exception when the update definition file is not found
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidDefException(Exception):
"""
Raise this exception when update definition contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidSourceException(Exception):
"""
Raise this exception when update data contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class PathLengthException(Exception):
"""
    Raise this exception when the update def has a path length greater than supported
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class UnicodeCsvReader(object):
"""
From http://stackoverflow.com/questions/1846135/python-csv-
library-with-unicode-utf-8-support-that-just-works. Added errors='ignore'
to handle cases when the input file misrepresents itself as utf-8.
"""
def __init__(self, f, encoding="utf-8", **kwargs):
self.csv_reader = csv.reader(f, **kwargs)
self.encoding = encoding
def __iter__(self):
return self
def next(self):
"""
Read and split the csv row into fields
"""
row = self.csv_reader.next()
# now decode
return [unicode(cell, self.encoding, errors='ignore') for cell in row]
@property
def line_num(self):
"""
Return line number
"""
return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
"""
A Unicode CSV Reader
"""
def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
def read_csv(filename, skip=True, delimiter='|'):
"""
Read a CSV file, return dictionary object
:param filename: name of file to read
:param skip: should lines with invalid number of columns be skipped? False=Throw Exception
:param delimiter: The delimiter for CSV files
:return: Dictionary object
"""
fp = open(filename, 'rU')
data = read_csv_fp(fp, skip, delimiter)
fp.close()
return data
def read_csv_fp(fp, skip=True, delimiter="|"):
"""
    Given a file pointer, read the CSV data it contains. We use "|" as a
separator in CSV files to allow commas to appear in values.
CSV files read by this function follow these conventions:
-- use delimiter as a separator. Defaults to vertical bar.
-- have a first row that contains column headings.
-- all elements must have values. To specify a missing value, use
the string "None" or "NULL" between separators, that is |None| or |NULL|
-- leading and trailing whitespace in values is ignored. | The | will be
read as "The"
-- if skip=True, rows with too many or too few data elements are skipped.
if skip=False, a RowError is thrown
CSV files processed by read_csv will be returned as a dictionary of
dictionaries, one dictionary per row keyed by an integer row number. This supports
maintaining the order of the data input, which is important for some applications
"""
class RowError(Exception):
"""
Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
"""
pass
heading = []
row_number = 0
data = {}
for row in UnicodeCsvReader(fp, delimiter=delimiter):
i = 0
for r in row:
# remove white space fore and aft
row[i] = r.strip(string.whitespace).encode("utf-8")
i += 1
if len(heading) == 0:
heading = row # the first row is the heading
continue
row_number += 1
if len(row) == len(heading):
data[row_number] = {}
i = 0
for r in row:
data[row_number][heading[i]] = r
i += 1
elif not skip:
raise RowError("On row " + str(row_number) + ", expecting " +
str(len(heading)) + " data values. Found " +
str(len(row)) + " data values. Row contents = " +
str(row))
else:
pass # row has wrong number of columns and skip is True
logger.debug("loader returns {} rows".format(len(data)))
return data
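def _read_csv_fp_example():
    """
    Illustrative sketch only (not part of the original vivopump API): a minimal
    read_csv_fp call using the default '|' delimiter. The sample rows are invented.
    """
    from StringIO import StringIO  # Python 2, matching this module's idiom
    sample = StringIO("name|phone\nAlice|None\n Bob |555-1212\n")
    rows = read_csv_fp(sample)
    # Whitespace around values is stripped, so ' Bob ' comes back as 'Bob'; row order
    # is preserved through the integer keys 1 and 2.
    return rows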
def write_csv_fp(fp, data, delimiter='|'):
"""
Write a CSV to a file pointer. Used to support stdout.
:param fp: File pointer. Could be stdout.
:param data: data to be written
:param delimiter: field delimiter for output
:return:
"""
assert(len(data.keys()) > 0)
# create a list of var_names from the first row
var_names = data[data.keys()[0]].keys()
fp.write(delimiter.join(var_names).encode('utf-8') + '\n')
for key in sorted(data.keys()):
fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
def write_csv(filename, data, delimiter='|'):
"""
Given a filename, a data structure as produced by read_csv and an optional
delimiter, write a file that can be read by read_csv
The data structure is a dictionary keyed by an integer of "row numbers"
preserving the natural order of the data. Each element is in turn a
dictionary of name value pairs. All values are strings.
:param filename: name of file to write
:param data: data structure to be written to the file
    :param delimiter: field delimiter. Popular choices are '|', '\t' and ','
:return:
"""
with open(filename, 'w') as f:
f.write(delimiter.join(data[data.keys()[0]].keys()).encode('utf-8') + '\n')
for key in sorted(data.keys()):
f.write(delimiter.join(data[key].values()).encode('utf-8') + '\n')
def replace_initials(s):
"""
For a string s, find all occurrences of A. B. etc and replace them with A B etc
:param s:
:return: string with replacements made
"""
import re
def repl_function(m):
"""
Helper function for re.sub
"""
return m.group(0)[0]
t = re.sub('[A-Z]\.', repl_function, s)
return t
def key_string(s):
"""
Given a string s, return a string with a bunch of punctuation and special
characters removed and then everything lower cased. Useful for matching
strings in which case, punctuation and special characters should not be
considered in the match
"""
k = s.encode("utf-8", "ignore").translate(None,
""" \t\n\r\f!@#$%^&*()_+:"<>?-=[]\\;'`~,./""")
k = k.lower()
return k
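def _key_string_example():
    """
    Illustrative sketch only (not part of the original vivopump API): key_string
    reduces differently punctuated spellings to one matching key. Sample values
    are invented.
    """
    a = key_string("Conlon, Michael")
    b = key_string("conlon  michael")
    # Case, whitespace and punctuation are ignored, so both keys are 'conlonmichael'.
    return a == b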
def get_vivo_types(selector, parms, separator=';'):
"""
Query VIVO using the selector and return a dictionary with keys of all uri satisfying the selector and
data of all the types for each uri, separated by the separator
:param: selector: query fragment for selecting the entities whose types will be returned
:param: parms: vivo_query parms
:return: dictionary of types keyed by uri
"""
query = """
select ?uri (GROUP_CONCAT(?type; separator="{{separator}}") AS ?types)
where {
{{selector}}
?uri rdf:type ?type .}
GROUP BY ?uri
"""
q = query.replace("{{separator}}", separator)
q = q.replace("{{selector}
|
idea4bsd/idea4bsd
|
python/testData/refactoring/move/class/before/src/lib1.py
|
Python
|
apache-2.0
| 122
| 0.008197
|
class URLOpener(object):
def __init__(self, x):
self.x = x
def urlopen(self):
        return file(self.x)
|
amanharitsh123/zulip
|
zerver/lib/fix_unreads.py
|
Python
|
apache-2.0
| 6,949
| 0.000863
|
import time
import logging
from typing import Callable, List, TypeVar, Text
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''
logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor, user_profile):
# type: (CursorObj, UserProfile) -> Callable[[int, Text], bool]
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id, topic):
# type: (int, Text) -> bool
return (recipient_id, topic.lower()) in tups
return is_muted
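def _topic_mute_checker_example(cursor, user_profile):
    # type: (CursorObj, UserProfile) -> bool
    '''
    Illustrative sketch only (not part of the original module): how the checker
    returned by build_topic_mute_checker is typically used. The recipient id and
    topic name below are invented values.
    '''
    is_muted = build_topic_mute_checker(cursor, user_profile)
    # The topic comparison is case-insensitive, so 'Lunch' and 'lunch' match equally.
    return is_muted(17, 'Lunch')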
def update_unread_flags(cursor, user_message_ids):
# type: (CursorObj, List[int]) -> None
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
def get_timing(message, f):
# type: (str, Callable) -> None
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
recipient_ids = []
def find_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile):
# type: (UserProfile) -> None
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)
|
egtaonline/egtaonline-api
|
test/test_eo.py
|
Python
|
apache-2.0
| 14,077
| 0.000142
|
"""Test cli"""
import asyncio
import contextlib
import io
import json
import sys
import traceback
from unittest import mock
import pytest
from egtaonline import __main__ as main
from egtaonline import api
from egtaonline import mockserver
# TODO async fixtures may be possible with python 3.6, but it's not possible
# with async_generator
async def run(*args):
"""Run a command line and return if it ran successfully"""
try:
await main.amain(*args)
except SystemExit as ex:
return not int(str(ex))
except Exception: # pylint: disable=broad-except
traceback.print_exc()
return False
return True
def stdin(inp):
"""Patch stdin with input"""
return mock.patch.object(sys, 'stdin', io.StringIO(inp))
# This is a hack to allow "writing" to the underlying buffer of a stringio
class _StringBytes(io.BytesIO):
"""A wrapper for bytes io that allows getting the string
This is necessary because for zip files, the result needs to be written to
a byte stream."""
def close(self):
pass
def getvalue(self):
return super().getvalue().decode('utf8')
@contextlib.contextmanager
def stdout():
"""Patch stdout and return stringio"""
buff = _StringBytes()
with mock.patch.object(sys, 'stdout', io.TextIOWrapper(buff)):
yield buff
def stderr():
"""Patch stderr and return stringio"""
return mock.patch.object(sys, 'stderr', io.StringIO())
@pytest.mark.asyncio
async def test_help():
"""Test getting help by itself"""
with stderr() as err:
assert await run('-h'), err.getvalue()
@pytest.mark.asyncio
@pytest.mark.parametrize('cmd', ['sim', 'game', 'sched', 'sims'])
async def test_cmd_help(cmd):
"""Test getting help from commands"""
with stderr() as err:
assert await run(cmd, '-h'), err.getvalue()
@pytest.mark.asyncio
async def test_sim():
"""Test sim functionality"""
async with mockserver.server() as server:
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sim'), err.getvalue()
assert not out.getvalue()
server.create_simulator('sim', '1')
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sim'), err.getvalue()
# get by id
sim = json.loads(out.getvalue())
with stderr() as err:
assert await run('-a', '', 'sim', str(sim['id'])), err.getvalue()
# get by name
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sim', sim['name'], '-n',
sim['version']), err.getvalue()
assert sim['id'] == json.loads(out.getvalue())['id']
assert not await run('-a', '', 'sim', '--', '-1')
# add role
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-rr'), err.getvalue()
# add strategy
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-rr', '-ss'), err.getvalue()
# add strategies
with stdin(json.dumps({'r': ['q'], 'a': ['b']})), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-j-'), err.getvalue()
# remove strategy
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-drr', '-sq'), err.getvalue()
# remove role
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-dra'), err.getvalue()
# remove strategies
with stdin(json.dumps({'r': ['s']})), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-dj-'), err.getvalue()
# get zip
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-z'), err.getvalue()
@pytest.mark.asyncio
async def test_game(tmpdir): # pylint: disable=too-many-statements
"""Test game functionality"""
conf = str(tmpdir.join('conf.json'))
with open(conf, 'w') as fil:
json.dump({}, fil)
async with mockserver.server() as server:
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game'), err.getvalue()
assert not out.getvalue()
sim_id = server.create_simulator('sim', '1')
game_spec = {
'players': {
'r': 2,
},
'strategies': {
'r': ['s0', 's1'],
},
}
with stdin(json.dumps(game_spec['strategies'])), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim_id), '-j-'), err.getvalue()
# get canon game
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf',
conf), err.getvalue()
game = json.loads(out.getvalue())
# verify its now listed with games
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game'), err.getvalue()
game2 = json.loads(out.getvalue())
assert game == game2
# get game structure
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game', str(game['id'])), err.getvalue()
struct = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf',
conf), err.getvalue()
assert struct == json.loads(out.getvalue())
# get game summary
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '--summary'), err.getvalue()
summ = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--summary'), err.getvalue()
assert summ == json.loads(out.getvalue())
# get observations
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']),
'--observations'), err.getvalue()
obs = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--observations'), err.getvalue()
assert obs == json.loads(out.getvalue())
# get full data
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '--full'), err.getvalue()
full = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--full'), err.getvalue()
assert full == json.loads(out.getvalue())
# test name works
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', game['name'], '-n'), err.getvalue()
assert game['id'] == json.loads(out.getvalue())['id']
# remove strategy
with stderr() as err:
asse
|
rt await run(
|
'-a', '', 'game', str(game['id']), '-drr',
'-ss0'), err.getvalue()
        # remove strategies
with stdin(json.dumps({'r': ['s1']})), stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-dj-'), err.getvalue()
# remove role
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-drr'), err.getvalue()
# add role
assert not await run(
|
mrtazz/simplenote.vim
|
autoload/SimplenoteList.py
|
Python
|
mit
| 651
| 0.004608
|
def SimplenoteList():
if (float(vim.eval("a:0"))>=1):
try:
# check fo
|
r valid date string
datetime.datetime.strptime(vim.eval("a:1"), "%Y-%m-%d")
interface.list_note_index_in_scratch_buffer(since=vim.eval("a:1"))
except ValueError:
|
interface.list_note_index_in_scratch_buffer(tags=vim.eval("a:1").split(","))
else:
interface.list_note_index_in_scratch_buffer()
try:
set_cred()
SimplenoteList()
except simplenote.SimplenoteLoginFailed:
# Note: error has to be caught here and not in __init__
reset_user_pass('Login Failed. Check token?')
# vim: expandtab
|
dangra/scrapy-sci
|
setup.py
|
Python
|
bsd-3-clause
| 1,322
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('..
|
:changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='scrapy-sci',
version='0.1.0',
description='Improve your scrapy pipeline with machine learning',
long_description=readme + '\n\n' + history,
author='John Cadigan',
author_email='johnpaulc
|
adigan@gmail.com',
url='https://github.com/johncadigan/scrapy-sci',
packages=[
'scrapy_sci',
"scrapy_sci.commands",
"scrapy_sci.templates",
],
package_dir={'scrapy_sci':
'scrapy_sci'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='machine learning',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements
)
|
reedobrien/mongo-python-driver
|
pymongo/max_key.py
|
Python
|
apache-2.0
| 604
| 0
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:
|
//www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson.max_key impor
|
t *
|
stadtgestalten/stadtgestalten
|
grouprise/features/polls/migrations/0014_auto_20180222_1033.py
|
Python
|
agpl-3.0
| 1,195
| 0.001674
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-22 09:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
cla
|
ss Migration(migrations.Migration):
dependencies = [
('polls', '0013_auto_20180109_1302'),
]
operations = [
migrations.CreateModel(
name='WorkaroundPoll',
fields=[
|
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condorcet', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.AlterModelOptions(
name='option',
options={'ordering': ('id',)},
),
migrations.AlterModelOptions(
name='vote',
options={'ordering': ('time_updated',)},
),
migrations.AddField(
model_name='option',
name='poll_new1',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='options_new', to='polls.WorkaroundPoll'),
),
]
|
cgimenop/Excel2Testlink
|
ExcelParser/TLCase.py
|
Python
|
mit
| 6,315
| 0.012985
|
'''
Created on 21 Oct. 2015
@author: cgimenop
'''
from xml.dom import minidom
class TLCase:
CODE = 0;
TITLE = 1
SUMMARY = 2
IMPORTANCE = 3
PRECONDITIONS = 4
STEPS = 5
EXPECTED_RESULT = 6
EXTRA_DETAILS = 7 #UNUSED
EXECUTION_TYPE = 8
E2E = 9 #UNUSED
REGRESSION = 10 #UNUSED
LINKED_STORIES = 11
STATE = 12 #UNUSED
COMMENTS = 13 #UNUSED
EXECUTION_TYPE_MANUAL = "1"
EXECUTION_TYPE_AUTO = "2"
EXCEL_IMPORTANCE_LOW = "L"
EXCEL_IMPORTANCE_MEDIUM = "M"
EXCEL_IMPORTANCE_HIGH = "H"
IMPORTANCE_LOW = "1"
IMPORTANCE_MEDIUM = "2"
IMPORTANCE_HIGH = "3"
'''
classdocs
'''
def __init__(self, params = [], req_spec = None):
'''
Constructor
'''
if (len(params) == 0 or req_spec is None):
print("Invalid test case parameters")
            raise ValueError("Invalid test case parameters")
self.req_spec_title = req_spec
self.title = params[self.TITLE].value
self.summar
|
y = "</br>".join([params[self.CODE].value, params[self.TITLE].value, "Covers: ", params[self.LINKED_STORIES].value.strip()])
|
self.importance = self.importance_value(params[self.IMPORTANCE].value)
self.preconditions = params[self.PRECONDITIONS].value.replace("\n", "</br>")
#TODO: This will need further work to split the excel cell in multiple steps
self.steps = params[self.STEPS].value.replace("\n", "</br>")
self.expected_result = "</br>".join([str(params[self.EXPECTED_RESULT].value), str(params[self.EXTRA_DETAILS].value)])
self.expected_result = self.expected_result.replace("\n", "</br>")
if (params[self.EXECUTION_TYPE].value == "yes"):
self.execution_type = self.EXECUTION_TYPE_AUTO
else:
self.execution_type = self.EXECUTION_TYPE_MANUAL
self.requirements = dict()
self.get_requirements(params[self.LINKED_STORIES].value.split(","))
def importance_value(self, value):
if (value == None):
return self.IMPORTANCE_MEDIUM
switcher = {
self.EXCEL_IMPORTANCE_LOW: self.IMPORTANCE_LOW,
self.EXCEL_IMPORTANCE_MEDIUM: self.IMPORTANCE_MEDIUM,
self.EXCEL_IMPORTANCE_HIGH: self.IMPORTANCE_HIGH,
}
return switcher.get(value.upper(), self.IMPORTANCE_MEDIUM)
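    # Illustrative examples of the mapping above: importance_value('h') -> '3',
    # importance_value('M') -> '2', and importance_value(None) or any unknown
    # value -> '2', since medium is the default.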
def get_requirements(self, requirements_list = None):
if (requirements_list is None):
return self.requirements
xml_doc = minidom.Document()
self.requirements = dict()
for requirement in requirements_list:
stripped_requirement = requirement.strip()
xml_requirement = xml_doc.createElement("requirement")
req_spec = xml_doc.createElement("req_spec_title")
cdata = xml_doc.createCDATASection(self.req_spec_title)
req_spec.appendChild(cdata)
title = xml_doc.createElement("title")
title_cdata = xml_doc.createCDATASection(stripped_requirement)
title.appendChild(title_cdata)
xml_requirement.appendChild(req_spec)
xml_requirement.appendChild(title)
if (stripped_requirement not in self.requirements):
self.requirements[stripped_requirement] = xml_requirement
return self.requirements
def to_xml(self):
xml_doc = minidom.Document()
xml_test_case = xml_doc.createElement("testcase")
xml_test_case.setAttribute("name", self.title)
summary = xml_doc.createElement("summary")
cdata = xml_doc.createCDATASection(self.summary)
summary.appendChild(cdata)
xml_test_case.appendChild(summary)
preconditions = xml_doc.createElement("preconditions")
cdata = xml_doc.createCDATASection(self.preconditions)
preconditions.appendChild(cdata)
xml_test_case.appendChild(preconditions)
steps = xml_doc.createElement("steps")
xml_test_case.appendChild(steps)
step = xml_doc.createElement("step")
steps.appendChild(step)
actions = xml_doc.createElement("actions")
step.appendChild(actions)
cdata = xml_doc.createCDATASection(self.steps)
actions.appendChild(cdata)
expected_results = xml_doc.createElement("expectedresults")
step.appendChild(expected_results)
cdata = xml_doc.createCDATASection(self.expected_result)
expected_results.appendChild(cdata)
#TODO: When test description is correctly splited into steps this will have to change accordingly
step_number = xml_doc.createElement("step_number")
step.appendChild(step_number)
cdata = xml_doc.createCDATASection("1")
step_number.appendChild(cdata)
execution_type = xml_doc.createElement("execution_type")
cdata = xml_doc.createCDATASection(self.execution_type)
execution_type.appendChild(cdata)
xml_test_case.appendChild(execution_type)
importance = xml_doc.createElement("importance")
cdata = xml_doc.createCDATASection(self.importance)
importance.appendChild(cdata)
xml_test_case.appendChild(importance)
xml_requirements = xml_doc.createElement("requirements")
for requirement_index in self.requirements:
case_requirement = self.requirements[requirement_index]
doc_id = xml_doc.createElement("doc_id")
doc_id_cdata = xml_doc.createCDATASection(requirement_index)
doc_id.appendChild(doc_id_cdata)
case_requirement.appendChild(doc_id)
xml_requirements.appendChild(case_requirement)
xml_test_case.appendChild(xml_requirements)
return xml_test_case
|
brettcs/diffoscope
|
tests/comparators/test_fonts.py
|
Python
|
gpl-3.0
| 1,851
| 0.002706
|
# -*- coding: utf-8 -*-
#
# diffo
|
scope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <lunar@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it wi
|
ll be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
from diffoscope.config import Config
from diffoscope.comparators.fonts import TtfFile
from diffoscope.comparators.missing_file import MissingFile
from utils.data import data, load_fixture
from utils.tools import skip_unless_tools_exist
ttf1 = load_fixture('Samyak-Malayalam1.ttf')
ttf2 = load_fixture('Samyak-Malayalam2.ttf')
def test_identification(ttf1):
assert isinstance(ttf1, TtfFile)
def test_no_differences(ttf1):
difference = ttf1.compare(ttf1)
assert difference is None
@pytest.fixture
def differences(ttf1, ttf2):
return ttf1.compare(ttf2).details
@skip_unless_tools_exist('showttf')
def test_diff(differences):
expected_diff = open(data('ttf_expected_diff')).read()
assert differences[0].unified_diff == expected_diff
@skip_unless_tools_exist('showttf')
def test_compare_non_existing(monkeypatch, ttf1):
monkeypatch.setattr(Config(), 'new_file', True)
difference = ttf1.compare(MissingFile('/nonexisting', ttf1))
assert difference.source2 == '/nonexisting'
assert len(difference.details) > 0
|
ric2b/Vivaldi-browser
|
chromium/build/android/apk_operations.py
|
Python
|
bsd-3-clause
| 73,936
| 0.009157
|
#!/usr/bin/env vpython3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Using colorama.Fore/Back/Style members
# pylint: disable=no-member
from __future__ import print_function
import argparse
import collections
import json
import logging
import os
import pipes
import posixpath
import random
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import textwrap
import zipfile
import adb_command_line
import devil_chromium
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.sdk import adb_wrapper
from devil.android.sdk import build_tools
from devil.android.sdk import intent
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
_DIR_SOURCE_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..'))
_JAVA_HOME = os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'jdk', 'current')
with devil_env.SysPath(
os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src')):
import colorama
from incremental_install import installer
from pylib import constants
from pylib.symbols import deobfuscator
from pylib.utils import simpleperf
from pylib.utils import app_bundle_utils
with devil_env.SysPath(
os.path.join(_DIR_SOURCE_ROOT, 'build', 'android', 'gyp')):
import bundletool
BASE_MODULE = 'base'
def _Colorize(text, style=''):
return (style
+ text
+ colorama.Style.RESET_ALL)
def _InstallApk(devices, apk, install_dict):
def install(device):
if install_dict:
installer.Install(device, install_dict, apk=apk, permissions=[])
else:
device.Install(apk, permissions=[], allow_downgrade=True, reinstall=True)
logging.info('Installing %sincremental apk.', '' if install_dict else 'non-')
device_utils.DeviceUtils.parallel(devices).pMap(install)
# A named tuple containing the information needed to convert a bundle into
# an installable .apks archive.
# Fields:
# bundle_path: Path to input bundle file.
# bundle_apk_path: Path to output bundle .apks archive file.
# aapt2_path: Path to aapt2 tool.
# keystore_path: Path to keystore file.
# keystore_password: Password for the keystore file.
# keystore_alias: Signing key name alias within the keystore file.
# system_image_locales: List of Chromium locales to include in system .apks.
BundleGenerationInfo = collections.namedtuple(
'BundleGenerationInfo',
'bundle_path,bundle_apks_path,aapt2_path,keystore_path,keystore_password,'
'keystore_alias,system_image_locales')
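# Illustrative only (not taken from a real build): the namedtuple fields map
# one-to-one to keyword arguments; every path and credential below is a
# hypothetical placeholder.
_EXAMPLE_BUNDLE_INFO = BundleGenerationInfo(
    bundle_path='out/Release/Example.aab',
    bundle_apks_path='out/Release/Example.apks',
    aapt2_path='path/to/aapt2',
    keystore_path='path/to/debug.keystore',
    keystore_password='example-password',
    keystore_alias='examplekey',
    system_image_locales=['en-US'])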
def _GenerateBundleApks(info,
output_path=None,
minimal=False,
minimal_sdk_version=None,
mode=None,
optimize_for=None):
"""Generate an .apks archive from a bundle on demand.
Args:
info: A BundleGenerationInfo instance.
output_path: Path of output .apks archive.
minimal: Create the minimal set of apks possible (english-only).
minimal_sdk_version: When minimal=True, use this sdkVersion.
mode: Build mode, either None, or one of app_bundle_utils.BUILD_APKS_MODES.
optimize_for: Override split config, either None, or one of
app_bundle_utils.OPTIMIZE_FOR_OPTIONS.
"""
logging.info('Generating .apks file')
app_bundle_utils.GenerateBundleApks(
info.bundle_path,
# Store .apks file beside the .aab file by default so that it gets cached.
output_path or info.bundle_apks_path,
info.aapt2_path,
info.keystore_path,
info.keystore_password,
info.keystore_alias,
system_image_locales=info.system_image_locales,
mode=mode,
minimal=minimal,
minimal_sdk_version=minimal_sdk_version,
optimize_for=optimize_for)
def _InstallBundle(devices, apk_helper_instance, modules, fake_modules):
def Install(device):
device.Install(
apk_helper_instance,
permissions=[],
modules=modules,
fake_modules=fake_modules,
allow_downgrade=True)
# Basic checks for |modules| and |fake_modules|.
# * |fake_modules| cannot include 'base'.
# * If |fake_modules| is given, ensure |modules| includes 'base'.
# * They must be disjoint (checked by device.Install).
modules_set = set(modules) if modules else set()
fake_modules_set = set(fake_modules) if fake_modules else set()
if BASE_MODULE in fake_modules_set:
raise Exception('\'-f {}\' is disallowed.'.format(BASE_MODULE))
if fake_modules_set and BASE_MODULE not in modules_set:
raise Exception(
'\'-f FAKE\' must be accompanied by \'-m {}\''.format(BASE_MODULE))
logging.info('Installing bu
|
ndle.')
device_utils.DeviceUtils.parallel(devices).pMap(Install)
def _UninstallApk(devices, install_dict, package_name):
def uninstall(device):
if install_dict:
installer.Uninstall(device, package_name)
else:
device.Uninstall(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(uninstall)
def _IsWebViewProvider(apk_helper_instance):
meta_data = apk_helper_instance.GetAllMeta
|
data()
meta_data_keys = [pair[0] for pair in meta_data]
return 'com.android.webview.WebViewLibrary' in meta_data_keys
def _SetWebViewProvider(devices, package_name):
def switch_provider(device):
if device.build_version_sdk < version_codes.NOUGAT:
logging.error('No need to switch provider on pre-Nougat devices (%s)',
device.serial)
else:
device.SetWebViewImplementation(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(switch_provider)
def _NormalizeProcessName(debug_process_name, package_name):
if not debug_process_name:
debug_process_name = package_name
elif debug_process_name.startswith(':'):
debug_process_name = package_name + debug_process_name
elif '.' not in debug_process_name:
debug_process_name = package_name + ':' + debug_process_name
return debug_process_name
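# Example behavior of the helper above (hypothetical package name, shown for
# illustration only):
#   _NormalizeProcessName(None, 'org.example.app')          -> 'org.example.app'
#   _NormalizeProcessName(':gpu', 'org.example.app')        -> 'org.example.app:gpu'
#   _NormalizeProcessName('privileged', 'org.example.app')  -> 'org.example.app:privileged'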
def _LaunchUrl(devices, package_name, argv=None, command_line_flags_file=None,
url=None, apk=None, wait_for_java_debugger=False,
debug_process_name=None, nokill=None):
if argv and command_line_flags_file is None:
raise Exception('This apk does not support any flags.')
if url:
# TODO(agrieve): Launch could be changed to require only package name by
# parsing "dumpsys package" rather than relying on the apk.
if not apk:
raise Exception('Launching with URL is not supported when using '
'--package-name. Use --apk-path instead.')
view_activity = apk.GetViewActivityName()
if not view_activity:
raise Exception('APK does not support launching with URLs.')
debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
def launch(device):
# --persistent is required to have Settings.Global.DEBUG_APP be set, which
# we currently use to allow reading of flags. https://crbug.com/784947
if not nokill:
cmd = ['am', 'set-debug-app', '--persistent', debug_process_name]
if wait_for_java_debugger:
cmd[-1:-1] = ['-w']
# Ignore error since it will fail if apk is not debuggable.
device.RunShellCommand(cmd, check_return=False)
# The flags are first updated with input args.
if command_line_flags_file:
changer = flag_changer.FlagChanger(device, command_line_flags_file)
flags = []
if argv:
adb_command_line.CheckBuildTypeSupportsFlags(device,
command_line_flags_file)
flags = shlex.split(argv)
try:
changer.ReplaceFlags(flags)
except device_errors.AdbShellCommandFailedError:
logging.exception('Failed to set flags')
if url is None:
# Simulate app icon click if no url is present.
cmd = [
'am', 'start', '-p', package_name, '-c',
'android.intent.category.LAUNCHER', '-a', 'an
|
ofek/pypinfo
|
pypinfo/__init__.py
|
Python
|
mit
| 23
| 0
|
__version__ = '20.
|
0.0'
|
|
theheros/kbengine
|
kbe/res/scripts/common/Lib/test/test_unicode.py
|
Python
|
lgpl-3.0
| 71,973
| 0.00335
|
""" Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
import struct
import sys
import unittest
import warnings
from test import support, string_tests
import _string
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
codecs.register(search_function)
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest):
type2test = str
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.a
|
ssertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1r
|
epr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa
|
LICEF/edx-platform
|
common/test/acceptance/pages/studio/settings_group_configurations.py
|
Python
|
agpl-3.0
| 4,712
| 0.000849
|
"""
Course Group Configurations page.
"""
from .course_page import CoursePage
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
def is_browser_on_page(self):
return self.q(css='body.view-group-configurations').present
def group_configurations(self):
"""
Return list of the group configurations for the course.
"""
css = '.group-configurations-list-item'
return [GroupConfiguration(self, index) for index in xrange(len(self.q(css=css)))]
def create(self):
"""
Creates new group configuration.
"""
self.q(css=".new-button").first.click()
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, index):
self.page = page
self.SELECTOR = '.group-configurations-list-item-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
css = 'a.group-toggle'
self.find_css(css).first.click()
def get_text(self, css):
"""
        Return text for the element defined by the css locator.
"""
return self.find_css(css).first.text[0]
def edit(self):
"""
Open editing view for the group configuration.
"""
css = '.action-edit .edit'
self.find_css(css).first.click()
def save(self):
"""
Save group configuration.
"""
css = '.action-primary'
self.find_css(css).first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
css = '.action-secondary'
self.find_css(css).first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.group-configuration-edit').present:
return 'edit'
elif self.find_css('.group-configuration-details').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.group-configurati
|
on-title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
css = '.group-configuration-name-input'
self.find_css(css).first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
""
|
"
Set group configuration description.
"""
css = '.group-configuration-description-input'
self.find_css(css).first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
css = '.group'
def group_selector(config_index, group_index):
return self.get_selector('.groups-{} .group-{} '.format(config_index, group_index))
return [Group(self.page, group_selector(self.index, index)) for index, element in enumerate(self.find_css(css))]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
Return group name.
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
|
onrik/pyqiwi
|
tests/test_client.py
|
Python
|
mit
| 8,897
| 0.001351
|
# coding: utf-8
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import httpretty
from pyqiwi import QiwiError, Qiwi
class QiwiErrorTestCase(TestCase):
def test_error_code(self):
error = QiwiError(143)
self.assertEqual(error.code, 143)
@httpretty.activate
class QiwiClientTestCase(TestCase):
shop_id = '123'
api_id = '456'
api_password = '123qwe'
notification_password = 'qwe123'
def setUp(self):
self.client = Qiwi(self.shop_id, self.api_id, self.api_password, self.notification_password)
def tearDown(self):
httpretty.reset()
def parse(self, data):
if isinstance(data, bytes):
data = data.decode('utf-8')
return dict(map(lambda x: x.split('='), data.split('&')))
def test__get_invoice_url(self):
self.assertEqual(
self.client._get_invoice_url('10001'),
'https://api.qiwi.com/api/v2/prv/123/bills/10001'
)
def test__get_refund_url(self):
self.assertEqual(
self.client._get_refund_url('1', '002'),
'https://api.qiwi.com/api/v2/prv/123/bills/1/refund/002'
)
def test_url_encode(self):
encoded = self.client._urlencode({
'foo': 'bar',
'ext': '',
'user': 'tel:+79998887766',
})
self.assertEqual(self.parse(encoded), {
'foo': 'bar',
'user': 'tel%3A%2B79998887766',
})
def test_make_auth(self):
self.assertEqual(
self.client._make_auth('user1', 'password'),
b'Basic dXNlcjE6cGFzc3dvcmQ='
)
self.assertEqual(
self.client._make_auth('123456', 'zLQkZDdRvBNUkf9spassword'),
b'Basic MTIzNDU2OnpMUWtaRGRSdkJOVWtmOXNwYXNzd29yZA=='
)
def test__make_signature(self):
signature = self.client._make_signature({
'b': 'bar',
'a': 'foo',
'some': 'param',
'comment': u'Заказ №101'
})
self.assertEqual(signature, b'7nHZIf/w6DLq+CuvzV2BmhT71xA=')
def test__request(self):
url = 'http://example.com'
auth = self.client._make_auth(self.api_id, self.api_password).decode('utf-8')
httpretty.register_uri(httpretty.GET, url, '{"response": {"result_code": 0}}')
response = self.client._request(url)
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
httpretty.register_uri(httpretty.PUT, url, '{"response": {"result_code": 0}}')
response = self.client._request(url, {'user': 'tel:+79998887766'})
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
self.assertEqual(request.headers.get('Content-Type'), 'application/x-www-form-urlencoded')
self.assertEqual(request.body, b'user=tel%3A%2B79998887766')
httpretty.reset()
httpretty.register_uri(
httpretty.GET, url, '{"response": {"result_code": 33}}', status=400)
try:
self.client._request(url)
except QiwiError as e:
self.assertEqual(e.code, 33)
else:
self.fail('QiwiError not raised')
def test_create_invoice(self):
invoice_id = '101'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "101"
}
}
}""")
invoice = self.client.create_invoice(
invoice_id=invoice_id,
amount=Decimal('22.00'),
currency='RUB',
comment='Order #101',
user='tel:+79998887766',
lifetime=datetime(2017, 1, 2, 15, 22, 33),
)
self.assertEqual(invoice, {'invoice_id': '101'})
self.assertEqual(self.parse(httpretty.HTTPretty.last_request.body), {
'amount': '22.00',
'ccy': 'RUB',
'comment': 'Order+%23101',
'user': 'tel%3A%2B7
|
9998887766',
'lifetime': '2017-01-02T15%3A22%3A33',
})
def test_cancel_invoice(self):
invoice_id = '102'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PATCH, url, body="""{
"response": {
|
"result_code": 0,
"bill": {
"invoice_id": "102",
"status": "rejected"
}
}
}""")
invoice = self.client.cancel_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '102',
'status': "rejected",
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'status=rejected'
)
def test_get_invoice(self):
invoice_id = '103'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "103",
"status": "paid"
}
}
}""")
invoice = self.client.get_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '103',
'status': "paid",
})
def test_create_refund(self):
invoice_id = '104'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00"
}
}
}""")
refund = self.client.create_refund(invoice_id, refund_id, Decimal('100.00'))
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'amount=100.00'
)
def test_get_refund(self):
invoice_id = '105'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00",
"status": "fail"
}
}
}""")
refund = self.client.get_refund(invoice_id, refund_id)
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
'status': 'fail',
})
def test_get_invoice_url(self):
url = self.client.get_invoice_url('106')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '106',
})
self.assertEqual(url, expected)
url = self.client.get_invoice_url('107', True, 'http://google.com/success', 'http://google.com/fail', 'iframe', 'qw')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '107',
'iframe': True,
'success_url': 'http://google.com/success',
'fail_url': 'http://google.com/fail',
'target': 'iframe',
'pay_source': 'qw',
})
self.assertEqual(url, expected)
def test_check_aut
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/source/repos/clone.py
|
Python
|
bsd-3-clause
| 3,580
| 0.002235
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clone Google Cloud Platform git repository.
"""
import textwrap
from googlecloudsdk.api_lib.source import git
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import store as c_store
class Clone(base.Command):
"""Clone project git repository in the current directory."""
detailed_help = {
'DESCRIPTION': """\
This command clones git repository for the currently active
Google Cloud Platform project into the specified folder in the
current directory.
""",
'EXAMPLES': textwrap.dedent("""\
To use the default Google Cloud repository for development, use the
following commands. We recommend that you use your project name as
TARGET_DIR to make it apparent which directory is used for which
project. We also recommend to clone the repository named 'default'
since it is automatically created for each project, and its
contents can be browsed and edited in the Developers Console.
$ gcloud init
$ gcloud source repos clone default TARGET_DIR
$ cd TARGET
|
_DIR
... create/edit files and create one or more commits ...
$ git push origin master
"""),
|
}
@staticmethod
def Args(parser):
parser.add_argument(
'--dry-run',
action='store_true',
help=('If provided, prints the command that would be run to standard '
'out instead of executing it.'))
parser.add_argument(
'src',
metavar='REPOSITORY_NAME',
help=('Name of the repository. '
'Note: Google Cloud Platform projects generally have (if '
'created) a repository named "default"'))
parser.add_argument(
'dst',
metavar='DIRECTORY_NAME',
nargs='?',
help=('Directory name for the cloned repo. Defaults to the repository '
'name.'))
@c_exc.RaiseToolExceptionInsteadOf(git.Error, c_store.Error)
def Run(self, args):
"""Clone a GCP repository to the current directory.
Args:
args: argparse.Namespace, the arguments this command is run with.
Raises:
ToolException: on project initialization errors.
Returns:
The path to the new git repository.
"""
# Ensure that we're logged in.
c_store.Load()
project_id = properties.VALUES.core.project.Get(required=True)
project_repo = git.Git(project_id, args.src)
path = project_repo.Clone(destination_path=args.dst or args.src,
dry_run=args.dry_run)
if path and not args.dry_run:
log.status.write('Project [{prj}] repository [{repo}] was cloned to '
'[{path}].\n'.format(prj=project_id, path=path,
repo=project_repo.GetName()))
|
AlexHagerman/code_louisville_django
|
LouiePizza/Menus/migrations/0003_menuitem.py
|
Python
|
mpl-2.0
| 780
| 0.002564
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-24 00:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Menus', '000
|
2_auto_20170824_0051'),
]
operations = [
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.FloatField()),
('item_type',
|
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Menus.MenuItemType')),
],
),
]
|
caalle/Python-koans
|
python 3/koans/about_iteration.py
|
Python
|
mit
| 4,469
| 0.010293
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
fib = 0
for num in it:
fib += num
self.assertEqual(__ , fib)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual(__, next(stages))
next(stages)
self.assertEqual(__, next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegexpMatches(err_msg, __)
# -----------------
|
-------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping =
|
map(self.add_ten, seq)
self.assertNotEqual(list, type(mapping).__name__)
self.assertEqual(__, type(mapping).__name__)
        # In Python 3 built-in iterator funcs return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual(__, mapped_seq)
        # Note, iterator methods actually return objects of iter type in
# python 3. In python 2 map() would give you a list.
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual(__, even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, type(result).__name__)
# Reduce() syntax is same as Python 2
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals', 'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
        # Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
# Files act like a collection of lines
file = open("example_file.txt")
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
# NOTE: You can create your own collections that work with each,
# map, select, etc.
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
if file:
file.close()
|
jedie/django-cms-tools
|
django_cms_tools_tests/test_command_list_page_by_plugin.py
|
Python
|
gpl-3.0
| 2,800
| 0.005357
|
"""
:created: 24.04.2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import os
from django.core.management import call_command
from cms.models import Page
# https://github.com/jedie/django-tools
from django_tools.unittest_utils.django_command import DjangoCommandMixin
from django_tools.unittest_utils.stdout_redirect import StdoutStderrBuffer
from django_tools.unittest_utils.unittest_base import BaseUnittestCase
# Django CMS Tools
import django_cms_tools_test_project
from django_cms_tools.fixture_helper.pages import CmsPageCreator
MANAGE_DIR = os.path.abspath(os.path.dirname(django_cms_tools_test_project.__file__))
class CmsPluginUnittestGeneratorTestCase(DjangoCommandMixin, BaseUnittestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
page, created = CmsPageCreator().create()
assert created
assert Page.objects.all().count() == 2 # draft + publish
def test_list_all_plugins(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin")
output = buff.get_output()
print(output)
self.assertEqual_dedent(output,
"""
No plugin-type given.
All CMS plugin types:
12 instances: 'djangocms_text_ckeditor.TextPlugin'
There are 1 plugins.
"""
)
def test_wrong_plugin_type(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin", "foobar_app.FooBarPlugin")
output = buff.get_output()
print(output)
self.assertEqual_dedent(output,
"""
ERROR: Given plugin type 'foobar_app.FooBarPlugin' doesn't exists!
Hint: Maybe you mean: 'FooBarPlugin' ?!?
All CMS plugin types:
12 instances: 'djangocms_text_ckeditor.TextPlugin'
|
There are 1 plugins.
"""
)
def test_TextPlugin(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin", "TextPlugin")
output = buff.get_output()
print(output)
self.assertIn("Found 12 'TextPlugin' plugins... 2
|
placeholders... 1 pages:", output)
self.assertIn("* CmsPageCreator in en", output)
self.assertIn("* /de/", output)
self.assertIn("* /en/", output)
self.assertIn("There are 2 app models with PlaceholderFields:", output)
self.assertIn("* StaticPlaceholder 'draft,public' - 0 total entries Skip", output)
self.assertIn("* LandingPageModel 'content' - 0 total entries Skip", output)
|
squidpie/somn
|
src/somnMesh.py
|
Python
|
mit
| 23,564
| 0.018673
|
#!/usr/bin/python3.3
import somnTCP
import somnUDP
import somnPkt
import somnRouteTable
from somnLib import *
import struct
import queue
import threading
import socket
import time
import random
PING_TIMEOUT = 5
class somnData():
def __init__(self, ID, data):
self.nodeID = ID
self.data = data
class somnMesh(threading.Thread):
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
UDPAlive = threading.Event()
networkAlive = threading.Event()
routeTable = somnRouteTable.somnRoutingTable()
cacheId = [0,0,0,0]
cacheRoute = [0,0,0,0]
cacheNextIndex = 0
_mainLoopRunning = 0
enrolled = False
nodeID = 0
nodeIP = "127.0.0.1"
nodePort = 0
lastEnrollReq = 0
connCache = [('',0),('',0),('',0)]
_printCallbackFunction = None
def __init__(self, TxDataQ, RxDataQ, printCallback = None):
threading.Thread.__init__(self)
self.CommTxQ = TxDataQ
self.CommRxQ = RxDataQ
random.seed()
self.nodeID = random.getrandbits(16)
self.nextConnCacheIndex = 0
self._printCallbackFunction = printCallback
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
self.pendingRouteID = 0
self.pendingRoute = 0
self.pendingRouteHTL = 1
self.routeLock = threading.Lock()
self.routeBlock = threading.Event()
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingCache = [0,0,0,0,0]
self.pingLock = threading.Lock()
def printinfo(self, outputStr):
if self._printCallbackFunction == None:
print("{0:04X}: {1}".format(self.nodeID, outputStr))
else:
self._printCallbackFunction(self.nodeID, outputStr)
def enroll(self):
#self.printinfo("enrolling")
tcpRespTimeout = False
ACK = random.getrandbits(16)
enrollPkt = somnPkt.SomnPacket()
enrollPkt.InitEmpty("NodeEnrollment")
enrollPkt.PacketFields['ReqNodeID'] = self.nodeID
enrollPkt.PacketFields['ReqNodeIP'] = IP2Int(self.nodeIP)
enrollPkt.PacketFields['ReqNodePort'] = self.nodePort
enrollPkt.PacketFields['AckSeq'] = ACK
udp = somnUDP.somnUDPThread(enrollPkt, self.UDPRxQ, self.networkAlive, self.UDPAlive)
udp.start()
while not tcpRespTimeout and self.routeTable.getNodeCount() < 3:
try:
enrollResponse = self.TCPRxQ.get(timeout = 1)
except queue.Empty:
tcpRespTimeout = True
break
respNodeID = enrollResponse.PacketFields['RespNodeID']
respNodeIP = enrollResponse.PacketFields['RespNodeIP']
respNodePort = enrollResponse.PacketFields['RespNodePort']
#self.printinfo("Got enrollment response from {0:04X}".format(respNodeID))
self.routeTable.getNodeIndexFromId(respNodeID)
if self.routeTable.getNodeIndexFromId(respNodeID) > 0:
self.TCPRxQ.task_done()
continue
elif enrollResponse.PacketType == somnPkt.SomnPacketType.NodeEnrollment and enrollResponse.PacketFields['AckSeq'] == ACK:
        if self.routeTable.addNode(respNodeID, Int2IP(respNodeIP), respNodePort) < 0:
self.printinfo("Something went wrong in adding the node")
#TODO: Can we make this an exception?
        packedEnrollResponse = somnPkt.SomnPacketTxWrapper(enrollResponse, Int2IP(respNodeIP), respNodePort)
self.TCPTxQ.put(packedEnrollResponse)
self.enrolled = True
self.printinfo("Enrolled to: {0:04X}".format(respNodeID))
self.TCPRxQ.task_done()
#break
return udp
def run(self):
socket.setdefaulttimeout(5)
self.networkAlive.set()
Rx = somnTCP.startSomnRx(self.nodeIP, self.nodePort, self.networkAlive, self.TCPRxQ)
Tx = somnTCP.startSomnTx(self.networkAlive, self.TCPTxQ)
while True:
if Rx.bound and Tx.bound: break
self.nodePort = Rx.port
#self.printinfo("Port: {0}".format(self.nodePort))
enrollAttempts = 0
while not self.enrolled:
self.UDPAlive.set()
UDP = self.enroll()
if self.enrolled:
break
elif enrollAttempts < 2:
self.UDPAlive.clear()
UDP.join()
enrollAttempts = enrollAttempts + 1
else:
self.enrolled = True
self.printinfo("Enrolled as Alpha Node")
break
#start main loop to handle incoming queueus
self._mainLoopRunning = 1
rxThread = threading.Thread(target = self._handleTcpRx)
rxThread.start()
self.pingTimer.start()
while self._mainLoopRunning:
self._handleUdpRx()
self._handleTx()
# Do a bunch of stuff
try:
self.pingTimer.cancel()
except:
pass
self.networkAlive.clear()
UDP.networkAlive = False
UDP.join()
Rx.join()
Tx.join()
self.TCPRxQ.join()
self.TCPTxQ.join()
self.CommTxQ.join()
self.CommRxQ.join()
def _pingRouteTable(self):
# check if previous route requests were returned
self.pingLock.acquire()
for idx, node in enumerate(self.pingCache):
if node != 0:
# remove nodes where no response was returned
self.printinfo("Dropping Node: {0:04X}".format(node))
self.routeTable.removeNodeByIndex(self.routeTable.getNodeIndexFromId(node))
# unset returned route cache
self.pingCache[idx] = 0
self.pingLock.release()
# send a RouteReqeust for node 0xFFFF to each entry in the routing table
for node in self.routeTable.getConnectedNodes():
nodeIndex = self.routeTable.getNodeIndexFromId(node)
self.pingLock.acquire()
self.pingCache[nodeIndex - 1] = node
self.pingLock.release()
pingPkt = somnPkt.SomnPacket()
pingPkt.InitEmpty(somnPkt.SomnPacketType.RouteRequest)
pingPkt.PacketFields['SourceID'] = self.nodeID
pingPkt.PacketFields['LastNodeID'] = self.nodeID
pingPkt.PacketFields['DestID'] = 0xFFFF
pingPkt.PacketFields['HTL'] = 1
TxInfo = self.routeTable.getNodeInfoByIndex(nodeIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(pingPkt, TxInfo.nodeAddress, TxInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingTimer.start()
def _handleTx(self):
#print("Handle TX")
try:
TxData = self.CommTxQ.get(False)
except:
return
#TODO: Tx Data coming from the Comm Layer needs to packetized
route = 0
#check cache for route to dest ID
if TxData.nodeID in self.cacheId:
route = self.cacheRoute[self.cacheId.index(TxData.nodeID)]
else:
route = self._getRoute(TxData.nodeID)
#TODO Lock around this
self.pendingRouteID = 0
self.pendingRouteHTL = 1
if route == 0: # no valid rout found
self.printinfo(" *** NO ROUTE FOUND *** ")
return
# inset path into cache, for now this is a FIFO eviction policy, should upgrade to an LFU policy
self.cacheId[self.cacheNextIndex] = TxData.nodeID
self.cacheRoute[self.cacheNextIndex] = route
self.cacheNextIndex = self.cacheNextIndex + 1
if self.cacheNextIndex > 3:
self.cacheNextIndex = 0
#pop first step in route from route string
nextRoute, newRoute = self._popRoute(route)
#nextRouteStep = newRoute[0]
#set route string in packet
TxPkt = somnPkt.SomnPacket()
TxPkt.InitEmpty(somnPkt.SomnPacketType.Message)
TxPkt.PacketFields['SourceID'] = self.nodeID
TxPkt.PacketFields['DestID'] = TxData.nodeID
TxPkt.PacketFields['Message'] = TxData.data
TxPkt.PacketFields['Route'] = newRoute
#create wrapper packet to send to next step in route
TxNodeInfo = self.routeTable.getNodeInfoByIndex(nextRoute)
if TxNodeInfo is None:
self.cacheRoute[self.cacheId.index(TxData.nodeID)] = 0
self.CommTxQ.task_done()
self.CommTxQ.put(TxData)
return
txPktWrapper = somnPkt.SomnPacketTxWrapper(TxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
#send packet to TX layer
self.TCPTxQ.put(txPktWrapper)
self.CommTxQ.task_done()
def _handleTcpRx(self):
while self._mainLoopRunning:
try:
RxPkt = self.TCPRxQ.get(False)
except:
continue
pktType = RxPkt.PacketType
#self.printinfo("
|
psiinon/addons-server
|
src/olympia/versions/tests/test_views.py
|
Python
|
bsd-3-clause
| 19,783
| 0
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.utils.encoding import smart_text
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.utils.http import urlquote
from unittest import mock
from pyquery import PyQuery
from olympia import amo
from olympia.access import acl
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon
from olympia.amo.templatetags.jinja_helpers import user_media_url
from olympia.amo.tests import TestCase, addon_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlencode, urlparams
from olympia.files.models import File
from olympia.users.models import UserProfile
def decode_http_header_value(value):
"""
Reverse the encoding that django applies to bytestrings in
HttpResponse._convert_to_charset(). Needed to test header values that we
explicitly pass as bytes such as filenames for content-disposition or
xsendfile headers.
"""
return value.encode('latin-1').decode('utf-8')
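# Editor's illustrative sketch (not part of the original module): the Latin-1
# round trip that decode_http_header_value() reverses. The name
# `_example_header` is hypothetical; Django hands back UTF-8 header bytes as
# latin-1 text, and the helper above recovers the original string.
_example_header = u'fïlename.xpi'.encode('utf-8').decode('latin-1')
assert decode_http_header_value(_example_header) == u'fïlename.xpi'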
class TestViews(TestCase):
def setUp(self):
super(TestViews, self).setUp()
self.addon = addon_factory(
slug=u'my-addôn', file_kw={'size': 1024},
version_kw={'version': '1.0'})
self.version = self.addon.current_version
self.addon.current_version.update(created=self.days_ago(3))
def test_version_update_info(self):
self.version.release_notes = {
'en-US': u'Fix for an important bug',
'fr': u'Quelque chose en français.\n\nQuelque chose d\'autre.'
}
self.version.save()
file_ = self.version.files.all()[0]
file_.update(platform=amo.PLATFORM_WIN.id)
        # Copy the file to create a new one attached to the same version.
# This tests https://github.com/mozilla/addons-server/issues/8950
file_.pk = None
file_.platform = amo.PLATFORM_MAC.id
file_.save()
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
        assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
# pyquery is annoying to use with XML and namespaces. Use the HTML
# parser, but do check that xmlns attribute is present (required by
# Firefox for the notes to be shown properly).
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == 'Fix for an important bug'
# Test update info in another language.
with self.activate(locale='fr'):
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
assert b'<br/>' in response.content, (
'Should be using XHTML self-closing tags!')
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == (
u"Quelque chose en français.<br/><br/>Quelque chose d'autre.")
def test_version_update_info_legacy_redirect(self):
response = self.client.get('/versions/updateInfo/%s' % self.version.id,
follow=True)
url = reverse('addons.versions.update_info',
args=(self.version.addon.slug, self.version.version))
self.assert3xx(response, url, 302)
def test_version_update_info_legacy_redirect_deleted(self):
self.version.delete()
response = self.client.get(
'/en-US/firefox/versions/updateInfo/%s' % self.version.id)
assert response.status_code == 404
def test_version_update_info_no_unlisted(self):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 404
class TestDownloadsBase(TestCase):
fixtures = ['base/addon_5299_gcal', 'base/users']
def setUp(self):
super(TestDownloadsBase, self).setUp()
self.addon = Addon.objects.get(id=5299)
self.file = File.objects.get(id=33046)
self.file_url = reverse('downloads.file', args=[self.file.id])
self.latest_url = reverse('downloads.latest', args=[self.addon.slug])
def assert_served_by_host(self, response, host, file_=None):
if not file_:
file_ = self.file
assert response.status_code == 302
assert response.url == (
urlparams('%s%s/%s' % (
host, self.addon.id, urlquote(file_.filename)
), filehash=file_.hash))
assert response['X-Target-Digest'] == file_.hash
def assert_served_internally(self, response, guarded=True):
assert response.status_code == 200
file_path = (self.file.guarded_file_path if guarded else
self.file.file_path)
assert response[settings.XSENDFILE_HEADER] == file_path
def assert_served_locally(self, response, file_=None, attachment=False):
path = user_media_url('addons')
if attachment:
path += '_attachments/'
self.assert_served_by_host(response, path, file_)
def assert_served_by_cdn(self, response, file_=None):
assert response.url.startswith(settings.MEDIA_URL)
assert response.url.startswith('http')
self.assert_served_by_host(response, user_media_url('addons'), file_)
class TestDownloadsUnlistedVersions(TestDownloadsBase):
def setUp(self):
super(TestDownloadsUnlistedVersions, self).setUp()
self.make_addon_unlisted(self.addon)
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_returns_404(self):
"""File downloading isn't allowed for unlisted addons."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: True)
def test_download_for_unlisted_addon_owner(self):
"""File downloading is allowed for addon owners."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_reviewer(self):
"""File downloading isn't allowed for reviewers."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_unlisted_reviewer(self):
"""File downloading is allowed for unlisted reviewers."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
class TestDownloads(TestDownloadsBase):
|
scrain777/MassivelyUnreliableSystems
|
Voting/Utilities/check.py
|
Python
|
mit
| 2,156
| 0.009276
|
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2016 Steven P. Crain, SUNY Plattsburgh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""First, read in the office stats.
STATS Office Precincts VoteLimit Votes Voters
STATS Lammersville Joint USD Governing Board Members 1 3 150 50
"""
stats=dict()
fstats=open("offices.stats","r")
for line in fstats:
if line[-1]=="\n":
line=line[:-1]
line=line.split("\t")
stats[line[1]]=line[1:]+[0,]
fstats.close()
fin=open("precincts.tsv", "r")
"""Read the header line containing the names of the offices."""
line=fin.readline()
if(line[-1]=='\n'):
line=line[:-1]
offices=line.split("\t")
"""Read the office assignments for each precinct."""
for line in fin:
if line[-1]=="\n":
line=line[:-1]
line=line.split("\t")
for i in range(1,len(line)):
if line[i]:
"""This precinct votes for this office, so tally the number of
voters that we have available.
"""
stats[offices[i]][5]+=int(line[0])
fin.close()
for office in offices[1:]:
if float(stats[office][4])>stats[office][5]:
print(stats[office])
|
mikofski/Carousel
|
examples/PythagoreanThm/pythagorean_thm.py
|
Python
|
bsd-3-clause
| 2,570
| 0
|
#! python
from carousel.core.data_sources import DataSource, DataParameter
from carousel.core.outputs import Output, OutputParameter
from carousel.core.formulas import Formula, FormulaParameter
from carousel.core.calculations import Calc, CalcParameter
from carousel.core.simulations import Simulation, SimParameter
from carousel.core.models import Model, ModelParameter
from carousel.contrib.readers import ArgumentReader
from carousel.core import UREG
import numpy as np
import os
DATA = {'PythagoreanData': {'adjacent_side': 3.0, 'opposite_side': 4.0}}
class PythagoreanData(DataSource):
adjacent_side = DataParameter(units='cm', uncertainty=1.0)
opposite_side = DataParameter(units='cm', uncertainty=1.0)
def __prepare_data__(self):
for k, v in self.parameters.iteritems():
self.uncertainty[k] = {k: v['uncertainty'] * UREG.percent}
class Meta:
data_cache_enabled = False
data_reader = ArgumentReader
class PythagoreanOutput(Output):
hypotenuse = OutputParameter(units='cm')
def f_pythagorean(a, b):
a, b = np.atleast_1d(a), np.atleast_1d(b)
return np.sqrt(a * a + b * b).reshape(1, -1)
class PythagoreanFormula(Formula):
f_pythagorean = FormulaParameter(
units=[('=A', ), ('=A', '=A')],
isconstant=[]
)
class Meta:
module = __name__
class PythagoreanCalc(Calc):
pythagorean_thm = CalcParameter(
formula='f_pythagorean',
args={'data': {'a': 'adjacent_side', 'b': 'opposite_side'}},
returns=['hypotenuse']
)
class PythagoreanSim(Simulation):
settings = SimParameter(
ID='Pythagorean Theorem',
commands=['start', 'load', 'run'],
sim_length=[0, 'hour'],
write_fields={
'data': ['adjacent_side', 'opposite_side'],
'outputs': ['hypotenuse']
}
)
class PythagoreanModel(Model):
data = ModelParameter(sources=[PythagoreanData])
outputs = ModelParameter(sources=[PythagoreanOutput])
formulas = ModelParameter(sources=[PythagoreanFormula])
calculations = ModelParameter(sources=[PythagoreanCalc])
simulations = ModelParameter(sources=[PythagoreanSim])
class Meta:
modelpath = os.path.dirname(__file__)
if __name__ == '__main__':
m = PythagoreanModel()
m.command('run', data=DATA)
out_reg = m.registries['outputs']
fmt = {
'output': out_reg['hypotenuse'],
'uncertainty': out_reg.uncertainty['hypotenuse']['hypotenuse']
}
print 'hypotenuse = %(output)s +/- %(uncertainty)s' % fmt
|
harryberlin/BMW-RaspControl-Skin
|
skin.confluence-vertical/scripts/system_shutdown.py
|
Python
|
gpl-2.0
| 138
| 0
|
import os
import xbmc
import time
os.system("sudo service HelgeInterface stop")
time.sleep(1)
xbmc.executebuiltin('XBMC.Powerdown')
pass
|
erikdejonge/django-htmlmin
|
htmlmin/decorators.py
|
Python
|
bsd-2-clause
| 837
| 0
|
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from functools import wraps
from htmlmin.minify import html_minify
def minified_response(f):
@wraps(f)
def minify(*args, **kwargs):
response = f(*args, **kwargs)
minifiable_status = response.status_code == 200
minifiable_content = 'text/html' in response['Content-Type']
        if minifiable_status and minifiable_content:
response.content = html_minify(response.content)
return response
return minify
def not_minified_response(f):
@wraps(f)
def not_minify(*args, **kwargs):
response = f(*args, **kwargs)
response.minify_response = False
return response
return not_minify
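# Editor's illustrative sketch (not part of the original module): typical usage
# on a Django view; the view name and template below are hypothetical.
#
#   @minified_response
#   def home(request):
#       return render(request, "home.html")
#
# not_minified_response is applied the same way when a view must opt out of
# minification.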
|
Sapphirine/Human-Activity-Monitoring-and-Prediction
|
analysis.py
|
Python
|
apache-2.0
| 6,718
| 0.004614
|
__author__ = 'Chao'
import numpy as np
from sklearn import svm, cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
activity_label = {'1': 'WALKING',
'2': 'WALKING_UPSTAIRS',
'3': 'WALKING_DOWNSTAIRS',
'4': 'SITTING',
'5': 'STANDING',
'6': 'LAYING'}
# ############################# Open data set ###############################
X = []
y = []
X_fin = []
y_fin = []
print "Opening dataset..."
try:
with open("X_train.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X.append(pair)
f.close()
with open("y_train.txt", 'rU') as f:
res = list(f)
for line in res:
y.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
try:
with open("X_test.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X_fin.append(pair)
f.close()
with open("y_test.txt", 'rU') as f:
res = list(f)
for line in res:
y_fin.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
print "Dataset opened."
X = np.array(X)
y = np.array(y)
###### Separate data set into 70% training set and 30% test set
print "Separating data into 70% training set & 30% test set..."
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3)
print "Dataset separated."
###### Get best parameters ######
############################### Kernel=Linear ###############################
print "######## SVM, Kernel = Linear #########"
#C_linear = [0.1, 1, 10, 100]
C_linear = [3]
result_linear = []
print "C value chosen from: ", C_linear
print "Calculating accuracy with K-fold..."
for C in C_linear:
svc_linear = svm.SVC(kernel='linear', C=C)
scores = cross_validation.cross_val_score(svc_linear, X_train, y_train, scoring='accuracy', cv=6)
result_linear.append(scores.mean())
print "result:", result_linear
#Result with different C are equal, so here choose C=1 directly as the best parameter.
best_param_linear = {"C": 3}
#linear_test_score = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X_test, y_test).score(X_test, y_test)
#rbf_test_score = svm.SVC(kernel='rbf', C=best_param_rbf.get("C"), gamma=best_param_rbf.get("gamma")).fit(X_test, y_test).score(X_test, y_test)
#poly_test_score = svm.SVC(kernel='poly', C=best_param_poly.get("C"), degree=best_param_poly.get("degree")).fit(X_test, y_test).score(X_test, y_test)
linear_test = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = linear_test.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
#print "Linear Kernel test score: ", linear_test_score
#print "RBF Kernel test score: ", rbf_test_score
#print "Poly Kernel test score: ", poly_test_score
################################### Random Forests ####################################
print "##### Random Forest ######"
n_estimators_list = range(1, 16, 1)
result_random_forests = []
max_score_rf = float("-inf")
best_param_rf = None
for n_estimators in n_estimators_list:
print "Testing n_estimators = ", n_estimators
rf_clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=None, min_samples_split=1, random_state=0)
scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring="accuracy", cv=6)
result_random_forests.append(scores.mean())
if scores.mean() > max_score_rf:
max_score_rf = scores.mean()
best_param_rf = {"n_estimators": n_estimators}
print "number of trees: ", n_estimators_list
print "results: ", result_random_forests
print "best accuracy: ", max_score_rf
print "best parameter: ", best_param_rf
rf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None,
min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,
y_test)
print "Test set accuracy: ", rf_clf_test_score
rf_clf = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None, min_samples_split=1,
random_state=0).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = rf_clf.predict(X_fin[i])
b = y_fin[i]
print "+ ", a[0],
print "- ", b
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
################################### K Nearest Neighbors ####################################
print "##### K Nearest Neighbors ######"
n_neighbors_list = range(1, 6, 1)
result_n_neighbors = []
max_score_knn = float("-inf")
best_param_knn = None
for n_neighbors in n_neighbors_list:
print "Testing n_neighbors = ", n_neighbors
neigh = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_validation.cross_val_score(neigh, X_train, y_train, scoring="accuracy", cv=6)
result_n_neighbors.append(scores.mean())
if scores.mean() > max_score_knn:
max_score_knn = scores.mean()
best_param_knn = {"n_neighbors": n_neighbors}
print "number of neighbors: ", n_neighbors_list
print "results: ", result_n_neighbors
print "best accuracy: ", max_score_knn
print "best parameter
|
: ", best_param_knn
neigh_test_score = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X_test, y_test).score(X_test, y_test)
print "
|
Test set accuracy: ", neigh_test_score
neigh = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = neigh.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
|
adamcharnock/lightbus
|
lightbus/utilities/singledispatch.py
|
Python
|
apache-2.0
| 447
| 0
|
import sys
if sys.version_info >= (3, 8):
from functools import singledispatchmethod
else:
from functools import singledispatch, update_wrapper
def singledispatchmethod(func):
        dispatcher = singledispatch(func)
def wrapper(*args, **kw):
return dispatcher.dispatch(args[1].__class__)(*args, **kw)
wrapper.register = dispatcher.register
update_wrapper(wrapper, func)
return wrapper
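# --- Editor's illustrative sketch (not part of the original module) ----------
# A minimal check that dispatch keys off the type of the first argument after
# `self`, identically on Python 3.8+ (stdlib) and on the backport above. The
# `_Demo` class is made up for demonstration.
if __name__ == "__main__":
    class _Demo:
        @singledispatchmethod
        def describe(self, value):
            return "something else"

        @describe.register(int)
        def _(self, value):
            return "an int"

    assert _Demo().describe(3) == "an int"
    assert _Demo().describe("x") == "something else"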
|
dblalock/dig
|
tests/exper_bits.py
|
Python
|
mit
| 34,782
| 0.000633
|
#!/usr/bin/env python
import functools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import time
import kmc2
from collections import namedtuple
from scipy import stats
from sklearn import cluster
from sklearn.decomposition import TruncatedSVD
from numba import jit
from datasets import load_dataset, Datasets
from joblib import Memory
_memory = Memory('.', verbose=0)
# ================================================================ Distances
def dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _learn_expected_dists_for_diffs(X_embed, X_quant, base_dist_func=dists_sq,
samples_per_bin=1e3):
# TODO try fitting dists based on orig data, not embedded
assert np.array_equal(X_quant[:10], X_quant[:10].astype(np.int))
assert X_embed.shape == X_quant.shape
uniqs = np.unique(X_quant)
cardinality = len(uniqs)
dists = np.zeros(cardinality)
counts = np.zeros(cardinality)
assert np.max(uniqs) == (cardinality - 1) # must be ints 0..b for some b
nsamples = int(counts.size * samples_per_bin)
for n in range(nsamples):
row1, row2 = np.random.randint(X_embed.shape[0], size=2)
col1, col2 = np.random.randint(X_embed.shape[1], size=2)
diff = np.abs(X_quant[row1, col1] - X_quant[row2, col2]).astype(np.int)
dist = base_dist_func(X_embed[row1, col1], X_embed[row2, col2])
counts[diff] += 1
dists[diff] += dist
assert np.min(counts) > 0
dists /= counts
return dists - np.min(dists)
# return dists / counts # TODO uncomment
# return np.array([base_dist_func(i, 0) for i in np.arange(cardinality)])
def learn_dists_func(X_embed, X_quant, base_dist_func=dists_sq,
samples_per_bin=1e3):
"""
Args:
X_embed (2D, array-like): the data just before quantization
X_quant (2D, array-like): quantized version of `X_embed`
base_dist_func (f(a, b) -> R+): function used to compute distances
between pairs of scalars
samples_per_bin (scalar > 0): the expected number of samples per bin
Returns:
f(X, q), a function with the same signature as `dists_sq` and `dists_l1`
"""
expected_dists = _learn_expected_dists_for_diffs(
X_embed, X_quant, base_dist_func, samples_per_bin)
print "expected_dists: ", expected_dists
def f(X, q, expected_dists=expected_dists):
diffs = np.abs(X - q)
orig_shape = diffs.shape
# assert np.min(diffs)
dists = expected_dists[diffs.ravel().astype(np.int)]
return dists.reshape(orig_shape).sum(axis=-1)
return f
def dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def dists_elemwise_l1(x, q):
return np.abs(x - q)
LUT_QUANTIZE_FLOOR = 'floor'
def learn_query_lut(X_embed, X_quant, q_embed,
elemwise_dist_func=dists_elemwise_sq,
samples_per_bin=1e3,
quantize_algo=LUT_QUANTIZE_FLOOR):
assert np.array_equal(X_embed.shape, X_quant.shape)
# assert np.array_equal(q_embed.shape, q_quant.shape)
assert np.equal(X_embed.shape[-1], q_embed.shape[-1])
ndims = q_embed.shape[-1]
uniqs = np.unique(X_quant)
cardinality = len(uniqs)
distances = np.zeros((cardinality, ndims))
counts = np.zeros((cardinality, ndims))
# assert cardinality == 4 # TODO rm
assert np.max(uniqs) == (cardinality - 1) # must be ints 0..b for some b
nsamples = min(int(cardinality * samples_per_bin), X_embed.shape[0])
all_cols = np.arange(ndims, dtype=np.int)
for n in range(nsamples):
bins = X_quant[n].astype(np.int)
dists = elemwise_dist_func(X_embed[n], q_embed)
counts[bins, all_cols] += 1
distances[bins, all_cols] += dists.ravel()
# TODO also learn avg dist and set scale factor such that avg point will
# just barely integer overflow
assert np.min(counts) > 0
return np.asfortranarray(distances / counts)
# def _inner_dists_lut(X_quant, q_lut, all_cols):
# @jit
def _inner_dists_lut(X_quant, q_lut):
# ret = np.zeros(X_quant.shape[0])
# offset cols of X_quant so that col i has offset of i * `cardinality`;
# this will allow us to directly index into q_lut all at once
cardinality, ndims = q_lut.shape
offsets = np.arange(ndims, dtype=np.int) * cardinality
X_quant_offset = X_quant + offsets
dists = q_lut.T.ravel()[X_quant_offset.ravel()]
# dists = q_lut.T.ravel()[X_quant_offset.reshape(-1)]
dists = dists.reshape(X_quant.shape)
return np.sum(dists, axis=-1)
# ndims = q_lut.shape[-1]
# for i in range(X_quant.shape[0]):
# row = X_quant[i]
# for i, row in enumerate(X_quant):
# for j in range(ndims):
# ret[i] += q_lut[row[j], j]
# dists = q_lut[row, all_cols]
# ret[i] = np.sum(dists)
# return ret
# @profile
def dists_lut(X_quant, q_lut): # q_lut is [cardinality, ndims]
"""
>>> X_quant = np.array([[0, 2], [1, 0]], dtype=np.int)
>>> q_lut = np.array([[10, 11, 12], [20, 21, 22]]).T
>>> dists_lut(X_quant, q_lut)
array([ 32., 31.])
"""
assert X_quant.shape[-1] == q_lut.shape[-1]
# return dists_sq(X_quant, q_lut) # TODO rm # yep, identical output
# ndims = q_lut.shape[-1]
X_quant = np.atleast_2d(X_quant.astype(np.int))
# all_cols = np.arange(ndims, dtype=np.int)
return _inner_dists_lut(X_quant, q_lut)
# cardinality, ndims = q_lut.shape
# offsets = np.arange(ndims, dtype=np.int) * cardinality
# X_quant_offset = X_quant + offsets
# dists = q_lut.T.ravel()[X_quant_offset.ravel()]
# # dists = q_lut.T.ravel()[X_quant_offset.reshape(-1)]
# dists = dists.reshape(X_quant.shape)
# return np.sum(dists, axis=-1)
def dists_to_vects(X, q):
row_norms = np.sum(X*X, axis=1, keepdims=True)
q_norms = np.sum(q*q, axis=1)
prods = np.dot(X, q.T)
return -2 * prods + row_norms + q_norms
def hamming_dist(v1, v2):
return np.count_nonzero(v1 != v2)
def hamming_dists(X, q):
return np.array([hamming_dist(row, q) for row in X])
# ================================================================ Misc
def randwalk(*args):
ret = np.random.randn(*args)
ret = np.cumsum(ret, axis=-1)
return ret / np.linalg.norm(ret, axis=-1, keepdims=True) * ret.shape[-1]
def top_k_idxs(elements, k, smaller_better=False):
if smaller_better:
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn)[:k]
else:
which_nn = len(elements) - 1 - np.arange(k)
return np.argpartition(elements, kth=which_nn)[-k:][::-1]
def find_knn(X, q, k):
dists = dists_sq(X, q)
idxs = top_k_idxs(dists, k, smaller_better=True)
return idxs, dists[idxs]
def orthogonalize_rows(A):
Q, R = np.linalg.qr(A.T)
return Q.T
# ================================================================ Clustering
@_memory.cache
def kmeans(X, k):
seeds = kmc2.kmc2(X, k)
# plt.imshow(centroids, interpolation=None)
estimator = cluster.MiniBatchKMeans(k, init=seeds, max_iter=16).fit(X)
# estimator = cluster.KMeans(k, max_iter=4).fit(X)
return estimator.cluster_centers_, estimator.labels_
def groups_from_labels(X, labels, num_centroids):
# form groups associated with each centroid
groups = [[] for _ in range(num_centroids)]
for i, lbl in enumerate(labels):
        groups[lbl].append(X[i])
    for i, g in enumerate(groups[:]):
groups[i] = np.array(g, order='F')
# groups[i] = np.array(g)
# group_sizes = [len(g) for g in groups]
# huh; these are like 80% singleton clusters in 64D and 32 kmeans iters...
# print sorted(group_sizes)
# plt.hist(labels)
# plt.hist(group_sizes, bins=num_centroids)
# plt.show()
return groups
@_memory.cache
def load_dataset_and_groups(which_dataset, num_centroids=256,
**load_dataset_kwargs):
    X, q = load_dataset(which_dataset, **load_dataset_kwargs)
|
santhoshtr/silpa
|
src/silpa/modules/dictionary/dictionary.py
|
Python
|
agpl-3.0
| 6,167
| 0.020593
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Dictionary
# Copyright 2008 Santhosh Thottingal <santhosh.thottingal@gmail.com>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions email: santhosh.thottingal@gmail.com
# URL: http://www.smc.org.in
from common import *
from utils import silpalogger
import os
from dictdlib import DictDB
from jsonrpc import *
from wiktionary import get_def
try:
from modules.render import render
except:
silpalogger.exception("Failed to import render module")
# Have the render instance initiated only once
renderer = render.getInstance()
# One image for No image found
no_meaning_found = renderer.render_text("No meanings found","png",0,0,"Red",font_size=10)
class Dictionary(SilpaModule):
def __init__(self):
self.template=os.path.join(os.path.dirname(__file__), 'dictionary.html')
        self.response = SilpaResponse(self.template)
self.imageyn=None
self.text=None
self.dictionaryname=None
self.fontsize=16
self.imagewidth=300
self.imageheight=300
def set_request(self,request):
self.request=request
self.response.populate_form(self.request)
self.text=self.request.get('text')
self.imageyn=self.request.get('image')
if self.request.get('fontsize')!=None:
self.fontsize= int( self.request.get('fontsize'))
if self.request.get('imagewidth')!=None:
self.imagewidth=int(self.request.get('imagewidth'))
if self.request.get('imageheight')!=None:
self.imageheight=int(self.request.get('imageheight'))
self.dictionaryname=self.request.get('dict')
def get_response(self):
if self.imageyn != None:
if self.imageyn.lower()=="y":
image_url = self.getdef_image(self.text,self.dictionaryname,"png",self.imagewidth,self.imageheight,"Black",self.fontsize)
self.response.response_code = "303 see other"
self.response.header = [('Location', image_url)]
elif self.imageyn.lower() == "w":
image_url = self.get_wiktionary_def_image(self.text,self.dictionaryname,"png",self.imageheight,self.imagewidth,"Black",self.fontsize)
self.response.response_code = "303 See other"
self.response.header = [('Location',image_url)]
else:
wordmeaning=self.getdef(self.text,self.dictionaryname)
self.response.content = wordmeaning.decode("utf-8")
self.response.response_code = "200 OK"
self.response.mime_type="text/plain;charset=UTF-8"
self.response.header = [('Content-Type','text/plain;charset=UTF-8')]
return self.response
def get_json_result(self):
error=None
_id = 0
try:
if self.request.get('word'):
definition = self.getdef(self.request.get('word'),self.request.get('dictionary'))
data = dumps({"result":definition, "id":_id, "error":error})
except JSONEncodeException:
#translate the exception also to the error
error = {"name": "JSONEncodeException", "message":"Result Object Not Serializable"}
data = dumps({"result":None, "id":id_, "error":error})
return data
def get_form(self):
page = open(self.template,'r').read()
return page
def get_free_dict(self, src, dest):
dict_dir=os.path.join(os.path.dirname(__file__), 'dictionaries')
dictdata=dict_dir+ "/freedict-"+src+"-"+dest
if os.path.isfile(dictdata+".index"):
return dictdata
return None
@ServiceMethod
def getdef(self, word, dictionary):
meaningstring= ""
src = dictionary.split("-")[0]
dest = dictionary.split("-")[1]
dictdata = self.get_free_dict(src,dest)
if dictdata:
dict = DictDB(dictdata)
clean_word = word.lower()
clean_word = clean_word.strip()
meanings = dict.getdef(clean_word)
for meaning in meanings:
meaningstring += meaning
if meaningstring == "None":
meaningstring = "No definition found"
return meaningstring
return meaningstring.decode("utf-8")
@ServiceMethod
def getdef_image(self,word,dictionary,file_type='png', width=0, height=0,color="Black",fontsize=10):
meaning = self.getdef(word,dictionary)
if meaning == "No definition found":
return no_meaning_found
else:
return renderer.render_text(meaning,file_type,width,height,color,font_size=fontsize)
@ServiceMethod
def get_wiktionary_def_image(self,word,dictionary,file_type='png',width=0,height=0,color="Black",fontsize=10):
tmp = dictionary.split("-")
src_lang = tmp[0]
dest_lang = tmp[1]
meaning = get_def(word,src_lang,dest_lang)
if meaning == None:
return no_meaning_found
else:
return renderer.render_text(meaning,file_type,0,0,color,font_size=fontsize)
def get_module_name(self):
return "Dictionary"
def get_info(self):
return "Bilingual Dictionaries"
def getInstance():
return Dictionary()
|
Varbin/EEH
|
_vendor/ldap3/protocol/formatters/validators.py
|
Python
|
bsd-2-clause
| 5,077
| 0.001773
|
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
from ...utils.conv import to_raw
# Validators return True if value is valid, False if value is not valid,
# or a value different from True and False that is a valid value to substitute to the input value
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(input_value):
return True
def validate_generic_single_value(input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
try: # object couldn't have a __len__ method
if len(input_value) == 1:
return True
    except Exception:
        pass
return False
def validate_integer(input_value):
if check_type(input_value, (float, bool)):
return False
if str is bytes: # Python 2, check for long too
if check_type(input_value, (int, long)):
return True
else: # Python 3, int only
if check_type(input_value, int):
return True
sequence = True # indicates if a sequence must be returned
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
else:
sequence = True # indicates if a sequence must be returned
valid_values = [] # builds a list of valid int values
for element in input_value:
try: # try to convert any type to int, an invalid conversion raise TypeError of ValueError, if both are valid and equal then then int() value is used
float_value = float(element)
int_value = int(element)
if float_value == int_value:
valid_values.append(int(element))
else:
return False
except (ValueError, TypeError):
return False
if sequence:
return valid_values
else:
return valid_values[0]
def validate_bytes(input_value):
return check_type(input_value, bytes)
def validate_boolean(input_value):
# it could be a real bool or the string TRUE or FALSE, # only a single valued is allowed
if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
def validate_time(input_value):
# if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
else:
sequence = True # indicates if a sequence must be returned
valid_values = []
changed = False
for element in input_value:
if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time
if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo: # a datetime with a timezone
valid_values.append(element.strftime('%Y%m%d%H%M%S%z'))
else: # datetime without timezone, assumed local and adjusted to UTC
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
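# Editor's illustrative note (not part of the original module): the three
# validator outcomes described in the comment near the top of this file,
# shown concretely with validate_integer():
#   validate_integer(3)     -> True   (already valid)
#   validate_integer('3')   -> 3      (a valid substitute value to use instead)
#   validate_integer('3.5') -> False  (invalid)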
|
jedie/DrQueueIPython
|
DrQueue/__init__.py
|
Python
|
gpl-3.0
| 6,522
| 0.005213
|
# -*- coding: utf-8 -*-
"""
DrQueue main module
Copyright (C) 2011-2013 Andreas Schroeder
This file is part of DrQueue.
Licensed under GNU General Public License version 3. See LICENSE for details.
"""
import platform
import os
import sys
import smtplib
import json
from email.mime.text import MIMEText
from .client import Client
from .job import Job
from .computer import Computer
supported_renderers = ['3delight', '3dsmax', 'aftereffects', 'aqsis', \
'blender', 'cinema4d', 'general', 'lightwave', 'luxrender', 'mantra', \
'maya', 'mentalray', 'nuke', 'shake', 'terragen', 'turtle', 'vray', 'xsi']
supported_os = ['Windows', 'Mac OSX', 'Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', \
'AIX', 'Solaris']
def check_renderer_support(renderer):
"""Check if renderer is supported."""
if renderer in supported_renderers:
return True
else:
return False
def get_rendertemplate(renderer):
"""Return template filename from renderer name"""
filename = ""
if renderer == '3delight':
filename = '3delight_sg.py'
    if renderer == '3dsmax':
        filename = '3dsmax_sg.py'
if renderer == 'aftereffects':
filename = 'aftereffects_sg.py'
if renderer == 'aqsis':
filename = 'aqsis_sg.py'
if renderer == 'blender':
filename = 'blender_sg.py'
if renderer == 'cinema4d':
filename = 'cinema4d_sg.py'
if renderer == 'general':
filename = 'general_sg.py'
if renderer == 'lightwave':
        filename = 'lightwave_sg.py'
if renderer == 'luxrender':
filename = 'luxrender_sg.py'
if renderer == 'mantra':
filename = 'mantra_sg.py'
if renderer == 'maya':
filename = 'maya_sg.py'
if renderer == 'mentalray':
filename = 'mentalray_sg.py'
if renderer == 'nuke':
filename = 'nuke_sg.py'
if renderer == 'pixie':
filename = 'pixie_sg.py'
if renderer == 'shake':
filename = 'shake_sg.py'
if renderer == 'terragen':
filename = 'terragen_sg.py'
if renderer == 'turtle':
filename = 'turtle_sg.py'
if renderer == 'vray':
filename = 'vray_sg.py'
if renderer == 'xsi':
filename = 'xsi_sg.py'
return filename
def get_osname():
"""Return operating system name"""
osname = platform.system()
if osname == 'Darwin':
osname = 'Mac OSX'
return osname
def run_script_with_env(render_script, env_dict):
"""Run template script on IPython engine"""
import platform, os, sys
# set some variables on target machine
env_dict['DRQUEUE_OS'] = platform.system()
env_dict['DRQUEUE_ETC'] = os.path.join(os.getenv('DRQUEUE_ROOT'), "etc")
env_dict['DRQUEUE_LOGFILE'] = os.path.join(os.getenv('DRQUEUE_ROOT'),
"logs", env_dict['DRQUEUE_LOGFILE'])
# import specific render template
sys.path.append(env_dict['DRQUEUE_ETC'])
impmod = render_script.replace('.py', '')
__import__(impmod)
template = sys.modules[impmod]
# run template with env_dict
status = template.run_renderer(env_dict)
return status
def check_deps(dep_dict):
"""Run all dependency checking functions. This method runs directly on the engine."""
if ('os_name' in dep_dict) and (engine_has_os(dep_dict['os_name']) == False):
return False
elif ('minram' in dep_dict) and (engine_has_minram(dep_dict['minram']) == False):
return False
elif ('mincores' in dep_dict) and (engine_has_mincores(dep_dict['mincores']) == False):
return False
elif ('pool_name' in dep_dict) and (engine_is_in_pool(dep_dict['pool_name']) == False):
return False
elif ('job_id' in dep_dict) and (job_is_enabled(dep_dict['job_id']) == False):
return False
else:
return True
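# Editor's illustrative sketch (not part of the original module): a typical
# dependency dict as consumed by check_deps() on an engine; the values below
# are made up.
#   check_deps({'os_name': 'Linux', 'minram': 4, 'mincores': 2,
#               'pool_name': 'renderfarm', 'job_id': 'someid'})
# returns True only if every requirement listed in the dict is met on this
# engine.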
def engine_is_in_pool(pool_name):
"""Check if engine belongs to certain pool. This method runs directly on the engine."""
# check os.environ["DRQUEUE_POOL"]
if ("DRQUEUE_POOL" in os.environ) and (pool_name in os.environ["DRQUEUE_POOL"]):
return True
else:
return False
def engine_has_os(os_name):
"""Check if engine is running on certain OS. This method runs directly on the engine."""
running_os = get_osname()
if os_name == running_os:
return True
else:
return False
def engine_has_minram(minram):
"""Check if engine has at least minram GB RAM. This method runs directly on the engine."""
mem = Computer.get_memory()
if mem >= minram:
return True
else:
return False
def engine_has_mincores(mincores):
"""Check if engine has at least mincores CPU cores. This method runs directly on the engine."""
ncpus = Computer.get_ncpus()
ncorescpu = Computer.get_ncorescpu()
cores = ncpus * ncorescpu
if cores >= mincores:
return True
else:
return False
def job_is_enabled(job_id):
"""Check if job is enabled. This method runs directly on the engine."""
job = Job.query_db(job_id)
if (job != None) and (job["enabled"] == True):
return True
else:
return False
def send_email(job_name, recipients):
"""Notify recipients about finish of job."""
# load email configuration
user_dir = os.path.expanduser("~")
config_file = os.path.join(user_dir, ".drqueue", "email_config.json")
try:
fp = open(config_file, "rb")
except:
print("Email configuration could not be loaded.")
try:
config = json.load(fp)
except:
print("Email configuration could not be parsed.")
print(config)
mail_from = config['from']
body_text = "Your render job \"%s\" is finished." % job_name
# Create a text/plain message
msg = MIMEText(body_text)
# subject, sender and recipients
msg['Subject'] = "Job \"%s\" is finished" % job_name
msg['From'] = mail_from
msg['To'] = recipients
if config['smtp_ssl'] == "1":
# connect via SSL
smtp = smtplib.SMTP_SSL(config['smtp_server'], int(config['smtp_port']))
else:
# connect without SSL
smtp = smtplib.SMTP(config['smtp_server'], int(config['smtp_port']))
# start TLS encryption
if config['smtp_tls'] == "1":
smtp.starttls()
if config['smtp_auth'] == "1":
# authenticate if required
smtp.login(config['smtp_user'], config['smtp_passwd'])
try:
smtp.sendmail(msg['From'], msg['To'], msg.as_string())
except:
print("Email could not be sent.")
smtp.quit()
|
NightlySide/nightlyside.github.io
|
cb7b5b05c87711611f4700ff52c23409/iss.py
|
Python
|
mit
| 3,088
| 0.006479
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class VM:
def __init__(self, num_reg = 4):
self.regs = [0 for _ in range(num_reg)] # registers
self.pc = 0 # program counter
self.prog = None
self.reg1 = self.reg2 = self.reg3 = self.imm = None
self.running = False
def fetch(self):
instruction = self.prog[self.pc]
self.pc += 1
return instruction
def decode(self, instr):
instrNum = (instr & 0xF000) >> 12
self.reg2 = (instr & 0xF0 ) >> 4;
self.reg1 = (instr & 0xF00 ) >> 8;
self.reg3 = (instr & 0xF )
self.imm = (instr & 0xFF )
return instrNum
def eval(self, instrNum):
if (instrNum == 0):
print("halt")
self.running = False
elif (instrNum == 1):
print(f"loadi r{self.reg1} #{self.imm}")
self.regs[self.reg1] = self.imm
elif (instrNum == 2):
print(f"add r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] + self.regs[self.reg3]
elif (instrNum == 3):
print(f"sub r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] - self.regs[self.reg3]
elif (instrNum == 4):
print(f"mult r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] * self.regs[self.reg3]
elif (instrNum == 5):
print(f"div r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] / self.regs[self.reg3]
elif (instrNum == 6):
print(f"and r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] & self.regs[self.reg3]
elif (instrNum == 7):
print(f"or r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] | self.regs[self.reg3]
elif (instrNum == 8):
print(f"xor r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] ^ self.regs[self.reg3]
def showRegs(self):
res = "regs ="
for k in range(len(self.regs)):
res += " " + str(hex(self.regs[k]))[2:].zfill(4)
print(res)
def run(self, prog, show_regs=True):
self.prog = prog
self.running = True
while self.running:
instruction = self.fetch()
instrNum = self.decode(instruction)
self.eval(instrNum)
if show_regs: self.showRegs()
self.prog = None
if __name__ == "__main__":
    # Instruction layout:
    #      2           3           0           1      = 0x2301
    # num_instr   addr_reg_1  addr_reg_2  addr_reg_3
    #
    # Variant (for loading an immediate integer):
    #      1           0           6 4            = 0x1064
    # num_instr   addr_reg    immediate_value (hex)
prog = [0x1064, 0x11C8, 0x12FA, 0x2301, 0x3132, 0x2201, 0x0000]
vm = VM(num_reg=4)
vm.run(prog)
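    # Editor's note (illustrative, not part of the original script): decoding
    # 0x2301 by hand with the layout above gives opcode 2 (add), reg1=3,
    # reg2=0, reg3=1, i.e. r3 = r0 + r1 = 0x64 + 0xC8 = 0x12C.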
|
cncf/cross-cloud
|
validate-cluster/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 36,222
| 0.000055
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify to the user we are in a transient state and the application
is still converging. Potentially remotely, or we may be in a detached loop
wait state '''
# During deployment the worker has to star
|
lukejuusola/MarkovMimic
|
MarkovChainBot.py
|
Python
|
mit
| 1,445
| 0.039446
|
from random import randint
import re
STARTTAG = "<start>"
ENDTAG = "<end>"
class MarkovChainBot:
''' A Markov Chain text generator
data is a list of strings that it is trained on, ie a list of books.
'''
def __init__(self, exclusion_list):
''' '''
self.data = []
self.probs = {STARTTAG: [ENDTAG]}
self.trained = True
self.exclusions = [re.compile(x) for x in
|
exclusion_list]
def Train(self):
assert type(self.data) == list
for obj in self.data:
assert type(obj) == str
if len(self.data) == 0:
return
self.probs = {}
        def addWordToProbsDict(dic, index, target):
if index in dic.keys():
dic[index].append(target)
else:
dic[index] = [target]
for text in self.data:
words = list(map(lambda x: x.lower(), text.split()))
if not words:
continue
addWordToProbsDict(self.probs, STARTTAG, words[0])
for i in range(len(words)-1):
addWordToProbsDict(self.probs, words[i], words[i+1])
addWordToProbsDict(self.probs, words[len(words)-1], ENDTAG)
def GenerateText(self):
ret = ''
curWord = STARTTAG
while(curWord != ENDTAG):
nextIn = randint(0, len(self.probs[curWord])-1)
curWord = self.probs[curWord][nextIn]
if(curWord == ENDTAG or curWord == STARTTAG):
continue
render = True
for pat in self.exclusions:
if(pat.match(curWord)):
render = False
if render:
ret += curWord
if(curWord != ENDTAG):
ret += ' '
return ret
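# --- Editor's illustrative sketch (not part of the original module) ----------
# Minimal usage; the training sentences and the @-mention exclusion pattern
# below are made up for demonstration.
if __name__ == "__main__":
    bot = MarkovChainBot(exclusion_list=[r"@\w+"])
    bot.data = ["the cat sat on the mat", "the dog sat on the rug"]
    bot.Train()
    print(bot.GenerateText())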
|
nitely/http-lazy-headers
|
tests/tests_fields_/tests_transfer_encoding.py
|
Python
|
mit
| 2,131
| 0
|
# -*- coding: utf-8 -*-
import http_lazy_headers as hlh
from . import utils
class TransferEncodingTest(utils.FieldTestCase):
field = hlh.TransferEncoding
def test_raw_values(self):
self.assertFieldRawEqual(
['gzip, chunked', 'foobar;bar=qux'],
((hlh.Encodings.gzip, hlh.ParamsCI()),
(hlh.Encodings.chunked, hlh.ParamsCI()),
             ('foobar', hlh.ParamsCI([('bar', 'qux')]))))
self.assertFieldRawEqual(
['GziP'],
((hlh.Encodings.gzip, hlh.ParamsCI()),))
def test_str(self):
self.assertFieldStrEqual(
((hlh.Encodings.gzip, hlh.ParamsCI()),
(hlh.Encodings.chunked, hlh.ParamsCI()),
('foobar', hlh.ParamsCI([('bar', 'qux')]))),
'transfer-encoding: gzip, chunked, foobar; bar=qux')
def test_raw_empty(self):
"""
Should NOT allow empty raw value
"""
self.assertRaisesHeaderError([''])
def test_empty(self):
"""
Should NOT allow empty value
"""
self.assertRaisesInternalError(())
def test_raw_bad_values(self):
"""
Should not allow bad raw values
"""
self.assertRawOK(['foo'])
self.assertRawOK(['foo;bar=baz'])
self.assertRaisesHeaderError(['^='])
self.assertRaisesHeaderError(['foo;'])
self.assertRaisesHeaderError(['foo;='])
self.assertRaisesHeaderError(['foo;bar='])
self.assertRaisesHeaderError(['foo;bar = baz'])
self.assertRaisesHeaderError(['foo;bar= baz'])
self.assertRaisesHeaderError(['foo;bar =baz'])
def test_bad_values(self):
"""
Should not allow bad values
"""
good_te = ('foo', hlh.ParamsCI())
self.assertOK([good_te])
self.assertRaisesInternalError([1])
self.assertRaisesInternalError(['foo'])
self.assertRaisesInternalError([None])
self.assertRaisesInternalError([('', hlh.ParamsCI())])
self.assertRaisesInternalError([(None, hlh.ParamsCI())])
self.assertRaisesInternalError([('foo', None)])
|