repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
google-research/google-research | amortized_bo/simple_ising_model.py | Python | apache-2.0 | 6,429 | 0.005755 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of Ising models. Try to find maximum energy configuration."""
import gin
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from amortized_bo import base_problem
from amortized_bo import domains
def _one_hot(inputs, depth):
"""Wrapper around tf.one_hot that passes through one-hot inputs unchanged."""
if len(inputs.shape) == 3:
return inputs
else:
return tf.one_hot(inputs, depth=depth)
def _fully_connected_ising_model_energy(variables, potentials):
"""Ising model with full-connected coupling graph.
Args:
variables: [batch_size, sequence_length] int array (np or Tensor) or
[batch_size, sequence_length, vocab_size] array (corresponding to one-hot
vectors).
potentials: [sequence_length, sequence_length, vocab_size, vocab_size] float
array (np or Tensor).
Returns:
[batch_size] Tensor of energy.
"""
variables = np.asarray(variables, dtype=int)
vocab_size = potentials.shape[-1]
onehot = _one_hot(variables, depth=vocab_size)
return tf.einsum('bim,bjn,ijmn->b', onehot, onehot, potentials)
def _locally_connected_ising_model_energy(variables, potentials):
"""1D Ising model with couplings between adjacent variables.
Args:
variables: [batch_size, sequence_length] int array (np or Tensor) or
[batch_size, sequence_length, vocab_size] array (corresponding to one-hot
vectors).
potentials: [sequence_length - 1, vocab_size, vocab_size]
Returns:
[batch_size] array of energy
"""
variables = np.asarray(variables, dtype=int)
vocab_size = potentials.shape[-1]
oh = _one_hot(variables, depth=vocab_size)
return tf.einsum('bim,bin,imn->b', oh[:, :-1, :], oh[:, 1:, :], potentials)
@gin.configurable
class IsingModel(base_problem.BaseProblem):
"""Maximize energy of a hidden Ising model.
Attributes:
length: Number of nodes.
vocab_size: Number of possible values for each position.
seed: Seed for generating potentials between positions.
fully_connected: If true, simulate fully connected ising model. Otherwise
locally connected.
weight_variance: std of normal distribution used to generate potentials.
**kwargs: additional args passed to the BaseProblem constructor.
"""
def __init__(self,
length=8,
vocab_size=2,
seed=0,
fully_connected=False,
weight_variance=1.0,
**kwargs):
self._domain = domains.FixedLengthDiscreteDomain(
vocab_size=vocab_size, length=length)
super(IsingModel, self).__init__(**kwargs)
random = np.random.RandomState(seed=seed)
if fully_connected:
self.potentials = tf.constant(
random.normal(
size=[length, length, vocab_size, vocab_size],
scale=weight_variance),
dtype=tf.float32)
else:
self.potentials = tf.constant(
random.normal(
size=[length - 1, vocab_size, vocab_size], scale=weight_variance),
dtype=tf.float32)
self._fully_connected = fully_connected
def compute_output_shape(self, input_shape):
return (input_shape[0],)
def __call__(self, sequences):
if self._fully_connected:
return _fully_connected_ising_model_energy(sequences, self.potentials)
else:
return _locally_connected_ising_model_energy(sequences, self.potentials)
def _alternating_sequence(token1, token2, length):
"""Make alternating sequence of token1 and token2 with specified length."""
return [(token2 if i % 2 else token1) for i in range(length)]
@gin.configurable
class AlternatingChainIsingModel(base_problem.BaseProblem):
"""Ising model with a controllable number of isolated local optima.
Suppose the model's vocabulary is {A, B, C, D, ...}. We break the vocabulary
into pairs {(A, B), (C, D), ...}. The energy function counts the number of
times elements of a pair are next to each other (e.g., the number of times a B
is next to an A or a C is next to a D) in the sequence.
Note that there are multiple isolated local optima:
ABABAB, BABABA, CDCDCD, | DCDCDC.
This problem serves as a benchmark for solvers' ability to find all local
optima.
Attributes:
length: Number of nodes.
vocab_size: Numb | er of possible values for each position. Must be even.
**kwargs: additional args passed to the BaseProblem constructor.
"""
def __init__(self, length=4, vocab_size=2, **kwargs):
self._domain = domains.FixedLengthDiscreteDomain(
vocab_size=vocab_size, length=length)
super(AlternatingChainIsingModel, self).__init__(**kwargs)
if vocab_size % 2:
raise ValueError('vocab_size must be even for '
'AlternatingChainIsingModel.')
potentials = np.zeros(shape=[vocab_size, vocab_size], dtype=np.float32)
for i in range(0, vocab_size, 2):
potentials[i][i + 1] = 1.
potentials[i + 1][i] = 1.
self.potentials = tf.tile(potentials[tf.newaxis, :, :], [length - 1, 1, 1])
self._global_optima = self._get_global_optima()
def _get_global_optima(self):
optima = []
for i in range(0, self._domain.vocab_size, 2):
optima.append(
_alternating_sequence(
token1=i, token2=(i + 1), length=self._domain.length))
optima.append(
_alternating_sequence(
token1=(i + 1), token2=i, length=self._domain.length))
return optima
def compute_metrics(self, population, fast_only=False):
del fast_only
return {
'fraction_of_global_optima_found':
np.mean(
np.float32(population.contains_structures(self._global_optima)))
}
def __call__(self, sequences):
return _locally_connected_ising_model_energy(sequences, self.potentials)
|
44px/redash | redash/settings/helpers.py | Python | bsd-2-clause | 471 | 0 | import json
import os
def fix_assets_path(path):
fullpath = os.path.join(os.path.dirname(__file__), "../", path)
return | fullpath
def array_from_string(s):
array = s.split(',')
if "" in array:
array.remove("")
return array
def set_from_string(s):
return set(array_from_string(s))
def parse_boolean(str):
return json.loads(str.low | er())
def int_or_none(value):
if value is None:
return value
return int(value)
|
mbuhot/mbuhot-euler-solutions | python/problem-017.py | Python | mit | 1,881 | 0.02286 | #! /usr/bin/env python3
description = """
Number letter counts
Problem 17
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
"""
_say = {
1 : 'one',
2 : 'two',
3 : 'three',
4 : 'four',
5 : 'five',
6 : 'six',
7 : 'seven',
8 : 'eight',
9 : 'nine',
10: 'ten',
11 : 'eleven',
12 : 'twelve',
13 : 'thirteen',
14 : 'fourteen',
15 : 'fifteen',
16 : 'sixteen',
17 : 'seventeen',
18 : 'eighteen',
19 : 'nineteen',
20 : 'twenty',
30 : 'thirty',
40 : 'forty',
50 : 'fifty',
60 : 'sixty',
70 : 'seventy',
80 : 'eighty',
90 : 'ninety',
}
def say(n):
if n < 100:
if n in _say: return _say[n]
tens = (n // 10) * 10
ones = n % 10
return '%s %s' % (_say[tens], _say[ones])
elif n < 1000:
hundreds = (n // 100)
rest = | n % 100
if rest == 0:
return '%s hundred' % say(hundreds)
else:
return '%s hundred and %s' % (say(hundreds), say(rest))
elif n < 1000000:
thousands = (n // 1000)
rest = n % 1000
if rest == 0:
return '%s thousand' % say(thousands)
else:
return '%s thousand %s' % (say(thousands), say(rest))
assert(say(11) == 'eleven')
asser | t(say(95) == 'ninety five')
assert(say(111) == 'one hundred and eleven')
assert(say(592) == 'five hundred and ninety two')
assert(say(111111) == 'one hundred and eleven thousand one hundred and eleven')
print(sum(len(say(n).replace(' ', '')) for n in range(1, 1001)))
|
extremenetworks/xkit | EXOS/Python/watch/watch.py | Python | bsd-2-clause | 5,605 | 0.004817 | # watch.py
# usage: watch.py [-h] [-c COUNT] [-i INTERVAL] [-d] command
#
# This script displays the output of a specified CLI command every n seconds
# Example "run script watch.py "show port packet no-ref""
#
# positional arguments:
# command Command to iterate. Should be enclosed in quotes (i.e.
# "show l2stats vlan Mgmt")
#
# optional arguments:
# -h, --help show this help message and exit
# -c COUNT, --count COUNT
# Number of times to issue the command (default 3)
# -i INTERVAL, --interval INTERVAL
# Wait time between command iterations (default 5 sec)
# -d, --diff If numerical values have changed in an ouput print
# difference between previous and current command
# iteration
# Last updated: March 31, 2016
import argparse
import shlex
from exsh import clicmd
from time import sleep
import sys
import re
import signal
class ArgParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def try_cli(command):
"""Try CLI command and exit if invalid"""
try:
return clicmd(command, True)
except:
print 'Script Error: Check Command Syntax'
exit()
def version_check():
pimage = True
sh_switch = clicmd('show switch', True)
ver = ''
if 'Image Selected: secondary' in sh_switch:
pimage = False
sh_switch = sh_switch.splitlines()
for line in sh_switch:
if (pimage and ('Primary ver:' in line)) or (not pimage and ('Secondary ver:' in line)):
ver = line.split(':')
ver = ver[1].strip()
if ver == '':
print FMT_ERROR.format('Problem detecting software version')
exit()
elif ver.startswith('15.6') or ver.startswith('15.7'):
return True
else:
return False
def main():
parser = ArgParser(prog='watch.py', description = "This script displays the output of a "
"specified CLI command every n seconds "
"Example \"run script watch.py \"show port "
"packet no-ref\"\"")
parser.add_argument("command", help="Command to iterate. Should be enclosed in quotes "
"(i.e. \"show l2stats vlan Mgmt\")")
parser.add_argument('-c', '--count', help='Number of times to issue the command (default 3)', type=int, default=3)
parser.add_argument('-i', '--interval', help='Wait time between command iterations (default 5 sec)', type=int,
default=5)
parser.add_argument('-d', '--diff', help='If numerical values have changed in an ouput print difference between '
'previous and current command iteration', action="store_true")
args = parser.parse_args()
cmd = args.command
count = args.count
interval = args.interval
stat_diff = args.diff
cli_refresh = True
legacy_version = version_check()
# Create and register a hanlder for SIGINT so we handle ^C cleanly
def signal_handler(signal, frame):
if cli_refresh and not legacy_version:
# renable cli refresh, if we disabled it previously
clicmd('enable cli refresh')
sys.stdout.flush()
sys.exit(0)
signal.signal(signal.SIGINT, signal_hand | ler)
# Handle Auto-refreshing Commands
if legacy_version:
print 'WARNING: Switch is running pre 16.1 code. Please be sure to not use auto-refreshing commands\n'
else:
# Check to see if cli refresh is disabled
cli_out = clicmd('show management | include "CLI refresh"', True)
if 'Disabled' in cli_out:
cl | i_refresh = False
if cli_refresh:
# Temporarily disable refreshing CLI commands to prevent script from hanging
clicmd('disable cli refresh')
if stat_diff:
prev_output = try_cli(cmd)
print prev_output
count -=1
prev_output = prev_output.split('\n')
while count != 0:
sleep(interval)
curr_output = try_cli(cmd).split('\n')
for index in range(len(prev_output)):
# Split current and prev command outputs into list divided based on numerical and non-numerical strings
prev = re.split(r'(\d+)', prev_output[index])
curr = re.split(r'(\d+)', curr_output[index])
for i in range(len(prev)):
if prev[i].isdigit() and curr[i] > prev[i]:
diff = int(curr[i]) - int(prev[i])
diff = '+' + str(diff)
FMT = '{0:>' + str(len(curr[i])) + '}'
sys.stdout.write(FMT.format(diff))
else:
sys.stdout.write(prev[i])
sys.stdout.flush()
print
count -= 1
prev_output = curr_output
else:
for i in range(count):
print try_cli(cmd)
sleep(interval)
if cli_refresh and not legacy_version:
# Restore CLI refresh
clicmd('enable cli refresh')
if __name__ == '__main__':
try:
main()
except SystemExit:
# catch SystemExit to prevent EXOS shell from exiting to the login prompt
pass
|
joushx/OAuth.py | github.py | Python | mit | 388 | 0.018041 | from | oauth import OAuth
class Github(OAuth):
def __init__(self, consumer_key, consumer_secret):
self.r | equest_token_url = "https://github.com/login/oauth/request_token"
self.authorize_url = "https://github.com/login/oauth/authorize"
self.access_token_url = "https://github.com/login/oauth/access_token"
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
|
team-diana/generic_input_controller | scripts/commander.py | Python | mit | 242 | 0.004132 | from command | import Command
class Commander(object):
def __init__(self):
pass
def set_command(self, command):
self.command = command
def update_command(self, cmd_data):
self.command.onc | allback(cmd_data)
|
pyta-uoft/pyta | examples/pylint/e1138_unsupported_delete_operation.py | Python | gpl-3.0 | 595 | 0.001681 | from typing import List
class NamedList:
"""A contaner class for storing a list of named integers."""
def __init__(self, names: List[str], values: List[int]) -> None:
self._names = names
self._values = values
def __getitem__(self, name: str) -> int:
idx = self._names | .index(name)
return self._values[idx]
def __contains__(self, name: str) -> bool:
return name in self._names
| named_list = NamedList(['a', 'b', 'c'], [1, 2, 3])
print('c' in named_list) # Prints True
del named_list['c'] # Error on this line
print('c' in named_list)
|
D3f0/coreemu | daemon/core/session.py | Python | bsd-2-clause | 46,224 | 0.002228 | #
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
session.py: defines the Session class used by the core-daemon daemon program
that manages a CORE session.
'''
import os, sys, tempfile, shutil, shlex, atexit, gc, pwd
import threading, time, random
from core.api import coreapi
if os.uname()[0] == "Linux":
from core.netns import nodes
from core.netns.vnet import GreTapBridge
elif os.uname()[0] == "FreeBSD":
from core.bsd import nodes
from core.emane import emane
from core.misc.utils import check_call, mutedetach, readfileintodict, \
filemunge, filedemunge
from core.conf import ConfigurableManager, Configurable
from core.location import CoreLocation
from core.service import CoreServices
from core.broker import CoreBroker
from core.mobility import MobilityManager
from core.sdt import Sdt
from core.misc.ipaddr import MacAddr
from core.misc.event import EventLoop
from core.constants import *
from core.xen import xenconfig
class Session(object):
# sessions that get automatically shutdown when the process
# terminates normally
__sessions = set()
''' CORE session manager.
'''
def __init__(self, sessionid = None, cfg = {}, server = None,
persistent = False, mkdir = True):
if sessionid is None:
# try to keep this short since it's used to construct
# network interface names
pid = os.getpid()
sessionid = ((pid >> 16) ^
(pid & ((1 << 16) - 1)))
sessionid ^= ((id(self) >> 16) ^ (id(self) & ((1 << 16) - 1)))
sessionid &= 0xffff
self.sessionid = sessionid
self.sessiondir = os.path.join(tempfile.gettempdir(),
"pycore.%s" % self.sessionid)
if mkdir:
os.mkdir(self.sessiondir)
self.name = None
self.filename = None
self.thumbnail = None
self.user = None
self.node_count = None
self._time = time.time()
self.evq = EventLoop()
# dict of objects: all nodes and nets
self._objs = {}
self._objslock = threading.Lock()
# dict of configurable objects
self._confobjs = {}
self._confobjslock = threading.Lock()
self._handlers = set()
self._handlerslock = threading.Lock()
self._state = None
self._hooks = {}
self._state_hooks = {}
# dict of configuration items from /etc/core/core.conf config file
self.cfg = cfg
self.add_state_hook(coreapi.CORE_EVENT_RUNTIME_STATE,
self.runtime_state_hook)
self.setstate(state=coreapi.CORE_EVENT_DEFINITION_STATE,
info=False, sendevent=False)
self.server = server
if not persistent:
self.addsession(self)
self.master = False
self.broker = CoreBroker(session=self, verbose=True)
self.location = CoreLocation(self)
self.mobility = MobilityManager(self)
self.services = CoreServices(self)
self.emane = emane.Emane(self)
self.xen = xenconfig.XenConfigManager(self)
self.sdt = Sdt(self)
# future parameters set by the GUI may go here
self.options = SessionConfig(self)
self.metadata = SessionMetaData(self)
@classmethod
def addsession(cls, session):
cls.__sessions.add(session)
@classmethod
def delsession(cls, session):
try:
cls.__sessions.remove(session)
except KeyError:
pass
@classmethod
def atexit(cls):
while cls.__sessions:
s = cls.__sessions.pop()
print >> sys.stderr, "WARNING: automatically shutting down " \
"non-persistent session %s" % s.sessionid
s.shutdown()
def __del__(self):
# note: there is no guarantee this will ever run
self.shutdown()
def shutdown(self):
''' Shut down all emulation objects and remove the session directory.
'''
if hasattr(self, 'emane'):
self.emane.shutdown()
if hasattr(self, 'broker'):
self.broker.shutdown()
if hasattr(self, 'sdt'):
self.sdt.shutdown()
self.delobjs()
preserve = False
if hasattr(self.options, 'preservedir'):
if self.options.preservedir == '1':
preserve = True
if not preserve:
shutil.rmtree(self.sessiondir, ignore_errors = True)
if self.server:
self.server.delsession(self)
self.delsession(self)
def isconnected(self):
''' Returns true if this session has a request handler.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return False
else:
return True
def connect(self, handler):
''' Set the request handler for this session, making it connected.
'''
# the master flag will only be set after a GUI has connected with the
# handler, e.g. not during normal startup
if handler.master is True:
self.master = True
with self._handlerslock:
self._handlers.add(handler)
def disconnect(self, handler):
''' Disconnect a request handler from this session. Shutdown this
session if there is no running emulation.
'''
with self._handlerslock:
try:
self._handlers.remove(handler)
except KeyError:
raise ValueError, \
"Handler %s not associated with this session" % handler
num_handlers = len(self._handlers)
if num_handlers == 0:
# shut down this session unless we are instantiating, running,
# or collecting final data
if self.getstate() < coreapi.CORE_EVENT_INSTANTIATION_STATE or \
self.getstate() > coreapi.CORE_EVENT_DATACOLLECT_STATE:
self.shutdown()
| def broadcast(self, src, msg):
''' Send Node and Link CORE API messages to all handlers connected to this session.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
if isinstance(msg, coreapi.CoreNodeMessage) or \
isinstance(msg, coreapi.CoreLinkMessage):
try:
handler.sendall(msg.rawmsg)
except Exception, e:
| self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def broadcastraw(self, src, data):
''' Broadcast raw data to all handlers except src.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
try:
handler.sendall(data)
except Exception, e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def gethandler(self):
''' Get one of the connected handlers, preferrably the master.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return None
for handler in self._handlers:
if handler.master:
return handler
for handler in self._handlers:
return handler
def setstate(self, state, info = False, sendevent = False,
returnevent = False):
''' Set the session state. When info is true, log the state change
event using the session handler's info method. When sendevent is
true, generate a CORE API Event Message and send to the connected
entity.
'''
if state == self._state:
return []
self._time = time.time()
self._state = state
self.run_state_hooks(state)
replies |
hkjallbring/pusher-http-python | pusher_tests/test_requests_adapter.py | Python | mit | 702 | 0.01567 | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from pusher import Pusher
import unittest
import httpretty
class TestRequestsBackend(unittest.TestCase):
def setUp(self):
self.pusher = Pusher.from_url(u'http://key:secret@api.pusherapp | .com/apps/4')
@httpretty.activate
def test_trigger_requests_success(self):
httpretty.register_uri(httpretty.POST, "http://api.pusherapp.com/apps/4/events",
body="{}",
content_type="application/json")
response = self.pusher.trigger(u'test_channel', u'test', {u'data': u'yolo'})
self.assertEqual(response, {})
if | __name__ == '__main__':
unittest.main() |
lasote/conan | conans/client/generators/visualstudio_multi.py | Python | mit | 2,436 | 0.002874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def filename(self):
pass
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
vsversion = str(self.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is | a bit complicated, so skipping for now
condition = | " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0')
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
break
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current}
|
treytabner/cloudprefs-old | simulate.py | Python | agpl-3.0 | 3,219 | 0.003728 | """Cloud Preferences simulation tool"""
# Copyright 2013 Trey Tabner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import random
import requests
import string
import time
import uuid
from multiprocessing import Process
ENDPOINT = os.environ.get('ENDPOINT', 'http://localhost:8888')
START = int(os.environ.get('START', 100000))
MAX = int(os.environ.get('MAX', 1000)) # Simulate 1000 users at once
def random_password(size=12, chars=string.letters + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def headers(user_id):
return {
'X-User-Id': str(START),
#'X-User-Id': str(user_id),
}
def get(user_id, url):
return requests.get('%s/%s' % (ENDPOINT, url),
headers=headers(user_id))
def post(user_id, url, payload=None):
if payload:
data = json.dumps(payload)
else:
data = None
return | requests.post('%s/%s' % (ENDPOINT, url), data=data,
headers=headers(user_id))
def delete(user_id, url):
return requ | ests.delete('%s/%s' % (ENDPOINT, url),
headers=headers(user_id))
def simulate(user_id):
#response = delete(user_id, '')
#assert response.status_code == 204
#response = post(user_id,
# 'managed_cloud/build_config',
# payload=['driveclient', 'monitoring'])
#assert response.status_code == 204
#response = get(user_id, 'managed_cloud/build_config')
#assert response.status_code == 200
#assert 'driveclient' in response.json()
#assert 'monitoring' in response.json()
devices = [uuid.uuid4() for x in range(int(os.environ.get('DEVICES', 10)))]
for device in devices:
current = random_password()
updated = int(time.time())
payload = {
"current": current,
"updated": updated,
}
response = post(user_id, '%s/password' % device,
payload=payload)
assert response.status_code == 204
#response = get(user_id, '%s/password' % device)
#assert response.status_code == 200
#assert payload == response.json()
def main():
start = datetime.datetime.now()
tenants = range(START, START+MAX)
sims = []
for tenant in tenants:
p = Process(target=simulate, args=(tenant,))
p.start()
sims.append(p)
while sims:
print "Sims: %s" % len(sims)
for sim in sims:
if not sim.is_alive():
sims.remove(sim)
time.sleep(0.1)
end = datetime.datetime.now()
print "Total: %s" % (end - start)
if __name__ == "__main__":
main()
|
interactomix/iis | test_utils/base.py | Python | agpl-3.0 | 1,749 | 0 | import tempfile
from datetime import datetime
import flask_testing
from flask import url_for
import iis
from iis.models import User
from iis.extensions import db
class BaseTestCase(flask_testing.TestCase):
DB_FILE = tempfile.mkstemp()
SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_FILE[1]
LOGGING = {"version": 1}
TESTING = True
WTF_CSRF_ENABLED = False
USER_ENABLE_LOGIN_WITHOUT_CONFIRM = True
def create_app(self):
ret = iis.create_app(self.__class__)
app = ret[0]
self.user_manager = ret[1]
return app
def setUp(self):
db.create_all()
self.create_user("admin", "passW1")
def tearDown(self):
db.session.remove()
db.drop_all()
def login(self, username=None, password=None):
username = username or "admin"
password = password or "passW1"
self.client.post(u | rl_for('user.login'), data=dict(
username=username,
password=password
), follow_redirects=False)
return User.query.filter_by(username=username).one()
def logout(self):
self.client.get(url_for("user.logout"))
def create_user(self, username, password):
user = User(username=username,
password= | self.user_manager.hash_password(password),
email=username + "@localhost",
confirmed_at=datetime.fromtimestamp(0.0),
active=True)
db.session.add(user)
db.session.commit()
return user
def assertLoginRequired(self, url):
self.logout()
res = self.client.get(url)
self.assertEqual(302, res.status_code)
self.assertIn(url_for('user.login'), res.headers['Location'])
|
sebastian-software/jasy | jasy/test/style/strings.py | Python | mit | 1,663 | 0.004811 | #!/usr/bin/env python3
import sys
import os
import unittest
import logging
import inspect
# Extend PYTHONPATH with local 'lib' folder
if __name__ == "__main__":
jasyroot = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, jasyroot)
print("Running from %s..." % jasyroot)
import jasy.style.Engine as Engine
import jasy.core.Permutation as Permutation
class Tests(unittest.TestCase):
def process(self, code):
callerName = inspect.stack()[1][3][5:]
permutation = Permutation.Permutation({
"jasy.engine" : "gecko",
"jasy.debug" : True
})
tree = Engine.getTree(code, callerName)
tree = Engine.permutateTree(tree, permutation)
tree = Engine.reduceTre | e(tree)
return Engine.compressTree(tree)
def test_basic(self):
self.assertEqual(self.process(r'''
.foo{
content: "Hello";
}
'''), '.foo{content:"Hello";}')
def test_quotes(self):
self.assertEqual(self.process(r'''
.foo{
quotes: "\201E" "\201C" "\201A" "\2018";
}
'''), r'.foo{quotes:"\201E" "\201C" "\201A" "\2018";}')
def test_single | (self):
self.assertEqual(self.process(r'''
.foo{
content: 'Hello "Frank"';
}
'''), '.foo{content:\'Hello "Frank"\';}')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.ERROR)
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
twitter/pants | tests/python/pants_test/backend/jvm/targets/test_jvm_app.py | Python | apache-2.0 | 14,693 | 0.006057 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import str
from textwrap import dedent
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.base.exceptions import TargetDefinitionException
from pants.base.parse_context import ParseContext
from pants.build_graph.address import Address
from pants.build_graph.app_base import Bundle, DirectoryReMapper
from pants.source.wrapped_globs import Globs
from pants_test.test_base import TestBase
def _bundle(rel_path):
pc = ParseContext(rel_path=rel_path, type_aliases={})
return Bundle(pc)
def _globs(rel_path):
pc = ParseContext(rel_path=rel_path, type_aliases={})
return Globs(pc)
class JvmAppTest(TestBase):
  """Tests for jvm_app payload fields and resolution of the app's backing binary."""

  def test_simple(self):
    binary_target = self.make_target(':foo-binary', JvmBinary, main='com.example.Foo')
    app_target = self.make_target(':foo', JvmApp, basename='foo-app', binary=':foo-binary')
    # basename is mirrored on both the payload and the target itself.
    self.assertEqual('foo-app', app_target.payload.basename)
    self.assertEqual('foo-app', app_target.basename)
    self.assertEqual(binary_target, app_target.binary)
    # The binary spec is the app's only injected dependency spec.
    self.assertEqual([':foo-binary'], list(app_target.compute_dependency_specs(payload=app_target.payload)))

  def test_jvmapp_bundle_payload_fields(self):
    # basename and archive land on the payload; deployjar defaults to None.
    app_target = self.make_target(':foo_payload',
                                  JvmApp,
                                  basename='foo-payload-app',
                                  archive='zip')
    self.assertEqual('foo-payload-app', app_target.payload.basename)
    self.assertIsNone(app_target.payload.deployjar)
    self.assertEqual('zip', app_target.payload.archive)

  def test_bad_basename(self):
    # A jvm_app must not reuse its own name as its basename.
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmApp.* basename must not equal name.'):
      self.make_target(':foo', JvmApp, basename='foo')

  def create_app(self, rel_path, name=None, **kwargs):
    """Helper: make a jvm_app at *rel_path* bundling a generated config/densities.xml."""
    self.create_file(os.path.join(rel_path, 'config/densities.xml'))
    return self.make_target(Address(rel_path, name or 'app').spec,
                            JvmApp,
                            bundles=[_bundle(rel_path)(fileset='config/densities.xml')],
                            **kwargs)

  def test_binary_via_binary(self):
    binary = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin')
    self.assertEqual(app.binary, binary)

  def test_binary_via_dependencies(self):
    binary = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    app = self.create_app('src/java/org/archimedes/buoyancy', dependencies=[binary])
    self.assertEqual(app.binary, binary)

  def test_degenerate_binaries(self):
    # Naming the same binary via both binary= and dependencies= is tolerated.
    binary = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin', dependencies=[binary])
    self.assertEqual(app.binary, binary)

  def test_no_binary(self):
    app = self.create_app('src/java/org/archimedes/buoyancy')
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
                                 r' An app must define exactly one'):
      app.binary

  def test_too_many_binaries_mixed(self):
    self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    bin2 = self.make_target('src/java/org/archimedes/buoyancy:bin2', JvmBinary)
    app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin', dependencies=[bin2])
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
                                 r' An app must define exactly one'):
      app.binary

  def test_too_many_binaries_via_deps(self):
    binary = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    bin2 = self.make_target('src/java/org/archimedes/buoyancy:bin2', JvmBinary)
    app = self.create_app('src/java/org/archimedes/buoyancy', dependencies=[binary, bin2])
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
                                 r' An app must define exactly one'):
      app.binary

  def test_not_a_binary(self):
    # Pointing binary= at another jvm_app (not a jvm_binary) must fail.
    self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
    self.create_app('src/java/org/archimedes/buoyancy', name='app', binary=':bin')
    app = self.create_app('src/java/org/archimedes/buoyancy', name='app2', binary=':app')
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app2\).*'
                                 r' Expected binary dependency'):
      app.binary
class BundleTest(TestBase):
def test_bundle_filemap_dest_bypath(self):
spec_path = 'src/java/org/archimedes/buoyancy'
densities = self.create_file(os.path.join(spec_path, 'config/densities.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(fileset='config/densities.xml')])
self.assertEqual(1, len(app.bundles))
# after one big refactor, ../../../../../ snuck into this path:
self.assertEqual({densities: 'config/densities.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_byglobs(self):
spec_path = 'src/java/org/archimedes/tub'
one = self.create_file(os.path.join(spec_path, 'config/one.xml'))
two = self.create_file(os.path.join(spec_path, 'config/two.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
globs = _globs(spec_path)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(fileset=globs('config/*.xml'))])
self.assertEqual(1, len(app.bundles))
self.assertEqual({one: 'config/one.xml', two: 'config/two.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_relative(self):
spec_path = 'src/java/org/archimedes/crown'
five = self.create_file(os.path.join(spec_path, 'gold/config/five.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(relative_to='gold',
fileset='gold/config/five.xml')])
self.assertEqual(1, len(app.bundles))
self.assertEqual({five: 'config/five.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_remap(self):
spec_path = 'src/java/org/archimedes/crown'
one = self.create_file(os.path.join(spec_path, 'config/one.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
mapper = DirectoryReMapper(os.path.join(spec_path, 'config'), 'gold/config')
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(mapper=mapper, fileset='config/one.xml')])
self.assertEqual(1, len(app.bundles))
self.assertEqual({one: 'gold/config/one.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_remap_base_not_exists(self):
# Create directly
with self.assertRaises(DirectoryReMapper.NonexistentBaseError):
DirectoryReMapper("dummy/src/java/org/archimedes/crown/missing", "dummy")
def test_bundle_add(self):
spec_path = 'src/java/org/archimedes/volume'
stone_dense = self.create_file(os.path.join(spec_path, 'config/stone/ |
uw-it-aca/spotseeker_server | spotseeker_server/migrations/0003_auto_20181105_2251.py | Python | apache-2.0 | 648 | 0.001543 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Make Spot.last_modified auto-updating and relax the spottypes relation."""

    dependencies = [
        ('spotseeker_server', '0002_auto_20181029_2244'),
    ]

    operations = [
        # last_modified is now maintained automatically on every save.
        migrations.AlterField(
            model_name='spot',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
        # spottypes becomes optional (blank=True) with reverse accessor 'spots'.
        migrations.AlterField(
            model_name='spot',
            name='spottypes',
            field=models.ManyToManyField(related_name='spots', max_length=50, to='spotseeker_server.SpotType', blank=True),
        ),
    ]
|
mgood7123/UPM | Tests/PACKAGES/CDE/tests/pwd_readfile_test/pwd_readfile_test.py | Python | gpl-3.0 | 124 | 0.016129 | import | os
# generate an ABSOLUTE PATH to my-file.txt
f = os.getcwd() + '/my-file.txt'
for line in open(f):
print line, | |
TrueCar/sentry-notes | setup.py | Python | bsd-3-clause | 863 | 0.001159 | #!/usr/bin/env python
"""
Setup for Sentry Notes Plugin
==============================
Adds the option to record simple plain-text notes to
message groups in sentry's GUI
"""
# See http://packages.python.org/distribute/setuptools.html
# for insight on how this works
from setuptools import setup, find_packages

setup(
    name="sentry-notes",
    version="0.1",
    license="3-clause BSD",
    description="Notation Tool for Sentry",
    # Reuse the module docstring at the top of this file as the long description.
    long_description=__doc__,
    url="https://github.com/TrueCar/sentry-notes",
    author="Truecar.com Development Team",
    author_email="dev@truecar.com",
    maintainer="Ted Schundler",
    maintainer_email="tschundler@truecar.com",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    #package_data={'': ['*.txt', '*.html']},
    install_requires=[
        "sentry>=3.5.8",
    ],
)
|
tylertian/Openstack | openstack F/cinder/cinder/test.py | Python | apache-2.0 | 9,555 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import uuid
import unittest
import mox
import nose.plugins.skip
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
# Test-only config options registered on the global FLAGS object below.
test_opts = [
    # Filename of the pristine sqlite DB that is copied to reset the test DB.
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'),
    # Whether fake (in-process) service doubles should be used by tests.
    cfg.BoolOpt('fake_tests',
                default=True,
                help='should we use everything for testing'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
# Module-level logger for this test base module.
LOG = logging.getLogger(__name__)
class skip_test(object):
    """Decorator that unconditionally skips the decorated test."""
    # TODO(tr3buchet): remember forever what comstud did here
    def __init__(self, msg):
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _skip_wrapper(*args, **kw):
            """Replacement body: always raise SkipTest with the stored message."""
            raise nose.SkipTest(self.message)
        return _skip_wrapper
class skip_if(object):
    """Decorator that skips a test when *condition* evaluates true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _conditional_skip(*args, **kw):
            """Skip when the condition held at decoration time, else run the test."""
            if self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        return _conditional_skip
class skip_unless(object):
    """Decorator that skips a test unless *condition* evaluates true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _conditional_skip(*args, **kw):
            """Run the test only when the condition held; otherwise skip it."""
            if not self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        return _conditional_skip
def skip_if_fake(func):
    """Decorator that skips a test if running in fake mode.

    Unlike the class-based decorators above, the fake_tests flag is read at
    call time rather than at decoration time.
    """
    # functools.wraps keeps the test's name/docstring visible in runner output,
    # consistent with skip_test / skip_if / skip_unless above.
    @functools.wraps(func)
    def _skipper(*args, **kw):
        """Skip in fake mode, otherwise run the wrapped test."""
        if FLAGS.fake_tests:
            raise unittest.SkipTest('Test cannot be run in fake mode')
        return func(*args, **kw)
    return _skipper
class TestingException(Exception):
    """Exception type raised deliberately from within tests."""
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
    def setUp(self):
        """Run before each test method to initialize test environment.

        Resets config flags to test defaults, wipes the test database,
        records the test start time, and prepares per-test mox/stubout
        scratch state plus the bookkeeping lists torn down in tearDown().
        """
        super(TestCase, self).setUp()
        fake_flags.set_defaults(FLAGS)
        flags.parse_args([], default_config_files=[])
        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()
        tests.reset_db()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        # Timers injected by tests and services started via start_service();
        # both are cleaned up in tearDown().
        self.injected = []
        self._services = []
    def tearDown(self):
        """Runs after each test method to tear down test environment."""
        try:
            # Unwind and verify all mox/stubout stubs; VerifyAll raises if an
            # expected mocked call never happened.
            self.mox.UnsetStubs()
            self.stubs.UnsetAll()
            self.stubs.SmartUnsetAll()
            self.mox.VerifyAll()
            super(TestCase, self).tearDown()
        finally:
            # Reset any overridden flags
            FLAGS.reset()
            # Stop any timers
            for x in self.injected:
                try:
                    x.stop()
                except AssertionError:
                    # presumably raised when a timer was already stopped --
                    # TODO confirm; deliberately ignored during cleanup.
                    pass
            # Kill any services
            for x in self._services:
                try:
                    x.kill()
                except Exception:
                    # Best-effort cleanup: a dying service must not mask the
                    # real test outcome.
                    pass
            # Delete attributes that don't start with _ so they don't pin
            # memory around unnecessarily for the duration of the test
            # suite
            for key in [k for k in self.__dict__.keys() if k[0] != '_']:
                del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertable to float, just ignore
# ValueError if arg is a str, TypeError if it's som | ething else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertio | n("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
|
kwilliams-mo/iris | lib/iris/tests/test_io_init.py | Python | gpl-3.0 | 5,347 | 0.00374 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the io/__init__.py module.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import unittest
from io import BytesIO
import iris.fileformats as iff
import iris.io.format_picker as fp
import iris.io
class TestDecodeUri(unittest.TestCase):
    """Tests for iris.io.decode_uri scheme/part splitting."""
    def test_decode_uri(self):
        # Mapping of input URI -> expected (scheme, remainder) pair.  Plain
        # filesystem paths (POSIX or Windows) decode to the 'file' scheme.
        cases = {
            '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            # Raw strings keep the Windows backslashes literal (the original
            # relied on unrecognized escape sequences passing through).
            r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp': (
                'file', r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp'
            ),
            'file:///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            'http://www.somehost.com:8080/resource/thing.grib': (
                'http', '//www.somehost.com:8080/resource/thing.grib'
            ),
        }
        for uri, pair in cases.items():
            self.assertEqual(pair, iris.io.decode_uri(uri))
class TestFileFormatPicker(tests.IrisTest):
    """Tests for the loader selection performed by iff.FORMAT_AGENT."""

    def test_known_formats(self):
        # The registered format specs must match the checked-in reference list.
        self.assertString(str(iff.FORMAT_AGENT),
                          tests.get_result_path(('file_load',
                                                 'known_loaders.txt')))

    @iris.tests.skip_data
    def test_format_picker(self):
        # ways to test the format picker = list of (format-name, file-spec)
        test_specs = [
            ('NetCDF',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc']),
            ('NetCDF 64 bit offset format',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc.k2']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k3']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k4']),
            ('UM Fieldsfile (FF) post v5.2',
             ['FF', 'n48_multi_field']),
            ('GRIB',
             ['GRIB', 'grib1_second_order_packing', 'GRIB_00008_FRANX01']),
            ('GRIB',
             ['GRIB', 'jpeg2000', 'file.grib2']),
            ('UM Post Processing file (PP)',
             ['PP', 'simple_pp', 'global.pp']),
            ('UM Fieldsfile (FF) ancillary',
             ['FF', 'ancillary_fixed_length_header']),
            # ('BUFR',
            #  ['BUFR', 'mss', 'BUFR_Samples',
            #   'JUPV78_EGRR_121200_00002501']),
            ('NIMROD',
             ['NIMROD', 'uk2km', 'WO0000000003452',
              '201007020900_u1096_ng_ey00_visibility0180_screen_2km']),
            # ('NAME',
            #  ['NAME', '20100509_18Z_variablesource_12Z_VAAC',
            #   'Fields_grid1_201005110000.txt']),
        ]
        # Check that each filespec is identified as the expected format.
        for (expected_format_name, file_spec) in test_specs:
            test_path = tests.get_data_path(file_spec)
            with open(test_path, 'r') as test_file:
                a = iff.FORMAT_AGENT.get_spec(test_path, test_file)
                self.assertEqual(a.name, expected_format_name)

    def test_format_picker_nodata(self):
        # The following is to replace the above at some point as no real files
        # are required.
        # (Used binascii.unhexlify() to convert from hex to binary)
        # Packaged grib, magic number offset by set length, this length is
        # specific to WMO bulletin headers
        header_lengths = [21, 80, 41, 42]
        for header_length in header_lengths:
            binary_string = header_length * '\x00' + 'GRIB' + '\x00' * 100
            # NOTE(review): BytesIO('rw') seeds the buffer with the *bytes*
            # 'rw' -- it is not an open mode; confirm the resulting offset is
            # intended before changing it.
            with BytesIO('rw') as bh:
                bh.write(binary_string)
                bh.name = 'fake_file_handle'
                a = iff.FORMAT_AGENT.get_spec(bh.name, bh)
                self.assertEqual(a.name, 'GRIB')

    def test_open_dap(self):
        # tests that *ANY* http or https URL is seen as an OPeNDAP service.
        # This may need to change in the future if other protocols are
        # supported.
        DAP_URI = 'http://geoport.whoi.edu/thredds/dodsC/bathy/gom15'
        a = iff.FORMAT_AGENT.get_spec(DAP_URI, None)
        self.assertEqual(a.name, 'NetCDF OPeNDAP')
@iris.tests.skip_data
class TestFileExceptions(tests.IrisTest):
    """Error handling for malformed or unsupported input files."""
    def test_pp_little_endian(self):
        # Little-endian PP files are unsupported; loading must raise ValueError.
        filename = tests.get_data_path(('PP', 'aPPglob1', 'global_little_endian.pp'))
        self.assertRaises(ValueError, iris.load_cube, filename)
# Allow running this test module directly (delegates to iris' test runner).
if __name__ == '__main__':
    tests.main()
|
pvagner/orca | test/keystrokes/gtk3-demo/role_label.py | Python | lgpl-2.1 | 6,801 | 0.001617 | #!/usr/bin/python
"""Test of label presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()

# Open gtk3-demo's "Dialog and Message Boxes" demo, pop up the Information
# alert twice, and Tab towards the label under test.
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Dialog and Message Boxes"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("space"))
sequence.append(KeyComboAction("space"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))

# 1-3: focus the label, then Where Am I / Extended Where Am I on it.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
    "1. This message box label",
    ["BRAILLE LINE: 'number of times:'",
     " VISIBLE: 'number of times:', cursor=17",
     "BRAILLE LINE: 'number of times:'",
     " VISIBLE: 'number of times:', cursor=17",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "2. This message box label Where Am I",
    ["BRAILLE LINE: 'number of times:'",
     " VISIBLE: 'number of times:', cursor=17",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "3. This message box label Extended Where Am I",
    ["BRAILLE LINE: 'number of times:'",
     " VISIBLE: 'number of times:', cursor=17",
     "BRAILLE LINE: 'number of times:'",
     " VISIBLE: 'number of times:', cursor=17",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'"]))

# 4-5: caret navigation within the (now unselected) label text.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Home"))
sequence.append(utils.AssertPresentationAction(
    "4. Press Home to unselect the label and move to the first character'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=1",
     "BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=1",
     "SPEECH OUTPUT: 'T'",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times:'",
     "SPEECH OUTPUT: 'unselected' voice=system"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
    "5. This message box label caret movement to 'h'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=2",
     "SPEECH OUTPUT: 'h'"]))

# 6-8: select part of the text and query it with Where Am I.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift><Control>Right"))
sequence.append(utils.AssertPresentationAction(
    "6. This message box label caret select 'his' of 'This'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=5",
     "SPEECH OUTPUT: 'his'",
     "SPEECH OUTPUT: 'selected' voice=system"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "7. This message box label caret selection Where Am I",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=5",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "8. This message box label caret selection Extended Where Am I",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=5",
     "BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=5",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'",
     "SPEECH OUTPUT: 'This message box has been popped up the following",
     "number of times: selected label'"]))

# 9-11: shrink and re-grow the selection, checking announcements.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
    "9. Left Arrow to move to h unselecting his'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=2",
     "BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=2",
     "SPEECH OUTPUT: 'h'",
     "SPEECH OUTPUT: 'his'",
     "SPEECH OUTPUT: 'unselected' voice=system"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift><Control>Left"))
sequence.append(utils.AssertPresentationAction(
    "10. This message box label caret select 'T' in 'This'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=1",
     "SPEECH OUTPUT: 'T' voice=uppercase",
     "SPEECH OUTPUT: 'selected' voice=system"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift><Control>Right"))
sequence.append(utils.AssertPresentationAction(
    "11. This message box label caret unselect 'T' and select rest of 'This'",
    ["BRAILLE LINE: 'gtk3-demo application Information alert This message box has been popped up the following'",
     " VISIBLE: 'This message box has been popped', cursor=5",
     "SPEECH OUTPUT: 'T'",
     "SPEECH OUTPUT: 'unselected' voice=system",
     "SPEECH OUTPUT: 'his'",
     "SPEECH OUTPUT: 'selected' voice=system"]))

# Close the dialog, report assertion results and run the whole macro.
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
SauloAislan/ironic | ironic/conf/oneview.py | Python | apache-2.0 | 2,255 | 0 | # Copyright 2016 Intel Corporation
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
# Configuration options for the OneView driver, registered under the
# [oneview] group by register_opts() below.
opts = [
    cfg.StrOpt('manager_url',
               help=_('URL where OneView is available.')),
    cfg.StrOpt('username',
               help=_('OneView username to be used.')),
    cfg.StrOpt('password',
               secret=True,
               help=_('OneView password to be used.')),
    cfg.BoolOpt('allow_insecure_connections',
                default=False,
                help=_('Option to allow insecure connection with OneView.')),
    cfg.StrOpt('tls_cacert_file',
               help=_('Path to CA certificate.')),
    cfg.IntOpt('max_polling_attempts',
               default=12,
               help=_('Max connection retries to check changes on OneView.')),
    cfg.BoolOpt('enable_periodic_tasks',
                default=True,
                help=_('Whether to enable the periodic tasks for OneView '
                       'driver be aware when OneView hardware resources are '
                       'taken and released by Ironic or OneView users '
                       'and proactively manage nodes in clean fail state '
                       'according to Dynamic Allocation model of hardware '
                       'resources allocation in OneView.')),
    cfg.IntOpt('periodic_check_interval',
               default=300,
               help=_('Period (in seconds) for periodic tasks to be '
                      'executed when enable_periodic_tasks=True.')),
]


def register_opts(conf):
    """Register the OneView options on *conf* under the 'oneview' group."""
    conf.register_opts(opts, group='oneview')
|
seunghwanl/APMAE4990 | scripts/regression.py | Python | apache-2.0 | 3,369 | 0.009498 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
import time
import datetime
import math
months = ["apr", "may", "jun", "jul", "aug", "sep"]
days_eng = ["Mon", "Tues", "Wed", "Thurs", "Fri", "Sat", "Sun"]
# Fit one RandomForest pickups-per-hour regressor per (month, weekday) CSV and
# append the train/test R^2 scores to ./Result/rep.txt.
for month in months:
    for day in days_eng:
        filename = month+day+"2014.csv"
        df = pd.read_csv(filename)
        X = []
        y = []
        datetimedf = df.loc[:, 'Date/Time']
        t = pd.to_datetime(datetimedf, format="%m/%d/%Y %H:%M:%S")
        df["Hour"] = t.dt.hour
        df["Week"] = t.dt.week
        dataframes = []
        for i in range(24):
            # doing this to get each hour for each week. So if there are 5 weeks
            # we need to get 5 data points for each of 24 hour intervals
            # df.Hour % 24 is done by using groupby('Hour').count() few lines below.
            # NOTE(review): the comment above talks about Hour but the filter
            # uses Week; with week numbers < 24 the modulo is a no-op, so this
            # looks suspicious -- confirm the intent before changing it.
            df_new = df.loc[df.Week % 24 == i]
            dataframes.append(df_new)
        for dframe in dataframes:
            # Pickup count per hour-of-day; (hour index, count) become (X, y).
            dframe = dframe.groupby("Hour").count()
            dframe = dframe[dframe.columns[0]]
            for idx, i in enumerate(dframe.as_matrix()):
                y.append([i])
                X.append([idx])
        # one week scatter plot
        # if month == 'jul' and day == 'Sun':
        # plt.scatter(X, y, s = 1)
        # plt.legend(loc="lower right")
        # plt.title("RandomForest Regression " + month + " " + day)
        # plt.xlabel("Hours")
        # plt.ylabel("Number of Pickups")
        # plt.show()
        y = np.asarray(y)
        X = np.asarray(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 42)
        # perform gridsearch with cv = 5
        rf = RandomForestRegressor()
        params = {'n_estimators': [x for x in range(10, 40, 2)], 'max_depth': [x for x in range(1, 7)]}
        reg = GridSearchCV(estimator = rf, param_grid = params, cv = 5)
        reg.fit(X_train, y_train)
        print("best params: ", reg.best_params_)
        # Refit a fresh forest with the best hyper-parameters found above.
        rf = RandomForestRegressor(n_estimators = reg.best_params_['n_estimators'], max_depth = reg.best_params_['max_depth'])
        rf.fit(X_train, y_train)
        r2_train = r2_score(y_train, rf.predict(X_train))
        r2_test = r2_score(y_test, rf.predict(X_test))
        # filename = "./Result/" + month+day+"2014.pkl"
        # joblib.dump(rf, filename, compress=9)
        filename = "./Result/rep.txt"
        with open(filename, 'a') as f:
            f.write("year: 2014 month: " + month + " day: " + day + " train r2: "
                    + str(r2_train) + " test r2: " + str(r2_test) + "\n")
        print("train r2", r2_train)
        print("test r2", r2_test)
        # scatter plot (all weeks cumulative)  -- this line was missing its
        # leading '#', which made the whole file a SyntaxError; kept as a
        # comment to match the block below.
        # if month == 'jul' and day == 'Fri':
        # plt.scatter(X, y, s = 1)
        # plt.legend(loc="lower right")
        # plt.title("RandomForest Regression " + month + " " + day)
        # plt.xlabel("Hours")
        # plt.ylabel("Number of Pickups")
        # plt.show()
|
mikeengland/fireant | fireant/tests/queries/test_data_blending_integration.py | Python | apache-2.0 | 55,610 | 0.001259 | from unittest import TestCase
from pypika import Tables, functions as fn
import fireant as f
from fireant import DataSet, DataType, Database, Field, ReactTable
from fireant.tests.database.mock_database import TestDatabase
class DataSetBlenderIntegrationTests(TestCase):
maxDiff = None
def test_select_only_a_metric_from_primary_dataset(
self,
):
db = Database()
t0, t1 = Tables("test0", "test1")
primary_ds = DataSet(
table=t0,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t0.timestamp,
data_type=DataType.date,
),
Field(
"metric0",
label="Metric0",
definition=fn.Sum(t0.metric),
data_type=DataType.number,
),
],
)
secondary_ds = DataSet(
table=t1,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t1.timestamp,
data_type=DataType.date,
),
Field(
"metric1",
label="Metric1",
definition=fn.Sum(t1.metric),
data_type=DataType.number,
),
],
)
blend_ds = primary_ds.blend(secondary_ds).on({primary_ds.fields.timestamp: secondary_ds.fields.timestamp})
sql = (blend_ds.query().dimension(blend_ds.fields.timestamp).widget(ReactTable(blend_ds.fields.metric0))).sql
(query,) = sql
| self.assertEqual(
'SELECT "sq0"."$timestamp" "$timestamp","sq0"."$metric0" "$metric0" '
'FROM (SELECT "timestamp" "$timestamp",SUM("metric") "$metric0" FROM "test0" GROUP BY "$timestamp") "sq0" '
'ORDER BY "$timestamp" LIMIT 200000',
str(query),
)
def test_use_metric_from_primary_datase | t_when_alias_conflicts_with_metric_from_secondary(
self,
):
db = Database()
t0, t1 = Tables("test0", "test1")
primary_ds = DataSet(
table=t0,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t0.timestamp,
data_type=DataType.date,
),
Field(
"metric",
label="Metric",
definition=t0.metric,
data_type=DataType.number,
),
],
)
secondary_ds = DataSet(
table=t1,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t1.timestamp,
data_type=DataType.date,
),
Field(
"metric",
label="Metric",
definition=t1.metric,
data_type=DataType.number,
),
],
)
blend_ds = (
primary_ds.blend(secondary_ds)
.on_dimensions()
.extra_fields(
Field(
"metric_share",
label="Metric Share",
definition=primary_ds.fields.metric / secondary_ds.fields.metric,
data_type=DataType.number,
)
)
)
sql = (
blend_ds.query().dimension(blend_ds.fields.timestamp).widget(ReactTable(blend_ds.fields.metric_share))
).sql
(query,) = sql
self.assertEqual(
"SELECT "
'"sq0"."$timestamp" "$timestamp",'
'"sq0"."$metric"/"sq1"."$metric" "$metric_share" '
"FROM ("
"SELECT "
'"timestamp" "$timestamp",'
'"metric" "$metric" '
'FROM "test0" '
'GROUP BY "$timestamp"'
') "sq0" '
"LEFT JOIN ("
"SELECT "
'"timestamp" "$timestamp",'
'"metric" "$metric" '
'FROM "test1" '
'GROUP BY "$timestamp"'
') "sq1" ON "sq0"."$timestamp"="sq1"."$timestamp" '
'ORDER BY "$timestamp" '
'LIMIT 200000',
str(query),
)
    def test_produce_a_sql_with_multiple_subqueries_in_from_clause_when_blender_not_mapped_on_any_fields(
        self,
    ):
        # With an empty .on({}) mapping the blender has nothing to join on, so
        # both datasets appear as comma-separated subqueries in FROM (a cross
        # product) rather than a LEFT JOIN.
        db = Database()
        t0, t1 = Tables("test0", "test1")
        primary_ds = DataSet(
            table=t0,
            database=db,
            fields=[
                Field(
                    "timestamp",
                    label="Timestamp",
                    definition=t0.timestamp,
                    data_type=DataType.date,
                ),
                Field(
                    "metric1",
                    label="Metric1",
                    definition=t0.metric,
                    data_type=DataType.number,
                ),
            ],
        )
        secondary_ds = DataSet(
            table=t1,
            database=db,
            fields=[
                Field(
                    "metric2",
                    label="Metric2",
                    definition=t1.metric,
                    data_type=DataType.number,
                )
            ],
        )
        blend_ds = primary_ds.blend(secondary_ds).on({})
        sql = (
            blend_ds.query()
            .dimension(blend_ds.fields.timestamp)
            .widget(ReactTable(blend_ds.fields.metric1, blend_ds.fields.metric2))
        ).sql
        (query,) = sql
        self.assertEqual(
            "SELECT "
            '"sq0"."$timestamp" "$timestamp",'
            '"sq0"."$metric1" "$metric1",'
            '"sq1"."$metric2" "$metric2" '
            "FROM ("
            "SELECT "
            '"timestamp" "$timestamp",'
            '"metric" "$metric1" '
            'FROM "test0" '
            'GROUP BY "$timestamp"'
            ') "sq0",'
            "("
            "SELECT "
            '"metric" "$metric2" '
            'FROM "test1"'
            ') "sq1" '
            'ORDER BY "$timestamp" '
            'LIMIT 200000',
            str(query),
        )
def test_select_unmapped_dimension_from_secondary_but_only_metric_from_primary(
self,
):
db = Database()
t0, t1 = Tables("test0", "test1")
primary_ds = DataSet(
table=t0,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t0.timestamp,
data_type=DataType.date,
),
Field(
"metric0",
label="Metric0",
definition=fn.Sum(t0.metric),
data_type=DataType.number,
),
],
)
secondary_ds = DataSet(
table=t1,
database=db,
fields=[
Field(
"timestamp",
label="Timestamp",
definition=t1.timestamp,
data_type=DataType.date,
),
Field(
"account",
label="Account",
definition=t1.account,
data_type=DataType.number,
),
Field(
"metric1",
label="Metric1",
definition=fn.Sum(t1.metric),
data_type=DataType.number,
),
],
)
blend_ds = primary_ds.blend(secondary_ds).on({primary_ds.fields.timestamp: secondary_ds.fields.timestamp})
sql = (
blend_ds.query()
.dimension(blend_ds.fields.timestamp, blend_ds.fields.a |
belese/luciphone | Luciphone/writeuid.py | Python | gpl-2.0 | 431 | 0.020882 | from modules.py532lib.NFC import NFC as NFC
print ('Write UID')
#write .UID in current dir
UID_FILE = ".UID"
def write_uid(uid):
    """Persist the detected tag UID to UID_FILE and stop the NFC reader.

    Registered as the NEWTAG callback: the dispatcher passes the UID of the
    tag that was placed on the reader.

    :param uid: tag UID reported by the NFC layer; stored as ``str(uid)``.
    """
    print("Write UID : %s" % str(uid))
    # Bug fix: the original called ``f.close`` without parentheses, so the
    # file handle was never explicitly closed.  ``with`` guarantees the
    # write is flushed and the handle closed even if ``write`` raises.
    with open(UID_FILE, 'w') as f:
        f.write(str(uid))
    NFC.stop()
def stop(uid):
    # Callback for tag-removal (REMOVETAG) events: just stop the NFC loop.
    # ``uid`` is supplied by the event dispatcher but is not needed here.
    NFC.stop()
NFC.add_event_detect(NFC.NEWTAG,write_uid)
NFC.add_event_detect(NFC.REMOVETAG,stop)
pr | int('Put the disk on plate')
NFC.start()
|
roadmapper/ansible | lib/ansible/module_utils/network/vyos/config/static_routes/static_routes.py | Python | gpl-3.0 | 20,425 | 0.001812 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos_static_routes class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, remove_empties
from ansible.module_utils.network.vyos.facts.facts import Facts
from ansible.module_utils.six import iteritems
from ansible.module_utils.network. vyos.utils.utils import get_route_type, \
get_lst_diff_for_dicts, get_lst_same_for_dicts, dict_delete
class Static_routes(ConfigBase):
"""
The vyos_static_routes class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'static_routes',
]
def __init__(self, module):
super(Static_routes, self).__init__(module)
def get_static_routes_facts(self, data=None):
    """ Get the 'facts' (the current static-routes configuration)
    :param data: optional running-config text to parse instead of querying
                 the connected device (used by the 'parsed' state)
    :rtype: A list
    :returns: The current configuration as a list of address families,
              or an empty list when no static routes are configured
    """
    facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources, data=data)
    static_routes_facts = facts['ansible_network_resources'].get('static_routes')
    if not static_routes_facts:
        return []
    return static_routes_facts
def execute_module(self):
    """ Execute the module
    :rtype: A dictionary
    :returns: The result from module execution
    """
    result = {'changed': False}
    warnings = list()
    commands = list()
    # States that (may) modify the device need the current facts as the
    # "before" snapshot; purely offline states skip gathering here.
    if self.state in self.ACTION_STATES:
        existing_static_routes_facts = self.get_static_routes_facts()
    else:
        existing_static_routes_facts = []
    # Compute the command set for any state that produces commands
    # ('rendered' computes them without touching the device).
    if self.state in self.ACTION_STATES or self.state == 'rendered':
        commands.extend(self.set_config(existing_static_routes_facts))
    # Push to the device only for real action states, and never in check mode.
    if commands and self.state in self.ACTION_STATES:
        if not self._module.check_mode:
            self._connection.edit_config(commands)
        result['changed'] = True
    if self.state in self.ACTION_STATES:
        result['commands'] = commands
    # Re-gather facts so the post-change state can be reported below.
    if self.state in self.ACTION_STATES or self.state == 'gathered':
        changed_static_routes_facts = self.get_static_routes_facts()
    elif self.state == 'rendered':
        result['rendered'] = commands
    elif self.state == 'parsed':
        # 'parsed' transforms user-supplied config text rather than device facts.
        running_config = self._module.params['running_config']
        if not running_config:
            self._module.fail_json(
                msg="value of running_config parameter must not be empty for state parsed"
            )
        result['parsed'] = self.get_static_routes_facts(data=running_config)
    else:
        changed_static_routes_facts = []
    # Report before/after (action states) or gathered facts as appropriate.
    if self.state in self.ACTION_STATES:
        result['before'] = existing_static_routes_facts
        if result['changed']:
            result['after'] = changed_static_routes_facts
    elif self.state == 'gathered':
        result['gathered'] = changed_static_routes_facts
    result['warnings'] = warnings
    return result
def set_config(self, existing_static_routes_facts):
    """ Collect the configuration from the args passed to the module,
    collect the current configuration (as a dict from facts)
    :param existing_static_routes_facts: facts gathered before any change
    :rtype: A list
    :returns: the commands necessary to migrate the current configuration
              to the desired configuration
    """
    # 'want' is the desired state from the task args; 'have' the device state.
    want = self._module.params['config']
    have = existing_static_routes_facts
    resp = self.set_state(want, have)
    return to_list(resp)
def set_state(self, want, have):
    """ Select the appropriate function based on the state provided
    :param want: the desired configuration as a dictionary
    :param have: the current configuration as a dictionary
    :rtype: A list
    :returns: the commands necessary to migrate the current configuration
              to the desired configuration
    """
    # These states are meaningless without a desired configuration.
    if self.state in ('merged', 'replaced', 'overridden', 'rendered') and not want:
        self._module.fail_json(msg='value of config parameter must not be empty for state {0}'.format(self.state))
    commands = []
    if self.state == 'overridden':
        commands.extend(self._state_overridden(want=want, have=have))
    elif self.state == 'deleted':
        commands.extend(self._state_deleted(want=want, have=have))
    elif want:
        # Per-route dispatch: pair every desired route with its current
        # counterpart (if any) and merge/replace it individually.
        for route in self._get_routes(want):
            matched = self.search_route_in_have(have, route['dest'])
            if self.state in ('merged', 'rendered'):
                commands.extend(self._state_merged(want=route, have=matched))
            elif self.state == 'replaced':
                commands.extend(self._state_replaced(want=route, have=matched))
    return commands
def search_route_in_have(self, have, want_dest):
    """
    Return the route from the current (``have``) configuration whose
    destination matches ``want_dest``.
    :param have: current configuration facts
    :param want_dest: destination prefix to look for
    :return: the matching route dict, or None when not present
    """
    return next(
        (route for route in self._get_routes(have) if route['dest'] == want_dest),
        None,
    )
def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
if have:
for key, value in iteritems(want):
if value:
if key == 'next_hops':
commands.extend(self._update_next_hop(want, have))
elif key == 'blackhole_config':
commands.extend(self._update_blackhole(key, want, have))
commands.extend(self._state_merged(want, have))
return commands
def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
routes = self._get_routes(have)
for r in routes:
route_in_want = self.search_route_in_have(want, r['dest'])
if not route_in_want:
commands.append(self._compute_command(r['dest'], remove=True))
routes = self._get_routes(want)
for r in routes:
route_in_have = self.search_route_in_have(have, r['dest'])
commands.extend(self._state_replaced(r, route_in_have))
return commands
def _state_merged(self, want, have, opr=True):
    """ The command generator when state is merged
    :rtype: A list
    :returns: the commands necessary to merge the provided into
              the current configuration
    """
    # An existing route is updated in place; a brand-new one is rendered
    # entirely with 'set' commands.
    rendered = self._render_updates(want, have) if have else self._render_set_commands(want)
    return list(rendered)
def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
if want:
routes = self._get_routes(want)
if not routes:
for w in want:
af = w['address_families']
|
HumanExposure/factotum | apps_api/core/authentication.py | Python | gpl-3.0 | 854 | 0 | import datetime
from rest_framework.authentication import TokenAuthentication
from rest_framework import exceptions
from factotum.environment import env
class ExpiringTokenAuthentication(TokenAuthentication):
    """DRF token authentication with a TTL: tokens older than
    ``FACTOTUM_WS_TOKEN_TTL`` milliseconds are rejected."""

    # Use the "Authorization: Bearer <token>" scheme instead of DRF's
    # default "Token" keyword.
    keyword = "Bearer"

    def authenticate_credentials(self, key):
        token_model = self.get_model()
        try:
            token = token_model.objects.get(key=key)
        except token_model.DoesNotExist:
            raise exceptions.AuthenticationFailed("Invalid token")
        if not token.user.is_active:
            raise exceptions.AuthenticationFailed("User inactive or deleted")
        # A token expires FACTOTUM_WS_TOKEN_TTL milliseconds after creation.
        # NOTE(review): compares against naive local time — assumes
        # ``token.created`` is naive as well; confirm against settings.
        max_age = datetime.timedelta(milliseconds=env.FACTOTUM_WS_TOKEN_TTL)
        if token.created < datetime.datetime.now() - max_age:
            raise exceptions.AuthenticationFailed("Token has expired")
        return token.user, token
|
ratschlab/ASP | examples/undocumented/python_modular/classifier_multiclasslinearmachine_modular.py | Python | gpl-2.0 | 1,156 | 0.036332 | import classifier_multiclass_shared
[traindat, label_traindat, testdat, label_testdat] | = classifier_multiclass_shar | ed.prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,2.1,1,1e-5],[traindat,testdat,label_traindat,2.2,1,1e-5]]
def classifier_multiclasslinearmachine_modular (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,width=2.1,C=1,epsilon=1e-5):
    """Train a one-vs-one multiclass linear machine on top of LibLinear.

    :param fm_train_real: real-valued training feature matrix
    :param fm_test_real: real-valued test feature matrix (wrapped but unused here)
    :param label_train_multiclass: multiclass training labels
    :param width: unused by this variant (kept for a uniform example signature)
    :param C: unused by this variant (kept for a uniform example signature)
    :param epsilon: solver convergence tolerance for LibLinear
    :return: predicted labels on the training features
    """
    from shogun.Features import RealFeatures, MulticlassLabels
    from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC, LinearMulticlassMachine, MulticlassOneVsOneStrategy, MulticlassOneVsRestStrategy

    train_feats = RealFeatures(fm_train_real)
    test_feats = RealFeatures(fm_test_real)
    train_labels = MulticlassLabels(label_train_multiclass)

    # Base binary solver: L2-regularized L2-loss SVC with a bias term.
    base_svm = LibLinear(L2R_L2LOSS_SVC)
    base_svm.set_epsilon(epsilon)
    base_svm.set_bias_enabled(True)

    # One-vs-one reduction of the multiclass problem to binary sub-problems.
    machine = LinearMulticlassMachine(MulticlassOneVsOneStrategy(), train_feats, base_svm, train_labels)
    machine.train()
    return machine.apply().get_labels()
if __name__=='__main__':
print('MulticlassMachine')
classifier_multiclasslinearmachine_modular(*parameter_list[0])
|
bcorbet/SickRage | sickbeard/providers/iptorrents.py | Python | gpl-3.0 | 11,284 | 0.004342 | # Author: seedboy
# URL: https://github.com/seedboy
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
import datetime
import urlparse
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from sickbeard.show_n | ame_helpers import allPossibleShowNames
class IPTorrentsProvider(generic.TorrentProvider):
| def __init__(self):
generic.TorrentProvider.__init__(self, "IPTorrents")
self.supportsBacklog = True
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.freeleech = False
self.cache = IPTorrentsCache(self)
self.urls = {'base_url': 'https://www.iptorrents.com',
'login': 'https://www.iptorrents.com/torrents/',
'search': 'https://www.iptorrents.com/torrents/?%s%s&q=%s&qf=ti',
}
self.url = self.urls['base_url']
self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'iptorrents.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'submit',
}
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
return False
if re.search('tries left', response.text) \
or re.search('<title>IPT</title>', response.text) \
or response.status_code == 401:
logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
freeleech = '&free=on' if self.freeleech else ''
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
# URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
searchURL += ';o=seeders' if mode != 'RSS' else ''
logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
try:
data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
if not html:
logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
continue
if html.find(text='No Torrents Found!'):
logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'class': 'torrents'})
torrents = torrent_table.find_all('tr') if torrent_table else []
#Continue only if one Release is found
if len(torrents) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
for result in torrents[1:]:
try:
torrent = result.find_all('td')[1].find('a')
torrent_name = torrent.string
torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
torrent_details_url = self.urls['base_url'] + torrent['href']
torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'} |
janutechnology/VirtShell | virtshell_server/virtshell_server/provisioning/images.py | Python | gpl-2.0 | 676 | 0.002959 | from provisioning.images_repository import ImagesRepository
import uuid
class Images(object):
    """Thin service layer that delegates image CRUD to ImagesRepository,
    adding a generated UUID on creation."""

    def __init__(self):
        self.images_repository = ImagesRepository()

    def get_all_images(self):
        """Return every stored image."""
        return self.images_repository.get_all_images()

    def get_image(self, name):
        """Return the image identified by *name*."""
        return self.images_repository.get_image(name)

    def create_image(self, image):
        """Stamp *image* with a fresh UUID (under the 'uuid' key) and persist it."""
        image['uuid'] = str(uuid.uuid4())
        return self.images_repository.create_image(image)

    def delete_image(self, name):
        """Remove the image identified by *name*."""
        return self.images_repository.delete_image(name)

    def update_image(self, name, image):
        """Replace the stored image *name* with *image*."""
        return self.images_repository.update_image(name, image)
forj-oss/docs | conf.py | Python | apache-2.0 | 10,336 | 0.006772 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Forj documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 19 14:00:10 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Forj'
copyright = '2014, Forj community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'console'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '/img/forj_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Forjdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = | {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tre | e into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Forj.tex', 'Forj Documentation',
'Forj community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'forj', 'Forj Documentation',
['Forj community'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Forj', 'Forj Documentation',
'Forj community', 'Forj', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#tex |
fevangelista/pyWicked | external/pybind11/pybind11/_version.py | Python | mit | 77 | 0 | version_info = (1, | 9, 'dev0')
__version__ = '.'.join(map(str, | version_info))
|
pypa/virtualenv | src/virtualenv/activation/activator.py | Python | mit | 1,411 | 0.003544 | from __future__ import absolute_import, unicode_literals
import os
from abc import ABCMeta, abstractmethod
from six import add_metaclass
@add_metaclass(ABCMeta)
class Activator(object):
    """Base class for generators of virtual-environment activation scripts."""

    def __init__(self, options):
        """Create a new activator generator.

        :param options: the parsed options as defined within :meth:`add_parser_arguments`
        """
        # A prompt of "." means "use the current directory's name as the prompt".
        if options.prompt == ".":
            self.flag_prompt = os.path.basename(os.getcwd())
        else:
            self.flag_prompt = options.prompt

    @classmethod
    def supports(cls, interpreter):
        """Check if the activation script is supported in the given interpreter.

        :param interpreter: the interpreter we need to support
        :return: ``True`` if supported, ``False`` otherwise
        """
        # Supported everywhere by default; subclasses may narrow this.
        return True

    @classmethod
    def add_parser_arguments(cls, parser, interpreter):
        """
        Add CLI arguments for this activation script.

        :param parser: the CLI parser
        :param interpreter: the interpreter this virtual environment is based of
        """
        # Intentionally a no-op here; subclasses register their own options.

    @abstractmethod
    def generate(self, creator):
        """Generate the activate script for the given creator.

        :param creator: the creator (based of :class:`virtualenv.create.creator.Creator`) used to create this \
virtual environment
        """
        raise NotImplementedError
|
coxmediagroup/googleads-python-lib | examples/dfp/v201505/network_service/make_test_network.py | Python | apache-2.0 | 1,901 | 0.006839 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Licen | se is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific languag | e governing permissions and
# limitations under the License.
"""This example creates a test network.
You do not need to have a DFP account to run this example, but you do need to
have a Google account (created at http://www.google.com/accounts/newaccount
if you currently don't have one) that is not associated with any other DFP test
networks. Once this network is created, you can supply the network code in your
settings to make calls to other services.
Alternatively, if you do not wish to run this example, you can create a test
network at:
https://dfp-playground.appspot.com
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Create a DFP test network via the NetworkService and print its
    network code, display name, and sign-in URL.

    :param client: an initialized ``googleads.dfp.DfpClient``
    """
    # Initialize appropriate service.
    network_service = client.GetService('NetworkService', version='v201505')
    # Create a test network.
    network = network_service.MakeTestNetwork()
    # Display results.
    print ('Test network with network code \'%s\' and display name \'%s\' '
           'created.' % (network['networkCode'], network['displayName']))
    print ('You may now sign in at http://www.google.com/dfp/main?networkCode=%s'
           % network['networkCode'])
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
lmazuel/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/deployment.py | Python | mit | 3,290 | 0.000304 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class Deployment(ProxyOnlyResource):
    """User credentials used for publishing activity.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param deployment_id: Identifier for deployment.
    :type deployment_id: str
    :param status: Deployment status.
    :type status: int
    :param message: Details about deployment status.
    :type message: str
    :param author: Who authored the deployment.
    :type author: str
    :param deployer: Who performed the deployment.
    :type deployer: str
    :param author_email: Author email.
    :type author_email: str
    :param start_time: Start time.
    :type start_time: datetime
    :param end_time: End time.
    :type end_time: datetime
    :param active: True if deployment is currently active, false if completed
     and null if not started.
    :type active: bool
    :param details: Details on deployment.
    :type details: str
    """

    # Server-populated fields that must never be sent in a request.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and serializer types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'deployment_id': {'key': 'properties.id', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'int'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'str'},
        'deployer': {'key': 'properties.deployer', 'type': 'str'},
        'author_email': {'key': 'properties.authorEmail', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
        'active': {'key': 'properties.active', 'type': 'bool'},
        'details': {'key': 'properties.details', 'type': 'str'},
    }

    def __init__(self, kind=None, deployment_id=None, status=None, message=None, author=None, deployer=None, author_email=None, start_time=None, end_time=None, active=None, details=None):
        # ``kind`` is handled by the ProxyOnlyResource base class.
        super(Deployment, self).__init__(kind=kind)
        self.deployment_id = deployment_id
        self.status = status
        self.message = message
        self.author = author
        self.deployer = deployer
        self.author_email = author_email
        self.start_time = start_time
        self.end_time = end_time
        self.active = active
        self.details = details
|
psi4/psi4 | psi4/driver/procrouting/sapt/__init__.py | Python | lgpl-3.0 | 973 | 0.001028 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .sapt_proc import run_sapt_dft, sapt_dft, run_sf_sapt
|
cdelguercio/slothauth | slothauth/middleware.py | Python | apache-2.0 | 2,323 | 0.004735 | from django.contrib.auth import authenticate, login
from django.contrib.auth import get_user_model
from . import settings
Account = get_user_model()
class PasswordlessUserMiddleware(object):
    """Logs a user in when a valid passwordless key is present in the
    request's query string (param name comes from slothauth settings)."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        passwordless_key = request.GET.get(settings.PASSWORDLESS_GET_PARAM, None)
        # A truthy string is necessarily non-empty, so the original
        # `key and not (key == '')` check was redundant.
        if passwordless_key:
            # TODO (kept from original): `force=True` makes the passwordless
            # key authenticate users WITH a password too.
            user = authenticate(passwordless_key=passwordless_key, force=True)
            if user and user.is_active:
                login(request, user)

        response = self.get_response(request)
        return response
class OneTimeAuthenticationKeyMiddleware(object):
    """Authenticates a request via a single-use key passed as a GET
    parameter (param name comes from slothauth settings)."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        auth_key = request.GET.get(settings.ONE_TIME_AUTHENTICATION_KEY_GET_PARAM, None)
        if auth_key and auth_key != '':
            account = authenticate(one_time_authentication_key=auth_key)
            if account and account.is_active:
                login(request, account)
        return self.get_response(request)
class ImpersonateMiddleware(object):
    """Lets privileged users (can_impersonate or is_superuser) act as
    another Account via the `__impersonate` / `__unimpersonate` GET
    parameters; the impersonated id persists in the session."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        user = request.user
        privileged = (getattr(user, 'can_impersonate', False) or
                      getattr(user, 'is_superuser', False))
        if privileged:
            # BUGFIX: the original checked `'impersonate_id' in session`
            # FIRST, which made its `__unimpersonate` branch unreachable
            # (that branch also required 'impersonate_id' in the session).
            # Handle the explicit start/stop requests before falling back
            # to the stored session id.
            if "__unimpersonate" in request.GET:
                request.session.pop('impersonate_id', None)
            elif "__impersonate" in request.GET:
                request.session['impersonate_id'] = int(request.GET["__impersonate"])
                request.user = Account.objects.get(id=request.session['impersonate_id'])
            elif 'impersonate_id' in request.session:
                # An impersonation is already active; keep using it.
                request.user = Account.objects.get(id=request.session['impersonate_id'])
        response = self.get_response(request)
        return response
kevinoconnor7/penelophant | penelophant/models/Bid.py | Python | apache-2.0 | 744 | 0.017473 | """ Bid Model """
from penelophant.database import db
from .Model import Model
from .User import User


class Bid(Model):
    """Bid data representation: one user's price offer on an auction."""

    # Highest price first; ties broken by earliest bid.
    __mapper_args__ = {
        'order_by': 'price DESC, bid_time ASC'
    }

    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(
        db.Integer,
        db.ForeignKey(User.id, ondelete='RESTRICT', onupdate='CASCADE'),
        nullable=False
    )
    auction_id = db.Column(
        db.Integer,
        db.ForeignKey('auction.id', ondelete='RESTRICT', onupdate='CASCADE'),
        nullable=False
    )
    bid_time = db.Column(db.TIMESTAMP, default=db.func.now())
    # NUMERIC(13, 2): SQLAlchemy's Numeric takes (precision, scale) as
    # integers; the original passed the single string '13,2'.
    price = db.Column(db.Numeric(13, 2), default=0)

    user = db.relationship(User, backref="bids")

    # Will be populated by backref
    auction = None
|
ME-ICA/me-ica | meica.libs/mdp/hinet/__init__.py | Python | lgpl-2.1 | 2,949 | 0.000678 | """Hierarchical Networks Package.
This package makes it possible to construct graph-like Node structures,
especially hierarchical networks.
The most important building block is the new Layer node, which works as an
horizontal version of flow. It encapsulates a list of Nodes, which are trained
and executed in parallel.
For example we can take two Nodes with 100 dimensional input to
construct a layer with a 200 dimensional input. The first half of the input
data is automatically fed into the first Node, the second half into the second
Node.
Since one might also want to use Flows (i.e. vertical stacks of Nodes) in a
Layer, a wrapper class for Nodes is provided.
The FlowNode class wraps any Flow into a Node, which can then be used like any
other Node. Together with the Layer this allows you to combine Nodes both
horizontally and vertically. Thereby one can in principle realize
any feed-forward network topology.
For hierarchical networks one might want to route the different parts of the
data to different Nodes in a Layer in complicated ways. This is done by a
Switchboard that handles all the routing.
Defining the routing manually can be quite tedious, so one can derive subclasses
for special routing situations. One such subclass for 2d image data is provided.
It maps the data according to rectangular overlapping 2d input areas. One can
then feed the output into a Layer and each Node will get the correct input.
"""
from flownode import FlowNode
from layer import Layer, SameInputLayer, CloneLayer
from switchboard import (
Switchboard, SwitchboardException, MeanInverseSwitchboard,
ChannelSwitchboard,
Rectangular2dSwitchboard, Rectangular2dSwitchboardException,
DoubleRect2dSwitchboard, DoubleRect2dSwitchboardException,
DoubleRhomb2dSwitchboard, DoubleRhomb2dSwitchboardException
)
from htmlvisitor import (
HiNetHTMLVisitor, HiNetXHTMLVisitor, NewlineWriteFile, show_flow
)
from switchboard_factory import (
get_2d_image_switchboard, FactoryExtensionChannelSwitchboard,
FactoryRectangular2dSwitchboard, FactoryDoubleRect2dSwitchboard,
FactoryDoubleRhomb2dSwitchboard
)
__all__ = ['FlowNode', 'Layer', 'SameInputLayer', 'CloneLayer',
'Switchboard', 'SwitchboardException', 'ChannelSwitchboard',
'Rectangular2dSwitchboard', 'Rectangular2dSwitchboardException',
'DoubleRect2dSwitchboard', 'DoubleRect2dSwitchboardException',
'DoubleRhomb2dSwitchboard', 'DoubleRhomb2dSwitchboardException',
'HiNetHTMLVisitor', 'HiNetXHTMLVisitor', 'NewlineWriteFile',
'show_flow', 'get_2d_image_switchboard'
]
from mdp.utils import fixup_namespace
fixup_namespace(__name__, __all__,
('flownode',
'layer',
'switchboard',
'hinet_Visitor',
'switchboard_factory',
'utils',
'fixup_namespace'
))
|
fkmclane/web.py | tests/test_header.py | Python | mit | 5,961 | 0.000336 | from fooster.web import web
import pytest
test_key = 'Magical'
test_value = 'header'
test_header = test_key + ': ' + test_value + '\r\n'
poor_key = 'not'
poor_value = 'good'
poor_header = poor_key + ':' + poor_value + '\r\n'
good_header = poor_key + ': ' + poor_value + '\r\n'
case_key = 'wEIrd'
case_key_title = case_key.title()
case_value = 'cAse'
case_header = case_key + ': ' + case_value + '\r\n'
case_header_test = case_key + ': ' + test_value + '\r\n'
nonstr_key = 6
nonstr_value = None
def _fresh():
    """Build an empty HTTPHeaders instance for a single test."""
    return web.HTTPHeaders()


def test_add_get():
    hdrs = _fresh()
    hdrs.add(test_header)
    assert hdrs.get(test_key) == test_value


def test_add_getlist():
    hdrs = _fresh()
    hdrs.add(test_header)
    assert hdrs.getlist(test_key) == [test_value]


def test_add_getitem():
    hdrs = _fresh()
    hdrs.add(test_header)
    assert hdrs[test_key] == test_value


def test_getitem_empty():
    with pytest.raises(KeyError):
        _fresh()[test_key]


def test_getlist_empty():
    with pytest.raises(KeyError):
        _fresh().getlist(test_key)


def test_getlist_default():
    assert _fresh().getlist(test_key, []) == []


def test_set_remove():
    hdrs = _fresh()
    hdrs.set(test_key, test_value)
    assert hdrs.get(test_key) == test_value
    hdrs.remove(test_key)


def test_set_multiple():
    hdrs = _fresh()
    for _ in range(2):
        hdrs.set(test_key, test_value)
    assert hdrs.get(test_key) == test_value
    assert hdrs.getlist(test_key) == [test_value] * 2


def test_set_overwrite():
    hdrs = _fresh()
    for _ in range(2):
        hdrs.set(test_key, test_value, True)
    assert hdrs.get(test_key) == test_value
    assert hdrs.getlist(test_key) == [test_value]


def test_setitem_delitem():
    hdrs = _fresh()
    hdrs[test_key] = test_value
    assert hdrs[test_key] == test_value
    del hdrs[test_key]
def test_remove_empty():
    hdrs = web.HTTPHeaders()
    with pytest.raises(KeyError):
        hdrs.remove(test_key)


def test_delitem_empty():
    hdrs = web.HTTPHeaders()
    with pytest.raises(KeyError):
        del hdrs[test_key]


def test_retrieve():
    hdrs = web.HTTPHeaders()
    hdrs.set(test_key, test_value)
    assert hdrs.retrieve(test_key) == test_header


def test_len():
    hdrs = web.HTTPHeaders()
    hdrs.set(test_key, test_value)
    assert len(hdrs) == 1
    hdrs.set(poor_key, poor_value)
    assert len(hdrs) == 2


def test_multiple_add_get_len_retrieve():
    hdrs = web.HTTPHeaders()

    def verify(last, values, raw):
        # One unique key throughout, so the length stays 1.
        assert len(hdrs) == 1
        assert hdrs.get(case_key) == last
        assert hdrs.getlist(case_key) == values
        assert hdrs.retrieve(case_key) == raw

    hdrs.add(case_header)
    verify(case_value, [case_value], case_header)

    hdrs.add(case_header)
    verify(case_value, [case_value] * 2, case_header * 2)

    hdrs.add(case_header_test)
    verify(test_value, [case_value] * 2 + [test_value],
           case_header * 2 + case_header_test)


def test_multiple_set_get_len_retrieve():
    hdrs = web.HTTPHeaders()

    def verify(last, values, raw):
        assert len(hdrs) == 1
        assert hdrs.get(case_key) == last
        assert hdrs.getlist(case_key) == values
        assert hdrs.retrieve(case_key) == raw

    hdrs.set(case_key, case_value)
    verify(case_value, [case_value], case_header)

    hdrs.set(case_key, case_value)
    verify(case_value, [case_value] * 2, case_header * 2)

    hdrs.set(case_key, test_value)
    verify(test_value, [case_value] * 2 + [test_value],
           case_header * 2 + case_header_test)


def test_clear():
    hdrs = web.HTTPHeaders()
    hdrs.set(test_key, test_value)
    hdrs.set(poor_key, poor_value)
    hdrs.clear()
    assert len(hdrs) == 0


def test_case():
    # Lookup is case-insensitive.
    hdrs = web.HTTPHeaders()
    hdrs.set(case_key, case_value)
    assert hdrs.get(case_key_title) == case_value
    assert hdrs.retrieve(case_key_title) == case_header


def test_iter():
    hdrs = web.HTTPHeaders()
    hdrs.set(test_key, test_value)
    hdrs.set(poor_key, poor_value)
    hdrs.set(case_key, case_value)
    seen = [header for header in hdrs]
    assert test_header in seen
    assert good_header in seen
    assert case_header in seen


def test_contains():
    hdrs = web.HTTPHeaders()
    hdrs.set(test_key, test_value)
    hdrs.set(poor_key, poor_value)
    hdrs.set(case_key, case_value)
    # Membership must be case-insensitive for every stored key.
    for key in (test_key, poor_key, case_key):
        assert key in hdrs
        assert key.upper() in hdrs
        assert key.lower() in hdrs


def test_poor_header():
    # No space after the colon is still parsed.
    hdrs = web.HTTPHeaders()
    hdrs.add(poor_header)
    assert hdrs.get(poor_key) == poor_value


def test_set_key_nonstr():
    hdrs = web.HTTPHeaders()
    with pytest.raises(TypeError):
        hdrs.set(nonstr_key, test_value)


def test_set_value_nonstr():
    hdrs = web.HTTPHeaders()
    with pytest.raises(TypeError):
        hdrs.set(test_key, nonstr_value)
|
def get_current_phase(tag):
    """Return the negated current phase of the stimulus named `tag`.

    Scans the '#stimDisplayUpdate' variable (read via the MWorks `getvar`
    runtime helper) for a stimulus whose 'name' matches `tag` and formats
    its negated 'current_phase' to two decimals. Returns '' when no
    matching stimulus is on the display.
    """
    sdu = getvar('#stimDisplayUpdate')
    for item in sdu:
        if item['name'] == tag:
            return '%.2f' % -item['current_phase']
    return ''
|
RFDesign/SiK | Firmware/tools/rssi.py | Python | bsd-2-clause | 1,670 | 0.026347 | #!/usr/bin/env python
# RSSI production test
import serial, sys, optparse, time, fdpexpect
parser = optparse.OptionParser("update_mode")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
parser.add_option("--rtscts", action='store_true', default=False, help='enable rtscts')
parser.add_option("--dsrdtr", action='store_true', default=False, help='enable dsrdtr')
parser.add_option("--xonxoff", action='store_true', default=False, help='enable xonxoff')

opts, args = parser.parse_args()

if len(args) == 0:
    print("usage: rssi.py <DEVICE...>")
    sys.exit(1)


def rssi(device):
    """Put the radio on `device` into RSSI report mode and echo its output.

    Opens the serial port, enters AT command mode ('+++'), verifies the
    radio answers, restores factory defaults (AT&F), then starts the RSSI
    test (AT&T=RSSI) and streams roughly 200 successful reads to stdout.
    """
    port = serial.Serial(device, opts.baudrate, timeout=0,
                         dsrdtr=opts.dsrdtr, rtscts=opts.rtscts,
                         xonxoff=opts.xonxoff)
    # Close the port on every exit path; the original leaked it when the
    # AT handshake timed out (the early returns skipped port.close()).
    try:
        ser = fdpexpect.fdspawn(port.fileno(), logfile=sys.stdout)
        ser.send('+++')
        time.sleep(1)
        ser.send('\r\nATI\r\n')
        try:
            ser.expect(['OK', 'SiK .* on HM-TRP'], timeout=2)
        except fdpexpect.TIMEOUT:
            print("timeout")
            return
        ser.send('AT&F\r\n')
        try:
            ser.expect(['OK'], timeout=2)
        except fdpexpect.TIMEOUT:
            print("timeout")
            return
        ser.send('AT&T=RSSI\r\n')
        ctr = 0
        while ctr < 200:
            try:
                count = port.inWaiting()
                if count == 0:
                    count = 1
                buf = port.read(count)
                if len(buf) == 0:
                    continue
                sys.stdout.write(buf)
                sys.stdout.flush()
                ctr = ctr + 1
            except KeyboardInterrupt:
                sys.exit(0)
    finally:
        port.close()


for d in args:
    print("Putting %s into rssi test mode" % d)
    rssi(d)
|
hazelnusse/robot.bicycle | data/physicalparameters/RawData/PeriodMeasurements/RearWheel/populateTable.py | Python | bsd-2-clause | 2,289 | 0.002621 | import psycopg2
import scipy.io
import os
# Set to true if you want to query tables before adding data:
queryConfigs = False
queryTimeSeries = False
# Create a connection to RoboticBicycle database
conn = psycopg2.connect(database="robot_bicycle_parameters", user="hazelnusse")
cur = conn.cursor()
def insert_statement(cur, table, row):
    """Render a five-column INSERT for `table` with `row` bound via the
    cursor's mogrify(), which performs the parameter escaping."""
    template = "insert into {0} values(%s, %s, %s, %s, %s);".format(table)
    return cur.mogrify(template, row)
# (filename marker, destination table, human-readable label) for each
# pendulum measurement type; replaces two near-identical copy-pasted blocks.
_PENDULUM_SPECS = (
    ('RobotRwheelTorsional',
     'parametermeasurements.rearwheeltorsionalpendulumtimeseries',
     'torsional'),
    ('RobotRwheelCompound',
     'parametermeasurements.rearwheelcompoundpendulumtimeseries',
     'compound'),
)

for subdir, dirs, files in os.walk('.'):
    # Per-directory row counters, one per destination table (was i/j).
    counters = dict((table, 0) for _, table, _ in _PENDULUM_SPECS)
    files.sort()
    for filename in files:  # renamed from `file`, which shadowed the builtin
        for marker, table, label in _PENDULUM_SPECS:
            if filename.find(marker) == -1:
                continue
            # NOTE(review): loadmat() receives the bare filename, not
            # os.path.join(subdir, filename), so only files in the current
            # directory load — kept as-is; confirm intent.
            matdata = scipy.io.loadmat(filename)
            counters[table] += 1
            row = (counters[table],
                   int(matdata['sampleRate'][0][0]),
                   int(matdata['duration'][0][0]),
                   matdata['data'].transpose()[0].tolist(),
                   '')
            SQL = insert_statement(cur, table, row)
            try:
                cur.execute(SQL)
                conn.commit()
            except (psycopg2.IntegrityError, psycopg2.InternalError) as inst:
                print("Exception in adding wheel %s pendulum data:" % label)
                print(type(inst))
                print(inst)
                conn.rollback()
                continue
            print(cur.statusmessage)

cur.close()
conn.close()
|
exiahuang/SalesforceXyTools | libs/cherrypy/wsgiserver/wsgiserver3.py | Python | apache-2.0 | 77,620 | 0.001391 | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_makefile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import os
try:
import queue
except:
import Queue as queue
import re
import email.utils
import socket
import sys
if 'win' in sys.platform and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
if sys.version_info < (3,1):
import io
else:
import _pyio as io
DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
import threading
import time
from traceback import format_exc
if sys.version_info >= (3, 0):
bytestr = bytes
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
bytestr = str
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
    """Return the errno numbers, de-duplicated and in order, for every
    name in `errnames` that exists on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS); names unknown here are silently skipped.
    """
    known = set(dir(errno))
    numbers = [getattr(errno, name) for name in errnames if name in known]
    # dict preserves insertion order and drops duplicates.
    return list(dict.fromkeys(numbers))
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']]
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        # BUGFIX: the original tested `line[0] in (SPACE, TAB)`, which is
        # always False on Python 3 because indexing a bytes object yields
        # an int while SPACE/TAB are length-1 bytes. Slicing keeps the
        # comparison bytes-to-bytes on both Python 2 and 3.
        if line[:1] in (SPACE, TAB):
            # It's a continuation line.
            v = line.strip()
        else:
            try:
                k, v = line.split(COLON, 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                v = b", ".join((existing, v))
        hdict[hname] = v

    return hdict
class MaxSizeExceeded(Exception):
    """Raised when more bytes are read than the configured maximum
    (see SizeCheckWrapper)."""
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
d | ef read(self, siz | e=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
|
vjFaLk/frappe | frappe/automation/doctype/assignment_rule/test_assignment_rule.py | Python | mit | 6,555 | 0.051869 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import random_string
from frappe.test_runner import make_test_records
class TestAutoAssign(unittest.TestCase):
    """Integration tests for the Assignment Rule doctype.

    Each test creates Note documents and verifies that ToDo assignments
    are created, rotated, closed or cancelled according to the two rules
    created by get_assignment_rule() in setUp().

    NOTE(review): `check_multiple_rules` and `check_assignment_rule_scheduling`
    are prefixed `check_`, not `test_`, so unittest never runs them —
    confirm whether that is intentional.
    """

    def setUp(self):
        make_test_records("User")
        days = [
            dict(day = 'Sunday'),
            dict(day = 'Monday'),
            dict(day = 'Tuesday'),
            dict(day = 'Wednesday'),
            dict(day = 'Thursday'),
            dict(day = 'Friday'),
            dict(day = 'Saturday'),
        ]
        # Two identical all-week schedules, one for each rule created below.
        self.assignment_rule = get_assignment_rule([days, days])
        clear_assignments()

    def test_round_robin(self):
        """Assignments rotate test -> test1 -> test2 and wrap around."""
        note = make_note(dict(public=1))

        # check if auto assigned to first user
        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), 'test@example.com')

        note = make_note(dict(public=1))

        # check if auto assigned to second user
        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), 'test1@example.com')

        clear_assignments()

        note = make_note(dict(public=1))

        # check if auto assigned to third user, even if
        # previous assignments where closed
        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), 'test2@example.com')

        # check loop back to first user
        note = make_note(dict(public=1))

        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), 'test@example.com')

    def test_load_balancing(self):
        """With 'Load Balancing', 30 notes spread evenly: 10 per user."""
        self.assignment_rule.rule = 'Load Balancing'
        self.assignment_rule.save()

        for _ in range(30):
            note = make_note(dict(public=1))

        # check if each user has 10 assignments (?)
        for user in ('test@example.com', 'test1@example.com', 'test2@example.com'):
            self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)

        # clear 5 assignments for first user
        # can't do a limit in "delete" since postgres does not support it
        for d in frappe.get_all('ToDo', dict(reference_type = 'Note', owner = 'test@example.com'), limit=5):
            frappe.db.sql("delete from tabToDo where name = %s", d.name)

        # add 5 more assignments
        for i in range(5):
            make_note(dict(public=1))

        # check if each user still has 10 assignments
        for user in ('test@example.com', 'test1@example.com', 'test2@example.com'):
            self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)

    def test_assign_condition(self):
        """A note failing the assign condition (public == 1) gets no ToDo."""
        # check condition
        note = make_note(dict(public=0))

        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), None)

    def test_clear_assignment(self):
        """Unassign condition (public == 0) cancels the open ToDo."""
        note = make_note(dict(public=1))

        # check if auto assigned to first user
        todo = frappe.get_list('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ))[0]

        todo = frappe.get_doc('ToDo', todo['name'])
        self.assertEqual(todo.owner, 'test@example.com')

        # test auto unassign
        note.public = 0
        note.save()

        todo.load_from_db()

        # check if todo is cancelled
        self.assertEqual(todo.status, 'Cancelled')

    def test_close_assignment(self):
        """Close condition ('"Closed" in content') closes the ToDo but
        keeps its owner."""
        note = make_note(dict(public=1, content="valid"))

        # check if auto assigned
        todo = frappe.get_list('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ))[0]

        todo = frappe.get_doc('ToDo', todo['name'])
        self.assertEqual(todo.owner, 'test@example.com')

        note.content="Closed"
        note.save()

        todo.load_from_db()

        # check if todo is closed
        self.assertEqual(todo.status, 'Closed')
        # check if closed todo retained assignment
        self.assertEqual(todo.owner, 'test@example.com')

    def check_multiple_rules(self):
        # Not auto-run (see class docstring).
        note = make_note(dict(public=1, notify_on_login=1))

        # check if auto assigned to test3 (2nd rule is applied, as it has higher priority)
        self.assertEqual(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), 'test@example.com')

    def check_assignment_rule_scheduling(self):
        # Not auto-run (see class docstring).
        frappe.db.sql("DELETE FROM `tabAssignment Rule`")

        days_1 = [dict(day = 'Sunday'), dict(day = 'Monday'), dict(day = 'Tuesday')]
        days_2 = [dict(day = 'Wednesday'), dict(day = 'Thursday'), dict(day = 'Friday'), dict(day = 'Saturday')]

        get_assignment_rule([days_1, days_2], ['public == 1', 'public == 1'])

        frappe.flags.assignment_day = "Monday"
        note = make_note(dict(public=1))

        self.assertIn(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), ['test@example.com', 'test1@example.com', 'test2@example.com'])

        frappe.flags.assignment_day = "Friday"
        note = make_note(dict(public=1))

        self.assertIn(frappe.db.get_value('ToDo', dict(
            reference_type = 'Note',
            reference_name = note.name,
            status = 'Open'
        ), 'owner'), ['test3@example.com'])
def clear_assignments():
    """Delete every ToDo created against a Note (test cleanup helper)."""
    frappe.db.sql("delete from tabToDo where reference_type = 'Note'")
def get_assignment_rule(days, assign=None):
    """(Re)create the two test Assignment Rules for the Note doctype.

    `days` is a pair of assignment-day child-table lists, one per rule;
    `assign` is an optional pair of assign-condition strings. Returns the
    first (lower-priority) rule's document.
    """
    frappe.delete_doc_if_exists('Assignment Rule', 'For Note 1')

    if not assign:
        assign = ['public == 1', 'notify_on_login == 1']

    # 1st rule: round-robin over three users.
    assignment_rule = frappe.get_doc(dict(
        name = 'For Note 1',
        doctype = 'Assignment Rule',
        priority = 0,
        document_type = 'Note',
        assign_condition = assign[0],
        unassign_condition = 'public == 0 or notify_on_login == 1',
        close_condition = '"Closed" in content',
        rule = 'Round Robin',
        assignment_days = days[0],
        users = [
            dict(user = 'test@example.com'),
            dict(user = 'test1@example.com'),
            dict(user = 'test2@example.com'),
        ]
    )).insert()

    frappe.delete_doc_if_exists('Assignment Rule', 'For Note 2')

    # 2nd rule: higher priority, single user.
    frappe.get_doc(dict(
        name = 'For Note 2',
        doctype = 'Assignment Rule',
        priority = 1,
        document_type = 'Note',
        assign_condition = assign[1],
        unassign_condition = 'notify_on_login == 0',
        rule = 'Round Robin',
        assignment_days = days[1],
        users = [
            dict(user = 'test3@example.com')
        ]
    )).insert()

    return assignment_rule
def make_note(values=None):
    """Insert and return a Note with a random title and content; any
    entries in `values` are applied to the document before insert."""
    doc = frappe.get_doc(dict(
        doctype = 'Note',
        title = random_string(10),
        content = random_string(20)
    ))
    if values:
        doc.update(values)
    doc.insert()
    return doc
ceibal-tatu/pygobject | gi/pygtkcompat.py | Python | lgpl-2.1 | 14,279 | 0.001401 | # -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2011-2012 Johan Dahlin <johan@gnome.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
"""
PyGTK compatibility layer.
This modules goes a little bit longer to maintain PyGTK compatibility than
the normal overrides system.
It is recommended to not depend on this layer, but only use it as an
intermediate step when porting your application to PyGI.
Compatibility might never be 100%, but the aim is to make it possible to run
a well behaved PyGTK application mostly unmodified on top of PyGI.
"""
import sys
import warnings
try:
# Python 3
from collections import UserList
from imp import reload
except ImportError:
# Python 2 ships that in a different module
from UserList import UserList
import gi
from gi.repository import GObject
def _install_enums(module, dest=None, strip=''):
    """Copy GEnum/GFlags member values from `module` onto `dest` as
    PyGTK-style module-level constants.

    Each value name is stripped of the uppercased destination-module
    prefix (``MODNAME_``) and, optionally, of `strip` before being set
    on `dest`.
    """
    if dest is None:
        dest = module
    modname = dest.__name__.rsplit('.', 1)[1].upper()
    for attr in dir(module):
        try:
            obj = getattr(module, attr, None)
        except Exception:
            # Some GI attributes raise on access; skip them. The original
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            continue
        try:
            if issubclass(obj, GObject.GEnum):
                for value, enum in obj.__enum_values__.items():
                    name = enum.value_name
                    name = name.replace(modname + '_', '')
                    if strip and name.startswith(strip):
                        name = name[len(strip):]
                    setattr(dest, name, enum)
        except TypeError:
            # issubclass() raises TypeError when obj is not a class;
            # nothing more to do for this attribute.
            continue
        try:
            if issubclass(obj, GObject.GFlags):
                for value, flag in obj.__flags_values__.items():
                    name = flag.value_names[-1].replace(modname + '_', '')
                    setattr(dest, name, flag)
        except TypeError:
            continue
def enable():
    """Install the GLib/GObject/Gio modules into sys.modules under their
    PyGTK-era names (glib, gobject, gobject.propertyhelper, gio)."""
    from gi.repository import GLib, GObject, Gio
    from gi._gobject import propertyhelper

    sys.modules['glib'] = GLib
    sys.modules['gobject'] = GObject
    sys.modules['gobject.propertyhelper'] = propertyhelper
    sys.modules['gio'] = Gio
_unset = object()
def enable_gtk(version='2.0'):
# set the default encoding like PyGTK
reload(sys)
if sys.version_info < (3,0):
sys.setdefaultencoding('utf-8')
# atk
gi.require_version('Atk', '1.0')
from gi.repository import Atk
sys.modules['atk'] = Atk
_install_enums(Atk)
# pango
gi.require_version('Pango', '1.0')
from gi.repository import Pango
sys.modules['pango'] = Pango
_install_enums(Pango)
# pangocairo
gi.require_version('PangoCairo', '1.0')
from gi.repository import PangoCairo
sys.modules['pangocairo'] = PangoCairo
# gdk
gi.require_version('Gdk', version)
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
sys.modules['gtk.gdk'] = Gdk
_install_enums(Gdk)
_install_enums(GdkPixbuf, dest=Gdk)
Gdk._2BUTTON_PRESS = 5
Gdk.BUTTON_PRESS = 4
Gdk.screen_get_default = Gdk.Screen.get_default
Gdk.Pixbuf = GdkPixbuf.Pixbuf
Gdk.pixbuf_new_from_file = GdkPixbuf.Pixbuf.new_from_file
Gdk.PixbufLoader = GdkPixbuf.PixbufLoader.new_with_type
orig_get_frame_extents = Gdk.Window.get_frame_extents
def get_frame_extents(window):
try:
try:
rect = Gdk.Rectangle(0, 0, 0, 0)
except TypeError:
rect = Gdk.Rectangle()
orig_get_frame_extents(window, rect)
except TypeError:
rect = orig_get_frame_extents(window)
return rect
Gdk.Window.get_frame_extents = get_frame_extents
orig_get_origin = Gdk.Window.get_origin
def get_origin(self):
return orig_get_origin(self)[1:]
Gdk.Window.get_origin = get_origin
# gtk
gi.require_version('Gtk', version)
from gi.repository import Gtk
| sys.modules['gtk'] = Gtk
Gtk.gdk = Gdk
Gtk.pygtk_version = (2, 99, 0)
Gtk.gtk_version = (Gtk.MAJOR_VERSION,
Gtk.MINOR_VERSION,
Gtk.MICRO_VERSION)
_install_enums(Gtk)
# Action
def set_tool_item_type(menuaction, gtype):
warnings.warn('set_tool_item_type() is no | t supported',
DeprecationWarning, stacklevel=2)
Gtk.Action.set_tool_item_type = classmethod(set_tool_item_type)
# Alignment
orig_Alignment = Gtk.Alignment
class Alignment(orig_Alignment):
def __init__(self, xalign=0.0, yalign=0.0, xscale=0.0, yscale=0.0):
orig_Alignment.__init__(self)
self.props.xalign = xalign
self.props.yalign = yalign
self.props.xscale = xscale
self.props.yscale = yscale
Gtk.Alignment = Alignment
# Box
orig_pack_end = Gtk.Box.pack_end
def pack_end(self, child, expand=True, fill=True, padding=0):
orig_pack_end(self, child, expand, fill, padding)
Gtk.Box.pack_end = pack_end
orig_pack_start = Gtk.Box.pack_start
def pack_start(self, child, expand=True, fill=True, padding=0):
orig_pack_start(self, child, expand, fill, padding)
Gtk.Box.pack_start = pack_start
# TreeViewColumn
orig_tree_view_column_pack_end = Gtk.TreeViewColumn.pack_end
def tree_view_column_pack_end(self, cell, expand=True):
orig_tree_view_column_pack_end(self, cell, expand)
Gtk.TreeViewColumn.pack_end = tree_view_column_pack_end
orig_tree_view_column_pack_start = Gtk.TreeViewColumn.pack_start
def tree_view_column_pack_start(self, cell, expand=True):
orig_tree_view_column_pack_start(self, cell, expand)
Gtk.TreeViewColumn.pack_start = tree_view_column_pack_start
# TreeView
def insert_column_with_attributes(view, position, title, cell, *args, **kwargs):
pass
Gtk.TreeView.insert_column_with_attributes = insert_column_with_attributes
# CellLayout
orig_cell_pack_end = Gtk.CellLayout.pack_end
def cell_pack_end(self, cell, expand=True):
orig_cell_pack_end(self, cell, expand)
Gtk.CellLayout.pack_end = cell_pack_end
orig_cell_pack_start = Gtk.CellLayout.pack_start
def cell_pack_start(self, cell, expand=True):
orig_cell_pack_start(self, cell, expand)
Gtk.CellLayout.pack_start = cell_pack_start
orig_set_cell_data_func = Gtk.CellLayout.set_cell_data_func
def set_cell_data_func(self, cell, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_set_cell_data_func(self, cell, callback, user_data)
Gtk.CellLayout.set_cell_data_func = set_cell_data_func
# CellRenderer
class GenericCellRenderer(Gtk.CellRenderer):
pass
Gtk.GenericCellRenderer = GenericCellRenderer
# ComboBox
orig_combo_row_separator_func = Gtk.ComboBox.set_row_separator_func
def combo_row_separator_func(self, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_combo_row_separator_func(self, callback, user_data)
Gtk.ComboBox.set_row_separator_func = combo_row_separator_func
# ComboBoxEntry
class ComboBoxEntry(Gtk.ComboBox):
def __init__(self, **kwds):
Gtk.ComboBox.__in |
keyz182/adamsays | says/models.py | Python | mit | 350 | 0.005714 | from django.db import models
from dj | ango.contrib.auth.models import User
# Create your models here.
class Quote(models.Model):
    """A user-submitted quote with its publication date and posting user."""
    quote = models.CharField(max_length=2048)
    date = models.DateTimeField('date published')
    # NOTE(review): ForeignKey without on_delete targets Django < 2.0;
    # confirm before any framework upgrade.
    poster = models.ForeignKey(User)
    def __str__(self):              # __unicode__ on Python 2
        return self.quote
mjwtom/swift | test/unit/obj/test_diskfile.py | Python | apache-2.0 | 221,579 | 0.000005 | # -*- coding:utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.diskfile"""
import six.moves.cPickle as pickle
import os
import errno
import itertools
import mock
import unittest
import email
import tempfile
import uuid
import xattr
import re
from collections import defaultdict
from random import shuffle, randint
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from hashlib import md5
from contextlib import closing, contextmanager
from gzip import GzipFile
from eventlet import hubs, timeout, tpool
from test.unit import (FakeLogger, mock as unit_mock, temptree,
patch_policies, debug_logger, EMPTY_ETAG,
make_timestamp_iter)
from nose import SkipTest
from swift.obj import diskfile
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, Timestamp
from swift.common import ring
from swift.common.splice import splice
from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace, DiskFileXattrNotSupported
from swift.common.storage_policy import (
POLICIES, get_policy_string, StoragePolicy, ECStoragePolicy,
BaseStoragePolicy, REPL_POLICY, EC_POLICY)
test_policies = [
StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', is_default=False,
ec_type='jerasure_rs_vand',
ec_ndata=10, ec_nparity=4),
]
def find_paths_with_matching_suffixes(needed_matches=2, needed_suffixes=3):
    """Draw random object paths until one hash suffix collects enough.

    Keeps generating ('a', 'c', <random hex>) object paths, bucketing
    them by the last three characters of their ring hash, until at least
    *needed_suffixes* distinct suffixes exist and one suffix has
    *needed_matches* paths.  Returns (suffix -> [path, ...] mapping,
    the matching suffix).
    """
    paths_by_suffix = defaultdict(list)
    while True:
        obj_path = ('a', 'c', uuid.uuid4().hex)
        suffix = hash_path(*obj_path)[-3:]
        paths_by_suffix[suffix].append(obj_path)
        if len(paths_by_suffix) < needed_suffixes:
            # Extremely unlikely: the matches could land before enough
            # distinct suffixes exist; simplest to just keep drawing.
            continue
        if len(paths_by_suffix[suffix]) >= needed_matches:
            return paths_by_suffix, suffix
def _create_test_ring(path, policy):
    """Write a small 7-device, 3-replica object ring for *policy* into
    *path* and return a loaded Ring for it.

    The gzip'd pickle layout matches what swift.common.ring expects on
    disk; device zones/IPs include IPv6 entries to exercise parsing.
    """
    ring_name = get_policy_string('object', policy)
    testgz = os.path.join(path, ring_name + '.ring.gz')
    intended_replica2part2dev_id = [
        [0, 1, 2, 3, 4, 5, 6],
        [1, 2, 3, 0, 5, 6, 4],
        [2, 3, 0, 1, 6, 4, 5]]
    intended_devs = [
        {'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
         'port': 6000},
        {'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
         'port': 6000},
        {'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
         'port': 6000},
        {'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
         'port': 6000},
        {'id': 4, 'device': 'sda1', 'zone': 5, 'ip': '127.0.0.4',
         'port': 6000},
        {'id': 5, 'device': 'sda1', 'zone': 6,
         'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
        {'id': 6, 'device': 'sda1', 'zone': 7,
         'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334',
         'port': 6000}]
    intended_part_shift = 30
    intended_reload_time = 15
    with closing(GzipFile(testgz, 'wb')) as f:
        pickle.dump(
            ring.RingData(intended_replica2part2dev_id, intended_devs,
                          intended_part_shift),
            f)
    return ring.Ring(path, ring_name=ring_name,
                     reload_time=intended_reload_time)
@patch_policies
class TestDiskFileModuleMethods(unittest.TestCase):
    def setUp(self):
        """Build a throwaway device tree (one device, four partitions),
        a per-policy test ring and a DiskFileManager under a tempdir."""
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = ''
        # Setup a test ring per policy (stolen from common/test_ring.py)
        self.testdir = tempfile.mkdtemp()
        self.devices = os.path.join(self.testdir, 'node')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        os.mkdir(self.devices)
        self.existing_device = 'sda1'
        os.mkdir(os.path.join(self.devices, self.existing_device))
        self.objects = os.path.join(self.devices, self.existing_device,
                                    'objects')
        os.mkdir(self.objects)
        self.parts = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            os.mkdir(os.path.join(self.objects, part))
        self.ring = _create_test_ring(self.testdir, POLICIES.legacy)
        # mount_check disabled: the "device" is just a directory here.
        self.conf = dict(
            swift_dir=self.testdir, devices=self.devices, mount_check='false',
            timeout='300', stats_interval='1')
        self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
    def tearDown(self):
        # Best-effort removal of the temp tree created in setUp().
        rmtree(self.testdir, ignore_errors=1)
    def _create_diskfile(self, policy):
        # Convenience: a DiskFile for a fixed a/c/o on the test device.
        return self.df_mgr.get_diskfile(self.existing_device,
                                        '0', 'a', 'c', 'o',
                                        policy=policy)
    def test_extract_policy(self):
        """extract_policy() maps object datadir paths to a storage policy,
        returning None for unknown indexes or malformed paths."""
        # good path names
        pn = 'objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
        pn = 'objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
        # leading slash
        pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
        pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
        # full paths
        good_path = '/srv/node/sda1/objects-1/1/abc/def/1234.data'
        self.assertEqual(diskfile.extract_policy(good_path), POLICIES[1])
        good_path = '/srv/node/sda1/objects/1/abc/def/1234.data'
        self.assertEqual(diskfile.extract_policy(good_path), POLICIES[0])
        # short paths
        path = '/srv/node/sda1/objects/1/1234.data'
        self.assertEqual(diskfile.extract_policy(path), POLICIES[0])
        path = '/srv/node/sda1/objects-1/1/1234.data'
        self.assertEqual(diskfile.extract_policy(path), POLICIES[1])
        # well formatted but, unknown policy index
        pn = 'objects-2/0/606/198427efcff042c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), None)
        # malformed path
        self.assertEqual(diskfile.extract_policy(''), None)
        bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
        self.assertEqual(diskfile.extract_policy(bad_path), None)
        pn = 'XXXX/0/606/1984527ed42b6ef6247c78606/1401379842.14643.data'
        self.assertEqual(diskfile.extract_policy(pn), None)
        bad_path = '/srv/node/sda1/foo-1/1/abc/def/1234.data'
        self.assertEqual(diskfile.extract_policy(bad_path), None)
        bad_path = '/srv/node/sda1/obj1/1/abc/def/1234.data'
        self.assertEqual(diskfile.extract_policy(bad_path), None)
def test_quarantine_renamer(self):
for policy in POLICIES:
# we use this for convenience, not really about a diskfile layout
df = self._create_diskfile(policy=policy)
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined',
diskfile.get_data_dir(policy),
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc') |
eesatfan/vuplus-enigma2 | lib/python/Components/AVSwitch.py | Python | gpl-2.0 | 7,088 | 0.034142 | from config import config, ConfigSlider, ConfigSelection, ConfigYesNo, \
ConfigEnableDisable, ConfigSubsection, ConfigBoolean, ConfigSelectionNumber, ConfigNothing, NoSave
from enigma import eAVSwitch, getDesktop
from SystemInfo import SystemInfo
from os import path as os_path
class AVSwitch:
	"""Facade over the enigma2 eAVSwitch singleton: video input, colour
	format, aspect ratio, TV system and WSS signalling."""

	# Numeric driver ids per aspect-ratio config value.  Keep in sync
	# with the map used by setAspectRatio() in InitAVSwitch().
	ASPECT_RATIO_SETTINGS = {
		"4_3_letterbox": 0,
		"4_3_panscan": 1,
		"16_9": 2,
		"16_9_always": 3,
		"16_10_letterbox": 4,
		"16_10_panscan": 5,
		"16_9_letterbox": 6,
	}

	def setInput(self, input):
		# Map the symbolic input name to the driver's numeric id.
		INPUT = { "ENCODER": 0, "SCART": 1, "AUX": 2 }
		eAVSwitch.getInstance().setInput(INPUT[input])

	def setColorFormat(self, value):
		eAVSwitch.getInstance().setColorFormat(value)

	def setAspectRatio(self, value):
		eAVSwitch.getInstance().setAspectRatio(value)

	def setSystem(self, value):
		eAVSwitch.getInstance().setVideomode(value)

	def getOutputAspect(self):
		"""Return the configured output aspect as a (num, den) tuple.

		For the "16_9" (auto) setting the current stream aspect is read
		from /proc; every other value maps statically.
		"""
		valstr = config.av.aspectratio.value
		if valstr in ("4_3_letterbox", "4_3_panscan"):  # 4:3
			return (4, 3)
		elif valstr == "16_9":  # auto ... 4:3 or 16:9
			try:
				aspect_str = open("/proc/stb/vmpeg/0/aspect", "r").read()
				if aspect_str == "1":  # 4:3
					return (4, 3)
			except IOError:
				pass
		elif valstr in ("16_9_always", "16_9_letterbox"):  # 16:9
			pass
		elif valstr in ("16_10_letterbox", "16_10_panscan"):  # 16:10
			return (16, 10)
		return (16, 9)

	def getFramebufferScale(self):
		# NOTE(review): aspect width is multiplied by the fb height and
		# vice versa -- looks swapped, but this is the long-standing
		# upstream behaviour; kept as-is pending confirmation.
		aspect = self.getOutputAspect()
		fb_size = getDesktop(0).size()
		return (aspect[0] * fb_size.height(), aspect[1] * fb_size.width())

	def getAspectRatioSetting(self):
		"""Return the numeric driver id for the configured aspect ratio.

		Fix: the previous if/elif chain left ``val`` unbound for any
		unknown config value (UnboundLocalError).  A dict lookup now
		raises a clear KeyError instead and mirrors setAspectRatio's
		mapping exactly.
		"""
		return self.ASPECT_RATIO_SETTINGS[config.av.aspectratio.value]

	def setAspectWSS(self, aspect=None):
		if not config.av.wss.value:
			value = 2  # auto(4:3_off)
		else:
			value = 1  # auto
		eAVSwitch.getInstance().setWSS(value)
def InitAVSwitch():
	"""Create the config.av subtree, wire each setting's change notifier
	to the matching AVSwitch/driver call, and probe optional /proc
	capabilities (AC3/AAC downmix, OSD alpha, scaler sharpness).

	Adding a notifier triggers it once with the stored value, so this
	also applies the saved A/V settings at startup.
	"""
	config.av = ConfigSubsection()
	config.av.yuvenabled = ConfigBoolean(default=False)
	colorformat_choices = {"cvbs": _("CVBS"), "rgb": _("RGB"), "svideo": _("S-Video")}
	# when YUV is not enabled, don't let the user select it
	if config.av.yuvenabled.value:
		colorformat_choices["yuv"] = _("YPbPr")
	# ikseong
	config.av.colorformat = ConfigSelection(choices=colorformat_choices, default="cvbs")
	config.av.aspectratio = ConfigSelection(choices={
			"4_3_letterbox": _("4:3 Letterbox"),
			"4_3_panscan": _("4:3 PanScan"),
			"16_9": _("16:9"),
			"16_9_always": _("16:9 always"),
			"16_10_letterbox": _("16:10 Letterbox"),
			"16_10_panscan": _("16:10 PanScan"),
			"16_9_letterbox": _("16:9 Letterbox")},
			default = "4_3_letterbox")
	config.av.aspect = ConfigSelection(choices={
			"4_3": _("4:3"),
			"16_9": _("16:9"),
			"16_10": _("16:10"),
			"auto": _("Automatic")},
			default = "auto")
	config.av.policy_169 = ConfigSelection(choices={
			# TRANSLATORS: (aspect ratio policy: black bars on top/bottom) in doubt, keep english term.
			"letterbox": _("Letterbox"),
			# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
			"panscan": _("Pan&Scan"),
			# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
			"scale": _("Just Scale")},
			default = "letterbox")
	config.av.policy_43 = ConfigSelection(choices={
			# TRANSLATORS: (aspect ratio policy: black bars on left/right) in doubt, keep english term.
			"pillarbox": _("Pillarbox"),
			# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
			"panscan": _("Pan&Scan"),
			# TRANSLATORS: (aspect ratio policy: display as fullscreen, with stretching the left/right)
			"nonlinear": _("Nonlinear"),
			# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
			"scale": _("Just Scale")},
			default = "pillarbox")
	config.av.tvsystem = ConfigSelection(choices = {"pal": _("PAL"), "ntsc": _("NTSC"), "multinorm": _("multinorm")}, default="pal")
	config.av.wss = ConfigEnableDisable(default = True)
	config.av.defaultac3 = ConfigYesNo(default = False)
	config.av.generalAC3delay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
	config.av.generalPCMdelay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
	config.av.vcrswitch = ConfigEnableDisable(default = False)
	iAVSwitch = AVSwitch()
	def setColorFormat(configElement):
		map = {"cvbs": 0, "rgb": 1, "svideo": 2, "yuv": 3}
		iAVSwitch.setColorFormat(map[configElement.value])
	def setAspectRatio(configElement):
		map = {"4_3_letterbox": 0, "4_3_panscan": 1, "16_9": 2, "16_9_always": 3, "16_10_letterbox": 4, "16_10_panscan": 5, "16_9_letterbox" : 6}
		iAVSwitch.setAspectRatio(map[configElement.value])
	def setSystem(configElement):
		map = {"pal": 0, "ntsc": 1, "multinorm" : 2}
		iAVSwitch.setSystem(map[configElement.value])
	def setWSS(configElement):
		iAVSwitch.setAspectWSS()
	# this will call the "setup-val" initial
	config.av.colorformat.addNotifier(setColorFormat)
	config.av.aspectratio.addNotifier(setAspectRatio)
	config.av.tvsystem.addNotifier(setSystem)
	config.av.wss.addNotifier(setWSS)
	iAVSwitch.setInput("ENCODER") # init on startup
	SystemInfo["ScartSwitch"] = eAVSwitch.getInstance().haveScartSwitch()
	# Probe downmix capability from the driver's published choices.
	try:
		can_downmix = open("/proc/stb/audio/ac3_choices", "r").read()[:-1].find("downmix") != -1
	except:
		can_downmix = False
	SystemInfo["CanDownmixAC3"] = can_downmix
	if can_downmix:
		def setAC3Downmix(configElement):
			open("/proc/stb/audio/ac3", "w").write(configElement.value and "downmix" or "passthrough")
		config.av.downmix_ac3 = ConfigYesNo(default = True)
		config.av.downmix_ac3.addNotifier(setAC3Downmix)
	try:
		can_downmix_aac = open("/proc/stb/audio/aac_choices", "r").read()[:-1].find("downmix") != -1
	except:
		can_downmix_aac = False
	SystemInfo["CanDownmixAAC"] = can_downmix_aac
	if can_downmix_aac:
		def setAACDownmix(configElement):
			open("/proc/stb/audio/aac", "w").write(configElement.value and "downmix" or "passthrough")
		config.av.downmix_aac = ConfigYesNo(default = True)
		config.av.downmix_aac.addNotifier(setAACDownmix)
	try:
		can_osd_alpha = open("/proc/stb/video/alpha", "r") and True or False
	except:
		can_osd_alpha = False
	SystemInfo["CanChangeOsdAlpha"] = can_osd_alpha
	def setAlpha(config):
		open("/proc/stb/video/alpha", "w").write(str(config.value))
	if can_osd_alpha:
		config.av.osd_alpha = ConfigSlider(default=255, limits=(0,255))
		config.av.osd_alpha.addNotifier(setAlpha)
	if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
		def setScaler_sharpness(config):
			myval = int(config.value)
			try:
				print "--> setting scaler_sharpness to: %0.8X" % myval
				open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w").write("%0.8X" % myval)
				open("/proc/stb/vmpeg/0/pep_apply", "w").write("1")
			except IOError:
				print "couldn't write pep_scaler_sharpness"
		config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
		config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
	else:
		config.av.scaler_sharpness = NoSave(ConfigNothing())
|
dirrelito/animated-invention | ThreadedTicTacToe/_old/ttt.py | Python | apache-2.0 | 5,563 | 0.039907 | # old single process tictactoe game to be rewritten....
import numpy
import os
class TicTacToePlayer:
    """A tic-tac-toe participant: a random 'AI' or a human 'Terminal' player."""

    def __init__(self, playerType, playerName, ttt_game_settings):
        if playerType not in ('AI', 'Terminal'):
            raise ValueError()
        self.type = playerType
        self.name = playerName
        self.board_size = ttt_game_settings['board_size']
        rows, cols = ttt_game_settings['board_size'][0], ttt_game_settings['board_size'][1]
        self.nr_of_positions = rows * cols

    def playMove(self, board):
        """Return a 1-based board position chosen by this player."""
        if self.type == 'AI':
            # Random guess in the legal range; occupied cells are rejected
            # by the game loop, so no board inspection is needed here.
            return numpy.random.randint(1, self.nr_of_positions + 1)
        return self.playFromTerminal(board)

    def playFromTerminal(self, board):
        """Show the board and prompt until a valid, free position is entered."""
        os.system('cls' if os.name == 'nt' else 'clear')
        print(board)
        while True:
            raw = input("%s! Play a position (1-%i): " % (self.name, self.nr_of_positions))
            try:
                choice = int(raw)
            except ValueError:
                print("That is not an integer!")
                continue
            if not (1 <= choice <= self.nr_of_positions):
                print("That is not an integer between 1 and %i!" % self.nr_of_positions)
                continue
            row, col = divmod(choice - 1, self.board_size[1])
            if board[row, col] != 0:
                print("That position is already taken!")
                continue
            return choice

    def getName(self):
        """Return the player's display name."""
        return self.name
class TicTacToeGame:
    """A generalized m x n tic-tac-toe game with a "k in a row" win rule.

    The game state is a flat list of 1-based board positions in play
    order; player 1 always moves first and moves strictly alternate.
    """

    def __init__(self, player1Name="player1", playerMinus1Name="player2",
                 player1Type="Terminal", playerMinus1Type="Terminal",
                 settings=dict(board_size=(3, 3), win_length=3)):
        # NOTE: the mutable default `settings` is shared across calls,
        # but it is only read here, so this is safe in practice.
        self.game, self.board_size, self.win_length = self.initializa_game(settings)
        self.player1 = TicTacToePlayer(
            playerType=player1Type,
            playerName=player1Name,
            ttt_game_settings=settings)
        self.playerMinus1 = TicTacToePlayer(
            playerType=playerMinus1Type,
            playerName=playerMinus1Name,
            ttt_game_settings=settings)

    def getGame(self):
        """Return the raw move list (1-based positions, alternating players)."""
        return self.game

    def initializa_game(self, settings):
        """Validate settings; return (empty move list, board_size, win_length).

        (Method name keeps the historical typo for call compatibility.)
        """
        board_size = numpy.asarray(settings['board_size'], dtype='int')
        win_length = settings['win_length']
        if len(board_size) != 2:
            raise ValueError('Not a good size!')
        if win_length > min(board_size) or not isinstance(win_length, int):
            raise ValueError('Not a X in rows config.')
        return [], board_size, win_length

    def getBoard(self):
        """Render the moves as a 2-D array: +1 player1, -1 player2, 0 empty."""
        board = numpy.zeros(shape=self.board_size)
        currPlayerIs1 = True
        for move in self.game:
            row = (int(move) - 1) // self.board_size[1]
            col = (int(move) - 1) % self.board_size[1]
            board[row, col] = 1 if currPlayerIs1 else -1
            currPlayerIs1 = not currPlayerIs1
        return board

    def playMove(self, move):
        """Append *move* to the game if legal; return True when accepted.

        Fix: moves below 1 are now rejected.  Previously e.g. move 0 was
        accepted and, through Python's negative indexing in getBoard(),
        silently marked the wrong cell.
        """
        move = int(move)
        if move < 1 or move > self.board_size[0] * self.board_size[1] \
                or move in self.game:
            return False
        self.game.append(move)
        return True

    def play(self):
        """Run the interactive game loop until a win or a tie."""
        currPlayerIs1 = True
        while True:
            moveAttempt = self.player1.playMove(self.getBoard()) if currPlayerIs1 \
                else self.playerMinus1.playMove(self.getBoard())
            if self.playMove(moveAttempt):
                currPlayerIs1 = not currPlayerIs1
                gameHasEnded, endMessage = self.checkGameEnded()
                if gameHasEnded:
                    print(endMessage)
                    print(self.getBoard())
                    print("Thank you for playing Tic-Tac-Toe!")
                    break

    def checkWinner(self):
        """Check whether the most recent move completed a winning line.

        Returns (True, "<name> won!") or (False, "").
        """
        lastmove = self.game[-1]
        row = (int(lastmove) - 1) // self.board_size[1]
        col = (int(lastmove) - 1) % self.board_size[1]
        lastmove = (row, col)
        # The player who made the last move: player1 after an odd move count.
        currPlayerName = self.player1.getName() if len(self.game) % 2 == 1 \
            else self.playerMinus1.getName()
        # A line wins if the streaks in two opposite directions plus the
        # last move itself reach win_length.  Fix: use >= instead of ==,
        # since a single move can complete a line LONGER than win_length
        # (e.g. joining two length-2 runs when win_length is 3), which
        # the old equality test missed.
        for forward, backward in (('N', 'S'), ('E', 'W'), ('NE', 'SW'), ('SE', 'NW')):
            streak = 1 + self.checkStreak(lastmove, forward) \
                + self.checkStreak(lastmove, backward)
            if streak >= self.win_length:
                return True, "%s won!" % currPlayerName
        return False, ""

    def checkStreak(self, position, direction):
        """Count consecutive same-player cells adjacent to *position*
        (exclusive) in the given compass *direction*."""
        DIRECTIONS = {'N': (-1, 0), 'S': (1, 0), 'E': (0, 1), 'W': (0, -1),
                      'NW': (-1, -1), 'SW': (1, -1), 'NE': (-1, 1), 'SE': (1, 1)}
        next_pos = numpy.asarray(position) + numpy.asarray(DIRECTIONS[direction])
        board = self.getBoard()
        # Stop at the board edges.
        if next_pos[0] < 0 or next_pos[1] < 0:
            return 0
        if next_pos[0] >= self.board_size[0] or next_pos[1] >= self.board_size[1]:
            return 0
        if board[position[0], position[1]] != board[next_pos[0], next_pos[1]]:
            return 0
        return self.checkStreak(next_pos, direction) + 1

    def checkGameEnded(self):
        """Return (ended, message): a win, a full-board tie, or (False, "")."""
        gameHasEnded, endMessage = self.checkWinner()
        if not gameHasEnded:
            if len(self.game) == self.board_size[0] * self.board_size[1]:
                gameHasEnded = True
                endMessage = "Its a tie!"
        return gameHasEnded, endMessage
if __name__ == "__main__":
    # Demo game: human "Ludvig" vs. a random AI on a 4x4 board, 3 in a row.
    ttt_game_settings = dict(board_size=(4,4), win_length=3)
    myGame = TicTacToeGame(player1Name="Ludvig", playerMinus1Name="PC", playerMinus1Type="AI",settings=ttt_game_settings)
    myGame.play()
|
ngocson2vn/webapps | shortly/__init__.py | Python | gpl-2.0 | 18 | 0.055556 | # T | his is a mo | dule |
micjerry/groupservice | handlers/acceptinvite.py | Python | apache-2.0 | 951 | 0.003155 | import tornado.web
import tornado.gen
import json
import io
import logging
from mickey.basehandler import BaseHandler
from mickey.groups import GroupMgrMgr, MickeyGroup
class AcceptInviteHandler(BaseHandler):
    """Handles a user accepting a group invitation.

    POST body: ``{"groupid": "<id>"}``.  The requesting user
    (self.p_userid -- presumably set by BaseHandler from the
    authenticated session; confirm against BaseHandler) is added to the
    group as a real member.  Responses: 403 for a missing groupid, 404
    for an unknown group, otherwise the code from add_realmember.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        data = json.loads(self.request.body.decode("utf-8"))
        groupid = data.get("groupid", "")
        logging.info("begin to add members to group %s" % groupid)
        if not groupid:
            logging.error("invalid request")
            self.set_status(403)
            self.finish()
            return
        group = yield GroupMgrMgr.getgroup(groupid)
        if not group:
            logging.error("group %s does not exist" % groupid)
            self.set_status(404)
            self.finish()
            return
        # The group layer's result code is used directly as the HTTP status.
        rst_code = yield group.add_realmember(self.p_userid)
        self.set_status(rst_code)
        self.finish()
|
migcruz/dota2analytics | backend/apps/users/models.py | Python | mit | 2,526 | 0 | from __future__ import unicode_literals
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
BaseUserManager)
from django.db import models
from django.utils import timezone
class EmailUserManager(BaseUserManager):
    """Manager for EmailUser: creates accounts keyed by email address."""

    def _create_user(self, email, password, is_staff, is_superuser,
                     **extra_fields):
        """Create, save and return a user with the given role flags."""
        if not email:
            raise ValueError('The given email must be set')
        timestamp = timezone.now()
        active = extra_fields.pop("is_active", True)
        user = self.model(
            email=self.normalize_email(email),
            is_staff=is_staff,
            is_active=active,
            is_superuser=is_superuser,
            last_login=timestamp,
            date_joined=timestamp,
            **extra_fields
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create a regular (non-superuser) account."""
        staff = extra_fields.pop("is_staff", False)
        return self._create_user(email, password, staff, False, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create a staff superuser account."""
        return self._create_user(email, password, True, True, **extra_fields)
class EmailUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by email address (no username)."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField(
        max_length=254,
        unique=True,
        error_messages={
            'unique': 'That email address is already taken.'
        }
    )
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    date_joined = models.DateTimeField(default=timezone.now)
    last_updated = models.DateTimeField(auto_now=True)
    objects = EmailUserManager()
    # Log in with the email field; names are prompted by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']
    class Meta:
        permissions = (
            ('view_emailuser', 'Can view email users'),
        )
    def __unicode__(self):
        return self.email
    def get_short_name(self):
        return '{first_name}'.format(
            first_name=self.first_name
        )
    def get_full_name(self):
        return '{first_name} {last_name}'.format(
            first_name=self.first_name,
            last_name=self.last_name,
        )
|
jmakov/ggrc-core | src/ggrc_workflows/__init__.py | Python | apache-2.0 | 34,934 | 0.00853 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from datetime import datetime, date
from flask import Blueprint
from sqlalchemy import inspect, and_, orm
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models import all_models
from ggrc.rbac.permissions import is_allowed_update
from ggrc.services.common import Resource
from ggrc.services.registry import service
from ggrc_workflows import models, notification
from ggrc_workflows.models import relationship_helper
from ggrc_workflows.models import WORKFLOW_OBJECT_TYPES
from ggrc_workflows.converters import IMPORTABLE, EXPORTABLE
from ggrc_workflows.converters.handlers import COLUMN_HANDLERS
from ggrc_workflows.services.common import Signals
from ggrc_workflows.services.workflow_cycle_calculator import get_cycle_calculator
from ggrc_workflows.roles import (
WorkflowOwner, WorkflowMember, BasicWorkflowReader, | WorkflowBasicReader
)
from ggrc_basic_permissions.models import Role, UserRole, ContextImplication
from ggrc_basic_permissio | ns.contributed_roles import (
RoleContributions, RoleDeclarations, DeclarativeRoleImplications
)
# Initialize Flask Blueprint for extension
blueprint = Blueprint(
'ggrc_workflows',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/ggrc_workflows',
)
# Inject the workflow mixins at the front of each workflowable model's
# base-class tuple, then run the mixins' late-init hooks.
for type_ in WORKFLOW_OBJECT_TYPES:
  model = getattr(all_models, type_)
  model.__bases__ = (
      # models.workflow_object.Workflowable,
      models.task_group_object.TaskGroupable,
      models.cycle_task_group_object.CycleTaskGroupable,
      models.workflow.WorkflowState,
      ) + model.__bases__
  # model.late_init_workflowable()
  model.late_init_task_groupable()
  model.late_init_cycle_task_groupable()
def get_public_config(current_user):
  """Expose additional permissions-dependent config to client.

  The workflows extension currently contributes no extra public
  settings, so this is always an empty mapping.
  """
  return dict()
# Initialize service endpoints
def contributed_services():
  """Return the REST service endpoint registrations (URL -> model)
  contributed by the workflows extension."""
  return [
      service('workflows', models.Workflow),
      service('workflow_people', models.WorkflowPerson),
      service('task_groups', models.TaskGroup),
      service('task_group_tasks', models.TaskGroupTask),
      service('task_group_objects', models.TaskGroupObject),
      service('cycles', models.Cycle),
      service('cycle_task_entries', models.CycleTaskEntry),
      service('cycle_task_groups', models.CycleTaskGroup),
      service('cycle_task_group_objects', models.CycleTaskGroupObject),
      service('cycle_task_group_object_tasks', models.CycleTaskGroupObjectTask)
  ]
def contributed_object_views():
  """Return the object-view registrations contributed by this extension."""
  from . import models
  from ggrc.views.registry import object_view
  return [
      object_view(models.Workflow),
  ]
DONE_STATUSES = ("Verified",)
def _get_min_next_due_date(due_dated_objects, exclude_statuses=DONE_STATUSES):
next_due_date = None
for obj in due_dated_objects:
if obj.status not in exclude_statuses:
obj_next_due_date = obj.next_due_date
if isinstance(obj_next_due_date, datetime):
obj_next_due_date = obj_next_due_date.date()
if obj_next_due_date is not None:
if next_due_date is None or next_due_date > obj_next_due_date:
next_due_date = obj_next_due_date
return next_due_date
def _get_min_end_date(timeboxed_objects, exclude_statuses=DONE_STATUSES):
  """Return the earliest end_date among not-yet-finished objects, or None.

  datetime values are collapsed to dates; objects whose status is in
  *exclude_statuses* or whose end_date is None are ignored.
  """
  earliest = None
  for obj in timeboxed_objects:
    if obj.status in exclude_statuses:
      continue
    end = obj.end_date
    if isinstance(end, datetime):
      end = end.date()
    if end is not None and (earliest is None or end < earliest):
      earliest = end
  return earliest
def _get_date_range(timeboxed_objects):
start_date = None
end_date = None
for obj in timeboxed_objects:
obj_start_date = obj.start_date
if isinstance(obj_start_date, datetime):
obj_start_date = obj_start_date.date()
obj_end_date = obj.end_date
if isinstance(obj_end_date, datetime):
obj_end_date = obj_end_date.date()
if obj_start_date is not None:
if start_date is None or start_date > obj_start_date:
start_date = obj_start_date
if obj_end_date is not None:
if end_date is None or end_date < obj_end_date:
end_date = obj_end_date
return start_date, end_date
def update_cycle_dates(cycle):
  """Recompute start/end/next-due dates for *cycle* and its sub-groups.

  Dates roll up from cycle-task-group-object-tasks through objects and
  groups to the cycle itself; tasks in DONE_STATUSES are excluded from
  next-due-date calculations.  An empty cycle gets all dates cleared
  and is marked not current.
  """
  if cycle.id:
    # If `cycle` is already in the database, then eager load required objects
    cycle = models.Cycle.query.filter_by(id=cycle.id).\
        options(orm.joinedload_all(
            'cycle_task_groups.'
            'cycle_task_group_objects.'
            'cycle_task_group_object_tasks')).one()
  if not cycle.cycle_task_group_object_tasks:
    # No tasks at all: nothing to aggregate.
    cycle.start_date, cycle.end_date = None, None
    cycle.next_due_date = None
    cycle.is_current = False
    db.session.add(cycle)
    return
  for ctg in cycle.cycle_task_groups:
    # This is where we calculate the start and end dates
    for ctgo in ctg.cycle_task_group_objects:
      ctgo.start_date, ctgo.end_date = _get_date_range(
          ctgo.cycle_task_group_object_tasks)
      ctgo.next_due_date = _get_min_end_date(
          ctgo.cycle_task_group_object_tasks)
    if len(ctg.cycle_task_group_objects) == 0:
      # Handle case where cycle task group has no mapped objects
      ctg.start_date, ctg.end_date = _get_date_range(
          ctg.cycle_task_group_tasks)
      ctg.next_due_date = _get_min_end_date(
          ctg.cycle_task_group_tasks)
    else:
      ctg.start_date, ctg.end_date = _get_date_range(
          ctg.cycle_task_group_objects)
      ctg.next_due_date = _get_min_next_due_date(
          ctg.cycle_task_group_objects)
  cycle.start_date, cycle.end_date = _get_date_range(cycle.cycle_task_groups)
  cycle.next_due_date = _get_min_next_due_date(cycle.cycle_task_groups)
@Resource.model_posted.connect_via(models.Cycle)
def handle_cycle_post(sender, obj=None, src=None, service=None):
  """Signal handler fired after a Cycle is POSTed via the REST API.

  When the request asks for autogeneration, build the cycle's task
  structure, advance the workflow's next cycle start date and refresh
  the workflow state.
  """
  if src.get('autogenerate', False):
    # When called via a REST POST, use current user.
    current_user = get_current_user()
    workflow = obj.workflow
    obj.calculator = get_cycle_calculator(workflow)
    # Prefer the workflow's stored non-adjusted start date; fall back to
    # today for workflows that have never spawned a cycle.
    if workflow.non_adjusted_next_cycle_start_date:
      base_date = workflow.non_adjusted_next_cycle_start_date
    else:
      base_date = date.today()
    build_cycle(obj, current_user=current_user, base_date=base_date)
    adjust_next_cycle_start_date(obj.calculator, workflow, move_forward=True)
    update_workflow_state(workflow)
    db.session.add(workflow)
def _create_cycle_task(task_group_task, cycle, cycle_task_group,
                       current_user, base_date=None):
  """Instantiate one CycleTaskGroupObjectTask from a task-group task.

  Dates are computed by the cycle's calculator relative to `base_date`
  (defaulting to today).  The new task starts in the "Assigned" state.
  The returned object is not added to any session here.
  """
  # TaskGroupTasks for one_time workflows don't save relative start/end
  # month/day. They only save start and end dates.
  # TaskGroupTasks for all other workflow frequencies save the relative
  # start/end days.
  if not base_date:
    base_date = date.today()
  # Object-approval tasks get the canned default description; all other
  # tasks keep the description from their template.
  description = models.CycleTaskGroupObjectTask.default_description if \
      task_group_task.object_approval else task_group_task.description
  date_range = cycle.calculator.task_date_range(
      task_group_task, base_date=base_date)
  start_date, end_date = date_range
  cycle_task_group_object_task = models.CycleTaskGroupObjectTask(
      context=cycle.context,
      cycle=cycle,
      cycle_task_group=cycle_task_group,
      task_group_task=task_group_task,
      title=task_group_task.title,
      description=description,
      sort_index=task_group_task.sort_index,
      start_date=start_date,
      end_date=end_date,
      contact=task_group_task.contact,
      status="Assigned",
      modified_by=current_user,
      task_type=task_group_task.task_type,
      response_options=task_group_task.response_options,
  )
  return cycle_task_group_object_task
def build_cycle(cycle, current_user=None, base_date=None):
if not base_date:
base_date = date.today()
# Determine the relevant Workflow
workflow = cycle.workflow
# Use WorkflowOwner role when this is called via the cron job.
if no |
cp16net/trove | trove/db/sqlalchemy/mappers.py | Python | apache-2.0 | 3,298 | 0 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CO | NDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_e | xc
from sqlalchemy import Table
def map(engine, models):
    """Attach classical SQLAlchemy mappers for every Trove model.

    Table definitions are reflected (autoloaded) from the database bound
    to `engine`.  This is a no-op when the mappers have already been
    configured.
    """
    meta = MetaData()
    meta.bind = engine
    if mapping_exists(models['instance']):
        return
    # (model key in `models`, table name in the database)
    model_to_table = (
        ('instance', 'instances'),
        ('root_enabled_history', 'root_enabled_history'),
        ('datastore', 'datastores'),
        ('datastore_version', 'datastore_versions'),
        ('capabilities', 'capabilities'),
        ('capability_overrides', 'capability_overrides'),
        ('service_statuses', 'service_statuses'),
        ('dns_records', 'dns_records'),
        ('agent_heartbeats', 'agent_heartbeats'),
        ('quotas', 'quotas'),
        ('quota_usages', 'quota_usages'),
        ('reservations', 'reservations'),
        ('backups', 'backups'),
        ('security_group', 'security_groups'),
        ('security_group_rule', 'security_group_rules'),
        ('security_group_instance_association',
         'security_group_instance_associations'),
        ('configurations', 'configurations'),
        ('configuration_parameters', 'configuration_parameters'),
        ('conductor_lastseen', 'conductor_lastseen'),
        ('clusters', 'clusters'),
        ('datastore_configuration_parameters',
         'datastore_configuration_parameters'),
    )
    for model_key, table_name in model_to_table:
        orm.mapper(models[model_key],
                   Table(table_name, meta, autoload=True))
def mapping_exists(model):
    """Return True if `model` already has an ORM mapper attached."""
    try:
        orm.class_mapper(model)
    except orm_exc.UnmappedClassError:
        return False
    return True
|
secnot/rectpack | setup.py | Python | apache-2.0 | 876 | 0.002283 | from setuptools import setup
long_description = """A collection of heuristic algorithms for solving the 2D knapsack problem,
also known as the bin packing problem. In essence packing a set of rectangles into the
smallest number of bins."""
setup(
name="rectpack",
version="0.2.2",
description="2D Rectangle packing library",
long_description=long_desc | ription,
url="https://github.com/secnot/rectpack/",
author="SecNot" | ,
keywords=["knapsack", "rectangle", "packing 2D", "bin", "binpacking"],
license="Apache-2.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
],
packages=["rectpack"],
zip_safe=False,
test_suite="nose.collector",
tests_require=["nose"],
)
|
innusource/siteg.py | _app/walker.py | Python | mit | 303 | 0 | from os import walk
from defi | nitions import Config
class Walker(object):
def __init__(self, directory):
super(Walker, self).__init__()
cfg = Config()
for (_path, _file, _archives) in walk(cfg.globals['root'] + directory):
pass
| self.archives = _archives
|
itsallvoodoo/csci-school | CSCI220/Week 15 - FINAL/Scrabble/StartScreen.py | Python | apache-2.0 | 2,963 | 0.015862 | from graphics import *
from Button import *
from CreateNewUserScreen import *
from ChangePasswordScreen import *
import os
class StartScreen:
def is_game_in_progress(self,gid):
for filename in os.listdir("games"):
if str(gid) == filename:
return True
return False
    def __init__(self):
        # The authenticated player; populated by a successful login().
        self.player = None
    def getPlayer(self):
        """Return the player chosen during login(), or None before login."""
        return self.player
def login(self,db):
win = GraphWin("CofC Scrabble",300,300)
win.setCoords(0,0,100,100)
Text(Point(17,80),"User: ").draw(win)
Text(Point(17,70),"Password: ").draw(win)
user_entry = Entry(Point(50,80),10)
user_entry.draw(win)
password_entry = Entry(Point(50,70),10)
password_entry.draw(win)
message = Text(Point(50,90),"")
message.draw(win)
# Create a login button and a quit button
login_button = Button("New game",Point(5,50),Point(35,60))
login_button.draw(win)
continue_button = Button("Continue game",Point(5,35),Point(50,45))
continue_button.draw(win)
game_id_entry = Entry(Point(70,40),10)
game_id_entry.draw(win)
new_button = Button("New user",Point(5,2),Point(35,12))
new_button.draw(win)
change_button = Button("Change Password",Point(45,2),Point(95,12))
change_button.draw(win)
quit_button = Button("Quit",Point(5,15),Point(25,25))
quit_button.draw(win)
while True: # Maximum number of clicks
p = win.getMouse()
if login_button.clicked(p):
user1 = user_entry.getText()
password1 = password_entry.getText()
if db.valid_user(user1,password1):
self.player = db.get_user(user1)
win.close()
return "new"
else:
message.setText("Invalid user and/or password")
elif continue_button.clicked(p):
user1 = user_entry.getText()
password1 = password_entry.getText()
gid = int(game_id_entry.getText())
in_progress = self.is_game_in_progress(gid)
if db.valid_user(user1,password1) and in_progress:
self.player = db.get_user(user1)
win.close()
return gid
else:
message.setText("Invalid user/password or game ID")
elif new_button.clicked(p):
scree | n = CreateNewUserScreen()
screen.create_new_user(db)
elif change_button.cli | cked(p):
screen = ChangePasswordScreen()
screen.change_password(db)
elif quit_button.clicked(p):
win.close()
return "quit"
|
caravancoop/configstore | tests/test_awsssm.py | Python | mit | 3,127 | 0.001279 | import pretend
import pytest
from botocore.exceptions import ClientError
from configstore.backends.awsssm import AwsSsmBackend
def test_awsssm_init_bad_install(monkeypatch):
    """AwsSsmBackend must fail loudly when boto3 is not installed."""
    # Simulate a missing boto3 dependency.
    monkeypatch.setattr('configstore.backends.awsssm.boto3', None)
    with pytest.raises(ImportError):
        AwsSsmBackend()
def test_awsssm_success(monkeypatch):
    """A parameter found in SSM is returned decrypted, with no name prefix."""
    response = {'Parameter': {
        'Value': 'postgres://localhost/app',
    }}
    # Stub out boto3 entirely so no real AWS calls are made.
    fake_client = pretend.stub(
        get_parameter=pretend.call_recorder(lambda Name, WithDecryption: response),
    )
    fake_boto3 = pretend.stub(
        client=pretend.call_recorder(lambda service: fake_client),
    )
    monkeypatch.setattr('configstore.backends.awsssm.boto3', fake_boto3)
    b = AwsSsmBackend()
    value = b.get_setting('DATABASE_URL')
    assert value == 'postgres://localhost/app'
    # The backend must use the 'ssm' service, pass the raw setting name,
    # and request decryption of SecureString parameters.
    assert fake_boto3.client.calls == [pretend.call('ssm')]
    assert fake_client.get_parameter.calls == [
        pretend.call(Name='DATABASE_URL', WithDecryption=True),
    ]
def test_awsssm_success_with_prefix(monkeypatch):
response = {'Parameter': {
'Value': 'off',
}}
fake_client = pretend.stub(
get_parameter=pretend.call_recorder(lambda Name, WithDecryption: response) | ,
)
fake_boto3 = pretend.stub(
client=pretend.call_recorder(lambda service: fake_client),
)
monkeypatch.setattr('configstore.backends.awsssm.boto3', fake_boto3)
b = AwsSsmBackend('/myapp/staging/')
value = b.get_setting('DEBUG')
assert value == 'off'
assert fake_boto3.client.calls == [pretend.cal | l('ssm')]
assert fake_client.get_parameter.calls == [
pretend.call(Name='/myapp/staging/DEBUG', WithDecryption=True),
]
def test_awsssm_missing(monkeypatch):
    """A ParameterNotFound error is translated into a None result."""
    error = ClientError({'Error': {'Code': 'ParameterNotFound'}}, 'get_parameter')
    # get_parameter raises instead of returning a value.
    fake_client = pretend.stub(
        get_parameter=pretend.raiser(error),
    )
    fake_boto3 = pretend.stub(
        client=lambda service: fake_client,
    )
    monkeypatch.setattr('configstore.backends.awsssm.boto3', fake_boto3)
    b = AwsSsmBackend()
    value = b.get_setting('/app1/TEMPLATE_DEBUG')
    assert value is None
def test_awsssm_missing_with_prefix(monkeypatch):
    """ParameterNotFound also yields None when a key prefix is configured."""
    error = ClientError({'Error': {'Code': 'ParameterNotFound'}}, 'get_parameter')
    fake_client = pretend.stub(
        get_parameter=pretend.raiser(error),
    )
    fake_boto3 = pretend.stub(
        client=lambda service: fake_client,
    )
    monkeypatch.setattr('configstore.backends.awsssm.boto3', fake_boto3)
    b = AwsSsmBackend('/app1/')
    value = b.get_setting('TEMPLATE_DEBUG')
    assert value is None
def test_awsssm_error(monkeypatch):
    """Unexpected AWS errors must propagate instead of being swallowed."""
    error = ClientError({'Error': {'Code': 'SomethingBad'}}, 'get_parameter')
    fake_client = pretend.stub(
        get_parameter=pretend.raiser(error),
    )
    fake_boto3 = pretend.stub(
        client=lambda service: fake_client,
    )
    monkeypatch.setattr('configstore.backends.awsssm.boto3', fake_boto3)
    b = AwsSsmBackend('/app1/')
    # Only ParameterNotFound is treated as "missing"; anything else raises.
    with pytest.raises(ClientError):
        b.get_setting('TEMPLATE_DEBUG')
|
kurkop/server-tools | __unported__/mass_editing/wizard/__init__.py | Python | agpl-3.0 | 1,106 | 0.001808 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C) 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHA | NTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have recei | ved a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import mass_editing_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
one2pret/winsys | winsys/asyncio.py | Python | mit | 2,311 | 0.019472 | # -*- coding: iso-8859-1 -*-
ur"""AsyncIO objects wrap the Win32 Overlapped API. They are instantiated by
passing a handle which has been opened for Overlapped IO. They can be waited
on by the functions in the :mod:`ipc` module and are True when complete,
False otherwise.
"""
import pywintypes
import winerror
import win32event
import win32file
from winsys import constants, core, exc, ipc, utils
class x_asyncio (exc.x_winsys):
  """Module-specific exception raised for failures in overlapped IO."""
  pass

# Mapping of specific Win32 error codes to exception classes; empty here,
# so every failure surfaces as plain x_asyncio.
WINERROR_MAP = {
}
# All win32file/pywin32 calls in this module are routed through this
# wrapper, which (via exc.wrapper) translates Win32 errors using
# WINERROR_MAP, defaulting to x_asyncio.
wrapped = exc.wrapper (WINERROR_MAP, x_asyncio)
class AsyncIO (core._WinSysObject):
def __init__ (self):
core._WinSysObject.__init__ (self)
self.event = ipc.event (needs_ma | nual_reset=True)
self.overlapped = wrapped (win32file.OVERLAPPED)
self.overlapped.hEvent = self.event.pyobject ()
def pyobject (self):
ur"""Return the pyobject of the underlying event so that this object can
be waited on by the :func:`ipc.all` or :func:`ipc.any` functions
"""
return self.event.pyobject | ()
def is_complete (self):
ur":returns: `True` if the IO has completed"
return self.event.isSet ()
__nonzero__ = is_complete
def wait (self):
ur"""Wait for the IO to complete in such a way that the wait can
be interrupted by a KeyboardInterrupt.
"""
while not self.event.wait (timeout_s=0.5):
pass
class AsyncHandler (AsyncIO):
  """Base for overlapped IO operations bound to a single file handle."""

  # Default IO buffer size in bytes (used by AsyncReader).
  BUFFER_SIZE = 4096

  def __init__ (self, handle, buffer_size=BUFFER_SIZE):
    # NOTE(review): buffer_size is accepted but never stored or used;
    # AsyncReader always reads BUFFER_SIZE bytes — confirm intent.
    AsyncIO.__init__ (self)
    self.handle = handle
class AsyncWriter (AsyncHandler):
  """Starts an overlapped write of `data` to `handle` at construction.

  Completion can be awaited via the inherited wait()/is_complete().
  """

  def __init__ (self, handle, data):
    AsyncHandler.__init__ (self, handle)
    self.data = data
    # Kick off the overlapped write immediately; the event attached to
    # self.overlapped (set up in AsyncIO) signals completion.
    wrapped (win32file.WriteFile, self.handle, data, self.overlapped)
class AsyncReader (AsyncHandler):
  """Starts an overlapped read of up to BUFFER_SIZE bytes at construction.

  Call data() once is_complete() is true to retrieve what was read.
  """

  BUFFER_SIZE = 4096

  def __init__ (self, handle):
    AsyncHandler.__init__ (self, handle)
    # Pre-allocate the fixed-size buffer the overlapped read fills in.
    self.buffer = win32file.AllocateReadBuffer (self.BUFFER_SIZE)
    wrapped (win32file.ReadFile, self.handle, self.buffer, self.overlapped)

  def data (self):
    ur"""Wait until the IO has completed and return the data from the read.
    This is expected to be called after is_complete is true.
    """
    # GetOverlappedResult with bWait=True blocks until the read finishes
    # and reports how many bytes were actually transferred.
    n_bytes = win32file.GetOverlappedResult (self.handle, self.overlapped, True)
    return str (self.buffer)[:n_bytes]
|
un33k/django-contactware | contactware/signals.py | Python | bsd-3-clause | 95 | 0 | from django.dispatch import Signal
contact_sent = Signal(providing_args=["request, contact"]) | ||
aznashwan/py-securestring | securestring.py | Python | gpl-2.0 | 3,385 | 0 | # Copyright 2015, Nashwan Azhari.
# Licensed under the GPLv2, see LICENSE file for details.
"""
A pure Python implementation of the functionality of the ConvertTo-SecureString
and ConvertFrom-SecureString PoweShell commandlets.
Usage example:
from securestring import encrypt, decrypt
if __name__ == "__main__":
str = "My horse is amazing"
# encryption:
try:
enc = encrypt(str)
print("The encryption of %s is: %s" % (str, enc))
except Exception as e:
print(e)
# decryption:
try:
dec = decrypt(enc)
print("The decr | yption of the above is: %s" % dec)
except Exception as e:
p | rint(e)
# checking of operation symmetry:
print("Encryption and decryption are symmetrical: %r", dec == str)
# decrypting powershell input:
psenc = "<your output of ConvertFrom-SecureString>"
try:
dec = decrypt(psenc)
print("Decryption from ConvertFrom-SecureString's input: %s" % dec)
except Exception as e:
print(e)
"""
from codecs import encode
from codecs import decode
from blob import Blob
from ctypes import byref
from ctypes import create_string_buffer
from ctypes import windll
protect_data = windll.crypt32.CryptProtectData
unprotect_data = windll.crypt32.CryptUnprotectData
def encrypt(input):
    """Encrypts the given string following the same syscalls as done by
    ConvertFrom-SecureString.

    Arguments:
        input -- an input string.

    Returns:
        output -- string containing the output of the encryption in
        uppercase hexadecimal.

    Raises:
        Exception -- when the underlying CryptProtectData call fails.
    """
    # CryptProtectData takes UTF-16; so we must convert the data here:
    encoded = input.encode("utf-16")
    data = create_string_buffer(encoded, len(encoded))

    # create our various Blobs:
    input_blob = Blob(len(encoded), data)
    output_blob = Blob()
    # flag 0x01 — presumably CRYPTPROTECT_UI_FORBIDDEN (no UI prompt);
    # TODO confirm against wincrypt.h.
    flag = 0x01

    # call CryptProtectData; an empty description string and a throwaway
    # entropy Blob match ConvertFrom-SecureString's behaviour:
    res = protect_data(byref(input_blob), u"", byref(Blob()), None,
                       None, flag, byref(output_blob))
    input_blob.free_blob()

    # check return code (0 means failure for this Win32 API):
    if res == 0:
        output_blob.free_blob()
        raise Exception("Failed to encrypt: %s" % input)
    else:
        raw = output_blob.get_data()
        output_blob.free_blob()
        # encode the resulting bytes into hexadecimal before returning:
        # NOTE: `hex` shadows the builtin of the same name (local only).
        hex = encode(raw, "hex")
        return decode(hex, "utf-8").upper()
def decrypt(input):
    """Decrypts the given hexadecimally-encoded string in conformity
    with CryptUnprotectData.

    Arguments:
        input -- the encrypted input string in hexadecimal format.

    Returns:
        output -- string containing the output of decryption.

    Raises:
        Exception -- when the underlying CryptUnprotectData call fails.
    """
    # de-hex the input:
    rawinput = decode(input, "hex")
    data = create_string_buffer(rawinput, len(rawinput))

    # create our various Blobs:
    input_blob = Blob(len(rawinput), data)
    output_blob = Blob()
    # dwflags 0x01 — presumably CRYPTPROTECT_UI_FORBIDDEN (no UI prompt);
    # TODO confirm against wincrypt.h.
    dwflags = 0x01

    # call CryptUnprotectData:
    res = unprotect_data(byref(input_blob), u"", byref(Blob()), None,
                         None, dwflags, byref(output_blob))
    input_blob.free_blob()

    # check return code (0 means failure for this Win32 API):
    if res == 0:
        output_blob.free_blob()
        # BUG FIX: the original concatenated `input` to the format string
        # ("...%s" + input), leaving a literal "%s" in the message; the
        # failing input must be interpolated with the % operator instead.
        raise Exception("Failed to decrypt: %s" % input)
    else:
        raw = output_blob.get_data()
        output_blob.free_blob()
        # decode the resulting data from UTF-16:
        return decode(raw, "utf-16")
|
dav-stott/phd-thesis | fw_gridding_idw.py | Python | mit | 12,343 | 0.018067 | # -*- coding: utf-8 -*-
"""
Created on Thu May 29 12:28:08 2014
@author: david
"""
import numpy as np
import sys
import os
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo import gdal_array
from osgeo import gdalconst
from scipy.ndimage import filters
class Empty_Grid():
def __init__(self, minx, maxx, miny, maxy, pix):
self.rows = int((maxy-miny)/pix)
print maxx,minx
print 'rows', self.rows
self.cols = int((maxx-minx)/pix)
print 'cols',self.cols
self.empty = np.zeros((self.rows,self.cols))
print 'Array dimensions', self.empty.shape
self.x_vals = np.arange(minx,maxx,pix)
self.y_vals = np.arange(miny,maxy,pix)
print self.y_vals.shape, self.x_vals.shape
if not self.empty[1].shape == self.x_vals.shape:
if self.empty[1].shape < self.x_vals.shape:
print 'x empty < xvals'
diff = self.empty[1].shape[0]-self.x_vals.shape[0]
self.x_vals = self.x_vals[0:-diff]
if self.empty.shape[1] > self.x_vals.shape:
print 'x empty > xvals'
diff = self.empty[1].shape[0]-self.x_vals.shape[0]
newmax = self.x_vals[-1]+(diff*pix)
self.x_vals = np.append((self.x_vals,np.arange(self.x_vals[-1],newmax,pix)))
if not self.empty[0].shape[0] == self.y_vals.shape[0]:
if self.empty[0].shape[0] < self.y_vals.shape[0]:
print 'y empty < yvals'
print self.empty[0].shape, self.y_vals.shape
diff = self.empty.shape[0]-self.y_vals.shape[0]
self.y_vals = self.y_vals[0:-diff]
if self.empty[0].shape > self.y_vals.shape:
print 'y empty > yvals'
diff = self.empty[0].shape[0] - self.y_vals.shape[0]
print self.y_vals.shape[0], self.empty[0].shape[0]
print diff
newmax = self.y_vals[-1]+(diff*pix)
print y_vals[-1],newmax
self.y_vals = np.hstack((self.y_vals,np.arange(self.y_vals[-1],newmax,pix)))
class Grid_Data():
    """Rasterize scattered (x, y, z) points onto a regular grid.

    A cell containing one point takes that point's z; a cell with several
    takes their mean; an empty cell is filled by inverse-distance-weighted
    interpolation from points within `rad` pixels, or -999 (no data) when
    nothing is in range.  The result is stored in ``self.gridded``.
    """

    def __init__(self, points, pix, rad):
        # Bounding box of the input cloud; `points` is indexed as columns
        # x, y, z — assumed an (n, 3) array (TODO confirm with callers).
        minx = np.min(points[:,0])
        maxx = np.max(points[:,0])
        miny = np.min(points[:,1])
        maxy = np.max(points[:,1])
        grid = Empty_Grid(minx,maxx,miny,maxy,pix)
        print 'shapes:',grid.empty.shape, grid.x_vals.shape, grid.y_vals.shape
        # instantiate counters for direct hits, no-data cells and
        # interpolated cells, plus the output row cursor
        direct = 0
        null = 0
        interpolated = 0
        row_ref = 0
        self.gridded = grid.empty
        for row in grid.y_vals:
            col_ref = 0
            # define the minimum & maximum coordinates of the row
            cellymin = row
            cellymax = row+pix
            # define the centre of the cell
            cell_centy = cellymin+(pix/2)
            # use this to define search radius for interpolation later
            rad_miny = cell_centy-(pix*rad)
            rad_maxy = cell_centy+(pix*rad)
            # use this to find points along the row within the radius;
            # this constrains the search space for the whole row
            rad_row_idx = np.where((points[:,1]>=rad_miny)&
                                   (points[:,1]<=rad_maxy))
            # slice
            rad_row_pts = points[rad_row_idx]
            # find points coincident with the cells in the row, working
            # on the search-radius subset to keep everything efficient
            row_idx = np.where((rad_row_pts[:,1]>=cellymin)&
                               (rad_row_pts[:,1]<=cellymax))
            # slice
            row_pts = rad_row_pts[row_idx]
            # iterate through the columns at each y value
            for column in grid.x_vals:
                # define the boundaries of the cell
                cellxmin = column
                cellxmax = column+pix
                # find points coincident with cell
                col_idx = np.where((row_pts[:,0]>=cellxmin)&
                                   (row_pts[:,0]<=cellxmax))
                # create an array of z values in each cell
                col_pts = row_pts[col_idx]
                cell_zvals = col_pts[:,2]
                # get the shape of this
                cell_znum = cell_zvals.shape[0]
                # if there's only one point that becomes the z of the cell
                if cell_znum == 1:
                    cell_z = cell_zvals[0]
                    direct = direct+1
                # if there's more than one point z = the mean of the points
                elif cell_znum >1:
                    cell_z = np.mean(cell_zvals)
                    direct = direct+1
                # if there's no points...
                else:
                    # find the centre of the cell
                    cell_centx = cellxmin+(pix/2)
                    # define a search radius
                    rad_minx = cell_centx-(pix*rad)
                    rad_maxx = cell_centx+(pix*rad)
                    # find the lidar points within the search radius
                    rad_points = np.where((rad_row_pts[:,0]>=rad_minx)&
                                          (rad_row_pts[:,0]<=rad_maxx))
                    rad_zs = rad_row_pts[rad_points]
                    # work out how many points fall within this
                    rad_num = rad_zs.shape[0]
                    # if no points within the radius, cell value = no data
                    if rad_num == 0:
                        cell_z = -999
                        null = null+1
                    # otherwise pass the points to be interpolated via idw
                    else:
                        # NOTE(review): the cell's min corner (cellxmin,
                        # cellymin), not its centre, is the interpolation
                        # target here — confirm this is intended.
                        cell_z = self.interpolate_idw(rad_points,
                                                      cellxmin,
                                                      cellymin,
                                                      rad_row_pts)
                        interpolated = interpolated+1
                self.gridded[row_ref,col_ref] = cell_z
                col_ref = col_ref+1
            row_ref = row_ref+1
        # stack z value into columns along row
    def distance_matrix(self,rad_points, column, row, flightline):
        """Return distances from each candidate point to the target cell.

        rad_points -- index array (from np.where) selecting candidates
        column, row -- x/y coordinate of the target cell's minimum corner
        flightline -- full point array the indices refer to
        """
        # slice the input points using the indices of points in the search radius
        points = flightline[rad_points]
        # make a 2D coordinate for the target cell
        cell = np.vstack((column,row)).T
        # ufuncs: pairwise x and y separations between points and the cell
        d0 = np.subtract.outer(points[:,0], cell[:,0])
        d1 = np.subtract.outer(points[:,1], cell[:,1])
        # return euclidean distance between each point and the cell
        return np.hypot(d0,d1)
def interpolate_idw(self,rad_points, column, row, flightline):
#make the distance matrix
d = self.distance_matrix(rad_points, column, row, flightline)
#slice the points
points = flightline[rad_points]
#define distance weights using the distance matrix
weights = 1.0/d
#divide and resassign weights using average
weights /= weights.sum(axis=0)
#matrix multiplication
cell_z = np.dot(weights.T, points[:,2])
return cell_ | z
class WriteImage():
def __init__(self,
| o |
nchaparr/Geospatial-Analysis-with-Python | 1138_08_03-flood-fill.py | Python | cc0-1.0 | 2,243 | 0.029871 | """
Crawls a terrain raster from a starting
point and "floods" everything at the same
or lower elevation by producing a mask
image of 1,0 values.
"""
import numpy as np
from linecache import getline
def floodFill(c, r, mask):
    """
    Non-recursive 4-way flood fill of a 1/0 mask array.

    Starting from (c=column, r=row), returns an int8 array with 1 in
    every mask cell of value 1 reachable from the start; all other
    cells are 0.  Cells in the last row or last column are never
    filled (matching the original boundary handling).
    """
    visited = set()          # cells already committed to the output
    pending = {(c, r)}       # frontier of cells still to examine
    last_col = mask.shape[1] - 1
    last_row = mask.shape[0] - 1
    # Output inundation array, same shape as the mask.
    flood = np.zeros_like(mask, dtype=np.int8)
    while pending:
        x, y = pending.pop()
        # Skip out-of-range cells and the final row/column.
        if x < 0 or y < 0 or x == last_col or y == last_row:
            continue
        if mask[y][x] == 1:
            flood[y][x] = 1
            visited.add((x, y))
            # Queue the 4-connected neighbours not yet committed.
            for neighbour in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                if neighbour not in visited:
                    pending.add(neighbour)
    return flood
source = "terrain.asc"
target = "flood.asc"
print "Opening image..."
img = np.loadtxt(source, skipr | ows=6)
print "Image opened"
a = np.where(img<70, 1,0)
print "Image masked"
# Parse the headr using a loop and
# the built-in linecache module
hdr = [getline(source, i) for i in range(1,7)]
values = [float(h.split(" ")[-1].strip()) for h in hdr]
cols,rows,lx,ly,cell,nd = values
xres = cell
yres = cell * -1
# Starting point for the
# flood inundation
sx = | 2582
sy = 2057
print "Beginning flood fill"
fld = floodFill(sx,sy, a)
print "Finished Flood fill"
header=""
for i in range(6):
header += hdr[i]
print "Saving grid"
# Open the output file, add the hdr, save the array
with open(target, "wb") as f:
f.write(header)
np.savetxt(f, fld, fmt="%1i")
print "Done!"
|
ric2b/Vivaldi-browser | chromium/chrome/test/enterprise/e2e/policy/allow_deleting_browser_history/allow_deleting_browser_history_webdriver_test.py | Python | bsd-3-clause | 1,991 | 0.005023 | # Copyright (c) 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from absl import app
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
import test_util
# Detect if history deletion is enabled or disabled and print the result.
# The way to check is:
# - visit chrome://history;
# - get the first history item;
# - check the checkbox. If history deletion is disabled, then the check
# box has attribute 'disabled';
# TODO(crbug.com/986444): move those helper methods into test_util.py once
def getElementFromShadowRoot(driver, element, selector):
  """Query `selector` inside `element`'s shadow root via injected JS.

  Returns None when `element` is None, otherwise whatever the driver's
  querySelector call yields.
  """
  if element is None:
    return None
  script = "return arguments[0].shadowRoot.querySelector(arguments[1])"
  return driver.execute_script(script, element, selector)
def main(argv):
driver = test_util.create_chrome_webdriver()
try:
driver.get('http://www.google.com')
driver.get('chrome://history')
# wait for page to be loaded
wait = WebDriverWait(driver, 10)
wait.until(
expected_conditions.visibility_of_element_located((By.TAG_NAME,
'history-app')))
history_app = driver.find_element_by_css_selector("history-app")
histroy_list = getElementFromShadowRoot(driver, history_app, "history-list")
# get the checkbox of the first history item
histroy_item = getElementFromShadowRo | ot(driver, hi | stroy_list,
'history-item')
checkbox = getElementFromShadowRoot(driver, histroy_item,
'#main-container cr-checkbox')
disabled = checkbox.get_attribute('disabled')
if disabled == 'true':
print('DISABLED')
else:
print('ENABLED')
finally:
driver.quit()
if __name__ == '__main__':
app.run(main)
|
acortelyou/drone-tools | dji2papywizard.py | Python | apache-2.0 | 5,866 | 0.003239 | #!/usr/bin/env python
"""
Copyright 2016 Alex Cortelyou
Licensed under the Apache License, Version 2.0 (the "License");
you | may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the | License.
"""
import os
import argparse
import xml.etree.ElementTree as ET
from datetime import datetime
from glob import glob
import exifread
# pylint: disable=C0103
# command-line arguments
parser = argparse.ArgumentParser(description='Extracts DJI image metadata into a Papywizard panohead data file for use with stitching software',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file', metavar='INPUT', nargs='+',
                    help='image files to process')
parser.add_argument('-t', dest='title', default='Untitled Panorama',
                    help='title for panohead file')
parser.add_argument('-c', dest='comment',
                    default='Generated by ' + os.path.basename(__file__),
                    help='comment for panohead file')
parser.add_argument('-o', dest='output', default='pano.xml',
                    help='filename for output')
parser.add_argument('--no-bracket', dest='no_bracket', action='store_true',
                    help='ignore hdr bracketing information')
args = parser.parse_args()

# constants: date formats and the EXIF/XMP tag names written by DJI
djiDateFormat = "%Y:%m:%d %H:%M:%S"
panDateFormat = "%Y-%m-%d_%Hh%Mm%Ss"
tagTime = 'EXIF DateTimeOriginal'
tagBias = 'EXIF ExposureBiasValue'
tagFocal = 'EXIF FocalLengthIn35mmFilm'
tagFile = 'Image ImageDescription'
tagNotes = 'Image ApplicationNotes'
tagGimbalPitch = '{http://www.dji.com/drone-dji/1.0/}GimbalPitchDegree'
tagGimbalRoll = '{http://www.dji.com/drone-dji/1.0/}GimbalRollDegree'
tagGimbalYaw = '{http://www.dji.com/drone-dji/1.0/}GimbalYawDegree'

# accumulator state gathered while scanning the input images
shots = []
brackets = {}
startTime = None
endTime = None
focal = None

# process files (each positional argument may be a glob pattern)
for a in args.file:
    for f in glob(a):
        # extract EXIF metadata plus the DJI XMP attributes embedded in
        # the ApplicationNotes tag
        handle = open(f, 'rb')
        exif = exifread.process_file(handle, debug=True)
        exif.update(ET.fromstring(exif[tagNotes].printable)[0][0].attrib)
        handle.close()
        # track the earliest and latest capture times
        time = datetime.strptime(exif[tagTime].printable, djiDateFormat)
        if not startTime or startTime > time:
            startTime = time
        if not endTime or endTime < time:
            endTime = time
        # assign each distinct exposure bias a 1-based bracket number
        bias = str(exif[tagBias])
        if bias not in brackets.keys():
            brackets[bias] = len(brackets) + 1
        # remember the focal length of the first image seen
        if not focal:
            focal = exif[tagFocal]
        # add shot to list
        shots.append({
            "time": time,
            "bracket": bias,
            "file": str(exif[tagFile]),
            "roll": str(exif[tagGimbalRoll]),
            "yaw": str(exif[tagGimbalYaw]),
            "pitch": str(exif[tagGimbalPitch]),
        })

# quit if nothing to output
if len(shots) == 0:
    print "no shots found"
    quit()

# sort the output by order taken (image description sorts by capture order)
shots = sorted(shots, key=lambda k: k['file'])

# create the Papywizard panohead document header
root = ET.Element("papywizard", version="c")
header = ET.SubElement(root, "header")
general = ET.SubElement(header, "general")
ET.SubElement(general, "title").text = args.title
ET.SubElement(general, "gps").text = ''
ET.SubElement(general, "comment").text = args.comment
shooting = ET.SubElement(header, "shooting", mode="mosaic")
ET.SubElement(shooting, "headOrientation").text = "up"
ET.SubElement(shooting, "cameraOrientation").text = "landscape"
ET.SubElement(shooting, "stabilizationDelay").text = "1"
ET.SubElement(shooting, "counter").text = "1"
ET.SubElement(shooting, "startTime").text = startTime.strftime(panDateFormat)
ET.SubElement(shooting, "endTime").text = endTime.strftime(panDateFormat)
camera = ET.SubElement(header, "camera")
ET.SubElement(camera, "timeValue").text = "1"
# with --no-bracket every shot is reported as a single exposure
ET.SubElement(camera, "bracketing", nbPicts=str(len(brackets)) if not args.no_bracket else '1')
ET.SubElement(camera, "sensor", coef="1.0", ratio="4:3")
lens = ET.SubElement(header, "lens", type='rectilinear')
ET.SubElement(lens, "focal").text = str(focal)
mosaic = ET.SubElement(header, "mosaic")
ET.SubElement(mosaic, "nbPicts", pitch="1", yaw="1")
ET.SubElement(mosaic, "overlap", minimum="0.5", pitch="1.0", yaw="0.5")
shoot = ET.SubElement(root, "shoot")

# add each shot to the document with its gimbal orientation
count = 0
for shot in shots:
    count += 1
    bracket = str(brackets[shot['bracket']]) if not args.no_bracket else '1'
    pict = ET.SubElement(shoot, "pict", id=str(count), bracket=bracket)
    ET.SubElement(pict, "time").text = shot['time'].strftime(panDateFormat)
    ET.SubElement(pict, "position", pitch=shot['pitch'], roll=shot['roll'], yaw=shot['yaw'])

# write xml to file
ET.ElementTree(root).write(args.output, encoding='utf8', method='xml')

# success
print count, 'shots processed'
"""
'EXIF DateTimeOriginal'
'EXIF ExposureMode'
'EXIF ExposureIndex'
'EXIF ExposureProgram'
'EXIF ExposureTime'
'EXIF MeteringMode'
'EXIF ShutterSpeedValue'
'GPS GPSAltitude'
'GPS GPSLatitude'
'GPS GPSLongitude'
'{http://ns.adobe.com/tiff/1.0/}Make'
'{http://ns.adobe.com/tiff/1.0/}Model'
'{http://www.dji.com/drone-dji/1.0/}GimbalRollDegree'
'{http://www.dji.com/drone-dji/1.0/}FlightRollDegree'
'{http://www.dji.com/drone-dji/1.0/}GimbalPitchDegree'
'{http://www.dji.com/drone-dji/1.0/}FlightPitchDegree'
'{http://www.dji.com/drone-dji/1.0/}GimbalYawDegree'
'{http://www.dji.com/drone-dji/1.0/}FlightYawDegree'
"""
|
sean666888/The-Bomb | s.py | Python | mit | 430 | 0.002326 | #!/usr/bin/env python
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = | s.accept()
print 'Connection | address:', addr
while 1:
data = conn.recv(BUFFER_SIZE)
if not data: break
print "received data:", data
conn.send(data) # echo
conn.close()
|
smallyear/linuxLearn | salt/salt/key.py | Python | apache-2.0 | 48,988 | 0.000367 | # -*- coding: utf-8 -*-
'''
The Salt Key backend API and interface used by the CLI. The Key class can be
used to manage salt keys directly without interfacing with the CLI.
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import copy
import json
import stat
import shutil
import fnmatch
import hashlib
import logging
# Import salt libs
import salt.crypt
import salt.utils
import salt.exceptions
import salt.utils.event
import salt.daemons.masterapi
from salt.utils import kinds
from salt.utils.event import tagify
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import input
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import msgpack
except ImportError:
pass
log = logging.getLogger(__name__)
def get_key(opts):
    '''
    Return the key backend that matches the configured transport:
    a :class:`Key` for zeromq/tcp, a :class:`RaetKey` otherwise.
    '''
    if opts['transport'] not in ('zeromq', 'tcp'):
        return RaetKey(opts)
    return Key(opts)
class KeyCLI(object):
'''
Manage key CLI operations
'''
def __init__(self, opts):
self.opts = opts
if self.opts['transport'] in ('zeromq', 'tcp'):
self.key = Key(opts)
else:
self.key = RaetKey(opts)
def list_status(self, status):
'''
Print out the keys under a named status
:param str status: A string indicating which set of keys to return
'''
keys = self.key.list_keys()
if status.startswith('acc'):
salt.output.display_output(
{self.key.ACC: keys[self.key.ACC]},
'key',
self.opts
)
elif status.startswith(('pre', 'un')):
salt.output.display_output(
{self.key.PEND: keys[self.key.PEND]},
'key',
self.opts
)
elif status.startswith('rej'):
salt.output.display_output(
{self.key.REJ: keys[self.key.REJ]},
'key',
sel | f.opts
)
elif status.startswith('den'):
if self.key.DEN:
salt.output.display_output(
{self.key.DEN: keys[self.key.DEN]},
'key',
self.opts
)
elif status.startswith('all'):
self.list_all()
def list_all(self):
'''
Print out all keys
'''
| salt.output.display_output(
self.key.list_keys(),
'key',
self.opts)
def accept(self, match, include_rejected=False):
'''
Accept the keys matched
:param str match: A string to match against. i.e. 'web*'
:param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
'''
def _print_accepted(matches, after_match):
if self.key.ACC in after_match:
accepted = sorted(
set(after_match[self.key.ACC]).difference(
set(matches.get(self.key.ACC, []))
)
)
for key in accepted:
print('Key for minion {0} accepted.'.format(key))
matches = self.key.name_match(match)
keys = {}
if self.key.PEND in matches:
keys[self.key.PEND] = matches[self.key.PEND]
if include_rejected and bool(matches.get(self.key.REJ)):
keys[self.key.REJ] = matches[self.key.REJ]
if not keys:
msg = (
'The key glob {0!r} does not match any unaccepted {1}keys.'
.format(match, 'or rejected ' if include_rejected else '')
)
print(msg)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
try:
veri = input('Proceed? [n/Y] ')
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
if not veri or veri.lower().startswith('y'):
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected
)
)
else:
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected
)
)
def accept_all(self, include_rejected=False):
'''
Accept all keys
:param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
'''
self.accept('*', include_rejected=include_rejected)
def delete(self, match):
'''
Delete the matched keys
:param str match: A string to match against. i.e. 'web*'
'''
def _print_deleted(matches, after_match):
deleted = []
for keydir in (self.key.ACC, self.key.PEND, self.key.REJ):
deleted.extend(list(
set(matches.get(keydir, [])).difference(
set(after_match.get(keydir, []))
)
))
for key in sorted(deleted):
print('Key for minion {0} deleted.'.format(key))
matches = self.key.name_match(match)
if not matches:
print(
'The key glob {0!r} does not match any accepted, unaccepted '
'or rejected keys.'.format(match)
)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be deleted:')
salt.output.display_output(
matches,
'key',
self.opts)
try:
veri = input('Proceed? [N/y] ')
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
if veri.lower().startswith('y'):
_print_deleted(
matches,
self.key.delete_key(match_dict=matches)
)
else:
print('Deleting the following keys:')
salt.output.display_output(
matches,
'key',
self.opts)
_print_deleted(
matches,
self.key.delete_key(match_dict=matches)
)
def delete_all(self):
'''
Delete all keys
'''
self.delete('*')
def reject(self, match, include_accepted=False):
'''
Reject the matched keys
:param str match: A string to match against. i.e. 'web*'
:param bool include_accepted: Whether or not to accept a matched key that was formerly accepted
'''
def _print_rejected(matches, after_match):
if self.key.REJ in after_match:
rejected = sorted(
set(after_match[self.key.REJ]).difference(
set(matches.get(self.key.REJ, []))
)
)
for key in rejected:
print('Key for minion {0} rejected.'.format(key))
matches = self.key.name_match(match)
keys = {}
if self.key.PEND in matches:
keys[self.key.PEND] = matches[self.key.PEND]
if include_accepted and bool(matches.get(self.key.ACC)):
keys[self.key.ACC] = matches[self.key.ACC]
if not keys:
msg = 'The key glob {0!r} does not match any {1} keys.'.format(
match,
'accepted or |
wcong/ants | ants/bootstrap/bootstrap.py | Python | bsd-3-clause | 1,189 | 0.003364 | #!/usr/bin/env python
# encoding=utf8
__author__ = 'wcong'
import sys
from twisted.internet import reactor
from ants.node import node
from ants | import settings
from conf import settings as project_settings
import logging
import argparse
'''
what we do
start the node
let the node do what he need to do
'''
# Command-line options: both ports are optional overrides for the values
# declared in the project settings module.
parser = argparse.ArgumentParser(description='ants -- scale crawler')
parser.add_argument('-tcp_port', help='transport port', type=int, dest='tcp_port')
parser.add_argument('-http_port', help='http port', type=int, dest='http_port')
class Bootstrap():
    """Wire project settings together and boot a crawler node."""

    def __init__(self, args):
        logging.info("do not panic,it is shipping")
        self.setting = settings.Settings()
        self.setting.setmodule(project_settings)
        if args:
            parsed = parser.parse_args(args)
            # Command-line ports win over the settings-module defaults.
            if parsed.tcp_port:
                self.setting.set('TRANSPORT_PORT', parsed.tcp_port)
            if parsed.http_port:
                self.setting.set('HTTP_PORT', parsed.http_port)

    def start(self):
        """Create the node manager (which initialises almost everything),
        start it, and hand control to the twisted reactor."""
        node.NodeManager(self.setting).start()
        reactor.run()
yousoff92/data-collector | python/jobstreet/JobStreetExtractor.py | Python | mit | 8,609 | 0.006621 | """
Python source code to extract listing from mudah.my
"""
from functools import total_ordering
from job | street.config import General, Authentication, Location
import pandas as pd
import requests
i | mport webbrowser as web
import urllib.parse as urlparse
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import dateutil.relativedelta as rd
import math
import mechanicalsoup
import logging as logger
import os
# Clears the console; 'cls' only exists on Windows (no-op/error elsewhere).
clear = lambda: os.system('cls') #on Windows System
# TODO - Advance criteria
# Configure root logging once at import time.
logger.basicConfig(level=logger.DEBUG,
                   format='%(asctime)s %(levelname)-8s %(message)s',
                   datefmt='%a, %d %b %Y %H:%M:%S',
                   )
# Silence urllib3's chatty DEBUG output coming from requests/mechanicalsoup.
urllib3_logger = logger.getLogger('urllib3')
urllib3_logger.setLevel(logger.CRITICAL)
class JobStreetExtractor:
"""
Extractor for getting job dataset from jobstreet malaysia
"""
__chrome_path__ = General.CHROME_PATH.value
__base_url__ = General.JOBSTREET_URL.value
# Mapping values to required Jobstreet parameter
# https://www.jobstreet.com.my/en/job-search/job-vacancy.php?key=Software&area=2&location=51200&position=3%2C4&job-type=5&experience-min=03&experience-max=-1&salary=6%2C000
# &salary-max=7%2C000&classified=1&salary-option=on&job-posted=0&src=1&ojs=4
# key
# area
# location
# position
# job-type : 5,10,16
# experience-min
# experience-max
# salary
# salary-max
# classified
# salary-option
# job-posted
# src
# ojs
# sort
# order
# pg
def __authenticate__(self):
login_url = Authentication.JOBSTREET_LOGIN_URL.value
browser = mechanicalsoup.StatefulBrowser()
browser.open(login_url)
browser.select_form('#login')
browser['login_id'] = Authentication.JOBSTREET_USERNAME.value
browser['password'] = Authentication.JOBSTREET_PASSWORD.value
browser.submit_selected()
return browser
def __scraping__(self, keyword=None, location=None, minSalary=None, maxSalary=None, minExperience=None, maxExperience=None):
# login
browser = self.__authenticate__(self)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
# construct filter criteria
filter_criteria = {}
if keyword is not None:
filter_criteria.update({'key': keyword })
if location is not None:
filter_criteria.update({'location' : location.value })
if minSalary is not None:
filter_criteria.update({'salary' : minSalary })
if maxSalary is not None:
filter_criteria.update({'salary-max' : maxSalary })
if minExperience is not None:
filter_criteria.update({'experience-min' : minExperience })
if maxExperience is not None:
filter_criteria.update({'experience-max' : maxExperience })
# filter_criteria = {
# 'key':'Software',
# 'area': '2',
# 'location':'51200',
# 'position':'3,4',
# 'job-type':'5',
# 'salary':'6000',
# 'salary-max':'7000',
# 'classified':'1',
# 'salary-option':'on',
# 'job-posted':'0',
# 'src':'1',
# 'ojs':'4',
# }
page_url = self.__base_url__
url_parts = list(urlparse.urlparse(page_url))
final_df = pd.DataFrame()
# test to get number of pages
page_criteria = {'pg': str(1)}
filter_criteria.update(page_criteria)
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
response = browser.open(page_url)
# get total lists
total_list = BeautifulSoup(response.content, "html.parser").find("span", class_="pagination-result-count").string
pages = 1
if total_list is not None:
logger.info(str(total_list))
total_list = total_list[total_list.find("of")+len("of"):total_list.rfind("jobs")]
total_list = total_list.strip().replace(',', '')
logger.info("Attempt to parse " + str(total_list) + " jobs at most")
pages = math.ceil(int(total_list) / 40) # 40 is item per page
# To prevent over-scraping
if General.PAGE_THRESHOLD.value != -1 and General.PAGE_THRESHOLD.value < pages :
pages = General.PAGE_THRESHOLD.value
for page in range(1, pages + 1):
job_titles = []
job_urls = []
com_names = []
com_urls = []
locations = []
salaries = []
descriptions = []
page_criteria = {'pg': str(page)}
filter_criteria.update(page_criteria)
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
logger.info("Processing Page " + str(page) + " : " + page_url)
response = browser.open(page_url)
if response.status_code != 200:
raise ConnectionError("Cannot connect to " + page_url)
# Get each job card
raw_listing = BeautifulSoup(response.content, "html.parser").find_all("div",
{
'id' : lambda value: value and value.startswith("job_ad")
})
# For each job card, get job informations
for element in raw_listing:
# Get job general information
job_el = element.find("a", {'class' : lambda value: value and value.startswith("position-title-link")})
job_titles.append(job_el.get('data-job-title'))
job_urls.append(job_el.get('href'))
# Get company information
com_el = element.find("a", {'id' : lambda value: value and value.startswith("company_name")})
if com_el is None:
com_el = element.find("span", {'id': lambda value: value and value.startswith("company_name")})
com_names.append(com_el.string)
com_urls.append(None)
else:
com_names.append(com_el.find('span').string)
com_urls.append(com_el.get('href'))
# Get location information
loc_el = element.find("li", {'class' : 'job-location'})
locations.append(loc_el.get('title'))
sal_el = element.find("li", {'id' : 'job_salary'})
# Get salary information
if sal_el:
font = sal_el.find("font")
if font:
salaries.append(sal_el.find("font").string)
else:
salaries.append(None)
# Get job description
des_el = element.find("ul", {'id' : lambda value: value and value.startswith("job_desc_detail")}).find("li",recursive=False)
if des_el:
descriptions.append(des_el.string)
else:
descriptions.append(None)
df = pd.concat([pd.Series(job_titles),
pd.Series(job_urls),
pd.Series(com_names),
pd.Series(com_urls),
pd.Series(locations),
pd.Series(salaries),
pd.Series(descriptions),
], axis=1)
df.columns = [["Job Titles", "Job URLS", "Company Name", "Company URLS", "Location", "Salaries", "Descriptions"]]
final_df = final_df.append(df, ignore_index=True)
final_df.columns = final_df.columns.get_level_values(0)
logger.info("Parsing has ended...")
return final_df
@classmethod
def find_jobs(cls, keyword=None, location=None, minSalary=None, maxSalary=N |
legacysurvey/pipeline | py/legacyzpts/run-calib.py | Python | gpl-2.0 | 4,720 | 0.004237 | #! /usr/bin/env python
"""This script runs calibration pre-processing steps including sky and PSF models.
"""
from __future__ import print_function
from astrometry.util.fits import merge_tables
from legacypipe.survey import run_calibs, LegacySurveyData
def main():
    """Main program.

    Parses the command line, resolves the requested CCDs and runs the
    calibration steps (PsfEx, sky models, optional SourceExtractor) for
    each one, either serially or through a multiproc pool.

    :returns: 0 on success (used as the process exit status).
    """
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--force', action='store_true',
                        help='Run calib processes even if files already exist?')
    parser.add_argument('--survey-dir', help='Override LEGACY_SURVEY_DIR')
    parser.add_argument('--expnum', type=str, help='Cut to a single or set of exposures; comma-separated list')
    parser.add_argument('--extname', '--ccdname', help='Cut to a single extension/CCD name')
    parser.add_argument('--no-psf', dest='psfex', action='store_false',
                        help='Do not compute PsfEx calibs')
    parser.add_argument('--no-sky', dest='sky', action='store_false',
                        help='Do not compute sky models')
    parser.add_argument('--run-se', action='store_true', help='Run SourceExtractor')
    parser.add_argument('--no-splinesky', dest='splinesky', default=True, action='store_false',
                        help='Use constant, not splinesky')
    parser.add_argument('--threads', type=int, help='Run multi-threaded', default=None)
    parser.add_argument('--continue', dest='cont', default=False, action='store_true',
                        help='Continue even if one file fails?')
    parser.add_argument('--plot-base', help='Make plots with this base filename')
    parser.add_argument('--blob-mask-dir', type=str, default=None,
                        help='The base directory to search for blob masks during sky model construction')
    parser.add_argument('-v', '--verbose', dest='verbose', action='count',
                        default=0, help='Make more verbose')

    parser.add_argument('args', nargs=argparse.REMAINDER)
    opt = parser.parse_args()

    import logging
    # BUGFIX: sys was previously only imported under the __main__ guard, so
    # calling main() from another module raised NameError at basicConfig().
    import sys
    if opt.verbose:
        lvl = logging.DEBUG
    else:
        lvl = logging.INFO
    logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
    # tractor logging is *soooo* chatty
    logging.getLogger('tractor.engine').setLevel(lvl + 10)

    survey = LegacySurveyData(survey_dir=opt.survey_dir)
    T = None
    if len(opt.args) == 0:
        if opt.expnum is not None:
            # Comma-separated exposure numbers -> merged table of CCDs.
            expnums = set([int(e) for e in opt.expnum.split(',')])
            T = merge_tables([survey.find_ccds(expnum=e, ccdname=opt.extname)
                              for e in expnums])
            print('Cut to', len(T), 'with expnum in', expnums, 'and extname', opt.extname)
            opt.args = range(len(T))
        else:
            parser.print_help()
            return 0

    ps = None
    if opt.plot_base is not None:
        from astrometry.util.plotutils import PlotSequence
        ps = PlotSequence(opt.plot_base)

    survey_blob_mask = None
    if opt.blob_mask_dir is not None:
        survey_blob_mask = LegacySurveyData(opt.blob_mask_dir)

    args = []
    for a in opt.args:
        # Check for "expnum-ccdname" format.
        if '-' in str(a):
            words = a.split('-')
            assert(len(words) == 2)
            expnum = int(words[0])
            ccdname = words[1]

            T = survey.find_ccds(expnum=expnum, ccdname=ccdname)
            if len(T) != 1:
                print('Found', len(T), 'CCDs for expnum', expnum, 'CCDname', ccdname)
                print('WARNING: skipping this expnum,ccdname')
                continue
            t = T[0]
        else:
            # Otherwise the argument is an index into the CCD table T.
            i = int(a)
            print('Index', i)
            t = T[i]

        im = survey.get_image_object(t)
        print('Running', im.name)

        kwargs = dict(psfex=opt.psfex, sky=opt.sky, ps=ps, survey=survey,
                      survey_blob_mask=survey_blob_mask)
        if opt.force:
            kwargs.update(force=True)
        if opt.run_se:
            kwargs.update(se=True)
        if opt.splinesky:
            kwargs.update(splinesky=True)
        if opt.cont:
            kwargs.update(noraise=True)

        if opt.threads:
            # Defer: the whole batch goes through the multiproc pool below.
            args.append((im, kwargs))
        else:
            run_calibs((im, kwargs))

    if opt.threads:
        from astrometry.util.multiproc import multiproc
        mp = multiproc(opt.threads)
        mp.map(time_run_calibs, args)

    return 0
def time_run_calibs(*args):
    """Run run_calibs, print how long it took, and flush stdio so the
    timing line is visible even inside a multiprocessing worker."""
    from astrometry.util.ttime import Time
    import sys
    start = Time()
    result = run_calibs(*args)
    print('Time run_calibs:', Time() - start)
    sys.stdout.flush()
    sys.stderr.flush()
    return result
if __name__ == '__main__':
    import sys
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
saurabh6790/frappe | frappe/website/doctype/web_page/test_web_page.py | Python | mit | 2,204 | 0.03902 | from __future__ import unicode_literals
import unittest
import frappe
from frappe.website.router import resolve_route
import frappe.website.render
from frappe.utils import set_request
test_records = frappe.get_test_records('Web Page')
def get_page_content(route):
    """Render *route* through the website engine and return the body text."""
    set_request(method='GET', path=route)
    rendered = frappe.website.render.render()
    return frappe.as_unicode(rendered.data)
class TestWebPage(unittest.TestCase):
    """Integration tests for Web Page routing, templating and content types."""

    def setUp(self):
        # Start from a clean slate so routes from earlier runs cannot clash.
        frappe.db.sql("delete from `tabWeb Page`")
        for t in test_records:
            frappe.get_doc(t).insert()

    def test_check_sitemap(self):
        # resolve_route raises if a route cannot be resolved, so simply
        # calling it is the assertion.
        resolve_route("test-web-page-1")
        resolve_route("test-web-page-1/test-web-page-2")
        resolve_route("test-web-page-1/test-web-page-3")

    def test_base_template(self):
        content = get_page_content('/_test/_test_custom_base.html')
        # assert the text in base template is rendered
        self.assertIn('<h1>This is for testing</h1>', frappe.as_unicode(content))
        # assert template block rendered
        self.assertIn('<p>Test content</p>', frappe.as_unicode(content))

    def test_content_type(self):
        """Switching content_type changes which source field is rendered."""
        web_page = frappe.get_doc(dict(
            doctype='Web Page',
            title='Test Content Type',
            published=1,
            content_type='Rich Text',
            main_section='rich text',
            main_section_md='# h1\nmarkdown content',
            main_section_html='<div>html content</div>'
        )).insert()
        try:
            self.assertIn('rich text', get_page_content('/test-content-type'))

            web_page.content_type = 'Markdown'
            web_page.save()
            self.assertIn('markdown content', get_page_content('/test-content-type'))

            web_page.content_type = 'HTML'
            web_page.save()
            self.assertIn('html content', get_page_content('/test-content-type'))
        finally:
            # Clean up even when an assertion fails (matches
            # test_dynamic_route's try/finally style).
            web_page.delete()

    def test_dynamic_route(self):
        web_page = frappe.get_doc(dict(
            doctype='Web Page',
            title='Test Dynamic Route',
            published=1,
            dynamic_route=1,
            route='/doctype-view/<doctype>',
            content_type='HTML',
            # NOTE(review): 'dymamic_template' looks like a typo for
            # 'dynamic_template' -- confirm against the doctype schema
            # before renaming, since changing the key alters behavior.
            dymamic_template=1,
            main_section_html='<div>{{ frappe.form_dict.doctype }}</div>'
        )).insert()
        try:
            content = get_page_content('/doctype-view/DocField')
            self.assertIn('<div>DocField</div>', content)
        finally:
            web_page.delete()
|
AGarrow/larvae | larvae/tests/test_membership.py | Python | bsd-3-clause | 694 | 0 | from ..member | ship import Membership
def test_basic_invalid_membership():
    """ Make sure that we can create an invalid membership and break """
    # A membership with both a person and an organization id validates.
    membership = Membership("person_id", "orga_id")
    membership.validate()
    # Dropping the person id should make validate() raise ValueError; if it
    # returns instead, the assert (comparing against a dummy value) fails.
    membership.person_id = None
    try:
        assert "nonsense" == membership.validate()
    except ValueError:
        pass
def test_membership_contact_details():
    """ Ensure contact details work for memberships """
    # Renamed: this function previously reused the name
    # test_basic_invalid_membership, which shadowed the test defined above
    # so that test was never collected or run.
    membership = Membership("person_id", "orga_id")
    membership.validate()
    membership.add_contact_detail(type='foo',
                                  value='bar',
                                  note='baz')
    membership.validate()
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/_network_management_client_enums.py | Python | mit | 27,671 | 0.007192 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
# NOTE: AutoRest-generated enum declarations -- regenerating the client
# will overwrite any hand edits in this section.
class Access(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Indicates whether the traffic is allowed or denied.
    """
    ALLOW = "Allow"
    DENY = "Deny"
class ApplicationGatewayBackendHealthServerHealth(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Health of backend server.
    """
    UNKNOWN = "Unknown"
    UP = "Up"
    DOWN = "Down"
    PARTIAL = "Partial"
    DRAINING = "Draining"
class ApplicationGatewayCookieBasedAffinity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Cookie based affinity.
    """
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class ApplicationGatewayCustomErrorStatusCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status code of the application gateway customer error.
    """
    HTTP_STATUS403 = "HttpStatus403"
    HTTP_STATUS502 = "HttpStatus502"
class ApplicationGatewayFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Web application firewall mode.
    """
    DETECTION = "Detection"
    PREVENTION = "Prevention"
class ApplicationGatewayOperationalState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Operational state of the application gateway resource.
    """
    STOPPED = "Stopped"
    STARTING = "Starting"
    RUNNING = "Running"
    STOPPING = "Stopping"
class ApplicationGatewayProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol used to communicate with the backend. Possible values are 'Http' and 'Https'.
    """
    HTTP = "Http"
    HTTPS = "Https"
class ApplicationGatewayRedirectType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Redirect type of an application gateway redirect configuration."""
    PERMANENT = "Permanent"
    FOUND = "Found"
    SEE_OTHER = "SeeOther"
    TEMPORARY = "Temporary"
class ApplicationGatewayRequestRoutingRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Rule type.
    """
    BASIC = "Basic"
    PATH_BASED_ROUTING = "PathBasedRouting"
class ApplicationGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Name of an application gateway SKU.
    """
    STANDARD_SMALL = "Standard_Small"
    STANDARD_MEDIUM = "Standard_Medium"
    STANDARD_LARGE = "Standard_Large"
    WAF_MEDIUM = "WAF_Medium"
    WAF_LARGE = "WAF_Large"
    STANDARD_V2 = "Standard_v2"
    WAF_V2 = "WAF_v2"
# NOTE: AutoRest-generated enum declarations -- regenerating the client
# will overwrite any hand edits in this section.
class ApplicationGatewaySslCipherSuite(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Ssl cipher suites enums.
    """
    TLS_ECDHE_RSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"
    TLS_ECDHE_RSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
    TLS_ECDHE_RSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
    TLS_ECDHE_RSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
    TLS_DHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"
    TLS_DHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
    TLS_DHE_RSA_WITH_AES256_CBC_SHA = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"
    TLS_DHE_RSA_WITH_AES128_CBC_SHA = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
    TLS_RSA_WITH_AES256_GCM_SHA384 = "TLS_RSA_WITH_AES_256_GCM_SHA384"
    TLS_RSA_WITH_AES128_GCM_SHA256 = "TLS_RSA_WITH_AES_128_GCM_SHA256"
    TLS_RSA_WITH_AES256_CBC_SHA256 = "TLS_RSA_WITH_AES_256_CBC_SHA256"
    TLS_RSA_WITH_AES128_CBC_SHA256 = "TLS_RSA_WITH_AES_128_CBC_SHA256"
    TLS_RSA_WITH_AES256_CBC_SHA = "TLS_RSA_WITH_AES_256_CBC_SHA"
    TLS_RSA_WITH_AES128_CBC_SHA = "TLS_RSA_WITH_AES_128_CBC_SHA"
    TLS_ECDHE_ECDSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
    TLS_ECDHE_ECDSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
    TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"
    TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
    TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
    TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
    TLS_DHE_DSS_WITH_AES256_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"
    TLS_DHE_DSS_WITH_AES128_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"
    TLS_DHE_DSS_WITH_AES256_CBC_SHA = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"
    TLS_DHE_DSS_WITH_AES128_CBC_SHA = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"
    TLS_RSA_WITH3_DES_EDE_CBC_SHA = "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
    TLS_DHE_DSS_WITH3_DES_EDE_CBC_SHA = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"
    TLS_ECDHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
    TLS_ECDHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
class ApplicationGatewaySslPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Ssl predefined policy name enums.
    """
    APP_GW_SSL_POLICY20150501 = "AppGwSslPolicy20150501"
    APP_GW_SSL_POLICY20170401 = "AppGwSslPolicy20170401"
    APP_GW_SSL_POLICY20170401_S = "AppGwSslPolicy20170401S"
class ApplicationGatewaySslPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of Ssl Policy
    """
    PREDEFINED = "Predefined"
    CUSTOM = "Custom"
class ApplicationGatewaySslProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Ssl protocol enums.
    """
    TL_SV1_0 = "TLSv1_0"
    TL_SV1_1 = "TLSv1_1"
    TL_SV1_2 = "TLSv1_2"
class ApplicationGatewayTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Tier of an application gateway.
    """
    STANDARD = "Standard"
    WAF = "WAF"
    STANDARD_V2 = "Standard_v2"
    WAF_V2 = "WAF_v2"
# NOTE: AutoRest-generated enum declarations -- regenerating the client
# will overwrite any hand edits in this section.
class AssociationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The association type of the child resource to the parent resource.
    """
    ASSOCIATED = "Associated"
    CONTAINS = "Contains"
class AuthenticationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """VPN client Authentication Method. Possible values are: 'EAPTLS' and 'EAPMSCHAPv2'.
    """
    EAPTLS = "EAPTLS"
    EAPMSCHA_PV2 = "EAPMSCHAPv2"
class AuthorizationUseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
    """
    AVAILABLE = "Available"
    IN_USE = "InUse"
class AzureFirewallApplicationRuleProtocolType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol type of a Application Rule resource
    """
    HTTP = "Http"
    HTTPS = "Https"
class AzureFirewallNatRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The action type of a NAT rule collection
    """
    SNAT = "Snat"
    DNAT = "Dnat"
class AzureFirewallNetworkRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol of a Network Rule resource
    """
    TCP = "TCP"
    UDP = "UDP"
    ANY = "Any"
    ICMP = "ICMP"
class AzureFirewallRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The action type of a rule collection
    """
    ALLOW = "Allow"
    DENY = "Deny"
class BgpPeerState(with_metaclass(_CaseInsensitiv |
noironetworks/heat | heat/tests/engine/tools.py | Python | apache-2.0 | 11,355 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
wp_template = u'''
heat_template_version: 2014-10-16
description: WordPress
parameters:
KeyName:
description: KeyName
type: string
default: test\u2042
resources:
WebServer:
type: AWS::EC2::Instance
properties:
ImageId: F17-x86_64-gold
InstanceType: m1.large
KeyName: test
UserData: wordpress
'''
string_template_five = '''
heat_template_version: 2013-05-23
description: Random String templates
parameters:
salt:
type: string
default: "quickbrownfox"
resources:
A:
type: OS::Heat::RandomString
properties:
salt: {get_param: salt}
B:
type: OS::Heat::RandomString
properties:
salt: {get_param: salt}
C:
type: OS::Heat::RandomString
depends_on: [A, B]
properties:
salt: {get_attr: [A, value]}
D:
type: OS::Heat::RandomString
depends_on: C
properties:
salt: {get_param: salt}
E:
type: OS::Heat::RandomString
depends_on: C
properties:
salt: {get_param: salt}
'''
string_template_five_update = '''
heat_template_version: 2013-05-23
description: Random String templates
parameters:
salt:
type: string
default: "quickbrownfox123"
resources:
A:
type: OS::Heat::RandomString
properties:
salt: {get_param: salt}
B:
type: OS::Heat::RandomString
properties:
salt: {get_param: salt}
F:
type: OS::Heat::RandomString
depends_on: [A, B]
properties:
salt: {get_param: salt}
G:
type: OS::Heat::RandomString
depends_on: F
properties:
salt: {get_param: salt}
H:
type: OS::Heat::RandomString
depends_on: F
properties:
salt: {get_param: salt}
'''
attr_cache_template = '''
heat_template_version: 2016-04-08
resources:
A:
type: ResourceWithComplexAttributesType
B:
type: OS::Heat::RandomString
properties:
salt: {get_attr: [A, flat_dict, key2]}
C:
type: OS::Heat::RandomString
depends_on: [A, B]
properties:
salt: {get_attr: [A, nested_dict, dict, a]}
D:
type: OS::Heat::RandomString
depends_on: C
properties:
salt: {get_attr: [A, nested_dict, dict, b]}
E:
type: OS::Heat::RandomString
depends_on: C
properties:
salt: {get_attr: [A, flat_dict, key3]}
'''
def get_stack(stack_name, ctx, template=None, with_params=True,
              convergence=False, **kwargs):
    """Build a ``parser.Stack`` for tests with a dummy thread-group manager.

    When *template* is None the module-level WordPress template is parsed;
    *with_params* then supplies the ``KeyName`` parameter via an
    ``environment.Environment``. Extra keyword arguments are forwarded to
    ``parser.Stack``.
    """
    if template is None:
        t = template_format.parse(wp_template)
        if with_params:
            # Repaired dataset garbling: "environment.Envi | ronment".
            env = environment.Environment({'KeyName': 'test'})
            tmpl = templatem.Template(t, env=env)
        else:
            tmpl = templatem.Template(t)
    else:
        t = template_format.parse(template)
        tmpl = templatem.Template(t)
    stack = parser.Stack(ctx, stack_name, tmpl, convergence=convergence,
                         **kwargs)
    # Tests must not spawn real worker threads.
    stack.thread_group_mgr = DummyThreadGroupManager()
    return stack
def setup_keystone_mocks_with_mock(test_case, stack):
    """Stub Keystone client creation so tests never authenticate.

    NOTE(review): *stack* is accepted for signature parity with the other
    setup helpers but is not used in this body.
    """
    # Repaired dataset garbling: "fkc | = ...".
    fkc = fake_ks.FakeKeystoneClient()
    test_case.patchobject(keystone.KeystoneClientPlugin, '_create')
    keystone.KeystoneClientPlugin._create.return_value = fkc
def setup_mock_for_image_constraint_with_mock(test_case, imageId_input,
                                              imageId_output=744):
    """Patch Glance image lookup to always return *imageId_output*."""
    # NOTE(review): imageId_input is accepted (callers pass the template's
    # ImageId) but this stub ignores it and returns imageId_output for any
    # lookup.
    test_case.patchobject(glance.GlanceClientPlugin,
                          'find_image_by_name_or_id',
                          return_value=imageId_output)
def validate_setup_mocks_with_mock(stack, fc, mock_image_constraint=True,
                                   validate_create=True):
    """Assert that the mocks installed by setup_mocks_with_mock were used.

    Checks the Glance image lookup, the nova userdata build, and
    (optionally) the exact ``servers.create`` call made for the stack's
    WebServer instance.
    """
    instance = stack['WebServer']
    metadata = instance.metadata_get()
    if mock_image_constraint:
        m_image = glance.GlanceClientPlugin.find_image_by_name_or_id
        m_image.assert_called_with(
            instance.properties['ImageId'])
    user_data = instance.properties['UserData']
    server_userdata = instance.client_plugin().build_userdata(
        metadata, user_data, 'ec2-user')
    nova.NovaClientPlugin.build_userdata.assert_called_with(
        metadata, user_data, 'ec2-user')
    if not validate_create:
        return
    # image=744 matches the default imageId_output of the Glance stub;
    # flavor=3 presumably corresponds to the template's m1.large — confirm.
    fc.servers.create.assert_called_once_with(
        image=744,
        flavor=3,
        key_name='test',
        name=utils.PhysName(stack.name, 'WebServer'),
        security_groups=None,
        userdata=server_userdata,
        scheduler_hints=None,
        meta=None,
        nics=None,
        availability_zone=None,
        block_device_mapping=None)
def setup_mocks_with_mock(testcase, stack, mock_image_constraint=True,
                          mock_keystone=True):
    """Patch nova/glance/keystone clients for *stack*; return the fake nova client.

    ``fc.servers.create`` is stubbed to return an existing fake server so
    ``stack.create()`` can complete without contacting OpenStack.
    """
    fc = fakes_nova.FakeClient()
    testcase.patchobject(instances.Instance, 'client', return_value=fc)
    testcase.patchobject(nova.NovaClientPlugin, 'client', return_value=fc)
    instance = stack['WebServer']
    metadata = instance.metadata_get()
    if mock_image_constraint:
        setup_mock_for_image_constraint_with_mock(
            testcase, instance.properties['ImageId'])
    if mock_keystone:
        setup_keystone_mocks_with_mock(testcase, stack)
    user_data = instance.properties['UserData']
    # Build the real userdata once, then pin build_userdata to return it so
    # later assertions can compare against a stable value.
    server_userdata = instance.client_plugin().build_userdata(
        metadata, user_data, 'ec2-user')
    testcase.patchobject(nova.NovaClientPlugin, 'build_userdata',
                         return_value=server_userdata)
    testcase.patchobject(fc.servers, 'create')
    fc.servers.create.return_value = fc.servers.list()[4]
    return fc
def setup_stack_with_mock(test_case, stack_name, ctx, create_res=True,
                          convergence=False):
    """Create and store a test stack; optionally create its resources.

    When *create_res* is True the WebServer mocks are installed, the stack
    is created and persisted, and the mocks validated. Returns the stack.
    """
    stack = get_stack(stack_name, ctx, convergence=convergence)
    stack.store()
    if create_res:
        fc = setup_mocks_with_mock(test_case, stack)
        stack.create()
        stack._persist_state()
        validate_setup_mocks_with_mock(stack, fc)
    return stack
def clean_up_stack(test_case, stack, delete_res=True):
    """Delete *stack*; when *delete_res*, stub nova server deletion first.

    The fake delete raises fakes_nova.fake_exception() — presumably a
    NotFound so the resource delete path treats the server as gone; confirm.
    """
    if delete_res:
        fc = fakes_nova.FakeClient()
        test_case.patchobject(instances.Instance, 'client', return_value=fc)
        test_case.patchobject(fc.servers, 'delete',
                              side_effect=fakes_nova.fake_exception())
        stack.delete()
def stack_context(stack_name, create_res=True, convergence=False):
"""Decorator for creating and deleting stack.
Decorator which creates a stack by using the test case's context and
deletes it afterwards to ensure tests clean up their stacks regardless
of test success/failure.
"""
def stack_delete(test_fn):
@six.wraps(test_fn)
def wrapped_test(test_case, *args, **kwargs):
def create_stack():
ctx = getattr(test_case, 'ctx', None)
if ctx is not None:
stack = setup_stack_with_mock(test_cas |
betatim/osf-cli | setup.py | Python | bsd-3-clause | 2,524 | 0 | # Always prefer setuptools over distutils
import os
from setuptools import setup, find_packages

# Paths are resolved relative to this file so the build works from any CWD.
here = os.path.abspath(os.path.dirname(__file__))

# Single-source the version, runtime requirements, and long description.
with open(os.path.join(here, 'VERSION')) as f:
    __version__ = f.read().strip()
with open(os.path.join(here, 'requirements.txt')) as f:
    required = f.read().splitlines()
with open(os.path.join(here, 'README.rst')) as f:
    long_description = f.read()

# Non-Python files shipped inside the distribution.
extra_files = [
    os.path.join(here, 'LICENSE'),
    os.path.join(here, 'requirements.txt'),
    os.path.join(here, 'VERSION'),
]

setup(
    name='osfclient',
    # update `osfclient/__init__.py` as well
    version=__version__,
    description='An OSF command-line library',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/dib-lab/osf-cli',
    # Author details
    author='The OSF-cli authors',
    # Choose your license
    license='BSD3',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: BSD License',
        'Topic :: Utilities'
    ],
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    # BUGFIX: was misspelled "incude_package_data", which setuptools silently
    # ignored, so package_data was never honored for sdists built via MANIFEST.
    include_package_data=True,
    package_data={'': extra_files},
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=required,
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'osf=osfclient.__main__:main',
        ],
    },
)
|
Ebag333/Pyfa | eos/effects/modulebonusafterburner.py | Python | gpl-3.0 | 479 | 0.002088 | # moduleBonusAfterburner
#
# Used by:
# Modules from group: Propulsion Module (62 of 127)
type = "active"
runTime = "late"


def handler(fit, module, context):
    """Apply an afterburner's effects to the fitted ship.

    Adds the module's mass to the ship, then boosts maxVelocity by the
    classic AB/MWD formula: speedFactor * speedBoostFactor / total mass.
    """
    fit.ship.increaseItemAttr("mass", module.getModifiedItemAttr("massAddition"))
    speedBoost = module.getModifiedItemAttr("speedFactor")
    # Mass must be read *after* the addition above so the boost uses the
    # propulsion-module-laden mass.
    mass = fit.ship.getModifiedItemAttr("mass")
    thrust = module.getModifiedItemAttr("speedBoostFactor")
    fit.ship.boostItemAttr("maxVelocity", speedBoost * thrust / mass)
h2oai/h2o-dev | h2o-py/tests/testdir_jira/pyunit_hexdev_612.py | Python | apache-2.0 | 744 | 0.004032 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test for HEXDEV-612."""
from tests import pyunit_utils
import h2o
import numpy as np
def test_hd612():
    """Check that ``column_names`` given to ``H2OFrame.from_python()`` does
    not produce an extra (header) row and that the names are applied."""
    # 10x4 frame of the integers 0..39 (equivalent to the original list
    # comprehension, but vectorized).
    data = np.arange(40).reshape(10, 4)
    df = h2o.H2OFrame.from_python(data)
    assert df.nrow == 10
    assert df.ncol == 4
    # Default H2O column names.
    assert df.names == ["C1", "C2", "C3", "C4"]
    names = ["spam", "egg", "ham", "milk"]
    df = h2o.H2OFrame.from_python(data, column_names=names)
    assert df.nrow == 10
    assert df.ncol == 4
    # Repaired dataset garbling: "a | ssert".
    assert df.names == names


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_hd612)
else:
    test_hd612()
|
Nvizible/shotgunEvents | src/examplePlugins/sharedStateB.py | Python | mit | 2,306 | 0.000867 | """
For detailed information please see
http://shotgunsoftware.github.com/shotgunEvents/api.html
Args based shared state
-----------------------
This plugin demoes how three callbacks can share state through the args argument
to registerCallback.
The shared state stores two counters one (sequential) will be incremented
sequentially by each callback and will keep incrementing across event ids.
The second counter (rotating) will be incremented by each successive callback
but will be reset at each new event.
Try me
------
To try the plugin, make sure you copy it into a path mentioned in your .conf's
"paths" ([plugin] section) and change $DEMO_SCRIPT_NAME$ and $DEMO_API_KEY$ for
sane values.
"""
def registerCallbacks(reg):
    """Register all necessary or appropriate callbacks for this plugin.

    All three callbacks receive the *same* dict via ``args`` — that shared
    object is how they exchange state (see module docstring).
    """
    scriptName = '$DEMO_SCRIPT_NAME$'
    scriptKey = '$DEMO_API_KEY$'
    # Prepare the shared state object: -1 so the first increment yields 0.
    _state = {
        'sequential': -1,
        'rotating': -1,
    }
    # Callbacks are called in registration order. So callbackA will be called
    # before callbackB and callbackC.
    reg.registerCallback(scriptName, scriptKey, callbackA, args=_state)
    # Repaired dataset garbling: "registerCallb | ack".
    reg.registerCallback(scriptName, scriptKey, callbackB, args=_state)
    reg.registerCallback(scriptName, scriptKey, callbackC, args=_state)
def callbackA(sg, logger, event, args):
    """First callback per event: resets the rotating counter, then logs."""
    # We know callbackA will be called first because we registered it first.
    # As the first thing to run on each event, we can reinitialize the rotating
    # counter.
    args['rotating'] = -1
    # Then we pass off to our helper function... because I'm lazy.
    printIds(sg, logger, event, args)
def callbackB(*args):
    """Second callback: increments and logs the shared counters."""
    # Just an example plugin, remember... Get the ids incremented and logged.
    printIds(*args)
def callbackC(*args):
    """Third callback: increments and logs the shared counters."""
    # Just an example plugin, remember... Get the ids incremented and logged.
    printIds(*args)
def printIds(sg, logger, event, args):
    """Advance both shared counters and log their new values.

    Every callback funnels through here, so each call bumps the shared
    ``sequential`` counter (never reset) and the per-event ``rotating`` one.
    """
    for counter in ('sequential', 'rotating'):
        args[counter] += 1
    # Emit the counters so the effect of the shared state is visible.
    logger.info('Sequential #%d - Rotating #%d', args['sequential'], args['rotating'])
|
andymckay/zamboni | mkt/developers/management/commands/cleanup_addon_premium.py | Python | bsd-3-clause | 364 | 0 | from djang | o.core.management.base import BaseCommand
import amo
from mkt.webapps.models import AddonPremium
class Command(BaseCommand):
    help = 'Clean up existing AddonPremium objects for free apps.'

    def handle(self, *args, **options):
        """Delete AddonPremium rows whose addon has a free premium type."""
        # Repaired dataset garbling: "premium_type__in= | amo.ADDON_FREES".
        (AddonPremium.objects.filter(addon__premium_type__in=amo.ADDON_FREES)
                             .delete())
|
googleapis/python-datacatalog | samples/generated_samples/datacatalog_v1beta1_generated_data_catalog_get_iam_policy_async.py | Python | apache-2.0 | 1,506 | 0.000664 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetIamPolicy
# NOTE: This snippet has been automatically generated for illu | strative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install | google-cloud-datacatalog
# [START datacatalog_v1beta1_generated_DataCatalog_GetIamPolicy_async]
from google.cloud import datacatalog_v1beta1
async def sample_get_iam_policy():
    """Fetch and print the IAM policy for a placeholder resource.

    Generated illustrative snippet (see file header): requires the
    google-cloud-datacatalog package and valid credentials;
    ``resource_value`` must be replaced with a real resource name.
    """
    # Create a client
    client = datacatalog_v1beta1.DataCatalogAsyncClient()
    # Initialize request argument(s)
    request = datacatalog_v1beta1.GetIamPolicyRequest(
        resource="resource_value",
    )
    # Make the request
    response = await client.get_iam_policy(request=request)
    # Handle the response
    print(response)
# [END datacatalog_v1beta1_generated_DataCatalog_GetIamPolicy_async]
|
mauroalberti/gsf | pygsf/geometries/shapes/joins.py | Python | gpl-3.0 | 2,499 | 0.001601 | from enum import Enum
from typing import Union, List, Optional
from .space2d import *
from .space3d import *
class JoinTypes(Enum):
    """
    Enumeration of the ways two lines/segments can join at their endpoints.
    """
    START_START = 1  # start point coincident with start point
    START_END = 2  # start point coincident with end point
    END_START = 3  # end point coincident with start point
    END_END = 4  # end point coincident with end point
def analizeJoins2D(
    first: Union[Line2D, Segment2D],
    second: Union[Line2D, Segment2D]
) -> List[Optional[JoinTypes]]:
    """
    Analyze join types between two lines/segments.

    :param first: a line or segment.
    :param second: a line or segment.
    :return: a list of join types (empty when no endpoints coincide).

    Examples:
      >>> first = Segment2D(Point2D(x=0,y=0), Point2D(x=1,y=0))
      >>> second = Segment2D(Point2D(x=1,y=0), Point2D(x=0,y=0))
      >>> analizeJoins2D(first, second)
      [<JoinTypes.START_END: 2>, <JoinTypes.END_START: 3>]
      >>> first = Segment2D(Point2D(x=0,y=0), Point2D(x=1,y=0))
      >>> second = Segment2D(Point2D(x=2,y=0), Point2D(x=3,y=0))
      >>> analizeJoins2D(first, second)
      []
    """
    # Repaired dataset garbling at the END_START/return lines; table-driven
    # form keeps the original check order (START_START, START_END,
    # END_START, END_END).
    pairings = (
        (first.start_pt, second.start_pt, JoinTypes.START_START),
        (first.start_pt, second.end_pt, JoinTypes.START_END),
        (first.end_pt, second.start_pt, JoinTypes.END_START),
        (first.end_pt, second.end_pt, JoinTypes.END_END),
    )
    return [join for pt_a, pt_b, join in pairings if pt_a.is_coincident(pt_b)]
def analizeJoins3D(
    first: Union[Line3D, Segment3D],
    second: Union[Line3D, Segment3D]
) -> List[Optional[JoinTypes]]:
    """
    Analyze join types between two 3D lines/segments.

    :param first: a line or segment.
    :param second: a line or segment.
    :return: a list of join types (empty when no endpoints coincide).
    """
    # Check the four endpoint pairings in the canonical order.
    pairings = (
        (first.start_pt, second.start_pt, JoinTypes.START_START),
        (first.start_pt, second.end_pt, JoinTypes.START_END),
        (first.end_pt, second.start_pt, JoinTypes.END_START),
        (first.end_pt, second.end_pt, JoinTypes.END_END),
    )
    return [join for pt_a, pt_b, join in pairings if pt_a.is_coincident(pt_b)]
taudor/pypuf_helper | test/test_transform_id.py | Python | gpl-3.0 | 677 | 0 | import unittest
from numpy import array, int8
from numpy.testing import assert_arr | ay_equal
import pypuf_helper as ph
class TestTransformID(unittest.TestCase):
    """Tests for the identity challenge transform of pypuf_helper."""

    def test_01(self):
        """transform_id must replicate each challenge k times, unchanged."""
        k = 3
        input_test = array([
            [1, -1, 1, 1],
            [-1, -1, 1, 1]
        ], dtype=int8)
        # Repaired dataset garbling in the expected-array literal.
        result = array([
            [[1, -1, 1, 1],
             [1, -1, 1, 1],
             [1, -1, 1, 1]],
            [[-1, -1, 1, 1],
             [-1, -1, 1, 1],
             [-1, -1, 1, 1]]
        ], dtype=int8)
        assert_array_equal(
            ph.transform_id(input_test, k),
            result,
            "transform_id fails."
        )
|
ansible/ansible | lib/ansible/utils/display.py | Python | gpl-3.0 | 19,081 | 0.001992 | # (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
    """Set the locale to the users default setting
    and set ``_LOCALE_INITIALIZED`` to indicate whether
    ``get_text_width`` may run into trouble
    """
    global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
    # Run at most once per process; subsequent calls are no-ops.
    if _LOCALE_INITIALIZED is False:
        try:
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error as e:
            # Remember the failure so get_text_width can warn about it later.
            _LOCALE_INITIALIZATION_ERR = e
        else:
            _LOCALE_INITIALIZED = True
def get_text_width(text):
    """Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
    number of columns used to display a text string.
    We try first with ``wcswidth``, and fallback to iterating each
    character and using wcwidth individually, falling back to a value of 0
    for non-printable wide characters
    On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
    that in the case of Ansible is done in ``bin/ansible``
    """
    if not isinstance(text, text_type):
        raise TypeError('get_text_width requires text, not %s' % type(text))
    # Repaired dataset garbling: "_LOCALE_INITIALIZATION_ERR | :".
    if _LOCALE_INITIALIZATION_ERR:
        Display().warning(
            'An error occurred while calling ansible.utils.display.initialize_locale '
            '(%s). This may result in incorrectly calculated text widths that can '
            'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
        )
    elif not _LOCALE_INITIALIZED:
        Display().warning(
            'ansible.utils.display.initialize_locale has not been called, '
            'this may result in incorrectly calculated text widths that can '
            'cause Display to print incorrect line lengths'
        )
    # Fast path: let libc measure the whole string at once.
    try:
        width = _LIBC.wcswidth(text, _MAX_INT)
    except ctypes.ArgumentError:
        width = -1
    if width != -1:
        return width
    # Slow path: measure character by character.
    width = 0
    counter = 0
    for c in text:
        counter += 1
        if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
            # A few characters result in a subtraction of length:
            # BS, DEL, CCH, ESC
            # ESC is slightly different in that it's part of an escape sequence, and
            # while ESC is non printable, it's part of an escape sequence, which results
            # in a single non printable length
            width -= 1
            counter -= 1
            continue
        try:
            w = _LIBC.wcwidth(c)
        except ctypes.ArgumentError:
            w = -1
        if w == -1:
            # -1 signifies a non-printable character
            # use 0 here as a best effort
            w = 0
        width += w
    if width == 0 and counter and not _LOCALE_INITIALIZED:
        raise EnvironmentError(
            'ansible.utils.display.initialize_locale has not been called, '
            'and get_text_width could not calculate text width of %r' % text
        )
    # It doesn't make sense to have a negative printable width
    return width if width >= 0 else 0
class FilterBlackList(logging.Filter):
    """Logging filter that drops records from any blacklisted logger subtree."""

    def __init__(self, blacklist):
        # One stdlib Filter per blacklisted name; each matches its own subtree.
        self.blacklist = [logging.Filter(entry) for entry in blacklist]

    def filter(self, record):
        # Keep the record only if every blacklisted filter rejects it.
        return all(not matcher.filter(record) for matcher in self.blacklist)
class FilterUserInjector(logging.Filter):
    """Logging filter that stamps every record with the current username.

    Attached to all handlers so third-party libraries logging through them
    never blow up on a missing ``user`` attribute in the log format.
    """

    try:
        username = getpass.getuser()
    except KeyError:
        # Containers sometimes lack a valid passwd/shadow entry for the
        # running uid, so fall back to the raw uid.
        username = 'uid=%s' % os.getuid()

    def filter(self, record):
        record.user = FilterUserInjector.username
        return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(metaclass=Singleton):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if cmd.returncode:
raise Exception
self.cows_available = {to_text(c) for c in out.split()} # set comprehension
if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay |
PetukhovVictor/compiler | src/Parser/AST/expressions/logical.py | Python | mit | 1,676 | 0.002387 | from Parser.AST.base import AST
CLASS = "expressions.logical"
class RelopBexp(AST):
    """AST node for a relational comparison (e.g. ``x > 56``).

    Stores the comparison operator plus its left/right operand subtrees;
    the operands are also exposed through ``children`` for tree walks.
    """
    def __init__(self, op, left, right):
        super().__init__(CLASS, "relop_bexp")
        self.op = op
        self.left, self.right = left, right
        self.children = [left, right]
class AndBexp(AST):
    """AST node for logical conjunction (e.g. ``x > 56 and x < 61``).

    The two operand subtrees are also exposed via ``children``.
    """
    def __init__(self, left, right):
        super().__init__(CLASS, "and_bexp")
        self.left, self.right = left, right
        self.children = [left, right]
class OrBexp(AST):
    """AST node for logical disjunction (e.g. ``x < 11 or x > 100``).

    The two operand subtrees are also exposed via ``children``.
    (Docstring opener was garbled by a stray ``|`` in the source; repaired.)
    """
    def __init__(self, left, right):
        super().__init__(CLASS, "or_bexp")
        self.left = left
        self.right = right
        self.children = [left, right]
class NotBex | p(AST):
"""
'Not' operation boolean expression class for AST.
interpret - runtime function for Evaluator (return result of applying the 'not' operation to value).
Example: x not 11
"""
def __init__(self, exp):
super().__init__(CLASS, "not_bexp")
self.exp = exp
self.children = [exp]
|
barriquello/iotstack | openwsn-fw-work/firmware/openos/bootloader/telosb/lib/nesdoc/components.py | Python | mit | 3,812 | 0.018101 | # Copyright (c) 2005 Intel Corporation
# All rights reserved.
#
# This file is distributed under the terms in the attached INTEL-LICENSE
# file. If you do not find these files, copies can be found by writing to
# Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300, Berkeley, CA,
# 94704. Attention: Intel License Inquiry.
# Generate HTML file for a component
from nesdoc.utils import *
from nesdoc.generators import *
from nesdoc.html import *
__all__ = [ "generate_component" ]
# Output HTML describing a specification element
def spec_signature_html(ht, elem):
    """Write the HTML signature of a spec element (function or interface).

    NOTE: Python 2 code — uses tuple-parameter lambdas ``lambda (x): ...``.
    """
    if elem.tagName == "function":
        ht.pfnsig(elem, lambda (name): "<b>%s</b>" % name)
    else:
        assert elem.tagName == "interface"
        instance = xml_tag(elem, "instance")
        idef = xml_tag(instance, "interfacedef-ref")
        arguments = xml_tag(instance, "arguments")
        parameters = xml_tag(elem, "interface-parameters")
        instance_name = elem.getAttribute("name")
        def_name = idef.getAttribute("qname")
        fullname = idef.getAttribute("nicename")
        # Link to the interface definition's own page.
        sig = 'interface <a href="../ihtml/%s.html">%s</a>' % (fullname, def_name)
        if arguments:
            # Generic interface: render its type arguments as <...>.
            iargs = join(map(lambda (arg): typename_str(arg, ""),
                             xml_elements(arguments)), ", ")
            sig += "<" + iargs + ">"
        if instance_name != def_name:
            # Renamed instance: show "as <local name>".
            sig += " as <b>%s</b>" % instance_name
        if parameters:
            # Parameterized interface: render its parameters as [...].
            iparms = join(map(lambda (arg): typename_str(arg, ""),
                              xml_elements(parameters)), ", ")
            sig += "[" + iparms + "]"
        ht.p(sig)
# Output HTML for specification elements elems, with heading name
# If elems list is empty, do nothing.
def generate_speclist(ht, name, elems):
    """Emit an HTML section titled *name* listing the given spec elements.

    Emits nothing when *elems* is empty.
    """
    if not elems:
        return
    ht.tag("p")
    ht.heading(name)
    for elem in elems:
        ht.func_sig_start()
        spec_signature_html(ht, elem)
        # Attach the short documentation, when the element has any.
        doc = nd_doc_short(elem)
        if doc is not None:
            ht.push("menu")
            ht.pln(doc)
            ht.popln()
        ht.func_sig_stop()
def interface_compare(x, y):
    """Python 2 cmp-style comparator: case-insensitive qname, then name."""
    if cmp(x.getAttribute("qname").lower(), y.getAttribute("qname").lower()) == 0 :
        # Same interface definition: order by the local instance name.
        return cmp(x.getAttribute("name").lower(), y.getAttribute("name").lower())
    return cmp(x.getAttribute("qname").lower(), y.getAttribute("qname").lower())
def generate_component(comp):
nicename = comp.getAttribute("nicename")
ht = Html("chtml/%s.html" % nicename )
if xml_tag(comp, "module"):
kind = "module"
else:
kind = "configuration"
ht.title("Component: " + nicename)
ht.body()
ht.push("h2");
ht.p("Component: " + nicename)
ht.popln();
# The source code name and documentation
ht.push("b")
parameters = xml_tag(comp, "parameters")
if parameters:
ht.p("generic ")
ht.p(kind + " " + comp.getAttribute("qname"))
if parameters:
ht.p(parameter_str(parameters))
ht.pop()
ht.startline()
idoc = nd_doc_long(comp)
if idoc != None:
ht.tag("p")
ht.pdoc(idoc)
ht.tag("p")
|
spec = xml_tag(comp, "specification")
interfaces = spec.getElementsByTagName("interface")
functions = spec.getElementsByTagName("function")
spec = interfaces + functions
provided = filter(lambda (x): x.getAttribute("provided") == "1", spec)
used = filter(lambda (x): x.getAttribute("provided") == "0", spec)
# sort arrays
provided.sort(interf | ace_compare)
used.sort(interface_compare)
generate_speclist(ht, "Provides", provided)
generate_speclist(ht, "Uses", used)
# wiring graph for configurations
if xml_tag(comp, "configuration"):
ht.tag("p")
ht.heading("Wiring")
ht.tag("p")
ht.pushln("map", 'name="comp"')
cmap = file("chtml/%s.cmap" % nicename)
for line in cmap.readlines():
ht.pln(line)
cmap.close()
ht.popln()
ht.tag("img", 'src="%s.png"' % nicename, 'usemap="#comp"', 'id=imgwiring')
ht.close()
|
Featuretools/featuretools | featuretools/utils/gen_utils.py | Python | bsd-3-clause | 4,834 | 0.001241 | import json
import shutil
import sys
import warnings
from itertools import zip_longest
import s3fs
from smart_open import open
from tqdm import tqdm
def session_type():
    """Classify the current Python session.

    Returns "kernel" for Jupyter-style kernels, "ipython" for a terminal
    IPython shell, and "python" for a plain interpreter.
    """
    if 'IPython' not in sys.modules:
        # IPython hasn't been imported, definitely not
        return "python"
    from IPython import get_ipython
    # Jupyter kernels expose a `kernel` attribute on the IPython instance.
    has_kernel = getattr(get_ipython(), 'kernel', None) is not None
    return "kernel" if has_kernel else "ipython"
def make_tqdm_iterator(**kwargs):
    """Build a tqdm progress-bar iterator with library-wide defaults.

    Caller-supplied keyword arguments override the defaults.
    """
    options = {
        "file": sys.stdout,
        "leave": True
    }
    options.update(kwargs)
    # The original branched on session_type() == 'kernel' but both branches
    # built the same plain tqdm bar (the tqdm_notebook path was abandoned —
    # widget detection was unreliable), so the branch and the dead
    # commented-out code were removed.
    return tqdm(**options)
def get_relationship_variable_id(path):
    """Build the dotted link-variable id for a chain of relationships.

    Starts from the child variable id of the first relationship and, for
    each subsequent hop, prefixes the parent entity id.
    """
    first_relationship = path[0][1]
    link_name = first_relationship.child_variable.id
    for _, relationship in path[1:]:
        link_name = '%s.%s' % (relationship.parent_entity.id, link_name)
    return link_name
def find_descendents(cls):
    """
    A generator which yields all descendent classes of the given class
    (including the given class)

    Args:
        cls (Class): the class to find descendents of
    """
    yield cls
    # Depth-first walk over the (transitive) subclass tree.
    for subclass in cls.__subclasses__():
        yield from find_descendents(subclass)
def check_schema_version(cls, cls_type):
    """Warn when a saved schema version is newer or older than supported.

    :param cls: the deserialized object (entityset dict, or a features
        serializer exposing ``features_dict``)
    :param cls_type: 'entityset' or 'features'
    """
    if isinstance(cls_type, str):
        if cls_type == 'entityset':
            from featuretools.entityset.serialize import SCHEMA_VERSION
            version_string = cls.get('schema_version')
        elif cls_type == 'features':
            from featuretools.feature_base.features_serializer import SCHEMA_VERSION
            version_string = cls.features_dict['schema_version']

        # BUGFIX: components must be compared numerically. The original kept
        # them as strings, so '10' < '9' lexicographically, and
        # zip_longest(..., fillvalue=0) mixed int with str, raising TypeError
        # whenever the two versions had different lengths.
        current = [int(c) for c in SCHEMA_VERSION.split('.')]
        saved = [int(s) for s in version_string.split('.')]
        warning_text_upgrade = ('The schema version of the saved %s'
                                '(%s) is greater than the latest supported (%s). '
                                'You may need to upgrade featuretools. Attempting to load %s ...'
                                % (cls_type, version_string, SCHEMA_VERSION, cls_type))
        for c_num, s_num in zip_longest(current, saved, fillvalue=0):
            if c_num > s_num:
                break
            elif c_num < s_num:
                warnings.warn(warning_text_upgrade)
                break

        warning_text_outdated = ('The schema version of the saved %s'
                                 '(%s) is no longer supported by this version'
                                 'of featuretools. Attempting to load %s ...'
                                 % (cls_type, version_string, cls_type))
        # Check if saved has older major version.
        if current[0] > saved[0]:
            warnings.warn(warning_text_outdated)
def use_smartopen_es(file_path, path, transport_params=None, read=True):
    """Copy an entityset archive between a local file and a remote URL.

    Uses smart_open's ``open`` (imported at module level): *read* downloads
    ``path`` into ``file_path``; otherwise ``file_path`` is uploaded.
    """
    if read:
        with open(path, "rb", transport_params=transport_params) as src, \
                open(file_path, 'wb') as dst:
            shutil.copyfileobj(src, dst)
    else:
        with open(file_path, 'rb') as src, \
                open(path, 'wb', transport_params=transport_params) as dst:
            shutil.copyfileobj(src, dst)
def use_s3fs_es(file_path, path, read=True):
    """Transfer an entityset archive to/from S3 with anonymous s3fs access."""
    filesystem = s3fs.S3FileSystem(anon=True)
    if read:
        filesystem.get(path, file_path)
    else:
        filesystem.put(file_path, path)
def use_smartopen_features(path, features_dict=None, transport_params=None, read=True):
    """Read (or write) a serialized feature-definitions dict at *path*.

    Uses smart_open's ``open`` from module level. Returns the loaded dict
    when *read* is True; otherwise writes *features_dict* and returns None.
    """
    if read:
        with open(path, 'r', encoding='utf-8', transport_params=transport_params) as fh:
            return json.load(fh)
    with open(path, "w", transport_params=transport_params) as fh:
        json.dump(features_dict, fh)
def use_s3fs_features(file_path, features_dict=None, read=True):
    """Read (or write) serialized feature definitions on S3 (anonymous).

    Returns the loaded dict when *read* is True; otherwise serializes
    *features_dict* to the S3 object and returns None.
    """
    filesystem = s3fs.S3FileSystem(anon=True)
    if read:
        with filesystem.open(file_path, "r", encoding='utf-8') as fh:
            return json.load(fh)
    with filesystem.open(file_path, "w") as fh:
        fh.write(json.dumps(features_dict, ensure_ascii=False))
|
cjaymes/pyscap | src/scap/model/oval_5/defs/independent/FileHash58TestElement.py | Python | gpl-3.0 | 902 | 0.001109 | # Copyright 2016 Casey Jayme | s
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed i | n the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.independent.TestType import TestType
logger = logging.getLogger(__name__)
class FileHash58TestElement(TestType):
    """Model for the OVAL 5 independent-schema ``filehash58_test`` element."""
    # MODEL_MAP feeds the XML (de)serialization machinery inherited from
    # TestType; only the tag name is specific to this element.
    MODEL_MAP = {
        'tag_name': 'filehash58_test',
    }
|
martflu/adaptive-audio | src/particle.py | Python | gpl-2.0 | 137 | 0.029197 | imp | ort rectangle
class particle:
    """A drawable particle, represented by a colored rectangle."""

    def __init__(self, x, y, w, h, c):
        # Delegate geometry and appearance to the rectangle helper:
        # (x, y) position, (w, h) size, c color.
        # Repaired dataset garbling: "rectangl | e(".
        self.rectangle = rectangle.rectangle(x, y, w, h, c)
fangohr/oommf-python | joommf/exceptions.py | Python | bsd-2-clause | 39 | 0 | class JoommfError(Ex | ception):
pas | s
|
TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/pldata/nltk_3.0a3/nltk/app/__init__.py | Python | mit | 1,644 | 0.000608 | # Natural Language Toolkit: Applications package
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Interactive NLTK Applications:
chartparser: Chart Parser
chunkparser: Regular-Expression Chunk Parser
collocations: Find collocations in text
concordance: Part-of-speech concordancer
nemo: Finding (and Replacing) Nemo regular expression tool
rdparser: Recursive Descent Parser
srparser: Shift-Reduce Parser
wordnet: WordNet Browser
"""
# Import Tkinter-based modules if Tkinter is installed
import nltk.compat
try:
import tkinter
except ImportError:
import warnings
warnings.warn("nltk.app package not loaded "
"(please install Tkinter library).")
else:
from .chartpa | rser_app import app as chartparser
from .chunkparser_app import app as chunkparser
from .collocations_app import app as collocations
from .concordance_app import app as concordance
from .nemo_app import app as nemo
from .rdparser_app import app as rdparser
from .srparser_app import app as srparser
from .wordnet_app import | app as wordnet
try:
import pylab
except ImportError:
import warnings
warnings.warn("nltk.app.wordfreq not loaded "
"(requires the pylab library).")
else:
from .wordfreq_app import app as wordfreq
# skip doctests from this package
def setup_module(module):
from nose import SkipTest
raise SkipTest("nltk.app examples are not doctests")
|
rwl/openpowersystem | ucte/operational_limits/voltage_limit.py | Python | agpl-3.0 | 1,642 | 0.004263 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Operational limit applied to voltage.
"""
# <<< imports
# @generated
from ucte.operational_limits.operational_limit import OperationalLimit
from ucte.domain import Voltage
from google.appengine.ext import db
# >>> imports
class VoltageLimit(OperationalLimit):
""" Operational limit applied to voltage.
"""
# <<< voltage_limit.attributes
# @generated
# Limit on voltage. High or low limit depends on the OperatoinalLimit.l | imitKind
value = Voltage
# >>> voltage_limit.attributes
# <<< voltage_limit.refer | ences
# @generated
# >>> voltage_limit.references
# <<< voltage_limit.operations
# @generated
# >>> voltage_limit.operations
# EOF -------------------------------------------------------------------------
|
mbohlool/client-python | kubernetes/client/apis/custom_objects_api.py | Python | apache-2.0 | 69,951 | 0.003374 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class CustomObjectsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_cluster_custom_object(self, group, v | ersion, plural, body, **kwargs):
"""
Creates a | cluster scoped Custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_cluster_custom_object(group, version, plural, body, async=True)
>>> result = thread.get()
:param async bool
:param str group: The custom resource's group name (required)
:param str version: The custom resource's version (required)
:param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param object body: The JSON schema of the Resource to create. (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_cluster_custom_object_with_http_info(group, version, plural, body, **kwargs)
else:
(data) = self.create_cluster_custom_object_with_http_info(group, version, plural, body, **kwargs)
return data
def create_cluster_custom_object_with_http_info(self, group, version, plural, body, **kwargs):
"""
Creates a cluster scoped Custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_cluster_custom_object_with_http_info(group, version, plural, body, async=True)
>>> result = thread.get()
:param async bool
:param str group: The custom resource's group name (required)
:param str version: The custom resource's version (required)
:param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param object body: The JSON schema of the Resource to create. (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['group', 'version', 'plural', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_custom_object" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'group' is set
if ('group' not in params) or (params['group'] is None):
raise ValueError("Missing the required parameter `group` when calling `create_cluster_custom_object`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `create_cluster_custom_object`")
# verify the required parameter 'plural' is set
if ('plural' not in params) or (params['plural'] is None):
raise ValueError("Missing the required parameter `plural` when calling `create_cluster_custom_object`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_cluster_custom_object`")
collection_formats = {}
path_params = {}
if 'group' in params:
path_params['group'] = params['group']
if 'version' in params:
path_params['version'] = params['version']
if 'plural' in params:
path_params['plural'] = params['plural']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/{group}/{version}/{plural}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_custom_object(self, group, version, namespace, plural, body, **kwargs):
"""
Creates a namespace scoped Custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_namespaced_custom_object(group, version, namespace, plural, body, async=True)
>>> result = thread.get()
:param async bool
:param str group: The custom resource's group name (required)
:param str version: The custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param object body: The JSON schema of the Resource to create. (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_namespaced_custom_object_with_http_info(group, version, namespace, plural, body, **kwargs)
else:
(data) = self.create_namespaced_custom_object_with_http_info(group, version, namespace, plural, body, **kwargs)
return data
def create_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, body, **kwargs):
"""
Creates a namespace scoped Custom object
This method makes a sync |
hornn/interviews | tools/bin/gppylib/system/osInterface.py | Python | apache-2.0 | 1,258 | 0.007154 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2009. All Rights Reserved.
#
"""
This file defines the interface that can be used to
fetch and update system configuration information,
as well as the data object returned by the
"""
import os
from gppylib.gplog import *
from gppylib.utils import checkNotNone
logger = get_default_logger()
#
# An implementation of GpOsProvider will provide functionality to do native/os commands
# (like sleeping)
#
class GpOsProvider :
def __init__(self):
pass
def destroy(self):
pass
#
# Sleep for the given number of seconds (as though calli | ng python's sleep)
#
def sleep( self, sleepTime ) :
raise Exception("not implemented")
#
# Management of registered configuration provider. Right now it
# is a singleton, so initializeDatabase calls _could_ mess up
# the singleton for other parts of code. Per | haps switch later
# to a factory
#
gProvider = None
def registerOsProvider(provider):
global gProvider
checkNotNone("New global osProvider", provider)
if gProvider is not None:
gProvider.destroy()
gProvider = provider
def getOsProvider():
global gProvider
return checkNotNone("Global osProvider", gProvider)
|
cdondrup/pepper_planning | pepper_move_base/scripts/qualitative_move_base.py | Python | mit | 3,768 | 0.004246 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from actionlib import SimpleActionClient
from pnp_plugin_server.pnp_simple_plugin_server import PNPSimplePluginServer
from pepper_move_base.msg import QualitativeMovePepperAction, QualitativeMovePepperResult
from pnp_msgs.msg import ActionResult
from nao_interaction_msgs.msg import PersonDetectedArray
import tf
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from pepper_move_base.msg import TrackPersonAction, TrackPersonGoal
import yaml
import math
from copy import deepcopy
class QualitativeMove(PNPSimplePluginServer):
def __init__(self, name):
rospy.loginfo("Starting %s ..." % name)
self.listener = tf.TransformListener()
self.target_frame = rospy.get_param("~target_frame", "base_link")
with open(rospy.get_param("~config_file"),'r') as f:
self.distance = yaml.load(f)["distances"]
self.move_client = SimpleActionClient("move_base", MoveBaseAction)
self._ps = PNPSimplePluginServer(
name=name,
ActionSpec=QualitativeMovePepperAction,
execute_cb=self.execute_cb,
auto_start=False
)
rospy.loginfo("Creating tracker client")
self.start_client = SimpleActionClient("/start_tracking_person", TrackPersonAction)
self.start_client.wait_for_server()
rospy.loginfo("Tracker client connected")
self._ps.start()
rospy.loginfo("... done")
def execute_cb(self, goal):
self.start_client.send_goal(TrackPersonGoal(id=goal.id, no_turn=True))
try:
msg = rospy.wait_for_message("/naoqi_driver_node/people_detected", PersonDetectedArray, timeout=5.)
except rospy.ROSException as e:
rospy.logwarn(e)
self._ps.set_aborted()
else:
int_id = int(goal.id.split("_")[1])
for p in msg.person_array:
if p.id == int_id:
try:
t = self.listener.getLatestCommonTime(self.target_frame, msg.header.frame_id)
p_pose = PoseStamped(header=msg.header, pose=p.person.position)
p_pose.header.stamp = t
bl_pose = self.listener.transformPose(self.target_frame, p_pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException) as ex:
rospy.logwarn(ex)
else:
target_dist = p.person.distance - self.distance[goal.to]
d = target_dist/p.person.distance
theta = math.atan2(bl_pose.pose.position.y, bl | _pose.pose.position.x)
target_pose = bl_pose
target_p | ose.pose.position.x *= d
target_pose.pose.position.y *= d
target_pose.pose.orientation.x = 0.
target_pose.pose.orientation.y = 0.
target_pose.pose.orientation.z = math.sin(theta/2.)
target_pose.pose.orientation.w = math.cos(theta/2.)
print target_pose
self.move_client.send_goal_and_wait(MoveBaseGoal(target_pose=target_pose))
finally:
break
res = QualitativeMovePepperResult()
res.result.append(ActionResult(cond="robot_at_home", truth_value=False))
res.result.append(ActionResult(cond="robot_pose_unknown", truth_value=True))
self._ps.set_succeeded(res)
if __name__ == "__main__":
rospy.init_node("goto")
QualitativeMove(rospy.get_name())
rospy.spin()
|
magic0704/oslo.messaging | oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_req_publisher.py | Python | apache-2.0 | 3,103 | 0 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import oslo_messaging
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_me | ssaging._i18n import _LE, _LI
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class ReqPublisher(zmq_publisher_base.PublisherBase):
def send_request(self, request):
if request.msg_type != zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
socket = s | elf._connect_to_host(request.target)
self._send_request(socket, request)
return self._receive_reply(socket, request)
def _connect_to_host(self, target):
try:
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.REQ)
host = self.matchmaker.get_single_host(target)
connect_address = zmq_address.get_tcp_direct_address(host)
LOG.info(_LI("Connecting REQ to %s") % connect_address)
socket.connect(connect_address)
self.outbound_sockets[str(target)] = socket
return socket
except zmq.ZMQError as e:
errmsg = _LE("Error connecting to socket: %s") % str(e)
LOG.error(_LE("Error connecting to socket: %s") % str(e))
raise rpc_common.RPCException(errmsg)
@staticmethod
def _receive_reply(socket, request):
def _receive_method(socket):
return socket.recv_pyobj()
# NOTE(ozamiatin): Check for retry here (no retries now)
with contextlib.closing(zmq_async.get_reply_poller()) as poller:
poller.register(socket, recv_method=_receive_method)
reply, socket = poller.poll(timeout=request.timeout)
if reply is None:
raise oslo_messaging.MessagingTimeout(
"Timeout %s seconds was reached" % request.timeout)
if reply[zmq_names.FIELD_FAILURE]:
raise rpc_common.deserialize_remote_exception(
reply[zmq_names.FIELD_FAILURE],
request.allowed_remote_exmods)
else:
return reply[zmq_names.FIELD_REPLY]
def close(self):
# For contextlib compatibility
self.cleanup()
|
richardliaw/ray | rllib/utils/exploration/per_worker_gaussian_noise.py | Python | apache-2.0 | 1,807 | 0 | from gym.spaces import Space
from typing import Optional
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
from ray.rllib.utils.schedules import ConstantSchedule
class PerWorkerGaussianNoise(GaussianNoise):
"""A per-worker Gaussian noise class for distributed algorithms.
Sets the `scale` schedules of individual workers to a constant:
0.4 ^ (1 + [worker-index] / float([num-workers] - 1) * 7)
See Ape-X paper.
"""
def __init__(self, action_space: Space, *, fra | mework: Optional[str],
num_worke | rs: Optional[int], worker_index: Optional[int],
**kwargs):
"""
Args:
action_space (Space): The gym action space used by the environment.
num_workers (Optional[int]): The overall number of workers used.
worker_index (Optional[int]): The index of the Worker using this
Exploration.
framework (Optional[str]): One of None, "tf", "torch".
"""
scale_schedule = None
# Use a fixed, different epsilon per worker. See: Ape-X paper.
if num_workers > 0:
if worker_index > 0:
num_workers_minus_1 = float(num_workers - 1) \
if num_workers > 1 else 1.0
exponent = (1 + (worker_index / num_workers_minus_1) * 7)
scale_schedule = ConstantSchedule(
0.4**exponent, framework=framework)
# Local worker should have zero exploration so that eval
# rollouts run properly.
else:
scale_schedule = ConstantSchedule(0.0, framework=framework)
super().__init__(
action_space,
scale_schedule=scale_schedule,
framework=framework,
**kwargs)
|
CivicKnowledge/ambry | ambry/util/__init__.py | Python | bsd-2-clause | 39,947 | 0.001252 | """Misc support code.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from collections import OrderedDict, defaultdict, Mapping, deque, MutableMapping, Callable
from functools import partial, reduce, wraps
import json
import hashlib
import logging
import os
import pprint
import re
import subprocess
import sys
from time import time
import yaml
from yaml.representer import RepresenterError
import warnings
from bs4 import BeautifulSoup
from six.moves import filterfalse, xrange as six_xrange
from six import iteritems, iterkeys, itervalues, print_, StringIO
from six.moves.urllib.parse import urlparse, urlsplit, urlunsplit
from six.moves.urllib.request import urlopen
from ambry.dbexceptions import ConfigurationError
logger_init = set()
def get_logger(name, file_name=None, stream=None, template=None, propagate=False, level=None):
"""Get a logger by name.
"""
logger = logging.getLogger(name)
running_tests = (
'test' in sys.argv # running with setup.py
or sys.argv[0].endswith('py.test')) # running with py.test
if running_tests and not level:
# testing without level, this means tester does not want to see any log messages.
level = logging.CRITICAL
if not level:
level = logging.INFO
logger.setLevel(level)
logger.propagate = propagate
formatter = logging.Formatter(template)
if not stream:
stream = sys.stdout
logger.handlers = []
handler = logging.StreamHandler(stream=stream)
handler.setFormatter(formatter)
logger.addHandler(handler)
if file_name:
handler = logging.FileHandler(file_name)
handler.setFormatter(logging.Formatter('%(asctime)s '+template))
logger.addHandler(handler)
return logger
# From https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
def memoize(obj):
cache = obj.cache = {}
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def expiring_memoize(obj):
"""Like memoize, but forgets after 10 seconds."""
cache = obj.cache = {}
last_access = obj.last_access = defaultdict(int)
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if last_access[key] and last_access[key] + 10 < time():
if key in cache:
del cache[key]
last_access[key] = time()
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
class Counter(dict):
"""Mapping where default values are zero."""
def __missing__(self, key):
return 0
# Stolen from:
# http://code.activestate.com/recipes/498245-lru-and-lfu-cache-decorators/
def lru_cache(maxsize=128, maxtime=60):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Cach | e_algorithms#Least_Recently_Used
'''
maxqueue = maxsize * 10
# @ReservedAssignment
def decorating_function(
user_function,
len=len,
iter=iter,
tuple=tuple,
sorted=sorted,
KeyError=KeyError):
cache = {} # mapping of args to results
queue = deque() # order that keys have been used
refcount = Counter() # times each key is in the queue
sentinel = object() # marker for looping a | round the queue
kwd_mark = object() # separate positional and keyword args
# lookup optimizations (ugly but fast)
queue_append, queue_popleft = queue.append, queue.popleft
queue_appendleft, queue_pop = queue.appendleft, queue.pop
@wraps(user_function)
def wrapper(*args, **kwds):
# cache key records both positional and keyword args
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
# record recent use of this key
queue_append(key)
refcount[key] += 1
# get cache entry or compute if not found
try:
result, expire_time = cache[key]
if expire_time and time() > expire_time:
raise KeyError('Expired')
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
if maxtime:
expire_time = time() + maxtime
else:
expire_time = None
cache[key] = result, expire_time
wrapper.misses += 1
# purge least recently used cache entry
if len(cache) > maxsize:
key = queue_popleft()
refcount[key] -= 1
while refcount[key]:
key = queue_popleft()
refcount[key] -= 1
del cache[key], refcount[key]
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access
if len(queue) > maxqueue:
refcount.clear()
queue_appendleft(sentinel)
for key in filterfalse(refcount.__contains__, iter(queue_pop, sentinel)):
queue_appendleft(key)
refcount[key] = 1
return result
def clear():
cache.clear()
queue.clear()
refcount.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
class YamlIncludeLoader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(YamlIncludeLoader, self).__init__(stream)
# From http://pypi.python.org/pypi/layered-yaml-attrdict-config/12.07.1
class OrderedDictYAMLLoader(yaml.Loader):
'Based on: https://gist.github.com/844388'
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.dir = None
for a in args:
try:
self.dir = os.path.dirname(a.name)
except:
pass
self.add_constructor(
'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(
'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
self.add_constructor('!include', OrderedDictYAMLLoader.include)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None,
None,
'expected a mapping node, but found {}'.format(
node.id),
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key ({})'.format(exc),
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def include(self, node):
if not self.dir:
return "ConfigurationError: Can't include file: wasn't able to set base directory"
relpath = self. |
noironetworks/neutron | neutron/agent/linux/dibbler.py | Python | apache-2.0 | 7,239 | 0 | # Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import jinja2
from neutron_lib import constants as lib_const
from neutron_lib.utils import file as file_utils
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import pd
from neutron.agent.linux import pd_driver
from neutron.agent.linux import utils
LOG = logging.getLogger(__name__)
PD_SERVICE_NAME = 'dibbler'
CONFIG_TEMPLATE = jinja2.Template("""
# Config for dibbler-client.
# Use enterprise number based duid
duid-type duid-en {{ enterprise_number }} {{ va_id }}
# 8 (Debug) is most verbose. 7 (Info) is usually the best option
log-level 8
# No automatic downlink address assignment
downlink-prefix-ifaces "none"
# Use script to notify l3_agent of assigned prefix
script {{ script_path }}
# Ask for prefix over the external gateway interface
iface {{ interface_name }} {
# Bind to generated LLA
bind-to-address {{ bind_address }}
# ask for address
{% if hint_prefix != None %}
pd 1 {
prefix {{ hint_prefix }}
}
{% else %}
pd 1
{% endif %}
}
""")
# The first line must be #!/usr/bin/env bash
SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash
exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }}
""")
class PDDibbler(pd_driver.PDDriverBase):
def __init__(self, router_id, subnet_id, ri_ifname):
super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname)
self.requestor_id = "%s:%s:%s" % (self.router_id,
self.subnet_id,
self.ri_ifname)
self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs,
self.requestor_id)
self.prefix_path = "%s/prefix" % self.dibbler_client_working_area
self.pid_path = "%s/client.pid" % self.dibbler_client_working_area
self.converted_subnet_id = self.subnet_id.replace('-', '')
def _is_dibbler_client_running(self):
return utils.get_value_from_file(self.pid_path)
def _generate_dibbler_conf(self, ex_gw_ifname, lla, hint_prefix):
dcwa = self.dibbler_client_working_area
script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True)
buf = six.StringIO()
buf.write('%s' % SCRIPT_TEMPLATE.render(
prefix_path=self.prefix_path,
l3_agent_pid=os.getpid()))
file_utils.replace_file(script_path, buf.getvalue())
os.chmod(script_path, 0o744)
dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False)
buf = six.StringIO()
buf.write('%s' % CONFIG_TEMPLATE.render(
enterprise_number=cfg.CONF.vendor_pen,
va_id='0x%s' % self.converted_subnet_id,
script_path='"%s/notify.sh"' % dcwa,
interface_name='"%s"' % ex_gw_ifname,
bind_address='%s' % lla,
hint_prefix=hint_prefix))
file_utils.replace_file(dibbler_conf, buf.getvalue())
return dcwa
def _spawn_dibbler(self, pmon, router_ns, dibbler_conf):
def callback(pid_file):
dibbler_cmd = ['dibbler-client',
| 'start',
'-w', '%s' % dibbler_conf]
return dibbler_cmd
pm = external_process.ProcessManager(
uuid=self.requestor_id,
default_cmd_callback=callback,
namespace=router_ns,
service=PD_SERVICE_NAME,
conf=cfg.CONF,
pid_file=self.pid_path)
pm.enable(reload_cfg=False)
pmon.register(uuid=self.requestor_id,
service_name=PD_SERVICE_NAME,
| monitored_process=pm)
def enable(self, pmon, router_ns, ex_gw_ifname, lla, prefix=None):
LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
if not self._is_dibbler_client_running():
dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname,
lla, prefix)
self._spawn_dibbler(pmon, router_ns, dibbler_conf)
LOG.debug("dibbler client enabled for router %s subnet %s"
" ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
def disable(self, pmon, router_ns, switch_over=False):
LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
dcwa = self.dibbler_client_working_area
def callback(pid_file):
dibbler_cmd = ['dibbler-client',
'stop',
'-w', '%s' % dcwa]
return dibbler_cmd
pmon.unregister(uuid=self.requestor_id,
service_name=PD_SERVICE_NAME)
pm = external_process.ProcessManager(
uuid=self.requestor_id,
namespace=router_ns,
service=PD_SERVICE_NAME,
conf=cfg.CONF,
pid_file=self.pid_path)
if switch_over:
pm.disable()
else:
pm.disable(get_stop_command=callback)
shutil.rmtree(dcwa, ignore_errors=True)
LOG.debug("dibbler client disabled for router %s subnet %s "
"ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
def get_prefix(self):
prefix = utils.get_value_from_file(self.prefix_path)
if not prefix:
prefix = lib_const.PROVISIONAL_IPV6_PD_PREFIX
return prefix
@staticmethod
def get_sync_data():
try:
requestor_ids = os.listdir(cfg.CONF.pd_confs)
except OSError:
return []
sync_data = []
requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2)
for router_id, subnet_id, ri_ifname in requestors:
pd_info = pd.PDInfo()
pd_info.router_id = router_id
pd_info.subnet_id = subnet_id
pd_info.ri_ifname = ri_ifname
pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname)
pd_info.client_started = (
pd_info.driver._is_dibbler_client_running())
pd_info.prefix = pd_info.driver.get_prefix()
sync_data.append(pd_info)
return sync_data
|
del680202/MachineLearning-memo | src/tensorflow/cnn_deep_with_dropout.py | Python | apache-2.0 | 3,448 | 0.014211 | from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
return result
def weight_variable(shape):
inital = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(inital)
def bias_variable(shape):
inital = tf.constant(0.1, shape=shape)
return tf.Variable(inital)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# Build the CNN: three conv/pool stages, two fully connected layers with
# dropout, and a softmax output over the 10 digit classes; then train.

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 images
ys = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels
keep_prob = tf.placeholder(tf.float32)        # dropout keep probability
x_image = tf.reshape(xs, [-1, 28, 28, 1])     # [n_samples, 28, 28, 1]

## conv1 + pool1 ##
W_conv1 = weight_variable([5, 5, 1, 32])   # patch 5x5, in channels 1, out 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                           # output 14x14x32

## conv2 + pool2 ##
W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in channels 32, out 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                           # output 7x7x64

## conv3 + pool3 ##
W_conv3 = weight_variable([5, 5, 64, 128])  # patch 5x5, in 64, out 128
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)  # output 7x7x128
# SAME-padded pooling of a 7x7 map gives ceil(7/2) = 4, hence 4x4x128.
h_pool3 = max_pool_2x2(h_conv3)                           # output 4x4x128

## func1 layer ##
W_fc1 = weight_variable([4 * 4 * 128, 128])
b_fc1 = bias_variable([128])
# [n_samples, 4, 4, 128] => [n_samples, 4*4*128]
h_pool2_flat = tf.reshape(h_pool3, [-1, 4 * 4 * 128])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

## func2 layer ##
W_fc2 = weight_variable([128, 128])
b_fc2 = bias_variable([128])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

## dropout layer ##
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

## output layer ##
W_fc3 = weight_variable([128, 10])
b_fc3 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

# Cross-entropy loss between prediction and the one-hot labels.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)

sess = tf.Session()
# tf.initialize_all_variables() is deprecated since TF 0.12 in favor of
# tf.global_variables_initializer(); kept for the TF version this targets.
sess.run(tf.initialize_all_variables())

# Train with dropout (keep_prob=0.5); report test accuracy every 1000 steps.
for i in range(40001):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 1000 == 0:
        print(compute_accuracy(
            mnist.test.images, mnist.test.labels))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.