Each row pairs the repository metadata below with the example's file content, drawn from the prefix (string, 0-8.16k chars), middle (string, 3-512 chars), and suffix (string, 0-8.17k chars) columns.

| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|
| cpcloud/numba | numba/cuda/args.py | Python | bsd-2-clause | 1,978 | 0 |
"""
Hints to wrap Kernel arguments to indicate how to manage host-device
memory transfers before & after the kernel call.
"""
import abc
from numba.core.typing.typeof import typeof, Purpose
class ArgHint(metaclass=abc.ABCMeta):
def __init__(self, value):
self.value = value
@abc.abstractmethod
def to_device(self, retr, stream=0):
"""
:param stream: a stream to use when copying data
:param retr:
a list of clean-up work to do after the kernel's been run.
Append 0-arg lambdas to it!
:return: a value (usually a `DeviceNDArray`) to be passed to
the kernel
"""
pass
@property
def _numba_type_(self):
return typeof(self.value, Purpose.argument)
class In(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, _ = auto_device(
self.value,
stream=stream)
# A dummy writeback functor to keep devary alive until the kernel
# is called.
retr.append(lambda: devary)
return devary
class Out(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, conv = auto_device(
self.value,
copy=False,
stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
return devary
class InOut(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, conv = auto_device(
self.value,
stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
return devary
def wrap_arg(value, default=InOut):
return value if isinstance(value, ArgHint) else default(value)
__all__ = [
'In',
'Out',
'InOut',
'ArgHint',
'wrap_arg',
]
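# Hedged usage sketch (not part of the original module): wrap_arg() normalises a
# kernel argument, keeping explicit hints as-is and defaulting bare values to
# InOut. The numpy array below is just a placeholder value.
if __name__ == "__main__":
    import numpy as np  # assumed to be available alongside numba

    buf = np.zeros(4)
    assert isinstance(wrap_arg(buf), InOut)    # bare value: copied both ways
    assert isinstance(wrap_arg(In(buf)), In)   # explicit hint is passed through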
| thanatos/lets-encrypt-preview | letsencrypt/plugins/webroot.py | Python | apache-2.0 | 6,187 | 0.000808 |
"""Webroot plugin."""
import errno
import logging
import os
from collections import defaultdict
import zope.interface
import six
from acme import challenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(common.Plugin):
"""Webroot Authenticator."""
description = "Webroot Authenticator"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
# --webroot-path and --webroot-map are added in cli.py because they
# are parsed in conjunction with --domains
pass
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=missing-docstring,no-self-use,unused-argument
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.full_roots = {}
self.performed = defaultdict(set)
def prepare(self): # pylint: disable=missing-docstring
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
if not os.path.isdir(path):
raise errors.PluginError(path + " does not exist or is not a directory")
self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = os.umask(0o022)
try:
# This is coupled with the "umask" call above because
# os.makedirs's "mode" parameter may not always work:
# https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
os.makedirs(self.full_roots[name], 0o0755)
# Set owner as parent directory if possible
try:
stat_path = os.stat(path)
os.chown(self.full_roots[name], stat_path.st_uid,
stat_path.st_gid)
except OSError as exception:
if exception.errno == errno.EACCES:
logger.debug("Insufficient permissions to change owner and uid - ignoring")
else:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}", name, exception)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}", name, exception)
finally:
os.umask(old_umask)
def perform(self, achalls): # pylint: disable=missing-docstring
assert self.full_roots, "Webroot plugin appears to be missing webroot map"
return [self._perform_single(achall) for achall in achalls]
def _get_root_path(self, achall):
try:
path = self.full_roots[achall.domain]
except KeyError:
raise errors.PluginError("Missing --webroot-path for domain: {0}"
.format(achall.domain))
if not os.path.exists(path):
raise errors.PluginError("Mysteriously missing path {0} for domain: {1}"
.format(path, achall.domain))
return path
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self._get_root_path(achall)
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = os.umask(0o022)
try:
with open(validation_path, "w") as validation_file:
validation_file.write(validation.encode())
finally:
os.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-docstring
for achall in achalls:
root_path = self._get_root_path(achall)
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
for root_path, achalls in six.iteritems(self.performed):
if not achalls:
try:
os.rmdir(root_path)
logger.debug("All challenges cleaned up, removing %s",
root_path)
except OSError as exc:
if exc.errno == errno.ENOTEMPTY:
logger.debug("Challenges cleaned up but %s not empty",
root_path)
else:
raise
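# Illustration only (not part of the plugin): how a validation file path is laid
# out for a hypothetical webroot and token, mirroring _get_validation_path() above.
if __name__ == "__main__":
    demo_root = os.path.join("/var/www/html", challenges.HTTP01.URI_ROOT_PATH)
    print(os.path.join(demo_root, "some-made-up-token"))
    # -> /var/www/html/.well-known/acme-challenge/some-made-up-token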
| andrewosh/thunder-streaming | python/thunder_streaming/feeder/core.py | Python | apache-2.0 | 4,148 | 0.004339 |
"""Core functions used by the Thunder streaming feeder scripts, including asynchronous checking for new files.
"""
import errno
import os
import time
from thunder_streaming.feeder.utils.filenames import getFilenamePostfix, getFilenamePrefix
from thunder_streaming.feeder.utils.logger import global_logger
from thunder_streaming.feeder.utils.regex import RegexMatchToQueueName, RegexMatchToTimepointString
from thunder_streaming.feeder.utils.updating_walk import updating_walk as uw
def file_check_generator(source_dir, mod_buffer_time, max_files=-1, filename_predicate=None):
"""Generator function that polls the passed directory tree for new files, using the updating_walk.py logic.
This generator will restart the underlying updating_walk at the last seen file if the updating walk runs
out of available files.
"""
next_batch_file, walker_restart_file = None, None
walker = uw(source_dir, filefilterfunc=filename_predicate)
while True:
filebatch = []
files_left = max_files
try:
if not next_batch_file:
next_batch_file = next(walker)
walker_restart_file = next_batch_file
delta = time.time() - os.stat(next_batch_file).st_mtime
while delta > mod_buffer_time and files_left:
filebatch.append(next_batch_file)
files_left -= 1
next_batch_file = None # reset in case of exception on next line
next_batch_file = next(walker)
delta = time.time() - os.stat(next_batch_file).st_mtime
walker_restart_file = next_batch_file
except StopIteration:
# no files left, restart after polling interval
if not filebatch:
global_logger.get().info("Out of files, waiting...")
walker = uw(source_dir, walker_restart_file, filefilterfunc=filename_predicate)
yield filebatch
def build_filecheck_generators(source_dir_or_dirs, mod_buffer_time, max_files=-1, filename_predicate=None):
if isinstance(source_dir_or_dirs, basestring):
source_dirs = [source_dir_or_dirs]
else:
source_dirs = source_dir_or_dirs
file_checkers = [file_check_generator(source_dir, mod_buffer_time,
max_files=max_files, filename_predicate=filename_predicate)
for source_dir in source_dirs]
return file_checkers
def runloop(file_checkers, feeder, poll_time):
""" Main program loop. This will check for new files in the passed input directories using file_check_generator,
push any new files found into the passed Feeder subclass via its feed() method, wait for poll_time,
and repeat forever.
"""
last_time = time.time()
while True:
for file_checker in file_checkers:
# this should never throw StopIteration, will just yield an empty list if nothing is avail:
filebatch = feeder.feed(next(file_checker))
if filebatch:
global_logger.get().info("Pushed %d files, last: %s", len(filebatch), os.path.basename(filebatch[-1]))
removedfiles = feeder.clean()
if removedfiles:
global_logger.get().info("Removed %d temp files, last: %s", len(removedfiles), os.path.basename(removedfiles[-1]))
next_time = last_time + poll_time
try:
time.sleep(next_time - time.time())
except IOError, e:
if e.errno == errno.EINVAL:
# passed a negative number, which is fine, just don't sleep
pass
else:
raise e
last_time = next_time
def get_parsing_functions(opts):
if opts.prefix_regex_file:
fname_to_qname_fcn = RegexMatchToQueueName.fromFile(opts.prefix_regex_file).queueName
else:
fname_to_qname_fcn = getFilenamePrefix
if opts.timepoint_regex_file:
fname_to_timepoint_fcn = RegexMatchToTimepointString.fromFile(opts.timepoint_regex_file).timepoint
else:
fname_to_timepoint_fcn = getFilenamePostfix
return fname_to_qname_fcn, fname_to_timepoint_fcn
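# Hedged usage sketch (not part of the module): a stub feeder showing how
# build_filecheck_generators() and runloop() fit together. Real callers pass one
# of the project's Feeder subclasses; the directory below is a placeholder.
if __name__ == "__main__":
    class _EchoFeeder(object):
        def feed(self, filenames):
            return filenames  # pretend every new file was pushed downstream
        def clean(self):
            return []         # nothing to clean up in this stub

    checkers = build_filecheck_generators("/tmp/incoming", mod_buffer_time=5.0)
    runloop(checkers, _EchoFeeder(), poll_time=1.0)  # polls forever; Ctrl-C to stop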
| bytesized/Snappy-Symbolication-Server | tests/test_DiskCache.py | Python | mpl-2.0 | 6,210 | 0.002576 |
import unittest
import os
import shutil
import json
import memcache
import testUtils
testUtils.addSymServerToPath()
import quickstart
LIB_NAME = "xul.pdb"
BREAKPAD_ID = "44E4EC8C2F41492B9369D6B9A059577C2"
EXPECTED_HASH = "6e5e6e422151b7b557d913c0ff86d7cf"
class testDiskCache(unittest.TestCase):
def setUp(self):
self.config = testUtils.getDefaultConfig()
self.tempDirs = testUtils.setConfigToUseTempDirs(self.config)
# Only need DiskCache for this one
self.config['quickstart']['memcached']['start'] = False
self.config['quickstart']['SymServer']['start'] = False
if not quickstart.quickstart(configJSON=json.dumps(self.config)):
self.fail("Unable to start servers")
memcache.Client(self.config['SymServer']['memcachedServers'], debug=0).flush_all()
def tearDown(self):
if not quickstart.quickstart(configJSON=json.dumps(self.config), stop=True):
print "WARNING: Servers were not properly stopped!"
for tempDir in self.tempDirs:
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
def test_verifyCachedSymbolFile(self):
request = {
"debug": True,
"action": "cacheAddRaw",
"libName": LIB_NAME,
"breakpadId": BREAKPAD_ID
}
request = json.dumps(request)
response = testUtils.symServerRequest(request, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response")
downloadHash = testUtils.md5(response['path'])
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Cached symbol file hash does not match the expected hash")
def test_verifyCache(self):
# The DiskCache was created with a brand new cache directory. There should
# be nothing in the cache
request = {
"debug": True,
"action": "cacheExists",
"libName": LIB_NAME,
"breakpadId": BREAKPAD_ID
}
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertFalse(response['exists'],
"Value is still in cache after eviction")
request['action'] = 'cacheAddRaw'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Add")
downloadHash = testUtils.md5(response['path'])
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Added symbol file hash does not match the expected hash")
request['action'] = 'cacheExists'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertTrue(response['exists'],
"Value not in cache after adding")
request['action'] = 'cacheGet'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Get")
cachePath = response['path']
downloadHash = testUtils.md5(cachePath)
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Added symbol file hash does not match the expected hash")
request['action'] = 'cacheEvict'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('success', response,
"No result provided in response to Evict")
self.assertTrue(response['success'], "Cache eviction unsuccessful.")
self.assertFalse(os.path.exists(cachePath),
"Cache file should not exist after eviction")
request['action'] = 'cacheExists'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertFalse(response['exists'],
"Value is still in cache after eviction")
request['action'] = 'cacheGet'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Get")
# Don't test the md5 hash. We didn't get the raw symbol file.
self.assertTrue(os.path.exists(response['path']),
"Cached file does not exist after a cacheGet")
if __name__ == '__main__':
unittest.main()
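# For reference, reconstructed from the tests above (not from separate server
# docs): every DiskCache request is a JSON object of this shape, with "action"
# one of cacheExists / cacheAddRaw / cacheGet / cacheEvict, and the response
# carries "exists", "path" or "success" depending on the action.
#
#     {
#         "debug": True,
#         "action": "cacheGet",
#         "libName": "xul.pdb",
#         "breakpadId": "44E4EC8C2F41492B9369D6B9A059577C2"
#     }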
| CloudVLab/professional-services | examples/dialogflow-entities-example/main.py | Python | apache-2.0 | 4,374 | 0.000914 |
# python3
# ==============================================================================
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cloud function to create and update entities in Dialogflow.
This module is an example of how to create and update entities for Dialogflow.
"""
import dialogflow_v2
import flask
import os
from typing import Dict, List
def entities_builder(request: flask.Request):
"""HTTP Cloud Function that create and update entities in Dialogflow.
Args:
request (flask.Request): The request object. More info:
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
"""
request_json = request.get_json(silent=True)
arguments = Arguments(**request_json)
project_id = arguments.project_id
client = get_dialogflow_client()
parent = get_agent(client, project_id)
if request_json and arguments.entities:
# Create entities one by one.
create_entities_type(client, arguments.entities, parent)
return
elif request_json and arguments.entities_batch:
# Create in batch using entity_type_batch_inline.
arguments.pre_process_entities_batch_name()
client.batch_update_entity_types(
parent=parent, entity_type_batch_inline=arguments.entities_batch)
return
else:
# Create in batch using entity_type_batch_uri.
response = client.batch_update_entity_types(
parent=parent, entity_type_batch_uri=arguments.bucket)
def callback(operation_future):
"""Returns a callback.
This example uses futures for long-running operations returned from Google Cloud APIs.
These futures are used asynchronously using callbacks and Operation.add_done_callback
More info: https://googleapis.dev/python/google-api-core/1.14.3/futures.html
"""
operation_future.result()
response.add_done_callback(callback)
def create_entities_type(client, entities, parent):
"""Creates entities.
Args:
client: dialogflow_v2.EntityTypesClient
entities: list of EntityTypes to create
parent: fully-qualified project_agent string
"""
for entity_type in entities:
client.create_entity_type(parent, entity_type)
def get_dialogflow_client():
"""Returns the dialogflow entity types client."""
return dialogflow_v2.EntityTypesClient()
def get_agent(client: dialogflow_v2.EntityTypesClient, project_id):
"""Returns a fully-qualified project_agent string."""
return client.project_agent_path(project_id)
class Arguments:
"""Returns the arguments pass to the cloud function or default values.
Args:
entities: a list of EntityType
entities_batch: a dict of EntityTypeBatch
project_id: id of a project in GCP
bucket: a URI to a Google Cloud Storage file containing entity types to update or create.
"""
def __init__(self,
entities: List = [],
entities_batch: Dict = {},
project_id: str = '<project-id>',
bucket: str = 'gs://dialog_entities/entities.json'):
"""Initialize the cloud function with the information pass in the call"""
self.project_id = project_id
self.entities = entities
self.entities_batch = entities_batch
self.bucket = bucket
def pre_process_entities_batch_name(self):
"""Returns a fully qualify name of the entities name.
The format is projects/<project-id>/agent/entityTypes/<entity-id>
"""
for entity in self.entities_batch['entity_types']:
if all(x in entity for x in ['name']):
entity['name'] = os.path.join('projects', self.project_id,
'agent/entityTypes',
entity['name'])
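# Hedged illustration (not part of the original file): one possible request body
# for the "entities" branch of entities_builder(). The field names follow the
# dialogflow_v2 EntityType message; the concrete values are invented.
#
#     {
#         "project_id": "my-gcp-project",
#         "entities": [
#             {
#                 "display_name": "size",
#                 "kind": "KIND_MAP",
#                 "entities": [
#                     {"value": "small", "synonyms": ["small", "tiny"]},
#                     {"value": "large", "synonyms": ["large", "big"]}
#                 ]
#             }
#         ]
#     }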
| partofthething/home-assistant | tests/components/wemo/conftest.py | Python | apache-2.0 | 2,417 | 0 |
"""Fixtures for pywemo."""
import asyncio
from unittest.mock import create_autospec, patch
import pytest
import pywemo
from homeassistant.components.wemo import CONF_DISCOVERY, CONF_STATIC
from homeassistant.components.wemo.const import DOMAIN
from homeassistant.setup import async_setup_component
MOCK_HOST = "127.0.0.1"
MOCK_PORT = 50000
MOCK_NAME = "WemoDeviceName"
MOCK_SERIAL_NUMBER = "WemoSerialNumber"
@pytest.fixture(name="pywemo_model")
def pywemo_model_fixture():
"""Fixture containing a pywemo class name used by pywemo_device_fixture."""
return "Insight"
@pytest.fixture(name="pywemo_registry")
def pywemo_registry_fixture():
"""Fixture for SubscriptionRegistry instances."""
registry = create_autospec(pywemo.SubscriptionRegistry, instance=True)
registry.callbacks = {}
registry.semaphore = asyncio.Semaphore(value=0)
def on_func(device, type_filter, callback):
registry.callbacks[device.name] = callback
registry.semaphore.release()
registry.on.side_effect = on_func
with patch("pywemo.SubscriptionRegistry", return_value=registry):
yield registry
@pytest.fixture(name="pywemo_device")
def pywemo_device_fixture(pywemo_registry, pywemo_model):
"""Fixture for WeMoDevice instances."""
device = create_autospec(getattr(pywemo, pywemo_model), instance=True)
device.host = MOCK_HOST
device.port = MOCK_PORT
device.name = MOCK_NAME
device.serialnumber = MOCK_SERIAL_NUMBER
device.model_name = pywemo_model
device.get_state.return_value = 0 # Default to Off
url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml"
with patch("pywemo.setup_url_for_address", return_value=url), patch(
"pywemo.
|
discovery.device_from_description", return_value=device
):
yield device
@pytest.fixture(name="wemo_entity")
async def async_wemo_entity_fixture(hass, pywemo_device):
"""Fixture for a Wemo entity in hass."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DISCOVERY: False,
CONF_STATIC: [f"{MOCK_HOST}:{MOCK_PORT}"],
},
},
)
await hass.async_block_till_done()
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entity_entries = list(entity_registry.entities.values())
assert len(entity_entries) == 1
yield entity_entries[0]
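# Hedged sketch (not part of the fixture module): roughly what a test consuming
# these fixtures looks like. The assertions are assumptions made for
# illustration, not taken from the integration's own tests.
#
#     async def test_wemo_entity_is_registered(hass, wemo_entity):
#         assert wemo_entity.domain == "switch"
#         assert wemo_entity.unique_id == MOCK_SERIAL_NUMBER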
| adamcandy/QGIS-Meshing | scripts/generate_field_xyz_data.py | Python | lgpl-2.1 | 12,891 | 0.035761 |
import numpy as np
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
from subprocess import call
import sys
import shapefile
import os
class Commands( object ):
def sysArgs( self ):
self.outfile = self.ArgList.pop()
while len(self.ArgList)>0:
carg = self.ArgList.pop(0)
eval(self.commands[carg])
#def help_func( self ):
# print self.commands
def gauss_set( self ):
#form "(cont,a,b,mean,std),..."
gausStr = self.ArgList.pop(0)
gausStr = gausStr.split(')')
gausStr[0] = ' '+gausStr[0]
gausStr = map(lambda x: x[2:],gausStr)
gausStr.pop()
for i in range(len(gausStr)):
self.f_add()
gausStr = map(lambda x: x.split(','), gausStr)
self.Guass = map(lambda x: map(lambda y: float(y), x), gausStr)
def sinx_set( self ):
#form "(cont,w,phi),..."
sinxStr = self.ArgList.pop(0)
sinxStr = sinxStr.split(')')
sinxStr[0] = ' '+sinxStr[0]
sinxStr = map(lambda x: x[2:],sinxStr)
sinxStr.pop()
for i in range(len(sinxStr)):
self.f_add()
sinxStr = map(lambda x: x.split(','), sinxStr)
self.Sinx = map(lambda x: map(lambda y: float(y), x), sinxStr)
def siny_set( self ):
#form "(cont,w,phi),..."
sinyStr = self.ArgList.pop(0)
sinyStr = sinyStr.split(')')
sinyStr[0] = ' '+sinyStr[0]
sinyStr = map(lambda x: x[2:],sinyStr)
sinyStr.pop()
for i in range(len(sinyStr)):
self.f_add()
sinyStr = map(lambda x: x.split(','), sinyStr)
self.Siny = map(lambda x: map(lambda y: float(y), x), sinyStr)
def lon_set( self ):
#form "(cont,a),..."
lonStr = self.ArgList.pop(0)
lonStr = lonStr.split(')')
lonStr[0] = ' '+lonStr[0]
lonStr = map(lambda x: x[2:],lonStr)
lonStr.pop()
for i in range(len(lonStr)):
self.f_add()
lonStr = map(lambda x: x.split(','), lonStr)
self.Lon = map(lambda x: map(lambda y: float(y), x), lonStr)
def lat_set( self ):
#form "(cont,a),..."
latStr = self.ArgList.pop(0)
latStr = latStr.split(')')
latStr[0] = ' '+latStr[0]
latStr = map(lambda x: x[2:],latStr)
latStr.pop()
for i in range(len(latStr)):
self.f_add()
latStr = map(lambda x: x.split(','), latStr)
self.Lat = map(lambda x: map(lambda y: float(y), x), latStr)
def sinxy( self ):
#form "(cont,w,u,phi,psi),..."
sinxyStr = self.ArgList.pop(0)
sinxyStr = sinxyStr.split(')')
sinxyStr[0] = ' '+sinxyStr[0]
sinxyStr = map(lambda x: x[2:],sinxyStr)
sinxyStr.pop()
for i in range(len(sinxyStr)):
self.f_add()
sinxyStr = map(lambda x: x.split(','), sinxyStr)
self.Sinxy = map(lambda x: map(lambda y: float(y), x), sinxyStr)
def f_add( self ):
self.filelist += [self.f_base+str(self.f_no)]
self.f_no += 1
def shortern_func( self ):
pass
def load_set( self ):
self.Load = True
def anls_set( self ):
#form "(cont,a,b,mean,std),..."
anlsStr = self.ArgList.pop(0)
anlsStr = anlsStr.split(')')
anlsStr[0] = anlsStr[0][1:]
anlsStr[1:] = anlsStr[1:][2:]
for i in range(len(anlsStr)):
self.f_add()
anlsStr = map(lambda x: x.split(','), anlsStr)
self.Annulus = map(lambda x: map(lambda y: float(y), x), anlsStr)
def join_set( self ):
self.Join = True
class NcGenerate( object ):
def nc_generate( self ):
file_insts = map(lambda x: open(x+'.xyz','w'), self.filelist)
lrud = [np.min(map(lambda x: x[0],self.Lon)), \
np.max(map(lambda x: x[0],self.Lon)), \
np.min(map(lambda x: x[0],self.Lat)), \
np.max(map(lambda x: x[0],self.Lat))]
print lrud
for x in np.linspace(lrud[0]-1.0, lrud[1]+1.0,num=(lrud[1]-lrud[0])/0.1):
for y in np.linspace(lrud[2]-1.0, lrud[3]+1.0,num=(lrud[3]-lrud[2])/0.1):
insts_no = 0
for tup in self.Guass:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.gausian( x, y, tup))+'\n')
insts_no += 1
for tup in self.Sinx:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.sinx( x, tup))+'\n')
insts_no += 1
for tup in self.Siny:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.siny( y, tup))+'\n')
insts_no += 1
for tup in self.Sinxy:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.sinxy( x, y, tup))+'\n')
insts_no += 1
for tup in self.Lon:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.lon( x, tup))+'\n')
insts_no += 1
for tup in self.Lat:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.lat( y, tup))+'\n')
insts_no += 1
for tup in self.Annulus:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.annulus( x, y, tup))+'\n')
insts_no += 1
map(lambda x: x.close(), file_insts)
map(lambda x: call(["GMT","surface", x+".xyz", '-G'+x+".nc", "-I0.1/0.1", "-Rd"+str(lrud[0]-1.0)+"/"+str(lrud[1]+1.0)+"/"+str(lrud[2]-1.0)+"/"+str(lrud[3]+1.0)]), self.filelist)
call(["rm","-f"]+map(lambda x: x+".xyz", self.filelist))
def gausian( self, x, y, tup ):
r = np.sqrt((x-tup[1])**2 + (y-tup[2])**2)
mean = tup[3]
std = tup[4]
return (100.0/(std*np.sqrt(2.0*np.pi)))*np.exp(-0.5*((r-mean)/std)**2)
def sinx( self, x, tup):
return np.sin(float(tup[1])*x*(np.pi/180.)+tup[2])
def siny( self, y, tup ):
return np.sin(float(tup[1])*y*(np.pi/180.)+tup[2])
def sinxy( self, x, y, tup ):
zx = np.sin(float(tup[1])*x*(np.pi/180.)+tup[3])
zy = np.sin(float(tup[2])*y*(np.pi/180.)+tup[4])
return 0.5-abs(zx*zy)
def lon( self, x, tup ):
return tup[1]*x
def lat( self, y, tup ):
return tup[1]*y
def annulus( self, x, y, tup ): #ignore
r = np.sqrt((x-tup[1])**2 + (y-tup[2])**2)
mean = tup[3]
std = tup[4]
return (1.0/(std*np.sqrt(2.0*np.pi)))*np.exp(-0.5*((r-mean)/std)**2)
class ShpGenerate( object ):
def shp_generate( self ):
insts_no = 0
for tup in self.Guass:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Sinx:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Siny:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Sinxy:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Lon:
self.contourmap[insts_no] = tup[0]
self.lonlatfiles += [insts_no]
insts_no += 1
for tup in self.Lat:
self.contourmap[insts_no] = tup[0]
self.lonlatfiles += [insts_no]
insts_no += 1
for tup in self.Annulus:
self.contourmap[insts_no] = tup[0]
insts_no += 1
map(lambda i: \
call(["gdal_contour","-fl",str(self.contourmap[i]),str(self.filelist[i])+'.nc',str(self
| ScienceWorldCA/domelights | backend/scripts/checkerboard.py | Python | apache-2.0 | 510 | 0.041176 |
# Generates alternating frames of a checkerboard pattern.
Q_STARTING_INDEX = 150
UNIVERSE_LIGHTS = 144 #144 for side 1, #116 for side 2
flip = 0
for i in range(1,200): # 5 seconds * 40 / second (frame)
print "Record Cue " + str(Q_STARTING_INDEX + i)
for j in range(1, UNIVERSE_LIGHTS * 3, 1): # 3 channels / light (channel)
value = 255 if flip else 0
flip = not flip
print "C"+ str(j)+ " @ #"+str(value)+";"
flip = not flip # switch the checkerboard for the next frame
print "Record
|
Stop"
| capone212/crashtec | src/crashtec/db/provider/test/testroutines.py | Python | gpl-3.0 | 7,533 | 0.010885 |
'''
Created on 12.05.2013
@author: capone
'''
import unittest
from mock import patch
from mock import MagicMock
from crashtec.db.provider.routines import Record
from crashtec.db.provider import routines
from crashtec.utils.exceptions import CtCriticalError
def _get_sample_record():
return {'key1' : 'value2', 'key2' : 'value2' }
class TestRecord(unittest.TestCase):
def test01_get_value(self):
record = Record(_get_sample_record())
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Getter does not work')
def test02_set_values(self):
record = Record()
for key, value in _get_sample_record().iteritems():
record[key] = value
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Setter does not work')
def test03_update(self):
record = Record(_get_sample_record())
record['mock_key'] = 'mock_value'
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Setter does not work')
self.assertEqual('mock_value', record['mock_key'],
'Setter does not work')
def test04_updated_values(self):
record = Record(_get_sample_record())
initial = _get_sample_record()
modifier = {initial.keys()[1] : 'garbage', 'mock_key' : 'mock_value'}
for key, value in modifier.iteritems():
record[key] = value
updated_values = record.updated_values()
self.assertEqual(updated_values, modifier)
# Modify second time
modifier2 = {initial.keys()[0] : 'garbage2: reload',
'mock_key2' : 'mock_value2'}
for key, value in modifier2.iteritems():
record[key] = value
# Validate
modifier2.update(modifier)
updated_values = record.updated_values()
self.assertEqual(updated_values, modifier2)
class TestCursor(unittest.TestCase):
def test_fetch_one_returns_record(self):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchone'])
mock_impl.fetchone = MagicMock(return_value = self.get_sample_record())
# Do test
cursor = routines.Cursor(mock_impl)
record = cursor.fetch_one()
# Validate results
self.check_equal(record, self.get_sample_record())
def test_fetch_one_returns_none(self):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchone'])
mock_impl.fetchone = MagicMock(return_value = None)
# Do test
cursor = routines.Cursor(mock_impl)
record = cursor.fetch_one()
# Validate results
self.assertEqual(record, None)
def test_fetch_many_returns_records(self):
self.check_fetch_many(5)
def test_fetch_many_returns_empty(self):
self.check_fetch_many(0)
def test_fetch_all_returns_records(self):
self.check_fetch_all(5)
def test_fetch_all_returns_empty(self):
self.check_fetch_all(0)
def check_fetch_many(self, count):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchmany'])
mock_impl.fetchmany = MagicMock(return_value = \
(self.get_sample_record() for x in range(count)))
# Do test
cursor = routines.Cursor(mock_impl)
records = cursor.fetch_many(count)
# Validate results
mock_impl.fetchmany.assert_called_with(count)
self.assertEqual(len(records), count)
for record in records:
self.check_equal(record, self.get_sample_record())
def check_fetch_all(self, count):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchall'])
mock_impl.fetchall = MagicMock(return_value = \
(self.get_sample_record() for x in range(count)))
# Do test
cursor = routines.Cursor(mock_impl)
records = cursor.fetch_all()
# Validate results
mock_impl.fetchall.assert_called_with()
self.assertEqual(len(records), count)
for record in records:
self.check_equal(record, self.get_sample_record())
def check_equal(self, record, dict_value):
self.assertEqual(record.keys(), dict_value.keys(),
'keys are not equal')
self.assertEqual(record.values(), dict_value.values(),
'values are not equal')
def get_sample_record(self):
return {'key1':'value1', 'key2':'value2'}
@patch('crashtec.db.provider.routines.exec_sql')
class Test_create_new_record(unittest.TestCase):
def test_with_dictionary(self, pached_exec_sql):
TABLE_NAME = 'mock_table'
mock_record = {'field1' : 'value1', 'field2' : 'value2'}
routines.create_new_record(TABLE_NAME, mock_record)
EXPECTED_SQL = 'INSERT INTO mock_table (field2, field1) VALUES (%s, %s);'
# Check results
(sql_string, values), keywords = pached_exec_sql.call_args
self.assertEqual(EXPECTED_SQL, sql_string,'sql strings does not match')
self.assertEqual(list(mock_record.values()),
list(values))
def test_with_Record(self, pached_exec_sql):
TABLE_NAME = 'mock_table'
mock_record = {'field1' : 'value1', 'field2' : 'value2'}
routines.create_new_record(TABLE_NAME, Record(mock_record))
EXPECTED_SQL = 'INSERT INTO mock_table (field2, field1) VALUES (%s, %s);'
# Check results
(sql_string, values), keywords = pached_exec_sql.call_args
self.assertEqual(EXPECTED_SQL, sql_string,'sql strings does not match')
self.assertEqual(list(mock_record.values()),
list(values))
@patch('crashtec.db.provider.routines.exec_sql')
class Test_update_record(unittest.TestCase):
def test_key_field_updated(self, pached_exec_sql):
record = Record()
for key, value in self.get_mock_record().iteritems():
record[key] = value
(sql_string, values), keywords = self._do_test(record, pached_exec_sql)
EXPECTED_STRING = 'update mock_table SET field2=%s, field1=%s WHERE id = %s'
self.assertEqual(EXPECTED_STRING, sql_string)
self.assertEqual(values, record.values())
def test_no_updated_values(self, pached_exec_sql):
self._do_test(Record(self.get_mock_record()), pached_exec_sql)
self.assertFalse(pached_exec_sql.called, 'Should not be called')
def test_partial_updated(self, pached_exec_sql):
record = Record(self.get_mock_record())
MOCK_VALUE = 'mock_value'
record['field2'] = MOCK_VALUE
(sql_string, values), keywords = self._do_test(record, pached_exec_sql)
# Check results
EXPECTED_SQL = 'update mock_table SET field2=%s WHERE id = %s'
self.assertEqual(EXPECTED_SQL, sql_string)
self.assertEqual([MOCK_VALUE, record['id']], list(values))
def _do_test(self, mock_record, pached_exec_sql):
MOCK_TABLE_NAME = 'mock_table'
routines.update_record(MOCK_TABLE_NAME, mock_record)
return pached_exec_sql.call_args
def get_mock_record(self):
return {'id' : 10, 'field1' : 'value1', 'field2' : 'value2'}
if __name__ == '__main__':
unittest.main()
| iogf/ginger | setup.py | Python | bsd-2-clause | 307 | 0.003257 |
#! /usr/bin/env python
from distutils.core import setup
setup(name="ginger",
version="0.1",
description="HTML/CSS in python.",
packages=["ginger"],
scripts=["ginger-designer"],
author="Iury O. G. Figueiredo",
author_email="ioliveira@id.uff.br")
| kleintom/dxr | tooling/peep.py | Python | mit | 36,017 | 0.001083 |
#!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
| derickr/openstreetbugs | stats/stats.py | Python | gpl-3.0 | 2,957 | 0.016233 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Mitja Kleider
#
# This file is part of Openstreetbugs.
#
# Openstreetbugs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Openstreetbugs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Openstreetbugs. If not, see <http://www.gnu.org/licenses/>.
#
import MySQLdb
from datetime import datetime
import db_config # DATABASE CONFIGURATION
def main():
print "Content-type: text/html\n"
print """<html>
<head>
<title>Stats (OpenStreetBugs)</title>
</head>
<body>
<h1>Stats</h1>
<p><a href="recentChanges">Recent Changes</a></p>
<p>All stats are live. (As of 2009-04-28, the database is synchronized with appspot database daily.)</p>
<h2>Bugs (total)</h2>"""
connection = MySQLdb.connect(db_config.host, user=db_config.user, passwd=db_config.password, db=db_config.dbname)
cursor = connection.cursor()
cursor.execute("SELECT type,COUNT(*) FROM bugs GROUP BY type;")
result = cursor.fetchall()
bugcount = {}
bugcount["open"] = result[0][1]
bugcount["closed"] = result[1][1]
bugcount["total"] = bugcount["open"] + bugcount["closed"]
print """<table border="1">
<tr><th>open</th><th>closed</th><th>total</th></tr>
<tr><td>%(open)s</td><td>%(closed)s</td><td>%(total)s</td></tr>
</table>""" % bugcount
print """<h2>Monthly changes</h2>
<p>Please note that the current month's data will not be complete until next month.</p>
<table border="1">"""
# TODO loop for last 12 months
print "<tr><th>month</th><th>new</th><th>closed</th>"
for interval in range(-1,12):
# select bug created in the month [current month - interval months]
cursor.execute("""SELECT DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH) AS month, COUNT(*) as newbugs FROM bugs WHERE date_created < DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%interval+""" MONTH), "%Y-%m-01") AND date_created >= DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH), "%Y-%m-01");""")
result = cursor.fetchone()
month = datetime.strftime(result[0],"%b %Y")
newbugs = result[1]
cursor.execute("""SELECT COUNT(*) as closedbugs FROM bugs WHERE last_changed < DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%interval+""" MONTH), "%Y-%m-01") AND last_changed >= DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH), "%Y-%m-01");""")
result = cursor.fetchone()
closedbugs = result[0]
print "<tr><td>%s</td><td>%s<
|
/td><td>%s</td></tr>" % (month, newbugs, closedbugs)
print "</body>\n</html>"
main()
| healthchecks/healthchecks | hc/front/tests/test_update_timeout.py | Python | bsd-3-clause | 7,228 | 0.000277 |
from datetime import timedelta as td
from django.utils import timezone
from hc.api.models import Check
from hc.test import BaseTestCase
class UpdateTimeoutTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.check = Check(project=self.project, status="up")
self.check.last_ping = timezone.now()
self.check.save()
self.url = "/checks/%s/timeout/" % self.check.code
self.redirect_url = "/projects/%s/checks/" % self.project.code
def test_it_works(self):
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
self.assertEqual(self.check.timeout.total_seconds(), 3600)
self.assertEqual(self.check.grace.total_seconds(), 60)
# alert_after should be updated too
expected_aa = self.check.last_ping + td(seconds=3600 + 60)
self.assertEqual(self.check.alert_after, expected_aa)
def test_it_does_not_update_status_to_up(self):
self.check.last_ping = timezone.now() - td(days=2)
self.check.status = "down"
self.check.save()
# 1 week:
payload = {"kind": "simple", "timeout": 3600 * 24 * 7, "grace": 60}
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url, data=payload)
self.check.refresh_from_db()
self.assertEqual(self.check.status, "down")
def test_it_updates_status_to_down(self):
self.check.last_ping = timezone.now() - td(hours=1)
self.check.status = "up"
self.check.alert_after = self.check.going_down_after()
self.check.save()
# 1 + 1 minute:
payload = {"kind": "simple", "timeout": 60, "grace": 60}
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url, data=payload)
self.check.refresh_from_db()
self.assertEqual(self.check.status, "down")
self.assertIsNone(self.check.alert_after)
def test_it_saves_cron_expression(self):
payload = {"kind": "cron", "schedule": "5 * * * *", "tz": "UTC", "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "cron")
self.assertEqual(self.check.schedule, "5 * * * *")
def test_it_validates_cron_expression(self):
self.client.login(username="alice@example.org", password="password")
samples = ["* invalid *", "1,2 61 * * *", "0 0 31 2 *"]
for sample in samples:
payload = {"kind": "cron", "schedule": sample, "tz": "UTC", "grace": 60}
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_rejects_six_field_cron_expression(self):
payload = {
"kind": "cron",
"schedule": "* * * * * *", # six fields instead of five
"tz": "UTC",
"grace": 60,
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_validates_tz(self):
payload = {
"kind": "cron",
"schedule": "* * * * *",
"tz": "not-a-tz",
"grace": 60,
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_rejects_missing_schedule(self):
# tz field is omitted so this should fail:
payload = {"kind": "cron", "grace": 60, "tz": "UTC"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
def test_it_rejects_missing_tz(self):
# tz field is omitted so this should fail:
payload = {"kind": "cron", "schedule": "* * * * *", "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
def test_team_access_works(self):
payload = {"kind": "simple", "timeout": 7200, "grace": 60}
# Logging in as bob, not alice. Bob has team access so this
# should work.
self.client.login(username="bob@example.org", password="password")
self.client.post(self.url, data=payload)
check = Check.objects.get(code=self.check.code)
assert check.timeout.total_seconds() == 7200
def test_it_handles_bad_uuid(self):
url = "/checks/not-uuid/timeout/"
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_handles_missing_uuid(self):
# Valid UUID but there is no check for it:
url = "/checks/6837d6ec-fc08-4da5-a67f-08a9ed1ccf62/timeout/"
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(url, data=payload)
assert r.status_code == 404
def test_it_checks_ownership(self):
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="charlie@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_rejects_get(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 405)
def test_it_allows_cross_team_access(self):
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
def test_it_requires_rw_access(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 403)
| FreshXOpenSource/wallaby-base | wallaby/pf/peer/sm.py | Python | bsd-2-clause | 4,120 | 0.00267 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
class SMPeer(Peer):
def __init__(self, room, statePillow, states=[], initState="Start"):
Peer.__init__(self, room)
self._routings = set()
self._transitions = set()
self._callbacks = set()
self._states = {} #Name->State
self._statePillow = statePillow
self._initState = initState
self._state = None
# if len(states) > 0:
# self._state = states[0]
# else:
# pass #TODO: Throw exception
for state in states:
self._states[state._name] = state
state._setStateMachine(self)
for pillow in self._callbacks:
self._catch(pillow, self._callback)
for pillow in self._routings:
self._catch(pillow, self._routePillow)
for pillow in self._transitions:
self._catch(pillow, self._transitionState)
def initialize(self):
self.switchState(self._initState)
def addRoutings(self, pillows):
self._routings = self._routings.union(set(pillows))
def addTransitions(self, pillows):
self._transitions = self._transitions.union(set(pillows))
def addCallbacks(self, pillows):
self._callbacks = self._callbacks.union(set(pillows))
def _routePillow(self, *args):
self._state._routePillow(*args)
def _transitionState(self, *args):
self._state._transitionState(*args)
def _callback(self, *args):
self._state._callback(*args)
def switchState(self, stateName):
# Already in correct state
if self._state != None and self._state._name == stateName: return
# print "Switch to state", stateName, "in context", self._roomName
if stateName in self._states:
self._state = self._states[stateName]
self._throw(self._statePillow, stateName)
self._state._stateSwitched()
class State:
def __init__(self, name=None):
if name:
self._name = name
else:
self._name = self.__class__.__name__
self._stateMachine = None
self._routings = {}
self._transitions = {}
self._callbacks = {}
self._localCallbacks = {}
def _stateSwitched(self):
pass
def _addRouting(self, sourcePillow, destinationPillow):
if not sourcePillow in self._routings:
self._routings[sourcePillow] = set()
self._routings[sourcePillow].add(destinationPillow)
def _setTransition(self, pillow, destinationState):
self._transitions[pillow] = destinationState
def _catch(self, pillow, callback):
if not pillow in self._callbacks:
self._callbacks[pillow] = set()
self._callbacks[pillow].add(callback)
if ':' in str(pillow):
room, pillow = pillow.split(':')
if not pillow in self._localCallbacks:
self._localCallbacks[pillow] = set()
self._localCallbacks[pillow].add(callback)
def sm(self):
return self._stateMachine
def _setStateMachine(self, stateMachine):
self._stateMachine = stateMachine
self._stateMachine.addRoutings(self._routings.keys())
self._stateMachine.addTransitions(self._transitions.keys())
self._stateMachine.addCallbacks(self._callbacks.keys())
def _throw(self, pillow, feathers):
self._stateMachine._throw(pillow, feathers, self)
def _switchState(self, state):
self._stateMachine.switchState(state)
def _routePillow(self, pillow, feathers):
if pillow in self._routings:
for routing in self._routings[pillow]:
self._throw(routing, feathers)
def _transitionState(self, pillow, feathers):
if pillow in self._transitions:
self._switchState(self._transitions[pillow])
def _callback(self, pillow, feathers):
if pillow in self._localCallbacks:
for callback in self._localCallbacks[pillow]:
callback(pillow, feathers)
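# Hedged usage sketch (not part of the module): a minimal two-state machine built
# on the classes above. The pillow names and the `room` argument are invented
# placeholders for illustration.
#
#     class Start(State):
#         def __init__(self):
#             State.__init__(self)
#             self._setTransition("Demo.In:Opened", "Running")
#
#     class Running(State):
#         def __init__(self):
#             State.__init__(self)
#             self._catch("Demo.In:Data", self._on_data)
#
#         def _on_data(self, pillow, feathers):
#             print(pillow, feathers)
#
#     sm = SMPeer(room, "Demo.Out:State", states=[Start(), Running()], initState="Start")
#     sm.initialize()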
| Castronova/EMIT | gui/examples/xml.py | Python | gpl-2.0 | 877 | 0.036488 |
__author__ = 'tonycastronova'
import xml.etree.ElementTree as et
from xml.dom import minidom
def prettify(elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = et.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
tree = et.Element('Simulation')
attributes = {'Name':'mymodel','path':'/some/path1','x':'10','y':'100'}
et.SubElement(tree,'Model',attributes)
attributes = {'Name':'mymodel2','path':'/some/path2','x':'20','y':'200'}
et.SubElement(tree,'Model',attributes)
attributes = {'From':'mymodel','To':'mymodel2','FromItem':'variable1','ToItem':'variable2'}
et.SubElement(tree,'Link',attributes)
prettyxml = prettify(tree)
with open('/Users/tonycastronova/Documents/projects/iUtah/EMIT/gui/tests/test.xml','w') as f:
f.write(prettyxml)
print 'done'
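# For reference (derived from the element tree built above): prettify() returns
# roughly the following document, give or take attribute ordering.
#
#     <?xml version="1.0" ?>
#     <Simulation>
#         <Model Name="mymodel" path="/some/path1" x="10" y="100"/>
#         <Model Name="mymodel2" path="/some/path2" x="20" y="200"/>
#         <Link From="mymodel" To="mymodel2" FromItem="variable1" ToItem="variable2"/>
#     </Simulation>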
| declension/squeeze-alexa | squeezealexa/__init__.py | Python | gpl-3.0 | 1,050 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2017-18 Nick Boultbee
# This file is part of squeeze-alexa.
#
# squeeze-alexa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# See LICENSE for full license
from os.path import dirname
from typing import Dict, Any
ROOT_DIR = dirname(dirname(__file__))
"""The squeeze-alexa root directory"""
class Settings:
"""Class-level settings base.
It's in here to avoid circular imports"""
def __str__(self) -> str:
return str(self.dict())
def dict(self) -> Dict[str, Any]:
return dict(self.__dict__.items())
def __init__(self):
# Set the instance-level things:
for k, v in type(self).__dict__.items():
if not k.startswith('_') and k not in Settings.__dict__:
setattr(self, k.lower(), v)
def configured(self):
return True
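
# Illustrative usage sketch (hypothetical subclass and values, not taken from
# the squeeze-alexa codebase): class-level UPPER_CASE attributes are copied to
# lower-case instance attributes by __init__ above, so for example:
#
#     class ServerSettings(Settings):
#         HOSTNAME = "localhost"
#         PORT = 9090
#
#     ServerSettings().dict()   # -> {'hostname': 'localhost', 'port': 9090}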
|
willkg/socorro-collector
|
collector/unittest/app/test_socorro_app.py
|
Python
|
mpl-2.0
| 8,093
| 0.003336
|
import mock
from nose.tools import eq_, ok_, assert_raises
from collector.unittest.testbase import TestCase
from configman import (
class_converter,
Namespace,
command_line,
ConfigFileFutureProxy,
)
from configman.dotdict import DotDict
from collector.app.socorro_app import (
SocorroApp,
SocorroWelcomeApp,
main,
klass_to_pypath,
)
from collector.app.for_application_defaults import ApplicationDefaultsProxy
#==============================================================================
class TestSocorroApp(TestCase):
#--------------------------------------------------------------------------
def test_instantiation(self):
config = DotDict()
sa = SocorroApp(config)
eq_(sa.get_application_defaults(), {})
assert_raises(NotImplementedError, sa.main)
assert_raises(NotImplementedError, sa._do_run)
#--------------------------------------------------------------------------
def test_run(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run(), 17)
ok_(SomeOtherApp.config_path is None)
x = SomeOtherApp.run()
eq_(x, 17)
#--------------------------------------------------------------------------
def test_run_with_alternate_config_path(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.values_source_list = values_source_list
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run('my/path'), 17)
eq_(SomeOtherApp.config_path, 'my/path')
x = SomeOtherApp.run('my/other/path')
eq_(x, 17)
eq_(SomeOtherApp.config_path, 'my/other/path')
#--------------------------------------------------------------------------
def test_run_with_alternate_values_source_list(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.values_source_list = values_source_list
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run('my/path', [{}, {}]), 17)
eq_(SomeOtherApp.config_path, 'my/path')
eq_(SomeOtherApp.values_source_list, [{}, {}])
x = SomeOtherApp.run('my/other/path', [])
eq_(x, 17)
eq_(SomeOtherApp.config_path, 'my/other/path')
eq_(SomeOtherApp.values_source_list, [])
#--------------------------------------------------------------------------
def test_do_run(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(SomeOtherApp)
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
                ok_(isinstance(kwargs['values_source_list'], list))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], './config')
ok_(kwargs['values_source_list'][-1], command_line)
ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
                ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(result, 17)
#--------------------------------------------------------------------------
def test_do_run_with_alternate_class_path(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(SomeOtherApp, 'my/other/path')
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
ok_(isinstance(kwargs['values_source_list'], list))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], 'my/other/path')
ok_(kwargs['values_source_list'][-1], command_line)
ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(result, 17)
#--------------------------------------------------------------------------
def test_do_run_with_alternate_values_source_list(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(
SomeOtherApp,
config_path='my/other/path',
values_source_list=[{"a": 1}, {"b": 2}]
)
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], 'my/other/path')
ok_(isinstance(kwargs['values_source_list'], list))
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(kwargs['values_source_list'][1], {"a": 1})
eq_(kwargs['values_source_list'][2], {"b": 2})
eq_(result, 17)
|
rustyrazorblade/machete
|
machete/wiki/tests/test_create_page.py
|
Python
|
bsd-3-clause
| 1,075
| 0.003721
|
from unittest import TestCase
from machete.base.tests import IntegrationTestCase
from machete.wiki.models import Wiki, Page
class CreatePageTest(TestCase):
def test_create_page(self):
wiki = Wiki.create()
page = wiki.create_page("test name [Some link]",
"/index.html",
u"this is a test")
assert isinstance(page, Page)
assert page.html == u'<p>this is a test</p>'
class PageIntegrationTest(IntegrationTestCase):
def test_create_page(self):
url = "/projects/{}/wiki/".format(s
|
elf.project.vid)
response = self.post(url, {"url":"TestPage",
"name":"Whatever bro",
"text":"this is a test"})
self.assert200(response)
url = "/projects/{}/wiki/TestPage".format(self.project.vid)
response = self.get(url)
self.assert200(response)
url = "/projects/{}/wiki/".format(self.project.vid)
response = self.get(url)
        self.assert200(response)
|
neuromat/nira
|
person/apps.py
|
Python
|
mpl-2.0
| 199
| 0
|
# member/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MemberConfig(AppConfig):
    name = 'person'
verbose_name = _('Personal Info')
|
weiting-chen/manila
|
manila/api/urlmap.py
|
Python
|
apache-2.0
| 10,165
| 0
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import urllib2
from oslo_log import log
import paste.urlmap
from manila.api.openstack import wsgi
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = log.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in urllib2.parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse header into content type and options.
Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
        # FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
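
    # Illustrative example (header value and content types chosen purely for
    # illustration):
    #   accept = Accept("application/json;q=0.8, application/xml")
    #   accept.best_match(["application/json", "application/xml"])
    #   -> ("application/xml", {})
    # The xml entry wins because its implicit quality is 1.0, which beats the
    # explicit q=0.8 on the json entry.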
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
|
google-research/tensorflow_constrained_optimization
|
tensorflow_constrained_optimization/python/rates/general_rates.py
|
Python
|
apache-2.0
| 52,315
| 0.002772
|
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Contains functions for constructing binary or multiclass rate expressions.
There are a number of rates (e.g. error_rate()) that can be defined for either
binary classification or multiclass contexts. The former rates are implemented
in binary_rates.py, and the latter in multiclass_rates.py. In this file, the
given functions choose which rate to create based on the type of the context:
for multiclass contexts, they'll call the corresponding implementation in
multiclass_rates.py, otherwise, they'll call binary_rates.py.
Many of the functions in this file take the optional "positive_class" parameter,
which tells us which classes should be considered "positive" (for e.g. the
positive prediction rate). This parameter *must* be provided for multiclass
contexts, and must *not* be provided for non-multiclass contexts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_constrained_optimization.python.rates import basic_expression
from tensorflow_constrained_optimization.python.rates import binary_rates
from tensorflow_constrained_optimization.python.rates import defaults
from tensorflow_constrained_optimization.python.rates import deferred_tensor
from tensorflow_constrained_optimization.python.rates import expression
from tensorflow_constrained_optimization.python.rates import multiclass_rates
from tensorflow_constrained_optimization.python.rates import subsettable_context
from tensorflow_constrained_optimization.python.rates import term
def _is_multiclass(context):
"""Returns True iff we're given a multiclass context."""
if not isinstance(context, subsettable_context.SubsettableContext):
raise TypeError("context must be a SubsettableContext object")
raw_context = context.raw_context
return raw_context.num_classes is not None
def _ratio_bound(numerator_expression, denominator_expression, lower_bound,
upper_bound):
"""Creates an `Expression` for a bound on a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound is a newly-created slack variable projected to satisfy
the following (in an update op):
denominator_lower_bound <= denominator_bound <= 1
Additionally, the following constraint will be added if lower_bound is True:
denominator_bound >= denominator_expression
and/or the following if upper_bound is true:
denominator_bound <= denominator_expression
These constraints are placed in the "extra_constraints" field of the resulting
`Expression`.
If you're going to be lower-bounding or maximizing the result of this
function, then you need to set the lower_bound parameter to `True`. Likewise, if
you're going to be upper-bounding or minimizing the result of this function,
then the upper_bound parameter must be `True`. At least one of these
parameters *must* be `True`, and it's permitted for both of them to be `True`
(but we recommend against this, since it would result in equality constraints,
which might cause problems during optimization and/or post-processing).
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio. The
value of this expression must be between zero and one.
lower_bound: bool, `True` if you want the result of this function to
lower-bound the ratio.
upper_bound: bool, `True` if you want the result of this function to
upper-bound the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
ValueError: if both lower_bound and upper_bound are `False`.
"""
if not (isinstance(numerator_expression, expression.Expression) and
isinstance(denominator_expression, expression.Expression)):
raise TypeError(
"both numerator_expression and denominator_expression must be "
"Expressions (perhaps you need to call wrap_rate() to create an "
"Expression from a Tensor?)")
# One could set both lower_bound and upper_bound to True, in which case the
# result of this function could be treated as the ratio itself (instead of a
# {lower,upper} bound of it). However, this would come with some drawbacks: it
# would of course make optimization more difficult, but more importantly, it
# would potentially cause post-processing for feasibility (e.g. using
# "shrinking") to fail to find a feasible solution.
if not (lower_bound or upper_bound):
raise ValueError("at least one of lower_bound or upper_bound must be True")
# We use an "update_ops_fn" instead of a "constraint" (which we would usually
# prefer) to perform the projection because we want to grab the denominator
# lower bound out of the structure_memoizer.
def update_ops_fn(denominator_bound_variable, structure_memoizer,
value_memoizer):
"""Projects denominator_bound onto the feasible region."""
del value_memoizer
denominator_bound = tf.maximum(
structure_memoizer[defaults.DENOMINATOR_LOWER_BOUND_KEY],
tf.minimum(1.0, denominator_bound_variable))
return [denominator_bound_variable.assign(denominator_bound)]
# Ideally the slack variable would have the same dtype as the predictions, but
# we might not know their dtype (e.g. in eager mode), so instead we always use
# float32 with auto_cast=True.
denominator_bound = deferred_tensor.DeferredVariable(
1.0,
trainable=True,
name="tfco_denominator_bound",
dtype=tf.float32,
update_ops_fn=update_ops_fn,
auto_cast=True)
denominator_bound_basic_expression = basic_expression.BasicExpression(
[term.TensorTerm(denominator_bound)])
denominator_bound_expression = expression.ExplicitExpression(
penalty_expression=denominator_bound_basic_expression,
constraint_expression=denominator_bound_basic_expression)
extra_constraints = []
if lower_bound:
extra_constraints.append(
denominator_expression <= denominator_bound_expression)
if upper_bound:
extra_constraints.append(
denominator_bound_expression <= denominator_expression)
return expression.ConstrainedExpression(
expression=numerator_expression._positive_scalar_div(denominator_bound), # pylint: disable=protected-access
extra_constraints=extra_constraints)
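# Intuition for the constraint directions in _ratio_bound above: rates are
# non-negative, so with lower_bound=True the constraint
# denominator_expression <= denominator_bound means numerator / denominator_bound
# can only under-estimate the true ratio, which is what makes it a valid lower
# bound; the upper_bound case is the mirror image.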
def _ratio(numerator_expression, denominator_expression):
"""Creates an `Expression` for a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound satisfies the following:
denominator_lower_bound <= denominator_bound <= 1
The resulting `Expression` will include both the implicit denominator_bound
slack variable, and implicit constraints.
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
"""
return expression.BoundedExpression(
lower_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=True,
|
shrimpboyho/git.js
|
emscript/emscripten/1.5.6/tools/asm_module.py
|
Python
|
gpl-2.0
| 10,782
| 0.013356
|
import sys, re
import shared, js_optimizer
class AsmModule():
def __init__(self, filename):
self.filename = filename
self.js = open(filename).read()
self.start_asm = self.js.find(js_optimizer.start_asm_marker)
self.start_funcs = self.js.find(js_optimizer.start_funcs_marker)
self.end_funcs = self.js.rfind(js_optimizer.end_funcs_marker)
self.end_asm = self.js.rfind(js_optimizer.end_asm_marker)
# pre and asm
self.pre_js = self.js[:self.start_asm]
self.asm_js = self.js[self.start_asm:self.end_asm]
# heap initializer
self.staticbump = int(re.search(shared.JS.memory_staticbump_pattern, self.pre_js).group(1))
if self.staticbump:
      self.mem_init_js = re.search(shared.JS.memory_initializer_pattern, self.pre_js).group(0)
# global initializers
global_inits = re.search(shared.JS.global_initializers_pattern, self.pre_js)
if global_inits:
self.global_inits_js = global_inits.group(0)
self.global_inits = map(lambda init: init.split('{')[2][1:].split('(')[0], global_inits.groups(0)[0].split(','))
else:
self.global_inits_js = ''
self.global_inits = []
# imports (and global variables)
    first_var = self.js.find('var ', self.js.find('var ', self.start_asm)+4)
self.pre_imports_js = self.js[self.start_asm:first_var]
self.imports_js = self.js[first_var:self.start_funcs]
self.imports = {}
for imp in js_optimizer.import_sig.finditer(self.imports_js):
key, value = imp.group(0).split('var ')[1][:-1].split('=', 1)
self.imports[key.strip()] = value.strip()
#print >> sys.stderr, 'imports', self.imports
# funcs
self.funcs_js = self.js[self.start_funcs:self.end_funcs]
self.funcs = set([m.group(2) for m in js_optimizer.func_sig.finditer(self.funcs_js)])
#print 'funcs', self.funcs
# tables and exports
post_js = self.js[self.end_funcs:self.end_asm]
ret = post_js.find('return ')
self.tables_js = post_js[:ret]
self.exports_js = post_js[ret:]
self.tables = self.parse_tables(self.tables_js)
self.exports = set([export.strip() for export in self.exports_js[self.exports_js.find('{')+1:self.exports_js.find('}')].split(',')])
# post
self.post_js = self.js[self.end_asm:]
self.sendings = {}
for sending in [sending.strip() for sending in self.post_js[self.post_js.find('}, { ')+5:self.post_js.find(' }, buffer);')].split(',')]:
colon = sending.find(':')
self.sendings[sending[:colon].replace('"', '')] = sending[colon+1:].strip()
self.module_defs = set(re.findall('var [\w\d_$]+ = Module\["[\w\d_$]+"\] = asm\["[\w\d_$]+"\];\n', self.post_js))
def relocate_into(self, main):
# heap initializer
if self.staticbump > 0:
new_mem_init = self.mem_init_js[:self.mem_init_js.rfind(', ')] + ', Runtime.GLOBAL_BASE+%d)' % main.staticbump
main.pre_js = re.sub(shared.JS.memory_staticbump_pattern, 'STATICTOP = STATIC_BASE + %d;\n' % (main.staticbump + self.staticbump) + new_mem_init, main.pre_js, count=1)
# Find function name replacements TODO: do not rename duplicate names with duplicate contents, just merge them
replacements = {}
for func in self.funcs:
rep = func
while rep in main.funcs:
rep += '_'
replacements[func] = rep
#print >> sys.stderr, 'replacements:', replacements
# sendings: add invokes for new tables
all_sendings = main.sendings
added_sending = False
for table in self.tables:
if table not in main.tables:
sig = table[table.rfind('_')+1:]
func = 'invoke_%s' % sig
all_sendings[func] = func
main.pre_js += 'var %s = %s;\n' % (func, shared.JS.make_invoke(sig, named=False))
added_sending = True
# imports
all_imports = main.imports
for key, value in self.imports.iteritems():
if key in self.funcs or key in main.funcs: continue # external function in one module, implemented in the other
value_concrete = '.' not in value # env.key means it is an import, an external value, and not a concrete one
main_value = main.imports.get(key)
main_value_concrete = main_value and '.' not in main_value
if value_concrete and main_value_concrete: continue # standard global var
if not main_value or value_concrete:
if '+' in value:
# relocate
value = value.replace('(', '').replace(')', '').replace('| 0', '').replace('|0', '').replace(' ', '')
left, right = value.split('+')
assert left == 'H_BASE'
value = str(main.staticbump + int(right))
all_imports[key] = value
if (value_concrete or main_value_concrete) and key in all_sendings:
del all_sendings[key] # import of external value no longer needed
main.imports_js = '\n'.join(['var %s = %s;' % (key, value) for key, value in all_imports.iteritems()]) + '\n'
# check for undefined references to global variables
def check_import(key, value):
if value.startswith('+') or value.endswith('|0'): # ignore functions
if key not in all_sendings:
print >> sys.stderr, 'warning: external variable %s is still not defined after linking' % key
all_sendings[key] = '0'
for key, value in all_imports.iteritems(): check_import(key, value)
if added_sending:
sendings_js = ', '.join(['%s: %s' % (key, value) for key, value in all_sendings.iteritems()])
sendings_start = main.post_js.find('}, { ')+5
sendings_end = main.post_js.find(' }, buffer);')
main.post_js = main.post_js[:sendings_start] + sendings_js + main.post_js[sendings_end:]
# tables
f_bases = {}
f_sizes = {}
for table, data in self.tables.iteritems():
main.tables[table] = self.merge_tables(table, main.tables.get(table), data, replacements, f_bases, f_sizes)
main.combine_tables()
#print >> sys.stderr, 'f bases', f_bases
# relocate
temp = shared.Building.js_optimizer(self.filename, ['asm', 'relocate', 'last'], extra_info={
'replacements': replacements,
'fBases': f_bases,
'hBase': main.staticbump
})
#print >> sys.stderr, 'relocated side into', temp
relocated_funcs = AsmModule(temp)
shared.try_delete(temp)
main.extra_funcs_js = relocated_funcs.funcs_js.replace(js_optimizer.start_funcs_marker, '\n')
# update function table uses
ft_marker = 'FUNCTION_TABLE_'
def update_fts(what):
updates = []
i = 1 # avoid seeing marker in recursion
while 1:
i = what.find(ft_marker, i)
if i < 0: break;
start = i
end = what.find('[', start)
table = what[i:end]
if table not in f_sizes:
# table was not modified
i += len(ft_marker)
continue
nesting = 1
while nesting > 0:
next = what.find(']', end+1)
nesting -= 1
nesting += what.count('[', end+1, next)
end = next
assert end > 0
mask = what.rfind('&', start, end)
assert mask > 0 and end - mask <= 13
fixed = update_fts(what[start:mask+1] + str(f_sizes[table]-1) + ']')
updates.append((start, end, fixed))
i = end # additional function table uses were done by recursion
# apply updates
if len(updates) == 0: return what
parts = []
so_far = 0
for i in range(len(updates)):
start, end, fixed = updates[i]
parts.append(what[so_far:start])
parts.append(fixed)
so_far = end+1
parts.append(what[so_far:])
return ''.join(parts)
main.funcs_js = update_fts(main.funcs_js)
main.extra_funcs_js = update_fts(main.extra_funcs_js)
# global initializers
if self.global_inits:
my_global_inits = map(lambda init: replacements[init] if init in replacements else init, self.global_inits)
all_global_inits = map(lambda init: '{ func: function() { %s() } }' % init, main.global_inits + my_global_inits)
all_global_inits_js = '/* global initializers */ __ATINIT__.push(' + ','.join(all_global_inits) + ');'
if main.global_inits:
target = main.global_inits_js
else:
target = '// ===
|
chronicle/api-samples-python
|
service_management/update_gcp_settings_test.py
|
Python
|
apache-2.0
| 2,741
| 0.004743
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the "update_gcp_settings" module."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import update_gcp_settings
class UpdateGCPSettingsTest(unittest.TestCase):
def test_initialize_command_line_args_enable_ingestion(self):
actual = update_gcp_settings.initialize_command_line_args(
["--credentials_file=./foo.json", "--organization_id=123", "--enable"])
self.assertIsNotNone(actual)
def test_initialize_command_line_args_disable_ingestion(self):
actual = update_gcp_settings.initialize_command_line_args(
["--credentials_file=./foo.json", "--organization_id=123", "--disable"])
self.assertIsNotNone(actual)
def test_initialize_command_line_args_organization_id_too_big(self):
invalid_organization_id = 2**64
actual = update_gcp_settings.initialize_command_line_args(
[f"--organization_id={invalid_organization_id}"])
self.assertIsNone(actual)
def test_initialize_command_line_args_negative_organization_id(self):
actual = update_gcp_settings.initialize_command_line_args(
["--organization_id=-1"])
self.assertIsNone(actual)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
    type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
update_gcp_settings.update_gcp_settings(mock_session, 123, True)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
update_gcp_settings.update_gcp_settings(mock_session, 123, True)
if __name__ == "__main__":
unittest.main()
|
ul-fmf/projekt-tomo
|
web/web/settings/local.py
|
Python
|
agpl-3.0
| 916
| 0.001092
|
from .common import *
INSTALLED_APPS += [
'silk',
]
MIDDLEWARE_CLASSES.insert(0, 'silk.middleware.SilkyMiddleware')
SECRET_KEY = '0vb+-_-52phz@ii^cxr+mlgvmn6fctd+v5qpnv&k+-00#u-==0'
DEBUG = True
ALLOWED_HOSTS = []
WSGI_APPLICATION = 'web.wsgi.local.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'matija',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
SUBMISSION_URL = 'http://127.0.0.1:8000'
# Use nose to run all tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Tell nose to measure coverage on the 'problems', 'attempts', 'courses' and 'users' apps
NOSE_ARGS = [
'--with-coverage',
'--cover-package=problems,attempts,courses,users,utils',
]
|
tgcmteam/tgcmlinux
|
src/tgcm/ui/windows/PukDialog/__init__.py
|
Python
|
gpl-2.0
| 32
| 0
|
from PukDialog import PukDialog
|
ST-Data-Mining/crater
|
george/nn.py
|
Python
|
mit
| 1,547
| 0.032321
|
from __future__ import division,print_function
from os import environ
import sys
HOME=environ['HOME']
PROJECT_ROOT=HOME+'/Panzer/NCSU/Spatial and Temporal/crater'
EXPTS = PROJECT_ROOT+'/expts'
sys.path.extend([PROJECT_ROOT,EXPTS])
sys.dont_write_bytecode = True
from sklearn.neural_network import BernoulliRBM
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from george.lib import *
from expts.csvParser import parseCSV, randomPoints
import config
def builder(fname = config.TRAIN_FILE, hiddens=256, learn_rate=0.01):
points = parseCSV(fname, False)
rbm = BernoulliRBM(n_components=hiddens,learning_rate=learn_rate,n_iter=30,random_state=1)
logistic = LogisticRegression(C=20)
clf = Pipeline(steps=[('rbm', rbm), ('logistic',logistic)])
X, y = [], []
for point in points:
X.append(normalize(point.x))
y.append(point.y)
clf.fit(X,y)
return clf
def predictor(classifier, points):
X,actuals = [], []
for point in points:
X.append(normalize(point.x))
actuals.append(point.y)
predicts = classifier.predict(X)
return predicts, actuals
def _runner():
hiddens = 250
learn_rate = 0.01
points = parseCSV(config.FEATURES_FOLDER+"all.csv", False)
#points += parseCSV(config.FEATURES_FOLDER+"1_25.csv", False)
classifier = builder(config.TRAIN_FILE, hiddens, learn_rate)
predicted, actual = predictor(classifier, points)
stat = ABCD()
for p,a in zip(predicted,actual):
stat.update(p, a)
print(p, a)
print(stat)
if __name__=="__main__":
_runner()
|
google-research/google-research
|
structformer/utils.py
|
Python
|
apache-2.0
| 1,703
| 0.008808
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for training."""
import random
import numpy
import torch
def batchify(idxs, bsz, device, pad=0, shuffle=True):
"""Batchify the training data."""
length = [len(seq) for seq in idxs]
sorted_idx = numpy.argsort(length)
idxs_sorted = [idxs[i] for i in sorted_idx]
idxs_batched = []
i = 0
  def get_batch(source, i, batch_size, pad=0):
total_length = 0
data = []
    while total_length < batch_size and i < len(source):
data.append(source[i])
total_length += len(source[i])
i += 1
length = [len(seq) for seq in data]
max_l = max(length)
data_padded = []
for seq in data:
data_padded.append(seq + [pad] * (max_l - len(seq)))
data_mat = torch.LongTensor(data_padded).to(device)
return data_mat
while i < len(idxs_sorted):
idxs_batched.append(get_batch(idxs_sorted, i, bsz, pad))
i += idxs_batched[-1].size(0)
if shuffle:
sentence_idx = list(range(len(idxs_batched)))
random.shuffle(sentence_idx)
idxs_batched = [idxs_batched[i] for i in sentence_idx]
return idxs_batched
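# Illustrative example (toy token ids; shuffle disabled so the result is
# deterministic):
#   batchify([[1, 2], [3], [4, 5, 6]], bsz=4, device="cpu", shuffle=False)
# sorts the sequences by length ([3], [1, 2], [4, 5, 6]), greedily fills a
# batch until at least `bsz` tokens are collected (or the data runs out), and
# pads each batch to its longest sequence, here returning a single LongTensor:
#   [[3, 0, 0], [1, 2, 0], [4, 5, 6]]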
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/grades/api/views.py
|
Python
|
agpl-3.0
| 6,482
| 0.001543
|
""" API v0 views. """
import logging
from django.http import Http404
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from lms.djangoapps.ccx.utils import prep_course_for_grading
from lms.djangoapps.courseware import courses
from lms.djangoapps.grades.api.serializers import GradingPolicySerializer
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
log = logging.getLogger(__name__)
class GradeViewMixin(DeveloperErrorViewMixin):
"""
Mixin class for Grades related views.
"""
authentication_classes = (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthentication,
)
permission_classes = (IsAuthenticated,)
def _get_course(self, course_key_string, user, access_action):
"""
Returns the course for the given course_key_string after
verifying the requested access to the course by the given user.
"""
try:
course_key = CourseKey.from_string(course_key_string)
except InvalidKeyError:
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The provided course key cannot be parsed.',
error_code='invalid_course_key'
)
try:
return courses.get_course_with_access(
user,
access_action,
course_key,
check_if_enrolled=True
)
except Http404:
log.info('Course with ID "%s" not found', course_key_string)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user, the course or both do not exist.',
error_code='user_or_course_does_not_exist'
)
def perform_authentication(self, request):
"""
Ensures that the user is authenticated (e.g. not an AnonymousUser), unless DEBUG mode is enabled.
"""
super(GradeViewMixin, self).perform_authentication(request)
if request.user.is_anonymous():
raise AuthenticationFailed
class UserGradeView(GradeViewMixin, GenericAPIView):
"""
**Use Case**
* Get the current course grades for users in a course.
Currently, getting the grade for only an individual user is supported.
**Example Request**
GET /api/grades/v0/course_grade/{course_id}/users/?username={username}
**GET Parameters**
A GET request must include the following parameters.
            * course_id: A string representation of a Course ID.
* username: A string representation of a user's username.
**GET Response Values**
If the request for information about the course grade
is successful, an HTTP 200 "OK" response is returned.
The HTTP 200 response has the following values.
            * username: A string representation of a user's username passed
              in the request.
* course_id: A string representation of a Course ID.
* passed: Boolean representing whether the course has been
passed according the course's grading policy.
* percent: A float representing the overall grade for the course
* letter_grade: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None
**Example GET Response**
[{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"passed": false,
"percent": 0.03,
"letter_grade": None,
}]
"""
def get(self, request, course_id):
"""
Gets a course progress status.
Args:
request (Request): Django request object.
course_id (string): URI element specifying the course location.
Return:
A JSON serialized representation of the requesting user's current grade status.
"""
username = request.GET.get('username')
# only the student can access her own grade status info
if request.user.username != username:
log.info(
'User %s tried to access the grade for user %s.',
request.user.username,
username
)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user requested does not match the logged in user.',
error_code='user_mismatch'
)
course = self._get_course(course_id, request.user, 'load')
if isinstance(course, Response):
return course
prep_course_for_grading(course, request)
course_grade = CourseGradeFactory().create(request.user, course)
return Response([{
'username': username,
'course_key': course_id,
'passed': course_grade.passed,
'percent': course_grade.percent,
'letter_grade': course_grade.letter_grade,
}])
class CourseGradingPolicy(GradeViewMixin, ListAPIView):
"""
**Use Case**
Get the course grading policy.
**Example requests**:
GET /api/grades/v0/policy/{course_id}/
**Response Values**
* assignment_type: The type of the assignment, as configured by course
staff. For example, course staff might make the assignment types Homework,
Quiz, and Exam.
* count: The number of assignments of the type.
* dropped: Number of assignments of the type that are dropped.
* weight: The weight, or effect, of the assignment type on the learner's
final grade.
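    **Example Response** (illustrative values only; the actual response depends
    on the course's grading policy)
        [{
            "assignment_type": "Homework",
            "count": 10,
            "dropped": 2,
            "weight": 0.15
        }]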
"""
allow_empty = False
def get(self, request, course_id, **kwargs):
course = self._get_course(course_id, request.user, 'staff')
if isinstance(course, Response):
return course
return Response(GradingPolicySerializer(course.raw_grader, many=True).data)
|
tdjordan/tortoisegit
|
tracelog.py
|
Python
|
gpl-2.0
| 4,176
| 0.006705
|
#
# A PyGtk-based Python Trace Collector window
#
# Copyright (C) 2007 TK Soh <teekaysoh@gmail.com>
#
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import pango
import threading
import Queue
import win32trace
try:
from gitgtk.gitlib import toutf
except ImportError:
import locale
_encoding = locale.getpreferredencoding()
def toutf(s):
return s.decode(_encoding, 'replace').encode('utf-8')
class TraceLog():
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Python Trace Collector")
# construct window
self.window.set_default_size(700, 400)
self.main_area = gtk.VBox()
self.window.add(self.main_area)
# mimic standard dialog widgets
self.action_area = gtk.HBox()
self.main_area.pack_end(self.action_area, False, False, 5)
sep = gtk.HSeparator()
self.main_area.pack_end(sep, False, False, 0)
self.vbox = gtk.VBox()
self.main_area.pack_end(self.vbox)
        # add python trace output window
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.textview = gtk.TextView(buffer=None)
self.textview.set_editable(False)
self.textview.modify_font(pango.FontDescription("Mon
|
ospace"))
scrolledwindow.add(self.textview)
self.textview.set_editable(False)
self.textbuffer = self.textview.get_buffer()
self.vbox.pack_start(scrolledwindow, True, True)
self.vbox.show_all()
# add buttons
self._button_quit = gtk.Button("Quit")
self._button_quit.connect('clicked', self._on_ok_clicked)
self.action_area.pack_end(self._button_quit, False, False, 5)
self._button_clear = gtk.Button("Clear")
self._button_clear.connect('clicked', self._on_clear_clicked)
self.action_area.pack_end(self._button_clear, False, False, 5)
# add assorted window event handlers
self.window.connect('map_event', self._on_window_map_event)
self.window.connect('delete_event', self._on_window_close_clicked)
def _on_ok_clicked(self, button):
self._stop_read_thread()
gtk.main_quit()
def _on_clear_clicked(self, button):
self.write("", False)
def _on_window_close_clicked(self, event, param):
self._stop_read_thread()
gtk.main_quit()
def _on_window_map_event(self, event, param):
self._begin_trace()
def _begin_trace(self):
self.queue = Queue.Queue()
win32trace.InitRead()
self.write("Collecting Python Trace Output...\n")
gobject.timeout_add(10, self._process_queue)
self._start_read_thread()
def _start_read_thread(self):
self._read_trace = True
self.thread1 = threading.Thread(target=self._do_read_trace)
self.thread1.start()
def _stop_read_thread(self):
self._read_trace = False
        # wait for worker thread to finish, to fix "Unhandled exception in thread" on exit
self.thread1.join()
def _process_queue(self):
"""
Handle all the messages currently in the queue (if any).
"""
while self.queue.qsize():
try:
msg = self.queue.get(0)
self.write(msg)
except Queue.Empty:
pass
return True
def _do_read_trace(self):
"""
print buffer collected in win32trace
"""
while self._read_trace:
msg = win32trace.read()
if msg:
self.queue.put(msg)
def write(self, msg, append=True):
msg = toutf(msg)
if append:
enditer = self.textbuffer.get_end_iter()
self.textbuffer.insert(enditer, msg)
else:
self.textbuffer.set_text(msg)
def main(self):
self.window.show_all()
gtk.main()
def run():
dlg = TraceLog()
dlg.main()
if __name__ == "__main__":
run()
|
meee1/pymavlink
|
mavextra.py
|
Python
|
lgpl-3.0
| 1,082
| 0.005545
|
#!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from math import *
def norm_heading(RAW_IMU, ATTITUDE, declination):
'''calculate heading from RAW_IMU and ATTITUDE'''
xmag = RAW_IMU.xmag
ymag = RAW_IMU.ymag
zmag = RAW_IMU.zmag
pitch = ATTITUDE.pitch
roll = ATTITUDE.roll
headX = xmag*cos(pitch) + ymag*sin(roll)*sin(pitch) + zmag*cos(roll)*sin(pitch)
headY = ymag*cos(roll) - zmag*sin(roll)
heading = atan2(-headY, headX)
heading = fmod(degrees(heading) + declination + 360, 360)
return heading
def TrueHeading(SERVO_OUTPUT_RAW):
rc3_min = 1060
rc3_max = 1850
p = float(SERVO_OUTPUT_RAW.servo3_raw - rc3_min) / (rc3_max - rc3_min)
return 172 + (1.0-p)*(326 - 172)
def kmh(mps):
'''convert m/s to Km/h'''
return mps*3.6
def altitude(press_abs, ground_press=955.0, ground_temp=30):
'''calculate barometric altitude'''
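    # Hypsometric relation: h = (R_d / g) * T_kelvin * ln(p0 / p); the factor
    # 29271.267 * 0.001 ~= 29.27 m/K is R_d / g for dry air (287.05 / 9.80665).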
return log(ground_press/press_abs)*(ground_temp+273.15)*29271.267*0.001
|
apyrgio/ganeti
|
lib/impexpd/__init__.py
|
Python
|
bsd-2-clause
| 17,841
| 0.007679
|
#
#
# Copyright (C) 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Classes and functions for import/export daemon.
"""
import os
import re
import socket
import logging
import signal
import errno
import time
from cStringIO import StringIO
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import netutils
from ganeti import compat
#: Used to recognize point at which socat(1) starts to listen on its socket.
#: The local address is required for the remote peer to connect (in particular
#: the port number).
LISTENING_RE = re.compile(r"^listening on\s+"
r"AF=(?P<family>\d+)\s+"
r"(?P<address>.+):(?P<port>\d+)$", re.I)
#: Used to recognize point at which socat(1) is sending data over the wire
TRANSFER_LOOP_RE = re.compile(r"^starting data transfer loop with FDs\s+.*$",
re.I)
SOCAT_LOG_DEBUG = "D"
SOCAT_LOG_INFO = "I"
SOCAT_LOG_NOTICE = "N"
SOCAT_LOG_WARNING = "W"
SOCAT_LOG_ERROR = "E"
SOCAT_LOG_FATAL = "F"
SOCAT_LOG_IGNORE = compat.UniqueFrozenset([
SOCAT_LOG_DEBUG,
SOCAT_LOG_INFO,
SOCAT_LOG_NOTICE,
])
#: Used to parse GNU dd(1) statistics
DD_INFO_RE = re.compile(r"^(?P<bytes>\d+)\s*byte(?:|s)\s.*\scopied,\s*"
r"(?P<seconds>[\d.]+)\s*s(?:|econds),.*$", re.I)
#: Used to ignore "N+N records in/out" on dd(1)'s stderr
DD_STDERR_IGNORE = re.compile(r"^\d+\+\d+\s*records\s+(?:in|out)$", re.I)
#: Signal upon which dd(1) will print statistics (on some platforms, SIGINFO is
#: unavailable and SIGUSR1 is used instead)
DD_INFO_SIGNAL = getattr(signal, "SIGINFO", signal.SIGUSR1)
#: Buffer size: at most this many bytes are transferred at once
BUFSIZE = 1024 * 1024
# Common options for socat
SOCAT_TCP_OPTS = ["keepalive", "keepidle=60", "keepintvl=10", "keepcnt=5"]
SOCAT_OPENSSL_OPTS = ["verify=1", "method=TLSv1",
"cipher=%s" % constants.OPENSSL_CIPHERS]
if constants.SOCAT_USE_COMPRESS:
  # Disables all compression in OpenSSL. Only supported in patched versions
# of socat (as of November 2010). See INSTALL for more information.
SOCAT_OPENSSL_OPTS.append("compress=none")
SOCAT_OPTION_MAXLEN = 400
(PROG_OTHER,
PROG_SOCAT,
PROG_DD,
PROG_DD_PID,
PROG_EXP_SIZE) = range(1, 6)
PROG_ALL = compat.UniqueFrozenset([
PROG_OTHER,
PROG_SOCAT,
PROG_DD,
PROG_DD_PID,
PROG_EXP_SIZE,
])
class CommandBuilder(object):
def __init__(self, mode, opts, socat_stderr_fd, dd_stderr_fd, dd_pid_fd):
"""Initializes this class.
@param mode: Daemon mode (import or export)
@param opts: Options object
@type socat_stderr_fd: int
@param socat_stderr_fd: File descriptor socat should write its stderr to
@type dd_stderr_fd: int
    @param dd_stderr_fd: File descriptor dd should write its stderr to
@type dd_pid_fd: int
@param dd_pid_fd: File descriptor the child should write dd's PID to
"""
self._opts = opts
self._mode = mode
self._socat_stderr_fd = socat_stderr_fd
self._dd_stderr_fd = dd_stderr_fd
self._dd_pid_fd = dd_pid_fd
assert (self._opts.magic is None or
constants.IE_MAGIC_RE.match(self._opts.magic))
@staticmethod
def GetBashCommand(cmd):
"""Prepares a command to be run in Bash.
"""
return ["bash", "-o", "errexit", "-o", "pipefail", "-c", cmd]
def _GetSocatCommand(self):
"""Returns the socat command.
"""
common_addr_opts = SOCAT_TCP_OPTS + SOCAT_OPENSSL_OPTS + [
"key=%s" % self._opts.key,
"cert=%s" % self._opts.cert,
"cafile=%s" % self._opts.ca,
]
if self._opts.bind is not None:
common_addr_opts.append("bind=%s" % self._opts.bind)
assert not (self._opts.ipv4 and self._opts.ipv6)
if self._opts.ipv4:
common_addr_opts.append("pf=ipv4")
elif self._opts.ipv6:
common_addr_opts.append("pf=ipv6")
if self._mode == constants.IEM_IMPORT:
if self._opts.port is None:
port = 0
else:
port = self._opts.port
addr1 = [
"OPENSSL-LISTEN:%s" % port,
"reuseaddr",
# Retry to listen if connection wasn't established successfully, up to
# 100 times a second. Note that this still leaves room for DoS attacks.
"forever",
"intervall=0.01",
] + common_addr_opts
addr2 = ["stdout"]
elif self._mode == constants.IEM_EXPORT:
if self._opts.host and netutils.IP6Address.IsValid(self._opts.host):
host = "[%s]" % self._opts.host
else:
host = self._opts.host
addr1 = ["stdin"]
addr2 = [
"OPENSSL:%s:%s" % (host, self._opts.port),
# How long to wait per connection attempt
"connect-timeout=%s" % self._opts.connect_timeout,
# Retry a few times before giving up to connect (once per second)
"retry=%s" % self._opts.connect_retries,
"intervall=1",
] + common_addr_opts
else:
raise errors.GenericError("Invalid mode '%s'" % self._mode)
for i in [addr1, addr2]:
for value in i:
if len(value) > SOCAT_OPTION_MAXLEN:
raise errors.GenericError("Socat option longer than %s"
" characters: %r" %
(SOCAT_OPTION_MAXLEN, value))
if "," in value:
raise errors.GenericError("Comma not allowed in socat option"
" value: %r" % value)
return [
constants.SOCAT_PATH,
# Log to stderr
"-ls",
# Log level
"-d", "-d",
# Buffer size
"-b%s" % BUFSIZE,
# Unidirectional mode, the first address is only used for reading, and the
# second address is only used for writing
"-u",
",".join(addr1), ",".join(addr2),
]
def _GetMagicCommand(self):
"""Returns the command to read/write the magic value.
"""
if not self._opts.magic:
return None
# Prefix to ensure magic isn't interpreted as option to "echo"
magic = "M=%s" % self._opts.magic
cmd = StringIO()
if self._mode == constants.IEM_IMPORT:
cmd.write("{ ")
cmd.write(utils.ShellQuoteArgs(["read", "-n", str(len(magic)), "magic"]))
cmd.write(" && ")
cmd.write("if test \"$magic\" != %s; then" % utils.ShellQuote(magic))
cmd.write(" echo %s >&2;" % utils.ShellQuote("Magic value mismatch"))
cmd.write(" exit 1;")
cmd.write("fi;")
cmd.write(" }")
elif self._mode == constants.IEM_EXPORT:
cmd.write(utils.ShellQuoteArgs(["echo", "-E", "-n", magic]))
else:
raise errors.GenericError("Invalid mode '%s'" % self._mode)
return cmd.getvalue()
def _GetDdCommand(self):
"""Returns the command for measuring throughput.
"""
dd_cmd = StringIO()
magic_cmd = self._GetMagicCommand()
if magic_cmd:
dd_cmd.write("{ ")
dd
|
Daphron/project-euler
|
p39.py
|
Python
|
gpl-3.0
| 697
| 0.005739
|
#TODO: make under 1 min.
#SOLVED
import math
MAX_P = 1000
best_p = 120
best_num_sides = 3
for p in range(2, MAX_P+1):
num_sides = 0
if p % 30 == 0:
print(p)
for a in range(1, MAX_P/2 + 2):
for b in range(1, MAX_P/2 + 2):
c = p - a - b
if a > b and b > c and c**2 + b**2 == a**2 and a + b + c == p and c > 0:
# print("sides {} {} {}".format(a,b,c))
# print("P={}".format(p))
num_sides += 1
if num_sides > best_num_sides:
# print("Change to p={}".format(p))
# import pdb; pdb.set_trace()
best_num_sides = num_sides
best_p = p
print("Done")
print(best_p)
|
schlos/OIPA-V2.1
|
OIPA/iati_synchroniser/models.py
|
Python
|
agpl-3.0
| 5,972
| 0.003684
|
from django.db import models
import datetime
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from iati_synchroniser.dataset_syncer import DatasetSyncer
from iati_synchroniser.codelist_importer import CodeListImporter
from iati.parser import Parser
from iati_synchroniser.admin_tools import AdminTools
INTERVAL_CHOICES = (
(u'YEARLY', _(u"Parse yearly")),
(u'MONTHLY', _(u"Parse monthly")),
(u'WEEKLY', _(u"Parse weekly")),
(u'DAILY', _(u"Parse daily")),
)
class Publisher(models.Model):
org_id = models.CharField(max_length=100, blank=True, null=True)
org_abbreviate = models.CharField(max_length=55, blank=True, null=True)
org_name = models.CharField(max_length=255)
default_interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES, default=u'MONTHLY')
XML_total_activity_count = models.IntegerField(null=True, default=None)
OIPA_total_activity_count = models.IntegerField(null=True, default=None)
def __unicode__(self):
return self.org_id
class IatiXmlSource(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
INTERVAL_CHOICES = (
("day", _(u"Day")),
("week", _(u"Week")),
("month", _(u"Month")),
("year", _(u"Year")),
)
ref = models.CharField(verbose_name=_(u"Reference"), max_length=70, help_text=_(u"Reference for the XML file. Preferred usage: 'collection' or single country or region name"))
title = models.CharField(max_length=255, null=True)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
publisher = models.ForeignKey(Publisher)
source_url = models.CharField(max_length=255, unique=True, help_text=_(u"Hyperlink to an iati activity or organisation XML file."))
date_created = models.DateTimeField(auto_now_add=True, editable=False)
date_updated = models.DateTimeField(auto_now_add=True, editable=False)
update_interval = models.CharField(max_length=20, choices=INTERVAL_CHOICES, default="month", null=True, blank=True)
last_found_in_registry = models.DateTimeField(default=None, null=True)
xml_activity_count = models.IntegerField(null=True, default=None)
oipa_activity_count = models.IntegerField(null=True, default=None)
iati_standard_version = models.CharField(max_length=10, null=True, default=None)
class Meta:
verbose_name_plural = "iati XML sources"
ordering = ["ref"]
def __unicode__(self):
return self.ref
def get_parse_status(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-xml='xml_%i' class='parse'><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
get_parse_status.allow_tags = True
get_parse_status.short_description = _(u"Parse status")
def process(self):
parser = Parser()
parser.parse_url(self.source_url, self.ref)
self.date_updated = datetime.datetime.now()
activity_counter = AdminTools()
self.xml_activity_count = activity_counter.get_xml_activity_amount(self.source_url)
self.oipa_activity_count = activity_counter.get_oipa_activity_amount(self.ref)
self.save(process=False)
def save(self, process=True, *args, **kwargs):
super(IatiXmlSource, self).save()
if process:
self.process()
class DatasetSync(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES)
date_updated = models.DateTimeField(auto_now=True, editable=False)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
def __unicode__(self):
return self.interval
class Meta:
verbose_name_plural = "dataset synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def _add_month(self, d,months=1):
year, month, day = d.timetuple()[:3]
new_month = month + months
return datetime.date(year + ((new_month-1) / 12), (new_month-1) % 12 +1, day)
def process(self):
if self.interval == u'YEARLY' and (self._add_month(self.date_updated, 12) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'MONTHLY' and (self._add_month(self.date_updated) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
        elif self.interval == u'WEEKLY' and (self.date_updated+datetime.timedelta(7) <= datetime.datetime.today()):
            self.sync_dataset_with_iati_api()
        elif self.interval == u'DAILY' and (self.date_updated+datetime.timedelta(1) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
def sync_dataset_with_iati_api(self):
syncer = DatasetSyncer()
syncer.synchronize_with_iati_api(self.type)
class CodelistSync(models.Model):
date_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
verbose_name_plural = "codelist synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def sync_codelist(self):
syncer = CodeListImporter()
syncer.synchronise_with_codelists()
|
adityahase/frappe
|
frappe/modules/utils.py
|
Python
|
mit
| 8,881
| 0.026461
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Utilities for using modules
"""
import frappe, os, json
import frappe.utils
from frappe import _
from frappe.utils import cint
def export_module_json(doc, is_standard, module):
"""Make a folder for the given doc and add its json file (make it a standard
object that will be synced)"""
if (not frappe.flags.in_import and getattr(frappe.get_conf(),'developer_mode', 0)
and is_standard):
from frappe.modules.export_file import export_to_files
# json
export_to_files(record_list=[[doc.doctype, doc.name]], record_module=module,
create_init=is_standard)
path = os.path.join(frappe.get_module_path(module), scrub(doc.doctype),
scrub(doc.name), scrub(doc.name))
return path
def get_doc_module(module, doctype, name):
"""Get custom module for given document"""
module_name = "{app}.{module}.{doctype}.{name}.{name}".format(
app = frappe.local.module_app[scrub(module)],
doctype = scrub(doctype),
module = scrub(module),
name = scrub(name)
)
return frappe.get_module(module_name)
@frappe.whitelist()
def export_customizations(module, doctype, sync_on_migrate=0, with_permissions=0):
"""Export Custom Field and Property Setter for the current document to the app folder.
This will be synced with bench migrate"""
sync_on_migrate = cint(sync_on_migrate)
with_permissions = cint(with_permissions)
if not frappe.get_conf().developer_mode:
raise Exception('Not developer mode')
custom = {'custom_fields': [], 'property_setters': [], 'custom_perms': [],
'doctype': doctype, 'sync_on_migrate': sync_on_migrate}
def add(_doctype):
custom['custom_fields'] += frappe.get_all('Custom Field',
fields='*', filters={'dt': _doctype})
custom['property_setters'] += frappe.get_all('Property Setter',
fields='*', filters={'doc_type': _doctype})
add(doctype)
if with_permissions:
custom['custom_perms'] = frappe.get_all('Custom DocPerm',
fields='*', filters={'parent': doctype})
# also update the custom fields and property setters for all child tables
for d in frappe.get_meta(doctype).get_table_fields():
export_customizations(module, d.options, sync_on_migrate, with_permissions)
if custom["custom_fields"] or custom["property_setters"] or custom["custom_perms"]:
folder_path = os.path.join(get_module_path(module), 'custom')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
path = os.path.join(folder_path, scrub(doctype)+ '.json')
with open(path, 'w') as f:
f.write(frappe.as_json(custom))
frappe.msgprint(_('Customizations for <b>{0}</b> exported to:<br>{1}').format(doctype,path))
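# In short: export_customizations() writes the collected customizations to
# <module path>/custom/<scrubbed doctype>.json, and sync_customizations() below
# is the matching import side, applied on migrate for files that set
# "sync_on_migrate".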
def sync_customizations(app=None):
'''Sync custom fields and property setters from custom folder in each app module'''
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
for app_name in apps:
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = frappe.get_app_path(app_name, module_name, 'custom')
if os.path.exists(folder):
for fname in os.listdir(folder):
if fname.endswith('.json'):
with open(os.path.join(folder, fname), 'r') as f:
data = json.loads(f.read())
if data.get('sync_on_migrate'):
sync_customizations_for_doctype(data, folder)
def sync_customizations_for_doctype(data, folder):
	'''Sync doctype customizations for a particular data set'''
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
doctype = data['doctype']
update_schema = False
def sync(key, custom_doctype, doctype_fieldname):
|
doctypes = list(set(map(lambda row: row.get(doctype_fieldname), data[key])))
        # sync single doctype excluding the child doctype
def sync_single_doctype(doc_type):
def _insert(data):
if data.get(doctype_fieldname) == doc_type:
data['doctype'] = custom_doctype
doc = frappe.get_doc(data)
doc.db_insert()
if custom_doctype != 'Custom Field':
frappe.db.sql('delete from `tab{0}` where `{1}` =%s'.format(
custom_doctype, doctype_fieldname), doc_type)
for d in data[key]:
_insert(d)
else:
for d in data[key]:
field = frappe.db.get_value("Custom Field", {"dt": doc_type, "fieldname": d["fieldname"]})
if not field:
d["owner"] = "Administrator"
_insert(d)
else:
custom_field = frappe.get_doc("Custom Field", field)
custom_field.flags.ignore_validate = True
custom_field.update(d)
custom_field.db_update()
for doc_type in doctypes:
# only sync the parent doctype and child doctype if there isn't any other child table json file
if doc_type == doctype or not os.path.exists(os.path.join(folder, frappe.scrub(doc_type)+".json")):
sync_single_doctype(doc_type)
if data['custom_fields']:
sync('custom_fields', 'Custom Field', 'dt')
update_schema = True
if data['property_setters']:
sync('property_setters', 'Property Setter', 'doc_type')
if data.get('custom_perms'):
sync('custom_perms', 'Custom DocPerm', 'parent')
print('Updating customizations for {0}'.format(doctype))
validate_fields_for_doctype(doctype)
if update_schema and not frappe.db.get_value('DocType', doctype, 'issingle'):
frappe.db.updatedb(doctype)
def scrub(txt):
return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
return scrub(dt), scrub(dn)
def get_module_path(module):
"""Returns path of the given module"""
return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
from frappe.modules.import_file import import_files
return import_files(module, dt, dn, force=force, reset_permissions=reset_permissions)
def export_doc(doctype, name, module=None):
"""Write a doc to standard path."""
from frappe.modules.export_file import write_document_file
print(doctype, name)
if not module: module = frappe.db.get_value('DocType', name, 'module')
write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
"""Returns **Module Def** name of given doctype."""
def make_modules_dict():
return dict(frappe.db.sql("select name, module from tabDocType"))
return frappe.cache().get_value("doctype_modules", make_modules_dict)[doctype]
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix="", suffix=""):
"""Returns the module object for given doctype."""
if not module:
module = get_doctype_module(doctype)
app = get_module_app(module)
key = (app, doctype, prefix, suffix)
module_name = get_module_name(doctype, module, prefix, suffix)
try:
if key not in doctype_python_modules:
doctype_python_modules[key] = frappe.get_module(module_name)
except ImportError as e:
raise ImportError('Module import failed for {0} ({1})'.format(doctype, module_name + ' Error: ' + str(e)))
return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", suffix="", app=None):
return '{app}.{module}.doctype.{doctype}.{prefix}{doctype}{suffix}'.format(\
app = scrub(app or get_module_app(module)),
module = scrub(module),
doctype = scrub(doctype),
prefix=prefix,
suffix=suffix)
def get_module_app(module):
return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
app = frappe.local.module_app[scrub(module)]
if not app:
frappe.throw(_("App not found"))
app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
return app_publisher
def make_boilerplate(template, doc, opts=None):
target_path = get_doc_path(doc.module, doc.doctype, doc.name)
template_name = template.replace("controller", scrub(doc.name))
if template_name.endswith('._py'):
template_name = template_name[:-4] + '.py'
target_file_path = os.path.join(target_path, template_name)
if not doc: doc = {}
app_publisher = get_app_publisher(doc.module)
if not os.path.exists(target_file_path):
if not opts:
opts = {}
|
psi4/psi4meta
|
conda-recipes/psi4-docs/run_coverage.py
|
Python
|
gpl-2.0
| 4,093
| 0.001466
|
import os
import copy
import time
from pathlib import Path
import subprocess as sp
from multiprocessing import Pool
objdir = Path(os.environ['SRC_DIR']) / 'build'
cb_threads = int(os.environ['CPU_COUNT'])
coverage_exe = Path(os.environ['PREFIX']) / 'bin' / 'coverage'
lenv = copy.deepcopy(os.environ)
pythonpath = objdir / 'stage' / 'lib' / ('python' + os.environ['PY_VER']) / 'site-packages'
lenv['PYTHONPATH'] = str(pythonpath)
os.chdir(objdir)
test_time = time.time()
outfile = open("output_coverage", "w")
errfile = open("error_coverage", "w")
print('objdir/CWD:', os.getcwd())
exclude_addons_missing = [
'adcc',
'brianqc',
'cfour',
'chemps2',
'cppe',
'dkh',
'erd',
'gcp',
'gdma',
'gpu_dfcc',
'mrcc',
'optking', # RAK scratch
'pasture',
'pcmsolver',
'simint',
'snsmp2',
'v2rdm_casscf',
]
exclude_need_ctest_file_manipulation = [
'cookbook-manual-sow-reap',
'ci-property',
'cubeprop',
'cubeprop-esp',
'cubeprop-frontier',
'dftd3-psithon2',
'fcidump',
'fsapt-terms',
'fsaptd-terms',
'mp2-property',
'psiaux1-myplugin1',
'psithon2',
'pywrap-db2',
'pywrap-freq-e-sowreap',
'pywrap-freq-g-sowreap',
'scf-property',
# not actually test cases
'dft-dsd',
'fsapt-diff1',
'large-atoms',
]
exclude_too_long = [
'cbs-xtpl-func', # 200
'cc13a', # 100
'dcft7', # 100
'dft-bench-interaction', # 2500
'dft-bench-ionization', # 1300
'fd-freq-energy-large', # 200
'fd-freq-gradient-large', # 200
'frac-traverse', # 100
'fsapt-allterms', # 200
'fsapt1', # 400
'isapt1', # 300
'opt13', # 200
'python-vibanalysis', # 700
'sapt2', # 100
'sapt4', # 100
'scf-bz2', # 100
'cc5', # D4800
'opt10', # D4000
'opt-multi-frozen-dimer-c2h', # D300
'opt-multi-dimer-c2h', # D300
'opt-multi-dimer-c1', # D300
'mp2-def2', # D300
'psimrcc-fd-freq2', # D300
'optking-dlpc', # many hours
]
def do_skip(tlabel):
if tlabel in exclude_too_long:
return True
if tlabel in exclude_need_ctest_file_manipulation:
return True
for chunk in exclude_addons_missing:
if tlabel.startswith(chunk):
return True
return False
files = []
for ext in ['.dat', '.py']:
files.extend(Path('.').glob('../tests/**/input' + ext))
#files = Path('.').glob('../tests/scf*/input.dat')
#files = Path('.').glob('../tests/[jl]*/*/input.[pd]*')
idx = 1
filteredtests = []
for tpath in files:
tlabel = tpath.parent.stem
dir_up = tpath.parent.parent.stem
if dir_up != 'tests':
# e.g., dftd3-energy
tlabel = '-'.join([dir_up, tlabel])
if do_skip(tlabel):
print(" Skip {:4} {}".format('', tlabel))
else:
print(" Run {:4} {}".format('#' + str(idx), tlabel))
filteredtests.append((idx, tpath, tlabel))
idx += 1
total_files = len(filteredtests)
print("\n\n ==> Running {} test cases -j{} <== \n".format(total_files, cb_threads))
def run_test(fname):
tnum, tpath, tlabel = fname
if tpath.name == "input.dat":
        cmd = [coverage_exe, "run", "--parallel-mode", "stage/bin/psi4", tpath]
elif tpath.name == "input.py":
cmd = [coverage_exe, "run", "--parallel-mode", tpath]
t = time.time()
outfile.write('<<< #{} {} >>>'.format(tnum, tlabel))
retcode = sp.call(cmd, stdout=outfile, stderr=errfile, env=lenv)
total_time = time.time() - t
if retcode == 0:
print("%3d/%3d Success! %40s (%8.2f seconds)" % (tnum, total_files, tlabel, total_time))
else:
print("%3d/%3d Failure! %40s (%8.2f seconds) ***" % (tnum, total_files, tlabel, total_time))
p = Pool(cb_threads, maxtasksperchild=1)
p.map(run_test, filteredtests, chunksize=1)
print("\n\n ==> Combining Python data <== \n")
sp.call([coverage_exe, "combine"])
sp.call([coverage_exe, "report"])
outfile.close()
errfile.close()
test_time = time.time() - test_time
print("Total testing time %.2f seconds." % test_time)
|
clouserw/zamboni
|
mkt/langpacks/tests/test_models.py
|
Python
|
bsd-3-clause
| 13,411
| 0
|
# -*- coding: utf-8 -*-
import json
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms import ValidationError
from mock import patch
from nose.tools import eq_, ok_
from lib.crypto.packaged import SigningError
from mkt.files.helpers import copyfileobj
from mkt.files.models import FileUpload, nfd_str
from mkt.files.tests.test_models import UploadTest
from mkt.langpacks.models import LangPack
from mkt.site.tests import TestCase
class TestLangPackBasic(TestCase):
def reset_uuid(self):
langpack = LangPack(uuid='12345678123456781234567812345678')
eq_(langpack.pk, '12345678123456781234567812345678')
langpack.reset_uuid()
ok_(langpack.pk != '12345678123456781234567812345678')
def test_download_url(self):
langpack = LangPack(pk='12345678123456781234567812345678')
ok_(langpack.download_url.endswith(
'/12345678123456781234567812345678/langpack.zip'))
def test_manifest_url(self):
langpack = LangPack(pk='12345678123456781234567812345678')
eq_(langpack.manifest_url, '') # Inactive langpack.
langpack.active = True
ok_(langpack.manifest_url.endswith(
'/12345678-1234-5678-1234-567812345678/manifest.webapp'))
@patch('mkt.webapps.utils.storage')
def test_get_minifest_contents(self, storage_mock):
fake_manifest = {
'name': u'Fake LangPäck',
'developer': {
'name': 'Mozilla'
}
}
langpack = LangPack(
pk='12345678123456781234567812345678',
fxos_version='2.2',
version='0.3',
manifest=json.dumps(fake_manifest))
storage_mock.size.return_value = 666
minifest_contents = json.loads(langpack.get_minifest_contents())
eq_(minifest_contents,
{'version': '0.3',
'size': 666,
'name': u'Fake LangPäck',
'package_path': langpack.download_url,
'developer': {'name': 'Mozilla'}})
return langpack, minifest_contents
def test_get_minifest_contents_caching(self):
langpack, minifest_contents = self.test_get_minifest_contents()
langpack.update(manifest='{}')
# Because of caching, get_minifest_contents should not have changed.
new_minifest_contents = json.loads(langpack.get_minifest_contents())
eq_(minifest_contents, new_minifest_contents)
def test_language_choices_and_display(self):
field = LangPack._meta.get_field('language')
eq_(len(field.choices), len(settings.LANGUAGES))
eq_(LangPack(language='fr').get_language_display(), u'Français')
eq_(LangPack(language='en-US').get_language_display(), u'English (US)')
def test_sort(self):
langpack_it = LangPack.objects.create(language='it')
langpack_de = LangPack.objects.create(language='de')
langpack_fr = LangPack.objects.create(language='fr')
eq_(list(LangPack.objects.all()),
[langpack_de, langpack_fr, langpack_it])
class UploadCreationMixin(object):
def upload(self, name, **kwargs):
if os.path.splitext(name)[-1] not in ['.webapp', '.zip']:
name = name + '.zip'
v = json.dumps(dict(errors=0, warnings=1, notices=2, metadata={}))
fname = nfd_str(self.packaged_app_path(name))
if not storage.exists(fname):
with storage.open(fname, 'w') as fs:
copyfileobj(open(fname), fs)
data = {
'path': fname,
'name': name,
'hash': 'sha256:%s' % name,
'validation': v
}
data.update(**kwargs)
return FileUpload.objects.create(**data)
class TestLangPackUpload(UploadTest, UploadCreationMixin):
# Expected manifest, to test zip file parsing.
expected_manifest = {
'languages-target': {
'app://*.gaiamobile.org/manifest.webapp': '2.2'
},
'description': 'Support for additional language: German',
'default_locale': 'de',
'icons': {
'128': '/icon.png'
},
'version': '1.0.3',
'role': 'langpack',
'languages-provided': {
|
'de': {
            'version': '201411051234',
'apps': {
'app://calendar.gaiamobile.org/manifest.webapp':
'/de/calendar',
'app://email.gaiamobile.org/manifest.webapp':
'/de/email'
},
'name': 'Deutsch'
}
},
'developer': {
'name': 'Mozilla'
},
        'type': 'privileged',
        'locales': {
'de': {
'name': u'Sprachpaket für Gaia: Deutsch'
},
'pl': {
'name': u'Paczka językowa dla Gai: niemiecki'
}
},
'name': 'Gaia Langpack for German'
}
def create_langpack(self):
langpack = LangPack.objects.create(
language='fr', version='0.9', fxos_version='2.1', active=False,
file_version=1, manifest='{}')
return langpack
def test_upload_new(self):
eq_(LangPack.objects.count(), 0)
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload)
ok_(langpack.uuid)
eq_(langpack.file_version, 1)
eq_(langpack.version, '1.0.3')
eq_(langpack.language, 'de')
eq_(langpack.fxos_version, '2.2')
eq_(langpack.filename, '%s-%s.zip' % (langpack.uuid, langpack.version))
ok_(langpack.filename in langpack.file_path)
ok_(langpack.file_path.startswith(langpack.path_prefix))
ok_(os.path.exists(langpack.file_path))
eq_(langpack.get_manifest_json(), self.expected_manifest)
ok_(LangPack.objects.no_cache().get(pk=langpack.uuid))
eq_(LangPack.objects.count(), 1)
return langpack
def test_upload_existing(self):
langpack = self.create_langpack()
original_uuid = langpack.uuid
original_file_path = langpack.file_path
original_file_version = langpack.file_version
original_manifest = langpack.manifest
with patch('mkt.webapps.utils.storage') as storage_mock:
# mock storage size before building minifest since we haven't
# created a real file for this langpack yet.
storage_mock.size.return_value = 666
original_minifest = langpack.get_minifest_contents()
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload, instance=langpack)
eq_(langpack.uuid, original_uuid)
eq_(langpack.version, '1.0.3')
eq_(langpack.language, 'de')
eq_(langpack.fxos_version, '2.2')
eq_(langpack.filename, '%s-%s.zip' % (langpack.uuid, langpack.version))
eq_(langpack.get_manifest_json(), self.expected_manifest)
ok_(langpack.file_path.startswith(langpack.path_prefix))
ok_(langpack.filename in langpack.file_path)
ok_(langpack.file_path != original_file_path)
ok_(langpack.file_version > original_file_version)
ok_(os.path.exists(langpack.file_path))
ok_(LangPack.objects.no_cache().get(pk=langpack.uuid))
eq_(LangPack.objects.count(), 1)
ok_(langpack.manifest != original_manifest)
# We're supposed to have busted the old minifest cache.
ok_(langpack.get_minifest_contents() != original_minifest)
@patch('mkt.files.utils.WebAppParser.get_json_data')
def test_upload_language_validation(self, get_json_data_mock):
upload = self.upload('langpack')
get_json_data_mock.return_value = {
'name': 'Portuguese Langpack',
'developer': {
'name': 'Mozilla'
},
'role': 'langpack',
'languages-provided': {
'pt-BR': {}
},
'languages-target': {
'app://*.gaiamobile.org/manifest.webapp': '2.2'
},
'version': '0.1'
}
langpack = LangPack.from_upl
|
googleapis/docuploader
|
docuploader/tar.py
|
Python
|
apache-2.0
| 2,102
| 0
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles tarring up documentation directories."""
import subprocess
from docuploader import shell
def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
"""Compress the given directory into the tarfile at destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, it's not as efficient as the
    # tar command on large numbers of files.
return shell.run(
[
"tar",
"--create",
f"--directory={directory}",
f"--file={destination}",
            # Treat a colon in the filename as part of the filename,
            # not an indication of a remote file. This is required in order to
            # handle canonical filenames on Windows.
            "--force-local",
            "--gzip",
|
            "--verbose",
            ".",
        ],
hide_output=False,
)
def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
"""Decompress the given tarfile to the destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle extracting tarfiles, it's not as efficient as the
    # tar command on large numbers of files.
return shell.run(
[
"tar",
"--extract",
f"--directory={destination}",
f"--file={archive}",
"--gzip",
"--verbose",
],
hide_output=True,
)
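# Minimal usage sketch (paths are illustrative, not from the original source):
#
#   from docuploader import tar
#   proc = tar.compress(directory="site/html", destination="site-docs.tar.gz")
#   proc.check_returncode()  # shell.run returns a subprocess.CompletedProcess
#   tar.decompress(archive="site-docs.tar.gz", destination="restored-html")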
|
ItsCalebJones/SpaceLaunchNow-Server
|
api/migrations/0020_launch_new_id.py
|
Python
|
apache-2.0
| 860
| 0.001163
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-04 22:15
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
def create_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.new_id = uuid.uuid4()
m.save()
def remove_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.uuid = None
m.save()
class Migration(migrations.Migration):
dependencies = [
        ('api', '0019_auto_20181206_0135'),
]
operations = [
migrations.AddField(
model_name='launch',
name='new_id',
field=models.UUIDField(default=uuid.uuid4),
        ),
        migrations.RunPython(code=create_ids, reverse_code=remove_ids),
]
|
DaanHoogland/cloudstack
|
plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVmModule.py
|
Python
|
apache-2.0
| 22,802
| 0.00614
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
from OvmCommonModule import *
from OvmDiskModule import *
from OvmVifModule import *
from OvmHostModule import OvmHost
from string import Template
from OVSXXenVMConfig import *
from OVSSiteVM import start_vm, stop_vm, reset_vm
from OVSSiteCluster import *
from OvmStoragePoolModule import OvmStoragePool
from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
from OVSDB import db_get_vm
from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
from OVSXXenVM import xen_migrate_vm
from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
from OVSSiteVMInstall import install_vm_hvm
from OVSSiteRMServer import get_master_ip
from OVSXXenVMInstall import xen_change_vm_cdrom
from OVSXAPIUtil import XenAPIObject, session_login, session_logout
logger = OvmLogger("OvmVm")
class OvmVmDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vm = OvmVm()
setAttrFromDict(vm, 'cpuNum', deDict, int)
setAttrFromDict(vm, 'memory', deDict, long)
setattr(vm, 'rootDisk', toOvmDisk(deDict['rootDisk']))
setattr(vm, 'vifs', toOvmVifList(deDict['vifs']))
setattr(vm, 'disks', toOvmDiskList(deDict['disks']))
setAttrFromDict(vm, 'name', deDict)
setAttrFromDict(vm, 'uuid', deDict)
setAttrFromDict(vm, 'bootDev', deDict)
setAttrFromDict(vm, 'type', deDict)
return vm
class OvmVmEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVm): raise Exception("%s is not instance of OvmVm"%type(obj))
dct = {}
safeDictSet(obj, dct, 'cpuNum')
safeDictSet(obj, dct, 'memory')
safeDictSet(obj, dct, 'powerState')
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'type')
vifs = fromOvmVifList(obj.vifs)
dct['vifs'] = vifs
rootDisk = fromOvmDisk(obj.rootDisk)
dct['rootDisk'] = rootDisk
disks = fromOvmDiskList(obj.disks)
dct['disks'] = disks
return dct
def toOvmVm(jStr):
return json.loads(jStr, cls=OvmVmDecoder)
def fromOvmVm(vm):
return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
class OvmVm(OvmObject):
cpuNum = 0
memory = 0
rootDisk = None
vifs = []
disks = []
powerState = ''
name = ''
bootDev = ''
type = ''
def _getVifs(self, vmName):
        vmPath = OvmHost()._vmNameToPath(vmName)
domId = OvmHost()._getDomainIdByName(vmName)
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vifName = 'vif' + domId + '.' + k[len('vif'):]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', vifName)
|
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getVifsFromConfig(self, vmPath):
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', k)
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getIsoMountPath(self, vmPath):
vmName = basename(vmPath)
priStoragePath = vmPath.rstrip(join('running_pool', vmName))
return join(priStoragePath, 'iso_pool', vmName)
def _getVmTypeFromConfigFile(self, vmPath):
vmType = successToMap(xen_get_vm_type(vmPath))['type']
return vmType.replace('hvm', 'HVM').replace('para', 'PV')
def _tapAOwnerFile(self, vmPath):
# Create a file with name convention 'host_ip_address' in vmPath
# Because xm list doesn't return vm that has been stopped, we scan
# primary storage for stopped vm. This file tells us which host it belongs
# to. The file is used in OvmHost.getAllVms()
self._cleanUpOwnerFile(vmPath)
ownerFileName = makeOwnerFileName()
fd = open(join(vmPath, ownerFileName), 'w')
fd.write(ownerFileName)
fd.close()
def _cleanUpOwnerFile(self, vmPath):
for f in os.listdir(vmPath):
fp = join(vmPath, f)
if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
os.remove(fp)
@staticmethod
def create(jsonString):
def dumpCfg(vmName, cfgPath):
cfgFd = open(cfgPath, 'r')
cfg = cfgFd.readlines()
cfgFd.close()
logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))
def setVifsType(vifs, type):
for vif in vifs:
vif.type = type
def hddBoot(vm, vmPath):
vmType = vm.type
if vmType == "FROMCONFIGFILE":
vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
cfgDict = {}
if vmType == "HVM":
cfgDict['builder'] = "'hvm'"
cfgDict['acpi'] = "1"
cfgDict['apic'] = "1"
cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
vifType = 'ioemu'
else:
cfgDict['bootloader'] = "'/usr/bin/pygrub'"
vifType = 'netfront'
cfgDict['name'] = "'%s'"%vm.name
cfgDict['disk'] = "[]"
cfgDict['vcpus'] = "''"
cfgDict['memory'] = "''"
cfgDict['on_crash'] = "'destroy'"
cfgDict['on_reboot'] = "'restart'"
cfgDict['vif'] = "[]"
items = []
for k in cfgDict.keys():
item = " = ".join([k, cfgDict[k]])
items.append(item)
vmSpec = "\n".join(items)
vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
vmCfg.write(vmSpec)
vmCfg.close()
setVifsType(vm.vifs, vifType)
raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
vifs = [OvmVif.toXenString(v) for v in vm.vifs]
for vif in vifs:
raiseExceptionIfFail(xen_set_vifs(vmPath, vif))
for disk in vm.disks:
raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))
raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
cfgFile = join(vmPath, 'vm.cfg')
# only HVM supports attaching cdrom
if vmType == 'HVM':
            # Add an empty "hdc:cdrom" entry in the config. First we set the boot order to 'd' (cdrom boot)
            # so that the 'hdc:cdrom' entry ends up in the disk list, then change the boot order back to 'c'
            # (harddisk boot). The VM cannot start with an empty 'hdc:cdrom' while the boot order is 'd'.
            # It's tricky!
|
noam09/deluge-telegramer
|
telegramer/include/telegram/inline/inlinequeryresultcachedmpeg4gif.py
|
Python
|
gpl-3.0
| 3,901
| 0.003076
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif."""
from telegram import InlineQueryResult
class InlineQueryResultCachedMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the
Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an
optional caption. Alternatively, you can use :attr:`input_message_content` to send a message
with the specified content instead of the animation.
Attributes:
type (:obj:`str`): 'mpeg4_gif'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`): Optional. Title for the result.
caption (:obj:`str`): Optional. Caption, 0-200 characters
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the MPEG-4 file.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`, optional): Title for the result.
caption (:obj:`str`, optional): Caption, 0-200 characters
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the MPEG-4 file.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
mpeg4_file_id,
title=None,
caption=None,
reply_markup=None,
input_message_content=None,
parse_mode=None,
**kwargs):
# Required
super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id)
self.mpeg4_file_id = mpeg4_file_id
# Optionals
if title:
self.title = title
if caption:
self.caption = caption
if parse_mode:
self.parse_mode = parse_mode
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
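# Minimal usage sketch (identifiers and the file id are placeholders, not real
# values), assuming a Bot instance is answering the inline query:
#
#   result = InlineQueryResultCachedMpeg4Gif(
#       id='1',
#       mpeg4_file_id='CgACAgQAAxk...',
#       caption='looping clip')
#   bot.answer_inline_query(update.inline_query.id, [result])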
|
jodal/comics
|
comics/aggregator/utils.py
|
Python
|
agpl-3.0
| 333
| 0
|
from comics.comics import get_comic_module
SCHEDULE_DAYS = ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"]
def get_comic_schedule(comic):
    module = get_comic_module(comic.slug)
    schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(",")]
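# e.g. a crawler schedule of "Mo,We,Fr" maps to [1, 3, 5]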
|
bhermanmit/openmc
|
tests/test_multipole/test_multipole.py
|
Python
|
mit
| 3,870
| 0.001809
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness, PyAPITestHarness
import openmc
from openmc.stats import Box
from openmc.source import Source
class MultipoleTestHarness(PyAPITestHarness):
def _build_inputs(self):
####################
# Materials
####################
moderator = openmc.Material(material_id=1)
moderator.set_density('g/cc', 1.0)
moderator.add_nuclide('H1', 2.0)
moderator.add_nuclide('O16', 1.0)
moderator.add_s_alpha_beta('c_H_in_H2O')
dense_fuel = openmc.Material(material_id=2)
dense_fuel.set_density('g/cc', 4.5)
dense_fuel.add_nuclide('U235', 1.0)
mats_file = openmc.Materials([moderator, dense_fuel])
mats_file.export_to_xml()
####################
# Geometry
####################
c1 = openmc.Cell(cell_id=1, fill=moderator)
mod_univ = openmc.Universe(universe_id=1, cells=(c1,))
r0 = openmc.ZCylinder(R=0.3)
        c11 = openmc.Cell(cell_id=11, fill=dense_fuel, region=-r0)
c11.temperature = [500, 0, 700, 800]
c12 = openmc.Cell(cell_id=12, fill=moderator, region=+r0)
fuel_univ = openmc.Universe(universe_id=11, cells=(c11, c12))
lat = openmc.RectLattice(lattice_id=101)
lat.dimension = [2, 2]
lat.lower_left = [-2.0, -2.0]
|
lat.pitch = [2.0, 2.0]
lat.universes = [[fuel_univ]*2]*2
lat.outer = mod_univ
x0 = openmc.XPlane(x0=-3.0)
x1 = openmc.XPlane(x0=3.0)
y0 = openmc.YPlane(y0=-3.0)
y1 = openmc.YPlane(y0=3.0)
for s in [x0, x1, y0, y1]:
s.boundary_type = 'reflective'
c101 = openmc.Cell(cell_id=101, fill=lat, region=+x0 & -x1 & +y0 & -y1)
root_univ = openmc.Universe(universe_id=0, cells=(c101,))
geometry = openmc.Geometry(root_univ)
geometry.export_to_xml()
####################
# Settings
####################
sets_file = openmc.Settings()
sets_file.batches = 5
sets_file.inactive = 0
sets_file.particles = 1000
sets_file.source = Source(space=Box([-1, -1, -1], [1, 1, 1]))
sets_file.output = {'summary': True}
sets_file.temperature = {'tolerance': 1000, 'multipole': True}
sets_file.export_to_xml()
####################
# Plots
####################
plots_file = openmc.Plots()
plot = openmc.Plot(plot_id=1)
plot.basis = 'xy'
plot.color_by = 'cell'
plot.filename = 'cellplot'
plot.origin = (0, 0, 0)
plot.width = (7, 7)
plot.pixels = (400, 400)
plots_file.append(plot)
plot = openmc.Plot(plot_id=2)
plot.basis = 'xy'
plot.color_by = 'material'
plot.filename = 'matplot'
plot.origin = (0, 0, 0)
plot.width = (7, 7)
plot.pixels = (400, 400)
plots_file.append(plot)
plots_file.export_to_xml()
def execute_test(self):
if not 'OPENMC_MULTIPOLE_LIBRARY' in os.environ:
raise RuntimeError("The 'OPENMC_MULTIPOLE_LIBRARY' environment "
"variable must be specified for this test.")
else:
super(MultipoleTestHarness, self).execute_test()
def _get_results(self):
outstr = super(MultipoleTestHarness, self)._get_results()
su = openmc.Summary('summary.h5')
outstr += str(su.geometry.get_all_cells()[11])
return outstr
def _cleanup(self):
f = os.path.join(os.getcwd(), 'plots.xml')
if os.path.exists(f):
os.remove(f)
super(MultipoleTestHarness, self)._cleanup()
if __name__ == '__main__':
harness = MultipoleTestHarness('statepoint.5.h5')
harness.main()
|
sergiocorato/partner-contact
|
partner_capital/models/res_partner_turnover_range.py
|
Python
|
agpl-3.0
| 431
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# Copyright 2015 Jairo Llopis <jairo.llopis@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ResPartnerTurnoverRange(models.Model):
_name = 'res.partner.turnover_range'
    _description = "Turnover range"
    name = fields.Char(required=True, translate=True)
|
mcrav/XDToolkit
|
src/splash.py
|
Python
|
gpl-3.0
| 1,610
| 0.001863
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'splash.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_splash(object):
def setupUi(self, splash):
splash.setObjectName("splash")
splash.resize(400, 300)
font = QtGui.QFont()
font.setFamily("Bitstream Vera Sans Mono")
font.setPointSize(10)
splash.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("res/flatearth.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
splash.setWindowIcon(icon)
self.gridLayout = QtWidgets.QGridLayout(splash)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.progressBar = QtWidgets.QProgressBar(splash)
|
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.statusLab = QtWidgets.QLabel(splash)
self.statusLab.setText("")
self.statusLab.setObjectName("statusLab")
self.verticalLayout.addWidget(self.statusLab)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(splash)
QtCore.QMetaObject.connectSlotsByName(splash)
|
def retranslateUi(self, splash):
_translate = QtCore.QCoreApplication.translate
splash.setWindowTitle(_translate("splash", "Initializing"))
|
mozilla/amo-validator
|
tests/test_unicodehelper.py
|
Python
|
bsd-3-clause
| 1,282
| 0
|
# -*- coding: utf-8 -*-
|
import validator.unicodehelper as unicodehelper
COMPARISON = 'täst'.decode('utf-8')
def _do_test(path):
'Performs a test on a JS file'
text = open(path).read()
utext = unicodehelper.decode(text)
print utext.encode('ascii', 'backslashreplace')
assert utext == COMPARISON
def test_latin1():
    'Tests latin-1 encoding is properly decoded'
_do_test('tests/resources/unicodehelper/latin_1.txt')
def test_utf8():
'Tests utf-8 w/o BOM encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-8.txt')
def test_utf8_bom():
'Tests utf-8 with BOM encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-8-bom.txt')
def test_utf16le():
'Tests utf-16 Little Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-16le.txt')
def test_utf16be():
'Tests utf-16 Big Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-16be.txt')
def test_utf32le():
'Tests utf-32 Little Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-32le.txt')
def test_utf32be():
'Tests utf-32 Big Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-32be.txt')
|
|
ntt-sic/neutron
|
neutron/openstack/common/notifier/api.py
|
Python
|
apache-2.0
| 5,734
| 0
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from neutron.openstack.common import context
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default='$host',
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param function: - object of the function
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id,
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
def publisher_id(service, host=None):
if not host:
host = CONF.host
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
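# Usage sketch (publisher and payload values are illustrative):
#
#   notify(ctxt, publisher_id('compute', 'host1'),
#          'compute.create_instance', INFO, {'instance_id': 12})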
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
            add_driver(notification_driver)
return _drivers.values()
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
    if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
|
djc/awmy
|
zones.py
|
Python
|
mit
| 1,974
| 0.048632
|
#!/usr/bin/env python
import sys, os, re, tarfile, json
FILES = {
'africa', 'antarctica', 'asia', 'australasia',
'europe', 'northamerica', 'southamerica',
}
WS_SPLIT = re.compile("[ \t]+")
def lines(fn):
with tarfile.open(fn, 'r:*') as tar:
for info in tar:
if not info.isfile() or info.name not in FILES:
continue
f = tar.extractfile(info)
for ln in f:
ln = ln.decode('iso-8859-1')
ln = ln.rstrip()
ln = ln.split('#', 1)[0]
ln = ln.rstrip(' \t')
if ln:
yield ln
f.close()
def offset(s):
if s in {'-', '0'}:
return 0
dir, s = (-1, s[1:]) if s[0] == '-' else (1, s)
words = [int(n) for n in s.split(':')]
assert 1 <= len(words) < 4, words
words = words + [0] * (3 - len(words))
assert 0 <= words[0] < 24, words
assert 0 <= words[1] < 60, words
assert 0 <= words[2] < 60, words
return dir * sum((i * num) for (i, num) in zip(words, (3600, 60, 1)))
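# A few illustrative values:
# offset('2') == 7200, offset('-5:30') == -19800, offset('1:02:03') == 3723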
def zoneline(ls):
ls[1] = None if ls[1] == '-' else ls[1]
tmp = offset(ls[0]), ls[1], ls[2], ls[3:]
return {k: v for (k, v) in zip('orfu', tmp)}
def parse(fn):
zones, rules, zone = {}, {}, None
for ln in lines(fn):
# see zic(8) for documentation
words = WS_SPLIT.split(ln)
if words[0] == 'Zone':
assert words[1] not in zones, words[1]
zone = []
zone.append(zoneline(words[2:]))
if '/' in words[1]:
zones[words[1]] = zone
elif words[0] == '':
assert zone is not None
zone.append(zoneline(words[1:]))
elif words[0] == 'Rule':
zone = None
words[8] = offset(words[8])
rule = rules.setdefault(words[1], [])
rule.append(words[2:])
elif words[0] == 'Link':
            zone = None # ignore
else:
assert False, ln
return {'zones': zones, 'rules': rules}
if __name__ == '__main__':
path = sys.argv[1]
    version = re.match('tzdata(.*)\.tar\.gz$', os.path.basename(path))
if version is None:
raise StandardError('argument must be tzdata archive')
print(json.dumps(parse(path)))
|
nlehuby/osm2gtfs
|
osm2gtfs/tests/creators/tests_ni_managua.py
|
Python
|
gpl-3.0
| 1,515
| 0.00066
|
import unittest
import os
import logging
from osm2gtfs.tests.creators.creators_tests import CreatorsTestsAbstract
# Define logging level
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
class TestCreatorsNiManagua(CreatorsTestsAbstract):
def _get_selector(self):
return "ni_managua"
def _get_required_variables(self):
# Define required values for the tests of this provider
return {
'routes_count': 45,
|
'stops_count': 1450,
'stations_count': 547,
'stops_osm_count': 1997,
'route_id_to_check': 111,
'gtfs_files': [
"agency.txt", "calendar.txt", "routes.txt", "shapes.txt",
"stops.txt", "stop_times.txt", "trips.txt"
],
}
def _override_configuration(self):
# Overriding some of the configuration options
# Use local timetable.json
self.config.data['schedule_source'] = os.path.join(
self.standard_variables['fixture_dir'], "timetable.json")
# Use timeframe of reference GTFS
        self.config.data['start_date'] = "20180101"
self.config.data['end_date'] = "20180201"
def load_tests(loader, tests, pattern):
# pylint: disable=unused-argument
test_cases = ['test_refresh_routes_cache', 'test_refresh_stops_cache', 'test_gtfs_from_cache']
suite = unittest.TestSuite(map(TestCreatorsNiManagua, test_cases))
return suite
if __name__ == '__main__':
unittest.main()
|
grandmasterchef/WhatManager2
|
whatify/urls.py
|
Python
|
mit
| 531
| 0.001883
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'whatify.views.index'),
url(r'^search/(.+)$', 'whatify.views.search'),
    url(r'^torrent_groups/(\d+)$', 'whatify.views.get_torrent_group'),
    url(r'^torrent_groups/(\d+)/download$', 'whatify.views.download_torrent_group'),
url(r'^torrent_groups/random$', 'whatify.views.random_torrent_groups'),
url(r'^torrent_groups/top10$', 'whatify.views.top10_torrent_groups'),
url(r'^artists/(\d+)$', 'whatify.views.get_artist'),
)
|
matrix-org/sydent
|
sydent/config/sms.py
|
Python
|
apache-2.0
| 2,871
| 0.001393
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
from typing import Dict, List
from sydent.config._base import BaseConfig
from sydent.config.exceptions import ConfigError
class SMSConfig(BaseConfig):
def parse_config(self, cfg: "ConfigParser") -> bool:
"""
Parse the sms section of the config
:param cfg: the configuration to be parsed
"""
self.body_template = cfg.get("sms", "bodyTemplate")
# Make sure username and password are bytes otherwise we can't use them with
# b64encode.
self.api_username = cfg.get("sms", "username").encode("UTF-8")
self.api_password = cfg.get("sms", "password").encode("UTF-8")
self.originators: Dict[str, List[Dict[str, str]]] = {}
self.smsRules = {}
for opt in cfg.options("sms"):
if opt.startswith("originators."):
country = opt.split(".")[1]
rawVal = cfg.get("sms", opt)
rawList = [i.strip() for i in rawVal.split(",")]
self.originators[country] = []
for origString in rawList:
|
parts = origString.split(":")
if len(parts) != 2:
|
raise ConfigError(
"Originators must be in form: long:<number>, short:<number> or alpha:<text>, separated by commas"
)
if parts[0] not in ["long", "short", "alpha"]:
raise ConfigError(
"Invalid originator type: valid types are long, short and alpha"
)
self.originators[country].append(
{
"type": parts[0],
"text": parts[1],
}
)
elif opt.startswith("smsrule."):
country = opt.split(".")[1]
action = cfg.get("sms", opt)
if action not in ["allow", "reject"]:
raise ConfigError(
"Invalid SMS rule action: %s, expecting 'allow' or 'reject'"
% action
)
self.smsRules[country] = action
return False
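# Illustrative config fragment accepted by the parser above (values are made
# up, not taken from the original source):
#
#   [sms]
#   bodyTemplate = Your code is {token}
#   username = smsuser
#   password = smspass
#   originators.GB = long:447700900000, alpha:Example
#   smsrule.GB = allow
#   smsrule.US = reject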
|
kashiif/ShaniXBMCWork
|
plugin.video.live.streamspro/oneplay.py
|
Python
|
gpl-2.0
| 11,723
| 0.019193
|
## ONLY FOR NOOBS :D
##CONVERSION OF following encryption by shani into python
## only decryption function is implemented
'''
* jQuery JavaScript Library v1.4.2
* http://jquery.com/
*
* Copyright 2010, John Resig
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* Includes Sizzle.js
* http://sizzlejs.com/
* Copyright 2010, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
*
* Date: Sat Feb 13 22:33:48 2010 -0500
'''
import urllib
import base64
import re,urllib2,cookielib
def decode(r):
e = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
r = r.replace("\n", "");
f = []
c = [0,0,0,0]
t = [0,0,0];
# print 'rrrrrrrrrrrrrrrrrrrrrrrr',r
for n in range(0 ,len(r),4):
c[0]=-1
try:
c[0] = e.index(r[n]);
except:pass
c[1]=-1
try:
c[1] = e.index(r[n + 1])
except:pass
c[2]=-1
try:
c[2] = e.index(r[n + 2]);
except:pass
c[3]=-1
try:
c[3] = e.index(r[n + 3])
except:pass
t[0] = c[0] << 2 | c[1] >> 4
t[1] = (15 & c[1]) << 4 | c[2] >> 2
t[2] = (3 & c[2]) << 6 | c[3]
f+=[t[0], t[1], t[2]];
# print f
# print f[0:10]
return f[0: len(f) - (len(f) % 16)]
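# Note: decode() hand-rolls base64 decoding into a list of integer byte
# values, then drops any trailing partial 16-byte block (presumably because
# the caller processes the result in 16-byte cipher blocks).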
'''
def fun_e:
return unescape(encodeURIComponent(e))
} catch (r) {
throw "Error utf"
}
'''
def func_u(e):
c = [];
#if decode:
# print 'basssssssssssssssssssssss', base64.decode(e)
# return
# e= urllib.unquote(base64.decode(e))
for n in range(0, len(e)):
c.append(ord(e[n]));
return c
def fun_A(e, r):
n=0;
f = [None]*(len(e) / r);
for n in range(0, len(e),r):
f[n / r] = int(e[n:n+r], 16);
return f
'''L inner functions
'''
def func_L_r(e, r):
return e << r | e >> 32 - r ##change>>>
def func_L_n(e, r):
c = 2147483648 & e
t = 2147483648 & r
n = 1073741824 & e
f = 1073741824 & r
a = (1073741823 & e) + (1073741823 & r)
return (2147483648 ^ a ^ c ^ t) if n & f else ( (3221225472 ^ a ^ c ^ t if 1073741824 & a else 1073741824 ^ a ^ c ^ t ) if n | f else a ^ c ^ t)
def func_L_f(e, r, n):
return e & r | ~e & n
def func_L_c(e, r, n):
return e & n | r & ~n
def func_L_t(e, r, n):
return e ^ r ^ n
def func_L_a(e, r, n):
return r ^ (e | ~n)
def func_L_o(e, c, t, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_f(c, t, a), o), u))
return func_L_n(func_L_r(e, d), c)
def func_L_d(e, f, t, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_c(f, t, a), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_u(e, f, c, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_t(f, c, a), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_i(e, f, c, t, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_a(f, c, t), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_b(e):
n = len(e)
f = n + 8
c = (f - f % 64) / 64
t = 16 * (c + 1)
a = [0]*(n+1)
o = 0; d = 0
# for (var r, n = e.length, f = n + 8, c = (f - f % 64) / 64, t = 16 * (c + 1), a = [], o = 0, d = 0; n > d;) r = (d - d % 4) / 4, o = 8 * (d % 4),
for d in range(0,n):
r = (d - d % 4) / 4;
o = 8 * (d % 4);
#print a[r]
#print e[d]
a[r] = a[r] | e[d] << o
d+=1
# print a, d,n
r = (d - d % 4) / 4
o = 8 * (d % 4)
a[r] = a[r] | 128 << o
a[t - 2] = n << 3
# print 'tttttttttttttttttt',t
# print 'len a',len(a)
try:
a[t - 1] = n >> 29# >>> removed
except: pass
return a
def func_L_h(e):
f = [];
for n in range(0,4):
r = 255 & e >> 8 * n #>>> removed
f.append(r)
return f
def func_L(e):
l=0
v=0
S = [];
m = fun_A("67452301efcdab8998badcfe10325476d76aa478e8c7b756242070dbc1bdceeef57c0faf4787c62aa8304613fd469501698098d88b44f7afffff5bb1895cd7be6b901122fd987193a679438e49b40821f61e2562c040b340265e5a51e9b6c7aad62f105d02441453d8a1e681e7d3fbc821e1cde6c33707d6f4d50d87455a14eda9e3e905fcefa3f8676f02d98d2a4c8afffa39428771f6816d9d6122fde5380ca4beea444bdecfa9f6bb4b60bebfbc70289b7ec6eaa127fad4ef308504881d05d9d4d039e6db99e51fa27cf8c4ac5665f4292244432aff97ab9423a7fc93a039655b59c38f0ccc92ffeff47d85845dd16fa87e4ffe2ce6e0a30143144e0811a1f7537e82bd3af2352ad7d2bbeb86d391", 8);
# print m
# print 'eeeeeeeeeeeeeeeeeeeeee',e
S = func_L_b(e);
# print 'S is ',S
y = m[0]; k = m[1]; M = m[2]; x = m[3]
    for l in range(0, len(S), 16):
v = y; s = k; p = M; g = x;
y = func_L_o(y, k, M, x, S[l + 0], 7, m[4])
x = func_L_o(x, y, k, M, S[l + 1], 12, m[5])
M = func_L_o(M, x, y, k, S[l + 2], 17, m[6])
k = func_L_o(k, M, x, y, S[l + 3], 22, m[7])
y = func_L_o(y, k, M, x, S[l + 4], 7, m[8])
x = func_L_o(x, y, k, M, S[l + 5], 12, m[9])
        M = func_L_o(M, x, y, k, S[l + 6], 17, m[10])
k = func_L_o(k, M, x, y, S[l + 7], 22, m[11])
y = func_L_o(y, k, M, x, S[l + 8], 7, m[12])
x = func_L_o(x, y, k, M, S[l + 9], 12, m[13])
M = func_L_o(M, x, y, k, S[l + 10], 17, m[14])
k = func_L_o(k, M, x, y, S[l + 11], 22, m[15])
y = func_L_o(y, k, M, x, S[l + 12], 7, m[16])
x = func_L_o(x, y, k, M, S[l + 13], 12, m[17])
M = func_L_o(M, x, y, k, S[l + 14], 17, m[18])
k = func_L_o(k, M, x, y, S[l + 15], 22, m[19])
y = func_L_d(y, k, M, x, S[l + 1], 5, m[20])
x = func_L_d(x, y, k, M, S[l + 6], 9, m[21])
M = func_L_d(M, x, y, k, S[l + 11], 14, m[22])
k = func_L_d(k, M, x, y, S[l + 0], 20, m[23])
y = func_L_d(y, k, M, x, S[l + 5], 5, m[24])
x = func_L_d(x, y, k, M, S[l + 10], 9, m[25])
M = func_L_d(M, x, y, k, S[l + 15], 14, m[26])
k = func_L_d(k, M, x, y, S[l + 4], 20, m[27])
y = func_L_d(y, k, M, x, S[l + 9], 5, m[28])
x = func_L_d(x, y, k, M, S[l + 14], 9, m[29])
M = func_L_d(M, x, y, k, S[l + 3], 14, m[30])
k = func_L_d(k, M, x, y, S[l + 8], 20, m[31])
y = func_L_d(y, k, M, x, S[l + 13], 5, m[32])
x = func_L_d(x, y, k, M, S[l + 2], 9, m[33])
M = func_L_d(M, x, y, k, S[l + 7], 14, m[34])
k = func_L_d(k, M, x, y, S[l + 12], 20, m[35])
y = func_L_u(y, k, M, x, S[l + 5], 4, m[36])
x = func_L_u(x, y, k, M, S[l + 8], 11, m[37])
M = func_L_u(M, x, y, k, S[l + 11], 16, m[38])
k = func_L_u(k, M, x, y, S[l + 14], 23, m[39])
y = func_L_u(y, k, M, x, S[l + 1], 4, m[40])
x = func_L_u(x, y, k, M, S[l + 4], 11, m[41])
M = func_L_u(M, x, y, k, S[l + 7], 16, m[42])
k = func_L_u(k, M, x, y, S[l + 10], 23, m[43])
y = func_L_u(y, k, M, x, S[l + 13], 4, m[44])
x = func_L_u(x, y, k, M, S[l + 0], 11, m[45])
M = func_L_u(M, x, y, k, S[l + 3], 16, m[46])
k = func_L_u(k, M, x, y, S[l + 6], 23, m[47])
y = func_L_u(y, k, M, x, S[l + 9], 4, m[48])
x = func_L_u(x, y, k, M, S[l + 12], 11, m[49])
M = func_L_u(M, x, y, k, S[l + 15], 16, m[50])
k = func_L_u(k, M, x, y, S[l + 2], 23, m[51])
y = func_L_i(y, k, M, x, S[l + 0], 6, m[52])
x = func_L_i(x, y, k, M, S[l + 7], 10, m[53])
M = func_L_i(M, x, y, k, S[l + 14], 15, m[54])
k = func_L_i(k, M, x, y, S[l + 5], 21, m[55])
y = func_L_i(y, k, M, x, S[l + 12], 6, m[56])
x = func_L_i(x, y, k, M, S[l + 3], 10, m[57])
M = func_L_i(M, x, y, k, S[l + 10], 15, m[58])
k = func_L_i(k, M, x, y, S[l + 1], 21, m[59])
y = func_L_i(y, k, M, x, S[l + 8], 6, m[60])
x = func_L_i(x, y, k, M, S[l + 15], 10, m[61])
M = func_L_i(M, x, y, k, S[l + 6], 15, m[62])
k = func_L_i(k, M, x, y, S[l + 1
|
terranum-ch/GraphLink
|
test/test_gk_graphic.py
|
Python
|
apache-2.0
| 1,094
| 0.003656
|
#!/usr/bin/python
import os
import sys
import pytest
from .context import graphlink
from .context import OUTPUT_TEST_PATH
from graphlink.core.gk_link import GKLink
from graphlink.core.gk_node import GKNode
from graphlink.core.gk_node import GK_SHAPE_TYPE
from graphlink.core.gk_graphic import GKGraphic
def test_gk_graphic_simple():
node1 = GKNode("Node1", shape=GK_SHAPE_TYPE[2])
node2 = GKNode("Node2")
myl1 = GKLink(node1, node2)
graph = GKGraphic()
assert graph.add_link(myl1) is True
assert graph.render(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result")) is True
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "t
|
est_graphic_result.pdf"))
def test_gk_graphic_image():
node1 = GKNode("Node1", shape=GK_SHAPE_TYPE[2])
node2 = GKNode("Node2")
myl1 = GKLink(node1, node2)
graph = GKGraphic()
assert graph.add_link(myl1) is True
assert graph.render(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result"), extension="png", size=500) is True
assert os.path.exists(os.path.join(OUT
|
PUT_TEST_PATH, "test_graphic_result.png"))
|
tdsticks/crontab
|
py/wiki20/wiki20/controllers/error.py
|
Python
|
gpl-2.0
| 1,250
| 0.0008
|
# -*- coding: utf-8 -*-
"""Error controller"""
from tg import request, expose
from wiki20.lib.base import BaseController
__all__ = ['ErrorController']
class ErrorController(BaseController):
"""
Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments midd
|
leware in your config/middleware.py file.
"""
@expose('wiki20.templates.error')
def document(self, *args, **kwargs):
"""Render the error document"""
resp = request.environ.get('tg.original_response')
try:
|
# tg.abort exposes the message as .detail in response
message = resp.detail
except:
message = None
if not message:
message = ("<p>We're sorry but we weren't able to process "
" this request.</p>")
values = dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=request.params.get('code', resp.status_int),
message=request.params.get('message', message))
return values
|
goldmedal/spark
|
python/pyspark/ml/stat.py
|
Python
|
apache-2.0
| 16,949
| 0.002537
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.linalg import DenseMatrix, Vectors
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
"""
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol):
"""
Perform a Pearson's independence test using dataset.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:return:
DataFrame containing the test result for every feature against the label.
This DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[Int]`
- `statistics: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
class KolmogorovSmirnovTest(object):
"""
Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
distribution.
By comparing the largest difference between the empirical cumulative
distribution of the sample data and the theoretical distribution we can provide a test for
the null hypothesis that the sample data comes from that theoretical distribution.
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def test(dataset, sampleCol, distName, *params):
"""
Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
equality. Currently supports the normal distribution, taking as parameters the mean and
standard deviation.
:param dataset:
a Dataset or a DataFrame containing the sample of data to test.
:param sampleCol:
Name of sample column in dataset, of any numerical type.
:param distName:
a `string` name for a theoretical distribution, currently only support "norm".
:param params:
a list of `Double` values specifying the parameters to be used for the theoretical
distribution. For "norm" distribution, the parameters includes mean and variance.
:return:
A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
This DataFrame will contain a single Row with the following fi
|
elds:
- `pValue: Double`
- `statistic: Double`
>>> from pyspark.ml.stat import KolmogorovSmirnovTest
>>> dataset = [[-1.0], [0.0], [1.0]]
>>> dataset = spark.createDataFrame(dataset, ['s
|
ample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
>>> dataset = [[2.0], [3.0], [4.0]]
>>> dataset = spark.createDataFrame(dataset, ['
|
digitalocean/netbox
|
netbox/utilities/validators.py
|
Python
|
apache-2.0
| 1,580
| 0.003165
|
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import _lazy_re_compile, BaseValidator, URLValidator
class EnhancedURLValidator(URLValidator):
"""
Extends Django's built-in URLValidator to permit the use of hostnames with no domain extension and enforce allowed
schemes specified in the configuration.
"""
fqdn_re = URLValidator.hostname_re + URLValidator.domain_re + URLValidator.tld_re
host_res = [URLValidator.ipv4_re, URLValidator.ipv6_re, fqdn_re, URLValidator.hostname_re]
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # Scheme (enforced separately)
r'(?:\S+(?::\S*)?@)?' # HTTP basic authentication
r'(?:' + '|'.join(host_res) + ')' # IPv4, IPv6, FQDN, or hostname
r'(?::\d{2,5})?'
|
# Port number
r'(?:[/?#][^\s]*)?' # Path
r'\Z', re.IGNORECASE)
schemes = settings.ALLOWED_URL_SCHEME
|
S
class ExclusionValidator(BaseValidator):
"""
Ensure that a field's value is not equal to any of the specified values.
"""
message = 'This value may not be %(show_value)s.'
def compare(self, a, b):
return a in b
def validate_regex(value):
"""
Checks that the value is a valid regular expression. (Don't confuse this with RegexValidator, which *uses* a regex
to validate a value.)
"""
try:
re.compile(value)
except re.error:
raise ValidationError(f"{value} is not a valid regular expression.")
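# Illustrative usage (added, not part of the original module):
# validate_regex(r'^[a-z0-9-]+$') # valid pattern, returns None
# validate_regex('([unclosed') # raises ValidationError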
|
buddyd16/Structural-Engineering
|
Steel/bolt_group_istantaneous_center.py
|
Python
|
bsd-3-clause
| 8,880
| 0.021284
|
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __fu
|
ture__ import division
import math as m
def build_bolt_group(numCols, numRows, Colspacing, Rowspacing):
# Given a number of rows and columns
# return the x and y coordinate lists
# starting with the first bolt at (0,0)
xloc = [0]
yloc = [0]
i=0
y=0
for i in ra
|
nge(numCols):
if i == 0:
y=0
for y in range(numRows-1):
xloc.append(xloc[-1])
yloc.append(yloc[-1]+Rowspacing)
else:
x = xloc[-1] + Colspacing
xloc.append(x)
yloc.append(0)
y=0
for y in range(numRows-1):
xloc.append(x)
yloc.append(yloc[-1]+Rowspacing)
return xloc, yloc
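# Worked example (added for clarity, values assumed): build_bolt_group(2, 3, 3.0, 3.0)
# returns xloc = [0, 0, 0, 3, 3, 3] and yloc = [0, 3, 6, 0, 3, 6], i.e. two columns of
# three bolts at 3 unit spacing with the first bolt at (0, 0).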
def bolt_group_center(xloc, yloc):
#Bolt Group Centroid
if len(xloc)<3:
anchor_x_bar = (xloc[0]+xloc[1])/2.00
anchor_y_bar = (yloc[0]+yloc[1])/2.00
else:
j=0
x_tot=0
y_tot=0
for i in xloc:
x_tot = x_tot+xloc[j]
y_tot = y_tot+yloc[j]
j+=1
anchor_x_bar = x_tot/len(xloc)
anchor_y_bar = y_tot/len(yloc)
cg_anchors = [anchor_x_bar, anchor_y_bar]
return cg_anchors
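# Added note: for the 2x3 example above, bolt_group_center returns [1.5, 3.0].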
def ic_brandt(IC, xloc, yloc, Mp):
num_bolts = len(xloc)
deltamax = 0.34
ICx = IC[0]
ICy = IC[1]
xIC = []
yIC = []
di = []
deltai = []
ri = []
fx = []
fy = []
moment = []
for x in xloc:
xICtemp = x - ICx
xIC.append(xICtemp)
for y in yloc:
yICtemp = y - ICy
yIC.append(yICtemp)
i=0
for i in range(num_bolts):
ditemp = m.sqrt((xIC[i]*xIC[i])+(yIC[i]*yIC[i]))
if ditemp == 0:
ditemp = 0.00000001
else:
pass
di.append(ditemp)
dmax = max(di)
i=0
for i in range(num_bolts):
deltaitemp = (di[i]/dmax)*deltamax
deltai.append(deltaitemp)
i=0
for i in range(num_bolts):
ritemp = m.pow(1-m.pow(m.e,-10.0*deltai[i]),0.55)
ri.append(ritemp)
i=0
for i in range(num_bolts):
momenttemp = ri[i]*di[i]
moment.append(momenttemp)
Mi = sum(moment)
Rult = -1*Mp/Mi
i=0
for i in range(num_bolts):
fxtemp = -1*(yIC[i]*ri[i])/di[i]
fxtemp = fxtemp * Rult
fx.append(fxtemp)
i=0
for i in range(num_bolts):
fytemp = (xIC[i]*ri[i])/di[i]
fytemp = fytemp * Rult
fy.append(fytemp)
Rx = sum(fx)
Ry = sum(fy)
table = [["Bolt x to IC",xIC],["Bolt y to IC", yIC],["di", di],["deltai", deltai],["ri", ri],["Mi", moment],["Fxi", fx],["Fyi", fy]]
return Rx, Ry, Mi, table
def brandt(xloc, yloc, P_xloc, P_yloc, P_angle, tol=0.000001):
# Bolt Group Instantaneous Center using method by G. Donald Brandt
# Rapid Determination of Ultimate Strength of Eccentrically Loaded Bolt Groups
# AISC Journal 1982 2nd Quarter
detailed_output = []
num_bolts = len(xloc)
n = num_bolts
detailed_output.append(num_bolts)
#Bolt Group Centroid
if len(xloc)<3:
anchor_x_bar = (xloc[0]+xloc[1])/2.00
anchor_y_bar = (yloc[0]+yloc[1])/2.00
else:
j=0
x_tot=0
y_tot=0
for i in xloc:
x_tot = x_tot+xloc[j]
y_tot = y_tot+yloc[j]
j+=1
anchor_x_bar = x_tot/len(xloc)
anchor_y_bar = y_tot/len(yloc)
cg_anchors = [anchor_x_bar, anchor_y_bar]
detailed_output.append(["Anchor Group C.G.",cg_anchors])
# J - Polar Moment of Inertial of Bolt Group
# sum(x^2+y^2)
sum_x_square = 0
sum_y_square = 0
i=0
for i in range(num_bolts):
sum_x_square = sum_x_square + (xloc[i]-anchor_x_bar)**2
sum_y_square = sum_y_square + (yloc[i]-anchor_y_bar)**2
J = sum_x_square + sum_y_square
detailed_output.append(['Anchor Group J',J])
Px = -1*m.cos(m.radians(P_angle))
Py = -1*m.sin(m.radians(P_angle))
detailed_output.append(["Unit Forces",Px,Py])
Mo = (-1*Px*(P_yloc-anchor_y_bar))+(Py*(P_xloc-anchor_x_bar))
detailed_output.append(["Mo",Mo])
ax = (-1*Py*J) / (n * Mo)
ay = (Px*J) / (n*Mo)
detailed_output.append(["ax",ax,"ay",ay])
Mp = (-1*Px*(P_yloc-anchor_y_bar-ay))+(Py*(P_xloc-anchor_x_bar-ax))
detailed_output.append(["Mp",Mp])
IC_initial = [anchor_x_bar+ax,anchor_y_bar+ay]
Rx, Ry, Mi, table = ic_brandt(IC_initial,xloc,yloc, Mp)
detailed_output.append(["Rx",Rx,"Ry", Ry,"Mi", Mi,"Per Bolt Table", table,"First IC pass"])
fxx = Px + Rx
fyy = Py + Ry
F = m.sqrt(fxx*fxx+fyy*fyy)
detailed_output.append(["fxx",fxx,"fyy",fyy,"F",F])
ax_new = (-1*fyy*J)/(n*Mo)
ay_new = (fxx*J) / (n*Mo)
detailed_output.append(["ax",ax_new,"ay",ay_new])
IC_new = IC_initial
Cu = abs(Mi/Mp)
count = 0
iterations = 0
f_track = [F]
cu_track = [Cu]
while count<5000:
IC_new = [IC_new[0]+ax_new,IC_new[1]+ay_new]
Mp_new = (-1*Px*(P_yloc-IC_new[1]))+(Py*(P_xloc-IC_new[0]))
Rx, Ry, Mi, table = ic_brandt(IC_new,xloc,yloc, Mp_new)
fxx = Px + Rx
fyy = Py + Ry
F = m.sqrt(fxx*fxx+fyy*fyy)
f_track.append(F)
Cu = abs(Mi/Mp_new)
cu_track.append(Cu)
ax_new = ((-1*fyy*J)/(n*Mo))/10.0
ay_new = ((fxx*J) / (n*Mo))/10.0
if F <= tol:
iterations = count
count = 5000
solution = 'yes'
else:
iterations = count
count +=1
solution = 'no'
detailed_output.append(["fxx",fxx,"fyy",fyy,"F",F])
detailed_output.append(["I.C.",IC_new])
detailed_output.append(["Solution:",solution,"# Iterations:",iterations,count])
detailed_output.append(["Rx",Rx,"Ry", Ry,"Mi", Mi,"Per Bolt Table", table])
Cu = abs(Mi/Mp_new)
F_old = f_track[-2]
F = f_track[-1]
Cu_old = cu_track[-2]
try:
Cu_predict = ((F_
|
gchinellato/Self-Balance-Robot
|
nfs-server/modules/PanTilt/Test/panTilt-gpio.py
|
Python
|
gpl-3.0
| 6,361
| 0.010061
|
#!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Description: Pan Tilt - Micro Servo motors API with RPI.GPIO
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
import RPi.GPIO as GPIO
import time
import threading
import Queue
from constants import *
from Utils.traces.trace import *
class PanTiltThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, queue=Queue.Queue(), debug=False):
threading.Thread.__init__(self, group=group, target=target, name=name)
self.args = args
self.kwargs = kwargs
self.name = name
self.debug = debug
#Queue to communicate between threads
self._workQueue = queue
self._lock = threading.Lock()
#Event to signalize between threads
self._stopEvent = threading.Event()
self._sleepPeriod = 0.0
#Absolute and relative angles
self.angleV = 0.0
self.angleH = 0.0
self.scaledAngleV = 0.0
self.scaledAngleH = 0.0
GPIO.setwarnings(False) # disable warnings
GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering
GPIO.setup(SERVO_V_GPIO, GPIO.OUT) # set GPIO as output
GPIO.setup(SERVO_H_GPIO, GPIO.OUT) # set GPIO as output
''' SERVO
PERIOD = 20ms (50Hz)
DT(%) Time(ms) Degree
2,5 0,5 0
5.0 1.0 45
7.5 1.5 90
10.0 2.0 135
12.5 2.5 180'''
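# Added note derived from the table above: duty cycle maps linearly to angle,
# DT(%) = 2.5 + angle * 10.0 / 180.0, e.g. 90 deg -> 7.5 %.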
#PWM output for f=50Hz / t=20ms
self.pwmV = GPIO.PWM(SERVO_V_GPIO, FREQ)
|
self.pwmH = GPIO.PWM(SERVO_H_GPIO, FREQ)
self.status = 0
logging.info("Pan-Tilt Thread initialized")
#Override method
def run(self):
|
self._startPWM(0, 0)
lastTime = 0.0
while not self._stopEvent.wait(self._sleepPeriod):
try:
self._lock.acquire()
currentTime = time.time()
#Calculate time since the last time it was called
#if (self.debug):
# logging.debug("Duration: " + str(currentTime - lastTime))
event = self.getEvent()
if event != None:
if self.status == 1:
if event[0] != None:
pwmVertical = self.convertTo(event[0], ANALOG_MAX, ANALOG_MIN, VERTICAL_MAX, VERTICAL_MIN)
self.angleV = self.convertTo(pwmVertical, POS_MAX, POS_MIN, ANGLE_MAX, ANGLE_MIN)
self.scaledAngleV = self.convertTo(pwmVertical, VERTICAL_MAX, VERTICAL_MIN, ANGLE_MAX, ANGLE_MIN)
self._changeV(pwmVertical)
if (self.debug):
logging.debug("PWM Vertical: " + str(pwmVertical) + "%")
logging.debug("Angle Vertical: " + str(self.angleV) + "deg")
logging.debug("Angle Scaled Vertical: " + str(self.scaledAngleV) + "deg")
if event[1] != None:
pwmHorizontal = self.convertTo(event[1], ANALOG_MAX, ANALOG_MIN, HORIZONTAL_MAX, HORIZONTAL_MIN)
self.angleH = self.convertTo(pwmHorizontal, POS_MAX, POS_MIN, ANGLE_MAX, ANGLE_MIN)
self.scaledAngleH = self.convertTo(pwmHorizontal, HORIZONTAL_MAX, HORIZONTAL_MIN, ANGLE_MAX, ANGLE_MIN)
self._changeH(pwmHorizontal)
if (self.debug):
logging.debug("PWM Horizontal: " + str(pwmHorizontal) + "%")
logging.debug("Angle Horizontal: " + str(self.angleH) + "deg")
logging.debug("Angle Scaled Horizontal: " + str(self.scaledAngleH) + "deg")
except Queue.Empty:
if (self.debug):
logging.debug("Queue Empty")
self.pause()
pass
finally:
lastTime = currentTime
self._lock.release()
#Override method
def join(self, timeout=None):
#Stop the thread and wait for it to end
self._stopEvent.set()
self._stopPWM()
threading.Thread.join(self, timeout=timeout)
def getEvent(self, timeout=1):
return self._workQueue.get(timeout=timeout)
def putEvent(self, event):
#Bypass if full, to not block the current thread
if not self._workQueue.full():
self._workQueue.put(event)
# As the Raspberry Pi does not have hardware PWM pins, a software PWM is used, which makes a workaround necessary.
def pause(self):
self.status = 0
self._changeV(0)
self._changeH(0)
def resume(self):
self.status = 1
def getAbsoluteAngles(self):
#Get absolute angle - real angle
return self.angleV, self.angleH
def getScaledAngles(self):
#Get angle relative the limits
return self.scaledAngleV, self.scaledAngleH
def convertTo(self, value, fromMax, fromMin, toMax, toMin):
if not (fromMin <= value <= fromMax):
logging.warning("Value out of the range (Max:"+str(fromMax)+" , Min:"+str(fromMin)+")")
if value > fromMax:
value = fromMax
elif value < fromMin:
value = fromMin
factor = (value-fromMin)/(fromMax-fromMin)
return factor*(toMax-toMin)+toMin
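# Added example: convertTo(50, 100, 0, 1.0, 0.0) rescales 50 from [0, 100] into
# [0.0, 1.0] and returns 0.5; out-of-range inputs are clipped to the source range first.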
def _startPWM(self, dutyCycleV, dutyCycleH):
self.pwmV.start(dutyCycleV)
self.pwmH.start(dutyCycleH)
self.status = 1
def _stopPWM(self):
self.pwmV.stop()
self.pwmH.stop()
self.status = 0
def _changeV(self, dutyCycleV):
self.pwmV.ChangeDutyCycle(dutyCycleV)
def _changeH(self, dutyCycleH):
self.pwmH.ChangeDutyCycle(dutyCycleH)
|
jeromeetienne/webglmeeting0
|
apprtc/apprtc.py
|
Python
|
mit
| 6,313
| 0.015207
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
# pylint: disable-msg=C6310
"""WebRTC Demo
This module demonstrates the WebRTC API by implementing a simple video chat app.
"""
import datetime
import logging
import os
import random
import re
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
def generate_random(len):
word = ''
for i in range(len):
word += random.choice('0123456789')
return word
def sanitize(key):
return re.sub("[^
|
a-zA-Z0-9\-]", "-", key);
def make_token(room, user):
return room.key().id_or_name() + '/' + user
def make_pc_config(stun_serve
|
r):
if stun_server:
return "STUN " + stun_server
else:
return "STUN stun.l.google.com:19302"
class Room(db.Model):
"""All the data we store for a room"""
user1 = db.StringProperty()
user2 = db.StringProperty()
def __str__(self):
str = "["
if self.user1:
str += self.user1
if self.user2:
str += ", " + self.user2
str += "]"
return str
def get_occupancy(self):
occupancy = 0
if self.user1:
occupancy += 1
if self.user2:
occupancy += 1
return occupancy
def get_other_user(self, user):
if user == self.user1:
return self.user2
elif user == self.user2:
return self.user1
else:
return None
def has_user(self, user):
return (user and (user == self.user1 or user == self.user2))
def add_user(self, user):
if not self.user1:
self.user1 = user
elif not self.user2:
self.user2 = user
else:
raise RuntimeError('room is full')
self.put()
def remove_user(self, user):
if user == self.user2:
self.user2 = None
if user == self.user1:
if self.user2:
self.user1 = self.user2
self.user2 = None
else:
self.user1 = None
if self.get_occupancy() > 0:
self.put()
else:
self.delete()
class ConnectPage(webapp.RequestHandler):
def post(self):
key = self.request.get('from')
room_key, user = key.split('/');
logging.info('User ' + user + ' connected to room ' + room_key)
class DisconnectPage(webapp.RequestHandler):
def post(self):
key = self.request.get('from')
room_key, user = key.split('/');
logging.info('Removing user ' + user + ' from room ' + room_key)
room = Room.get_by_key_name(room_key)
if room and room.has_user(user):
other_user = room.get_other_user(user)
room.remove_user(user)
logging.info('Room ' + room_key + ' has state ' + str(room))
if other_user:
channel.send_message(make_token(room, other_user), 'BYE')
logging.info('Sent BYE to ' + other_user)
else:
logging.warning('Unknown room ' + room_key)
class MessagePage(webapp.RequestHandler):
def post(self):
message = self.request.body
room_key = self.request.get('r')
room = Room.get_by_key_name(room_key)
if room:
user = self.request.get('u')
other_user = room.get_other_user(user)
if other_user:
# special case the loopback scenario
if other_user == user:
message = message.replace("\"OFFER\"",
"\"ANSWER\",\n \"answererSessionId\" : \"1\"")
message = message.replace("a=crypto:0 AES_CM_128_HMAC_SHA1_32",
"a=xrypto:0 AES_CM_128_HMAC_SHA1_32")
channel.send_message(make_token(room, other_user), message)
logging.info('Delivered message to user ' + other_user);
else:
logging.warning('Unknown room ' + room_key)
class MainPage(webapp.RequestHandler):
"""The main UI page, renders the 'index.html' template."""
def get(self):
"""Renders the main page. When this page is shown, we create a new
channel to push asynchronous updates to the client."""
room_key = sanitize(self.request.get('r'));
debug = self.request.get('debug')
stun_server = self.request.get('ss');
if not room_key:
room_key = generate_random(8)
redirect = '/?r=' + room_key
if debug:
redirect += ('&debug=' + debug)
if stun_server:
redirect += ('&ss=' + stun_server)
self.redirect(redirect)
logging.info('Redirecting visitor to base URL to ' + redirect)
return
user = None
initiator = 0
room = Room.get_by_key_name(room_key)
if not room and debug != "full":
# New room.
user = generate_random(8)
room = Room(key_name = room_key)
room.add_user(user)
if debug != "loopback":
initiator = 0
else:
room.add_user(user)
initiator = 1
elif room and room.get_occupancy() == 1 and debug != "full":
# 1 occupant.
user = generate_random(8)
room.add_user(user)
initiator = 1
else:
# 2 occupants (full).
path = os.path.join(os.path.dirname(__file__), 'full.html')
self.response.out.write(template.render(path, { 'room_key': room_key }));
logging.info('Room ' + room_key + ' is full');
return
room_link = 'https://webglmeeting.appspot.com/?r=' + room_key
if debug:
room_link += ('&debug=' + debug)
if stun_server:
room_link += ('&ss=' + stun_server)
token = channel.create_channel(room_key + '/' + user)
pc_config = make_pc_config(stun_server)
template_values = {'token': token,
'me': user,
'room_key': room_key,
'room_link': room_link,
'initiator': initiator,
'pc_config': pc_config
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, template_values))
logging.info('User ' + user + ' added to room ' + room_key);
logging.info('Room ' + room_key + ' has state ' + str(room))
application = webapp.WSGIApplication([
('/', MainPage),
('/message', MessagePage),
('/_ah/channel/connected/', ConnectPage),
('/_ah/channel/disconnected/', DisconnectPage)
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
zhangvs1988/zhangyl-Djangodemo
|
usercenter/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 1,078
| 0.001873
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-14 06:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
|
name='ActivateCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=100, verbose_name='激活码')),
|
('expire_timestamp', models.DateTimeField()),
('create_timestamp', models.DateTimeField(auto_now_add=True)),
('last_update_timestamp', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
),
]
|
whileLooper/MangaSpider
|
withCookie.py
|
Python
|
mpl-2.0
| 1,694
| 0.004825
|
import http.cookiejar
import urllib
import urllib.request
import re
import gzip
__author__ = 'bochen'
def makeMyOpener(head):
header = []
cookieJar = http.cookiejar.CookieJar()
processor = urllib.request.HTTPCookieProcessor(cookieJar)
opener = urllib.request.build_opener(processor)
for key, value in head.items():
e = (key, value)
header.append(e)
opener.addheaders = header
return opener
def saveData(data):
save_path = '/Users/bochen/git/Training/python/temp.html'
f_obj = open(save_path, 'wb')
f_obj.write(data)
f_obj.close()
def getXSRF(data):
xsrfRe = re.compile('name="\_xsrf\" value=\"(.*)\"', flags=0)
xsrfStr = xsrfRe.findall(data)
return xsrfStr[0]
def ungzip(data):
try:
print('正在解压...')
data = gzip.decompress(data)
print('完成解压!')
except:
print('未经压缩
|
,无需解压')
return data
header = {
'Collection': 'Keep-Alive',
'Accept': 'text/html,application/xhtml+xml,*/*',
'Accept-Language': 'en-US,en;q=0.8,ja;q=0.6,zh-CN;q=0.4,zh;q=0.2,it;q=0.2',
'User-Agent': 'Chrome/45.0.2454.101'
}
url = 'http://www.zhihu.com/'
opener = makeMyOpener(header)
urlopen = opener.open(url)
data = urlopen.read()
unzipData = ungzip(data)
_xsrf = getXSRF(unzipData.decode())
print('_xsrf: ', _xsrf)
url += 'login'
loginEmail =
|
'bochentheone@hotmail.com'
password = 'BOboris8878'
postDict = {
'_xsrf': _xsrf,
'email': loginEmail,
'password': password,
'rememberme': 'y'
}
postData = urllib.parse.urlencode(postDict).encode()
op = opener.open(url, postData)
data = op.read()
data = ungzip(data)
print(data.decode())
|
ejoonie/heart_sound
|
main_waveform_20170517.py
|
Python
|
gpl-3.0
| 3,229
| 0.013936
|
# coding = utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_
|
DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file
|
.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
channels[index%len(channels)].append(datum)
#Get time from indices
fs = wav_file.getframerate()
Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)//len(channels))
# return
return fs, Time, channels[i_ch]
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get-filterred signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter oder
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
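# Added note: Wn above is the cutoff normalised by the Nyquist frequency fs/2;
# e.g. for an assumed 44.1 kHz recording, Wn = 500 / 22050 ~= 0.0227.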
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
|
marxin/youtube-dl
|
youtube_dl/extractor/tumblr.py
|
Python
|
unlicense
| 2,338
| 0.002568
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behin
|
d the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id':
|
'90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': 're:http://.*\.jpg',
}
}]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage = self._download_webpage(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url')
iframe = self._download_webpage(iframe_url, video_id)
video_url = self._search_regex(r'<source src="([^"]+)"',
iframe, 'video url')
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
}
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/io/filesystems_test.py
|
Python
|
apache-2.0
| 8,401
| 0.005476
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
# pytype: skip-file
import filecmp
import logging
import os
impor
|
t shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator), ) + paths)
return _join
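# Added example: _gen_fake_join('/')('a/', 'b', 'c') returns 'a/b/c'; the tests below
# patch it in as os.path.join so they run independently of the real platform separator.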
class FileSystemsTe
|
st(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_get_scheme(self):
self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf')) # pylint: disable=anomalous-backslash-in-string
self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')
def test_get_filesystem(self):
self.assertTrue(
isinstance(
FileSystems.get_filesystem('/tmp'),
localfilesystem.LocalFileSystem))
self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'), # pylint: disable=anomalous-backslash-in-string
localfilesystem.LocalFileSystem))
with self.assertRaises(ValueError):
FileSystems.get_filesystem('error://abc/def')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to', 'file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/', 'tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/', 'path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', r'to\file'))
self.assertEqual(
r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
# Check IOError if existing directory is created
with self.assertRaises(IOError):
FileSystems.mkdirs(path)
with self.assertRaises(IOError):
FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaisesRegex(BeamIOError,
r'^Unable to get the Filesystem') as error:
FileSystems.match([None])
self.assertEqual(list(error.exception.exception_details), [None])
def test_match_directory_with_files(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertCountEqual(files, [path1, path2])
def test_match_directory(self):
result = FileSystems.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Copy operation failed') as error:
FileSystems.copy([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
FileSystems.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path1], [path2])
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Rename operation failed') as error:
FileSystems.rename([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path_t1], [path_t2])
self.assertTrue(FileSystems.exists(path_t2))
self.assertFalse(FileSystems.exists(path_t1))
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
self.assertFalse(FileSystems.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
FileSystems.delete([path1])
self.assertFalse(FileSystems.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaisesRegex(BeamIOError,
r'^Delete operation fa
|
RagnarDanneskjold/omniwallet
|
api/user_service.py
|
Python
|
agpl-3.0
| 4,740
| 0.016034
|
import os
import base64
import werkzeug.security as ws
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from flask import Flask, request, jsonify, abort, json
from simplekv.fs import FilesystemStore
from uuid import UUID
ACCOUNT_CREATION_DIFFICULTY = '0400'
LOGIN_DIFFICULTY = '0400'
SERVER_SECRET = 'SoSecret!'
SESSION_SECRET = 'SuperSecretSessionStuff'
data_dir_root = os.environ.get('DATADIR')
store_dir = data_dir_root + '/sessions/'
session_store = FilesystemStore(store_dir) # TODO: Need to roll this into a SessionInterface so multiple services can hit it easily
app = Flask(__name__)
app.debug = True
@app.route('/challenge')
def challenge():
uuid = request.args.get('uuid')
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_challenge = session + "_challenge"
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge in session_store:
session_store.delete(session_pow_challenge)
if session_challenge in session_store:
session_store.delete(session_challenge)
salt = ws.hashlib.sha256(SERVER_SECRET + uuid).hexdigest()
pow_challenge = ws.gen_salt(32)
challenge = ws.gen_salt(32)
session_store.put(session_pow_challenge, pow_challenge)
session_store.put(session_challenge, challenge)
response = {
'salt': salt,
'pow_challenge': pow_challenge,
'challenge': challenge
}
return jsonify(response)
@app.route('/create', methods=['POST'])
def create():
uuid = request.form['uuid']
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge not in session_store:
print 'UUID
|
not in session'
abort(403)
nonce = request.form['nonce']
public_key = request.form['public_key'].encode('UTF-8')
wallet = request.form['wallet
|
']
pow_challenge = session_store.get(session_pow_challenge)
if failed_challenge(pow_challenge, nonce, ACCOUNT_CREATION_DIFFICULTY):
print 'Aborting: Challenge was not met'
abort(403)
if exists(uuid):
print 'UUID already exists'
abort(403)
write_wallet(uuid, wallet)
session_store.delete(session_pow_challenge)
session_public_key = session + "_public_key"
session_store.put(session_public_key, public_key)
return ""
@app.route('/update', methods=['POST'])
def update():
uuid = request.form['uuid']
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_challenge = session + "_challenge"
session_pubkey = session + "_public_key"
if session_challenge not in session_store:
print 'Challenge not in session'
abort(403)
if session_pubkey not in session_store:
print 'Public key not in session'
abort(403)
challenge = session_store.get(session_challenge)
signature = request.form['signature']
wallet = request.form['wallet']
pubkey = session_store.get(session_pubkey)
key = RSA.importKey(pubkey)
h = SHA.new(challenge)
verifier = PKCS1_v1_5.new(key)
if not verifier.verify(h, signature.decode('hex')):
print 'Challenge signature not verified'
abort(403)
write_wallet(uuid, wallet)
session_store.delete(session_challenge)
return ""
@app.route('/login')
def login():
uuid = request.args.get('uuid')
public_key = base64.b64decode(request.args.get('public_key').encode('UTF-8'))
nonce = request.args.get('nonce')
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge not in session_store:
print 'UUID not in session'
abort(403)
pow_challenge = session_store.get(session_pow_challenge)
if failed_challenge(pow_challenge, nonce, LOGIN_DIFFICULTY):
print 'Failed login challenge'
abort(403)
if not exists(uuid):
print 'Wallet not found'
abort(404)
wallet_data = read_wallet(uuid)
session_store.delete(session_pow_challenge)
session_public_key = session + "_public_key"
session_store.put(session_public_key, public_key)
return wallet_data
# Utility Functions
def failed_challenge(pow_challenge, nonce, difficulty):
pow_challenge_response = ws.hashlib.sha256(pow_challenge + nonce).hexdigest()
return pow_challenge_response[-len(difficulty):] != difficulty
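# Added note: the proof of work passes when sha256(pow_challenge + nonce) ends with
# the difficulty suffix, e.g. a hex digest ending in '0400' for ACCOUNT_CREATION_DIFFICULTY.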
def write_wallet(uuid, wallet):
validate_uuid = UUID(uuid)
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
f.write(wallet)
def read_wallet(uuid):
validate_uuid = UUID(uuid)
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'r') as f:
return f.read()
def exists(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
return os.path.exists(filename)
|
wanghewen/CommonModules
|
CommonModules/Log.py
|
Python
|
mit
| 3,611
| 0.008585
|
# -*- coding:utf-8 -*-
""" Provide log related functions. You need to Initialize the logger and use the logger to make logs.
Example:
>>> logger = Initialize()
Use logger.level(\*msg) to log like:
>>> logger.error("Pickle data writing Failed.")
>>> logger.info("Pickle data of ", foo, " written successfully.")
The log will be stored into LogFile.log by default.
"""
__author__ = "Wang Hewen"
import sys
import logging
logging.currentframe = lambda: sys._getframe(5)
class Logger(logging.Logger):
def debug(self, *args, **kwargs):
super().debug("".join([str(arg) for arg in args]), **kwargs)
def info(self, *args, **kwargs):
super().info("".join([str(arg) for arg in args]), **kwargs)
def warning(self, *args, **kwargs):
super().warning("".join([str(arg) for arg in args]), **kwargs)
def warn(self, *args, **kwargs):
super().warn("".join([str(arg) for arg in args]), **kwargs)
def error(self, *args, **kwargs):
super().error("".join([str(arg) for arg in args]), **kwargs)
def exception(self, *args, exc_info=True, **kwargs):
super().exception("".join([str(arg) for arg in args]), exc_info = exc_info, **kwargs)
def critical(self, *args, **kwargs):
super().critical("".join([str(arg) for arg in args]), **kwargs)
def log(self, level, *args, **kwargs):
super().log(level, "".join([str(arg) for arg in args]), **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
def Initialize(FileName = "LogFile.log", LogLevel = "INFO", WriteToStream = False):
'''
Initialize loggers for logging. A logger will be returned.
:param String FileName: Path of the log file
:param String LogLevel: LogLevel of the logger, which can be "DEBUG", "INFO", "ERROR"
:param Boolean WriteToStream: Whether to write to stdout
:return: logger: The logger used for logging
:rtype: logging.Logger
'''
if LogLevel not in ["DEBUG", "INFO", "ERROR"]:
raise ValueError("LogLevel is not correctly set.")
logging.Logger.manager.setLoggerClass(Logger)
logger = logging.getLogger(__name__) #__name__ == CommonModules.Log
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
fileHandler = logging.FileHandler(FileName)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if LogLevel == "DEBUG":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.DEBUG)
fileHandler.
|
setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
if LogLevel == "INFO":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.INFO)
fileHandler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
if LogLev
|
el == "ERROR":
streamHandler = logging.StreamHandler(stream = sys.stderr)
streamHandler.setLevel(logging.ERROR)
fileHandler.setLevel(logging.ERROR)
logger.setLevel(logging.ERROR)
streamHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if WriteToStream:
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
return logger
|
davidwhogg/MagicCube
|
code/projection.py
|
Python
|
gpl-2.0
| 5,116
| 0.000391
|
import numpy as np
class Quaternion:
"""Quaternion Rotation:
Class to aid in representing 3D rotations via quaternions.
"""
@classmethod
def from_v_theta(cls, v, theta):
"""
Construct quaternions from unit vectors v and rotation angles theta
Parameters
----------
v : array_like
array of vectors, last dimension 3. Vectors will be normalized.
theta : array_like
array of rotation angles in radians, shape = v.shape[:-1].
Returns
-------
q : quaternion object
quaternion representing the rotations
"""
theta = np.asarray(theta)
v = np.asarray(v)
s = np.sin(0.5 * theta)
c = np.cos(0.5 * theta)
v = v * s / np.sqrt(np.sum(v * v, -1))
x_shape = v.shape[:-1] + (4,)
x = np.ones(x_shape).reshape(-1, 4)
x[:, 0] = c.ravel()
x[:, 1:] = v.reshape(-1, 3)
x = x.reshape(x_shape)
return cls(x)
def __init__(self, x):
self.x = np.asarray(x, dtype=float)
def __repr__(self):
return "Quaternion:\n" + self.x.__repr__()
def __mul__(self, other):
# multiplication of two quaternions.
# we don't implement multiplication by a scalar
sxr = self.x.reshape(self.x.shape[:-1] + (4, 1))
oxr = other.x.reshape(other.x.shape[:-1] + (1, 4))
prod = sxr * oxr
return_shape = prod.shape[:-1]
prod = prod.reshape((-1, 4, 4)).transpose((1, 2, 0))
ret = np.array([(prod[0, 0] - prod[1, 1]
- prod[2, 2] - prod[3, 3]),
(prod[0, 1] + prod[1, 0]
+ prod[2, 3] - prod[3, 2]),
(prod[0, 2] - prod[1, 3]
+ prod[2, 0] + prod[3, 1]),
(prod[0, 3] + prod[1, 2]
- prod[2, 1] + prod[3, 0])],
dtype=np.float,
order='F').T
return self.__class__(ret.reshape(return_shape))
def as_v_theta(self):
"""Return the v, theta equivalent of the (normalized) quaternion"""
x = self.x.reshape((-1, 4)).T
# compute theta
norm = np.sqrt((x ** 2).sum(0))
theta = 2 * np.arccos(x[0] / norm)
# compute the unit vector
v = np.array(x[1:], order='F', copy=True)
v /= np.sqrt(np.sum(v ** 2, 0))
# reshape the results
v = v.T.reshape(self.x.shape[:-1] + (3,))
theta = theta.reshape(self.x.shape[:-1])
return v, theta
def as_rotation_matrix(self):
"""Return the rotation matrix of the (normalized) quaternion"""
v, theta = self.as_v_theta()
shape = theta.shape
theta = theta.reshape(-1)
v = v.reshape(-1, 3).T
c = np.cos(theta)
s = np.sin(theta)
mat = np.array([[v[0] * v[0] * (1. - c) + c,
v[0] * v[1] * (1. - c) - v[2] * s,
v[0] * v[2] * (1. - c) + v[1] * s],
[v[1] * v[0] * (1. - c) + v[2] * s,
v[1] * v[1] * (1. - c) + c,
v[1] * v[2] * (1. - c) - v[0] * s],
[v[2] * v[0] * (1. - c) - v[1] * s,
v[2] * v[1] * (1. - c) + v[0] * s,
v[2] * v[2] * (1. - c) + c]],
order='F').T
return mat.reshape(shape + (3, 3))
def rotate(self, points):
M = self.as_rotation_matrix()
return np.dot(points, M.T)
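# Illustrative usage (added, not part of the original module):
# q = Quaternion.from_v_theta([0, 0, 1], np.pi / 2) # quaternion for a 90 degree turn about z
# pts = q.rotate(np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])) # rotated copies of the points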
def project_points(points, q, view, vertical
|
=[0, 1, 0]):
"""Project points using a quaternion q and a view v
Parameters
----------
points : array_like
array of last-dimension 3
q : Quaternion
quaternion representation of the rotation
view : array_like
length-3 vector giving the point of view
vertical : array_like
direction of y-axis for view. An error will be raised if it
is paralle
|
l to the view.
Returns
-------
proj: array_like
array of projected points: same shape as points.
"""
points = np.asarray(points)
view = np.asarray(view)
xdir = np.cross(vertical, view).astype(float)
if np.all(xdir == 0):
raise ValueError("vertical is parallel to v")
xdir /= np.sqrt(np.dot(xdir, xdir))
# get the unit vector corresponding to vertical
ydir = np.cross(view, xdir)
ydir /= np.sqrt(np.dot(ydir, ydir))
# normalize the viewer location: this is the z-axis
v2 = np.dot(view, view)
zdir = view / np.sqrt(v2)
# rotate the points
R = q.as_rotation_matrix()
Rpts = np.dot(points, R.T)
# project the points onto the view
dpoint = Rpts - view
dpoint_view = np.dot(dpoint, view).reshape(dpoint.shape[:-1] + (1,))
dproj = -dpoint * v2 / dpoint_view
trans = list(range(1, dproj.ndim)) + [0]
return np.array([np.dot(dproj, xdir),
np.dot(dproj, ydir),
-np.dot(dpoint, zdir)]).transpose(trans)
|
sncosmo/sncosmo
|
sncosmo/tests/test_spectrum.py
|
Python
|
bsd-3-clause
| 6,754
| 0
|
import numpy as np
import pytest
from astropy.table import Table
from numpy.testing import assert_allclose
import sncosmo
try:
import iminuit
HAS_IMINUIT = True
except ImportError:
HAS_IMINUIT = False
def test_bin_edges_linear():
"""Ensure that we can recover consistent bin edges for a spectrum from bin
centers.
Internally, the bin edges are stored rather than the bin centers.
"""
wave = np.linspace(3000, 8000, 100)
flux = np.ones_like(wave)
spec = sncosmo.Spectrum(wave, flux)
assert_allclose(wave, spec.wave, rtol=1.e-5)
def test_bin_edges_log():
"""Ensure that we can recover consistent bin edges for a spectrum from bin
centers.
Internally, the bin edges are stored rather than the bin centers.
"""
wave = np.logspace(np.log10(3000), np.log10(8000), 100)
flux = np.ones_like(wave)
spec = sncosmo.Spectrum(wave, flux)
assert_allclose(wave, spec.wave, rtol=1.e-5)
class TestSpectrum:
def setup_class(self):
# Simulate a spectrum
model = sncosmo.Model(source='hsiao-subsampled')
params = {'t0': 10., 'amplitude': 1.e-7, 'z': 0.2}
start_params = {'t0': 0., 'amplitude': 1., 'z': 0.}
model.set(**params)
# generate a fake spectrum with no errors. note: we simulate a high
# resolution spectrum and then bin it up. we also include large
# covariance between spectral elements to verify that we are handling
# covariance properly.
spec_time = params['t0'] + 5.
sim_wave = np.arange(3000, 9000)
sim_flux = model.flux(spec_time, sim_wave)
sim_fluxcov = 0.01 * np.max(sim_flux)**2 * np.ones((len(sim_flux),
len(sim_flux)))
sim_fluxcov += np.diag(0.1 * sim_flux**2)
spectrum = sncosmo.Spectrum(sim_wave, sim_flux, fluxcov=sim_fluxcov,
time=spec_time)
# generate a binned up low-resolution spectrum.
bin_wave = np.linspace(3500, 8500, 200)
bin_spectrum = spectrum.rebin(bin_wave)
# generate fake photometry with no errors
points_per_band = 12
bands = points_per_band * ['bessellux', 'bessellb', 'bessellr',
'besselli']
times = params['t0'] + np.linspace(-10., 60., len(bands))
zp = len(bands) * [25.]
zpsys = len(bands) * ['ab']
flux = model.bandflux(bands, times, zp=zp, zpsys=zpsys)
fluxerr = len(bands) * [0.1 * np.max(flux)]
photometry = Table({
'time': times,
'band': bands,
'flux': flux,
'fluxerr': fluxerr,
'zp': zp,
'zpsys': zpsys
})
self.model = model
self.photometry = photometry
self.spectrum = spectrum
self.bin_spectrum = bin_spectrum
self.params = params
self.start_params = start_params
def test_bandflux(self):
"""Check synthetic photometry.
We compare synthetic photometry on high and low resolution spectra. It
should stay the
|
same.
"""
bandflux_highres = self.spectrum.bandflux('sdssg')
bandflux_lowres = self.bin_spectrum.bandflux('sdssg')
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandflux_multi(self):
"""Check synthetic photometry with multiple bands."""
bands = ['sdssg', 'sdssr', 'sdssi']
|
bandflux_highres = self.spectrum.bandflux(bands)
bandflux_lowres = self.bin_spectrum.bandflux(bands)
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandflux_zpsys(self):
"""Check synthetic photometry with a magnitude system."""
bands = ['sdssg', 'sdssr', 'sdssi']
bandflux_highres = self.spectrum.bandflux(bands, 25., 'ab')
        bandflux_lowres = self.bin_spectrum.bandflux(bands, 25., 'ab')
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandfluxcov(self):
"""Check synthetic photometry with covariance."""
bands = ['sdssg', 'sdssr', 'sdssi']
flux_highres, cov_highres = self.spectrum.bandfluxcov(bands)
flux_lowres, cov_lowres = self.bin_spectrum.bandfluxcov(bands)
assert_allclose(flux_highres, flux_lowres, rtol=1.e-3)
assert_allclose(cov_highres, cov_lowres, rtol=1.e-3)
def test_bandmag(self):
"""Check synthetic photometry in magnitudes."""
bands = ['sdssg', 'sdssr', 'sdssi']
bandmag_highres = self.spectrum.bandmag(bands, 'ab')
bandmag_lowres = self.bin_spectrum.bandmag(bands, 'ab')
assert_allclose(bandmag_highres, bandmag_lowres, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_spectra(self):
"""Check fit results for a single high-resolution spectrum."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(model=self.model,
spectra=self.bin_spectrum,
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_both(self):
"""Check fit results for both spectra and photometry."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(self.photometry, model=self.model,
spectra=self.bin_spectrum,
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_multiple_spectra(self):
"""Check fit results for multiple spectra."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(model=self.model,
spectra=[self.bin_spectrum,
self.bin_spectrum],
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
|
shahabedinh/gettor
|
upload/fetch_latest_torbrowser.py
|
Python
|
bsd-3-clause
| 4,130
| 0.000484
|
# -*- coding: utf-8 -*-
#
# This file is part of GetTor, a Tor Browser distribution system.
#
# :authors: Israel Leiva <ilv@torproject.org>
#
# :copyright: (c) 2015, The Tor Project, Inc.
# (c) 2015, Israel Leiva
#
# :license: This is Free Software. See LICENSE for license information.
#
import os
import urllib2
import json
import argparse
import ConfigParser
import shutil
# this path should be relative to this script (or absolute)
UPLOAD_SCRIPTS = {
'dropbox': 'bundles2dropbox.py',
'drive': 'bundles2drive.py'
}
# "regex" for filtering downloads in wget
OS_RE = {
'windows': '%s.exe,%s.exe.asc',
'linux': '%s.tar.xz,%s.tar.xz.asc',
'osx': '%s.dmg,%s.dmg.asc',
}
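# For example (illustrative only), OS_RE['linux'] % ('en-US', 'en-US') expands
# to 'en-US.tar.xz,en-US.tar.xz.asc' (the bundle plus its signature), which is
# later handed to wget's --accept filter below.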
def main():
"""Script to fetch the latest Tor Browser.
Fetch the latest version of Tor Browser and upload it to the supported
providers (e.g. Dropbox). Ideally, this script should be executed with
a cron in order to automate the updating of the files served by GetTor
when a new version of Tor Browser is released.
Usage: python2.7 fetch.py --os=<OS> --lc=<LC>
Some fetch examples:
Fetch Tor Browser for all platforms and languages:
$ python2.7 fetch.py
Fetch Tor Browser only for Linux:
$ python2.7 fetch.py --os=linux
Fetch Tor Browser only for Windows and in US English:
$ python2.7 fetch.py --os=windows --lc=en-US
Fetch Tor Browser for all platforms, but only in Spanish:
$ python2.7 fetch.py --lc=es-ES
"""
parser = argparse.ArgumentParser(
description='Utility to fetch the latest Tor Browser and upload it \
to popular cloud services.'
)
# if no OS specified, download all
parser.add_argument('-o', '--os', default=None,
help='filter by OS')
# if no LC specified, download all
parser.add_argument('-l', '--lc', default='',
help='filter by locale')
args = parser.parse_args()
# server from which to download Tor Browser
dist_tpo = 'https://dist.torproject.org/torbrowser/'
# find out the latest version
url = 'https://www.torproject.org/projects/torbrowser/RecommendedTBBVersions'
response = urllib2.urlopen(url)
json_response = json.load(response)
latest_version = json_response[0]
# find out the current version delivered by GetTor
config = ConfigParser.RawConfigParser()
config.read('latest_torbrowser.cfg')
current_version = config.get('version', 'current')
if current_version != latest_version:
mirror = '%s%s/' % (dist_tpo, latest_version)
# what LC should we download?
lc_re = args.lc
# what OS should we download?
if args.os == 'windows':
os_re = OS_RE['windows'] % (lc_re, lc_re)
elif args.os == 'osx':
os_re = OS_RE['osx'] % (lc_re, lc_re)
elif args.os == 'linux':
os_re = OS_RE['linux'] % (lc_re, lc_re)
else:
os_re = '%s.exe,%s.exe.asc,%s.dmg,%s.dmg.asc,%s.tar.xz,%s.tar'\
'.xz.asc' % (lc_re, lc_re, lc_re, lc_re, lc_re, lc_re)
params = "-nH --cut-dirs=1 -L 1 --accept %s" % os_re
# in wget we trust
cmd = 'wget %s --mirror %s' % (params, mirror)
print "Going to execute %s" % cmd
# make t
|
he mirror
# a folder with the value of 'l
|
atest_version' will be created
os.system(cmd)
        # everything inside upload will be uploaded by the providers' scripts
shutil.move('latest', 'latest_backup')
shutil.move(latest_version, 'latest')
shutil.rmtree('latest_backup')
        # latest version of Tor Browser has been synchronized
# let's upload it
for provider in UPLOAD_SCRIPTS:
os.system('python2.7 %s' % UPLOAD_SCRIPTS[provider])
# if everything is OK, update the current version delivered by GetTor
config.set('version', 'current', latest_version)
with open(r'latest_torbrowser.cfg', 'wb') as config_file:
config.write(config_file)
if __name__ == "__main__":
main()
|
TD22057/T-Home
|
conf/eagle.py
|
Python
|
bsd-2-clause
| 953
| 0.007345
|
#===========================================================================
#
# Port to use for the web server. Configure the Eagle to use this
# port as its 'cloud provider' using http://host:
|
PORT
#
#===========================================================================
httpPort = 22042
#===========================================================================
#
# MQTT topic names
#
#===========================================================================
# Meter reading topi
|
c (reports current meter reading in kWh)
mqttEnergy = 'power/elec/Home/energy'
# Instantaneous power usage topic (reports power usage in W)
mqttPower = 'power/elec/Home/power'
#===========================================================================
#
# Logging configuration. Env variables are allowed in the file name.
#
#===========================================================================
logFile = '/var/log/tHome/eagle.log'
logLevel = 40
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/billiard/five.py
|
Python
|
mit
| 5,421
| 0
|
# -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~
Compatibility implementations of features
only available in newer Python versions.
"""
from __future__ import absolute_import
# ############# py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3
try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa
try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa
try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa
# ############# time.monotonic ###############################################
if sys.version_info < (3, 3):
import platform
SYSTEM = platform.system()
try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa
if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
def _monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import ctypes
import os
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]
librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]
def _monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else:
from time import time as _monotonic
try:
from time import monotonic
except ImportError:
monotonic = _monotonic # noqa
if PY3:
import builtins
from queue import Queue, Empty, Full
from itertools import zip_longest
from io import StringIO, BytesIO
map = map
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int, )
def items(d):
return d.items()
def keys(d):
return d.keys()
def values(d):
return d.values()
def nextfun(it):
return it.__next__
exec_ = getattr(builtins, 'ex
|
ec')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class WhateverIO(StringIO):
def write(self, data):
if isinstance(data, byt
|
es):
data = data.encode()
StringIO.write(self, data)
else:
import __builtin__ as builtins # noqa
from Queue import Queue, Empty, Full # noqa
from itertools import imap as map, izip_longest as zip_longest # noqa
from StringIO import StringIO # noqa
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode
long_t = long # noqa
range = xrange
int_types = (int, long)
def items(d): # noqa
return d.iteritems()
def keys(d): # noqa
return d.iterkeys()
def values(d): # noqa
return d.itervalues()
def nextfun(it): # noqa
return it.next
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
BytesIO = WhateverIO = StringIO # noqa
def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""Class decorator to set metaclass.
Works with both Python 2 and Python 3 and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).
"""
def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)
return _clone_with_metaclass
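# Illustrative use of the decorator above (hedged sketch, not part of the
# original module):
#
#     class Meta(type):
#         def __new__(mcs, name, bases, attrs):
#             attrs['tagged'] = True
#             return super(Meta, mcs).__new__(mcs, name, bases, attrs)
#
#     @with_metaclass(Meta)
#     class Tagged(object):
#         pass
#
#     assert type(Tagged) is Meta and Tagged.tagged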
|
apache/incubator-allura
|
ForgeSVN/forgesvn/tests/model/test_svnimplementation.py
|
Python
|
apache-2.0
| 5,921
| 0.000169
|
# Licensed to the Apache Software
|
Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain
|
a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mock import Mock, patch
from nose.tools import assert_equal
from pylons import app_globals as g
from alluratest.controller import setup_unit_test
from allura.model.repo import Commit
from forgesvn.model.svn import SVNImplementation
class TestSVNImplementation(object):
def setUp(self):
setup_unit_test()
def test_compute_tree_new(self):
self._test_compute_tree_new('/trunk/foo/')
self._test_compute_tree_new('/trunk/foo')
self._test_compute_tree_new('trunk/foo/')
self._test_compute_tree_new('trunk/foo')
@patch('allura.model.repo.LastCommitDoc.m.update_partial')
@patch('allura.model.repo.TreesDoc.m.update_partial')
@patch('allura.model.repo.Tree.upsert')
@patch('allura.model.repo.Tree.query.get')
def _test_compute_tree_new(self, path, tree_get, tree_upsert, treesdoc_partial, lcd_partial):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('foo', Mock())]
tree_get.return_value = None # no existing tree
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
tree_upsert.return_value = (Mock(), True)
tree_id = impl.compute_tree_new(commit, path)
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk/foo')
treesdoc_partial.assert_called()
lcd_partial.assert_called()
def test_last_commit_ids(self):
self._test_last_commit_ids('/trunk/foo/')
self._test_last_commit_ids('/trunk/foo')
self._test_last_commit_ids('trunk/foo/')
self._test_last_commit_ids('trunk/foo')
def _test_last_commit_ids(self, path):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('trunk', Mock()), ('foo', Mock())]
impl._svn.info2.return_value[1][1].last_changed_rev.number = '1'
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
entries = impl.last_commit_ids(commit, [path])
assert_equal(entries, {path.strip('/'): '5057636b9c1040636b81e4b1:1'})
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk')
@patch('forgesvn.model.svn.svn_path_exists')
def test__path_to_root(self, path_exists):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
path_exists.return_value = False
# edge cases
assert_equal(impl._path_to_root(None), '')
assert_equal(impl._path_to_root(''), '')
assert_equal(impl._path_to_root('/some/path/'), '')
assert_equal(impl._path_to_root('some/path'), '')
# tags
assert_equal(impl._path_to_root('/some/path/tags/1.0/some/dir'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/1.0/'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/'), '')
# branches
assert_equal(impl._path_to_root('/some/path/branches/b1/dir'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/b1/'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/'), '')
# trunk
assert_equal(impl._path_to_root('/some/path/trunk/some/dir/'),
'some/path/trunk')
assert_equal(impl._path_to_root('/some/path/trunk'), 'some/path/trunk')
# with fallback to trunk
path_exists.return_value = True
assert_equal(impl._path_to_root(''), 'trunk')
assert_equal(impl._path_to_root('/some/path/'), 'trunk')
assert_equal(impl._path_to_root('/tags/'), 'trunk')
assert_equal(impl._path_to_root('/branches/'), 'trunk')
assert_equal(impl._path_to_root('/tags/1.0'), 'tags/1.0')
assert_equal(impl._path_to_root('/branches/branch'), 'branches/branch')
@patch('forgesvn.model.svn.svn_path_exists')
def test_update_checkout_url(self, svn_path_exists):
impl = SVNImplementation(Mock())
opts = impl._repo.app.config.options = {}
svn_path_exists.side_effect = lambda path: False
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], '')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = ''
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
|
MasterAlish/kyrgyz_tili
|
generator.py
|
Python
|
gpl-3.0
| 232
| 0.004386
|
# coding=utf-8
import sys
|
from kg.db.generate_words import generate
try:
if len(sys.argv) > 1:
generate(sys.argv[1])
else:
generate()
except Exception as e:
print(u"Ката:")
print(
|
"\t"+e.message)
|
denismakogon/aria-cli
|
aria_cli/config/parser_config.py
|
Python
|
apache-2.0
| 7,852
| 0.001783
|
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa
import argparse
from aria_cli import commands as aria
from aria_cli.config import argument_utils
from argcomplete import completers
yaml_files_completer = completers.FilesCompleter(['*.yml', '*.yaml'])
archive_files_completer = completers.FilesCompleter(
['*.zip', '*.tar', '*.tar.gz', '*.tar.bz2'])
FORMAT_INPUT_AS_YAML_OR_DICT = 'formatted as YAML or as "key1=value1;key2=value2"'
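# Illustrative inputs accepted by the commands below (assumption based on the
# format string above): a YAML file such as inputs.yaml, or an inline string
# such as "key1=value1;key2=value2".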
def workflow_id_argument(hlp):
return {
'metavar': 'WORKFLOW',
'dest': 'workflow_id',
'type': str,
'required': True,
'help': hlp,
}
def parser_config():
return {
'description': 'Manages ARIA in different Cloud Environments',
'arguments': {
'--version': {
'help': 'show version information and exit',
'action': aria.version
}
},
'commands': {
'validate': {
'arguments': {
'-p,--blueprint-path': {
'metavar': 'BLUEPRINT_FILE',
'type': argparse.FileType(),
'dest': 'blueprint_path',
'required': True,
'help': "Path to the application's blueprint file",
'completer': yaml_files_completer
}
},
'help': 'command for validating a blueprint',
'handler': aria.local.validate
},
'init': {
            'help': 'Init a local workflow execution environment in '
                    'the current working directory',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
},
'-i,--inputs': {
'metavar': 'INPUTS',
'dest': 'inputs',
'required': False,
'help': 'Inputs file/string for the local workflow creation ({0})'
.format(FORMAT_INPUT_AS_YAML_OR_DICT)
},
'--install-plugins': {
'dest': 'install_plugins_',
'action': 'store_true',
'default': False,
'help': 'Install necessary plugins of the given blueprint.'
}
},
'handler': aria.local.init
},
'install-plugins': {
'help': 'Installs the necessary plugins for a given blueprint',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
}
},
'handler': aria.local.install_plugins
},
'create-requirements': {
'help': 'Creates a PIP compliant requirements file for the given blueprint',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
},
'-o,--output': {
'metavar': 'REQUIREMENTS_OUTPUT',
'dest': 'output',
'required': False,
'help': 'Path to a file that will hold the '
'requirements of the blueprint'
|
}
},
'handler': aria.local.create_requirements
},
'exec
|
ute': {
'help': 'Execute a workflow locally',
'arguments': {
'-w,--workflow':
argument_utils.remove_completer(
workflow_id_argument(
hlp='The workflow to execute locally')),
'-p,--parameters': {
'metavar': 'PARAMETERS',
'dest': 'parameters',
'default': {},
'type': str,
'required': False,
'help': 'Parameters for the workflow execution ({0})'
.format(FORMAT_INPUT_AS_YAML_OR_DICT)
},
'--allow-custom-parameters': {
'dest': 'allow_custom_parameters',
'action': 'store_true',
'default': False,
'help': 'A flag for allowing the passing of custom parameters ('
"parameters which were not defined in the workflow's schema in "
'the blueprint) to the execution'
},
'--task-retries': {
'metavar': 'TASK_RETRIES',
'dest': 'task_retries',
'default': 0,
'type': int,
'help': 'How many times should a task be retried in case '
'it fails'
},
'--task-retry-interval': {
'metavar': 'TASK_RETRY_INTERVAL',
'dest': 'task_retry_interval',
'default': 1,
'type': int,
'help': 'How many seconds to wait before each task is retried'
},
'--task-thread-pool-size': {
'metavar': 'TASK_THREAD_POOL_SIZE',
'dest': 'task_thread_pool_size',
'default': 1,
'type': int,
                        'help': 'The size of the thread pool to execute tasks in'
}
},
'handler': aria.local.execute
},
'outputs': {
'help': 'Display outputs',
'arguments': {},
'handler': aria.local.outputs
},
'instances': {
'help': 'Display node instances',
'arguments': {
'--node-id': {
'metavar': 'NODE_ID',
'dest': 'node_id',
'default': None,
'type': str,
'required': False,
'help': 'Only display node instances of this node id'
}
},
'handler': aria.local.instances
}
}
}
|
OCA/carrier-delivery
|
delivery_carrier_label_dispatch/__init__.py
|
Python
|
agpl-3.0
| 988
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# Th
|
is program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for mo
|
re details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import picking_dispatch
from . import wizard
|
spasovski/zamboni
|
mkt/site/tests/test_middleware.py
|
Python
|
bsd-3-clause
| 15,746
| 0.000191
|
import datetime
from django.conf import settings
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
from test_utils import RequestFactory
import amo.tests
from users.models import UserProfile
from mkt.site.middleware import DeviceDetectionMiddleware
from mkt.site.fixtures import fixture
_langs = ['cs', 'de', 'en-US', 'es', 'fr', 'pt-BR', 'pt-PT']
@mock.patch.object(settings, 'LANGUAGES', [x.lower() for x in _langs])
class TestRedirectPrefixedURIMiddleware(amo.tests.TestCase):
def test_redirect_for_good_application(self):
for app in amo.APPS:
r = self.client.get('/%s/' % app)
self.assert3xx(r, '/', 302)
def test_redirect_for_bad_application(self):
r = self.client.get('/mosaic/')
eq_(r.status_code, 404)
def test_redirect_for_good_locale(self):
redirects = [
('/en-US/', '/?lang=en-us'),
('/pt-BR/', '/?lang=pt-br'),
('/pt-br/', '/?lang=pt-br'),
('/fr/', '/?lang=fr'),
('/es-PE/', '/?lang=es'),
]
for before, after in redirects:
r = self.client.get(before)
self.assert3xx(r, after, 302)
def test_preserve_qs_for_lang(self):
r = self.client.get('/pt-BR/firefox/privacy-policy?omg=yes')
self.assert3xx(r, '/privacy-policy?lang=pt-br&omg=yes', 302)
r = self.client.get('/pt-BR/privacy-policy?omg=yes')
self.assert3xx(r, '/privacy-policy?lang=pt-br&omg=yes', 302)
def test_switch_locale(self):
# Locale in URL prefix takes precedence.
r = self.client.get('/pt-BR/?lang=de')
self.assert3xx(r, '/?lang=pt-br', 302)
def test_no_locale(self):
r = self.client.get('/robots.txt')
eq_(r.status_code, 200)
r = self.client.get('/robots.txt?lang=fr')
eq_(r.status_code, 200)
def test_redirect_for_good_region(self):
redirects = [
('/restofworld/', '/?region=restofworld'),
('/worldwide/', '/?region=restofworld'),
('/br/', '/?region=br'),
('/us/', '/?region=us'),
('/BR/', '/?region=br'),
]
for before, after in redirects:
r = self.client.get(before)
self.assert3xx(r, after, 302)
def test_redirect_for_good_locale_and_region(self):
r = self.client.get('/en-US/br/developers/support?omg=yes',
follow=True)
# Can you believe this actually works?
self.assert3xx(r,
            '/developers/support?lang=en-us&region=br&omg=yes', 302)
def test_preserve_qs_for_region(self):
r = self.client.get('/br/developers/support?omg=yes')
self.assert3xx(r, '/developers/support?region=br&omg=yes', 302)
def test_switch_region(self):
r = self.client.get(
|
'/restofworld/?region=brazil')
self.assert3xx(r, '/?region=restofworld', 302)
def test_404_for_bad_prefix(self):
for url in ['/xxx', '/xxx/search/',
'/brazil/', '/BRAZIL/',
'/pt/?lang=de', '/pt-XX/brazil/']:
r = self.client.get(url)
got = r.status_code
eq_(got, 404, "For %r: expected '404' but got %r" % (url, got))
@mock.patch.
|
object(settings, 'LANGUAGES', [x.lower() for x in _langs])
@mock.patch.object(settings, 'LANGUAGE_URL_MAP',
dict([x.lower(), x] for x in _langs))
class TestLocaleMiddleware(amo.tests.TestCase):
def test_accept_good_locale(self):
locales = [
('en-US', 'en-US', 'en-US,en-US'),
('pt-BR', 'pt-BR', 'pt-BR,en-US'),
('pt-br', 'pt-BR', None),
('fr', 'fr', 'fr,en-US'),
('es-PE', 'es', 'es,en-US'),
('fr', 'fr', 'fr,en-US'),
]
for locale, r_lang, c_lang in locales:
r = self.client.get('/robots.txt?lang=%s' % locale)
if c_lang:
eq_(r.cookies['lang'].value, c_lang)
else:
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, r_lang)
def test_accept_language_and_cookies(self):
# Your cookie tells me pt-BR but your browser tells me en-US.
self.client.cookies['lang'] = 'pt-BR,pt-BR'
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, 'en-US,')
eq_(r.context['request'].LANG, 'en-US')
# Your cookie tells me pt-br but your browser tells me en-US.
self.client.cookies['lang'] = 'pt-br,fr'
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, 'en-US,')
eq_(r.context['request'].LANG, 'en-US')
# Your cookie tells me pt-BR and your browser tells me pt-BR.
self.client.cookies['lang'] = 'pt-BR,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, 'pt-BR')
# You explicitly changed to fr, and your browser still tells me pt-BR.
# So no new cookie!
self.client.cookies['lang'] = 'fr,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, 'fr')
# You explicitly changed to fr, but your browser still tells me es.
# So make a new cookie!
self.client.cookies['lang'] = 'fr,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='es')
eq_(r.cookies['lang'].value, 'es,')
eq_(r.context['request'].LANG, 'es')
def test_ignore_bad_locale(self):
# Good? Store language.
r = self.client.get('/robots.txt?lang=fr')
eq_(r.cookies['lang'].value, 'fr,en-US')
# Bad? Reset language.
r = self.client.get('/robots.txt?lang=')
eq_(r.cookies['lang'].value, 'en-US,en-US')
# Still bad? Don't change language.
for locale in ('xxx', '<script>alert("ballin")</script>'):
r = self.client.get('/robots.txt?lang=%s' % locale)
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
# Good? Change language.
r = self.client.get('/robots.txt?lang=fr')
eq_(r.cookies['lang'].value, 'fr,en-US')
def test_already_have_cookie_for_bad_locale(self):
for locale in ('', 'xxx', '<script>alert("ballin")</script>'):
self.client.cookies['lang'] = locale
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, settings.LANGUAGE_CODE + ',')
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
def test_no_cookie(self):
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, settings.LANGUAGE_CODE + ',')
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
def test_no_api_cookie(self):
res = self.client.get('/api/v1/apps/schema/?region=restofworld',
HTTP_ACCEPT_LANGUAGE='de')
ok_(not res.cookies)
def test_cookie_gets_set_once(self):
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='de')
eq_(r.cookies['lang'].value, 'de,')
# Since we already made a request above, we should remember the lang.
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='de')
eq_(r.cookies.get('lang'), None)
def test_accept_language(self):
locales = [
('', settings.LANGUAGE_CODE),
('de', 'de'),
('en-us, de', 'en-US'),
('en-US', 'en-US'),
('fr, en', 'fr'),
('pt-XX, xx, yy', 'pt-PT'),
('pt', 'pt-PT'),
('pt, de', 'pt-PT'),
('pt-XX, xx, de', 'pt-PT'),
('pt-br', 'pt-BR'),
('pt-BR', 'pt-BR'),
('xx, yy, zz', settings.LANGUAGE_CODE),
('<script>alert("ballin")</script>', settings.LANGUAGE_CODE),
('en-us;q=0.5, de', 'de'),
('es-PE', 'es'),
]
for given, expected in locales:
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAG
|
last-g/pyshop
|
pyshop/views/xmlrpc.py
|
Python
|
bsd-3-clause
| 8,468
| 0.001299
|
# -*- coding: utf-8 -*-
"""
Implement PyPiXmlRpc Service.
See: http://wiki.python.org/moin/PyPiXmlRpc
"""
import logging
from pyramid_xmlrpc import XMLRPCView
from pyshop.models import DBSession, Package, Release, ReleaseFile
from pyshop.helpers import pypi
log = logging.getLogger(__name__)
# XXX not tested.
class PyPI(XMLRPCView):
def list_packages(self):
"""
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
"""
session = DBSession()
names = [p.name for p in Package.all(session, order_by=Package.name)]
return names
def package_releases(self, package_name, show_hidden=False):
"""
Retrieve a list of the releases registered for the given package_name.
Returns a list with all version strings if show_hidden is True or
only the non-hidden ones otherwise."""
session = DBSession()
package = Package.by_name(session, package_name)
return [rel.version for rel in package.sorted_releases]
def package_roles(self, package_name):
"""
        Retrieve a list of users and their roles for a given
package_name. Role is either 'Maintainer' or 'Owner'.
"""
session = DBSession()
package = Package.by_name(session, package_name)
owners = [('Owner', o.name) for o in package.owners]
maintainers = [('Maintainer', o.name) for o in package.maintainers]
return owners + maintainers
def user_packages(self, user):
"""
Retrieve a list of [role_name, package_name] for a given username.
Role is either 'Maintainer' or 'Owner'.
"""
session = DBSession()
owned = Package.by_owner(session, user)
maintained = Package.by_maintainer(session, user)
owned = [('Owner', p.name) for p in owned]
maintained = [('Maintainer', p.name) for p in maintained]
return owned + maintained
def release_downloads(self, package_name, version):
"""
Retrieve a list of files and download count for a given package and
release version.
"""
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
if release_files:
release_files = [(f.release.package.name,
f.filename) for f in release_files]
return release_files
def release_urls(self, package_name, version):
"""
Retrieve a list of download URLs for the given package release.
Returns a list of dicts with the following keys:
url
packagetype ('sdist', 'bdist', etc)
filename
size
md5_digest
downloads
has_sig
python_version (required version, or 'source', or 'any')
comment_text
"""
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
return [{'url': f.url,
'packagetype': f.package_type,
'filename': f.filename,
'size': f.size,
'md5_digest': f.md5_digest,
'downloads': f.downloads,
'has_sig': f.has_sig,
'comment_text': f.comment_text,
'python_version': f.python_version
}
for f in release_files]
def release_data(self, package_name, version):
"""
Retrieve metadata describing a specific package release.
Returns a dict with keys for:
name
version
stable_version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
classifiers (list of classifier strings)
requires
requires_dist
provides
provides_dist
requires_external
requires_python
obsoletes
obsoletes_dist
project_url
docs_url (URL of the packages.python.org docs
if they've been supplied)
If the release does not exist, an empty dictionary is returned.
"""
session = DBSession()
release = Release.by_version(session, package_name, version)
if release:
result = {'name': release.package.name,
'version': release.version,
'stable_version': '',
'author': release.author.name,
'author_email': release.author.email,
'home_page': release.home_page,
'license': release.license,
'summary': release.summary,
'description': release.description,
'keywords': release.keywords,
'platform': release.platform,
'download_url': release.download_url,
'classifiers': [c.name for c in release.classifiers],
#'requires': '',
#'requires_dist': '',
#'provides': '',
#'provides_dist': '',
#'requires_external': '',
#'requires_python': '',
#'obsoletes': '',
#'obsoletes_dist': '',
'bugtrack_url': release.bugtrack_url,
'docs_url': release.docs_url,
}
if release.maintainer:
result.update({'maintainer': release.maintainer.name,
'maintainer_email': release.maintainer.email,
})
return dict([(key, val or '') for key, val in result.items()])
def search(self, spec, operator='and'):
"""
Search the package database using the indicated search spec.
The spec may include any of the keywords described in th
|
e above list
(except 'stable_version' and 'classifiers'),
for example: {'description': 'spam'} will search description fields.
Within the spec, a field's value can be a string or a list of strings
(the values within the list are combined with an OR),
for example: {'name': ['
|
foo', 'bar']}.
Valid keys for the spec dict are listed here. Invalid keys are ignored:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
Arguments for different fields are combined using either "and"
(the default) or "or".
Example: search({'name': 'foo', 'description': 'bar'}, 'or').
The results are returned as a list of dicts
{'name': package name,
'version': package release version,
'summary': package release summary}
"""
api = pypi.proxy
rv = []
# search in proxy
for k, v in spec.items():
rv += api.search({k: v}, True)
# search in local
session = DBSession()
release = Release.search(session, spec, operator)
rv += [{'name': r.package.name,
'version': r.version,
'summary': r.summary,
# hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
'_pypi_ordering':'',
} for r in release]
return rv
def browse(self, classifiers):
"""
Retrieve a list of (name, version) pairs of all releases classified
with all of the given classifiers. 'classifiers' must be a list of
Trove classifier strings.
changelog(since)
Retrieve a list of four-tuples (name, version, timestamp, action)
since the given
|
kingtaurus/cs224d
|
assignment3/codebase_release/rnn_tensorarray.py
|
Python
|
mit
| 22,077
| 0.006749
|
import sys
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import itertools
import shutil
import tensorflow as tf
import tree as tr
from utils import Vocab
from collections import OrderedDict
import seaborn as sns
sns.set_style('whitegrid')
def initialize_uninitialized_vars(session):
uninitialized = [ var for var in tf.all_variables()
if not session.run(tf.is_variable_initialized(var)) ]
session.run(tf.initialize_variables(uninitialized))
def variable_summaries(variable, name):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(variable)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(variable - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(variable))
tf.summary.scalar('min/' + name, tf.reduce_min(variable))
# tf.summary.histogram(name, variable)
RESET_AFTER = 50
class Config(object):
"""Holds model hyperparams and data information.
Model objects are passed a Config() object at instantiation.
"""
embed_size = 50
label_size = 2
early_stopping = 2
anneal_threshold = 0.99
anneal_by = 1.5
max_epochs = 30
lr = 0.01
l2 = 0.02
model_name = 'rnn_embed=%d_l2=%f_lr=%f.weights'%(embed_size, l2, lr)
#initial attempt to create graph
# currently implicitly assumes tree structure (which can't be passed into tf)
# vector_stack = tf.TensorArray(tf.float32,
# size=0,
#
|
dynamic_size=True,
# clear_after_read=True,
# infer_shape=True)
# index = tf.placeholder(shape=(), dtype=tf.int32)
# def embed_word(word_index):
# with tf.device('/cpu:0'):
# with tf.variable_scope("Composition", reuse=True):
# embedding = tf.g
|
et_variable('embedding')
# return tf.expand_dims(tf.gather(embedding, word_index), 0)
# def combine_children(left_location, right_location):
# with tf.variable_scope('Composition', reuse=True):
# W1 = tf.get_variable('W1')
# b1 = tf.get_variable('b1')
# return tf.nn.relu(tf.matmul(tf.concat(1, [vector_stack.read(left_location), vector_stack.read(right_location)]), W1) + b1)
# tf.gather(is_leaf, index)
# #get if this a leaf
# tf.gather(word, index)
# #get the word associated
# tf.gather(left_child, index)
# tf.gather(right_child, index)
## ORIGINAL IDEA:
# def walk_node(index):
# #tf.cond(tf.gather(isLeaf, index,), ..
# if in_node.isLeaf is True:
# #push the value onto the stack and return index?
# word_id = self.vocab.encode(in_node.word)
# print("word_id = ", word_id)
# vector_stack.write(vector_stack.size() - 1, embed_word(word_id))
# return vector_stack.size() - 1
# #so we return the index
# if in_node.isLeaf is False:
# left_node = walk_node(in_node.left, vocab)
# right_node = walk_node(in_node.right, vocab)
# vector_stack.concat(combine_children(left_node, right_node))
# return vector_stack.size() - 1
# #merge the left - right pair, add it back to the stack
# #this should never be hit(?)
# return 0
class RNN_Model():
def __init__(self, config):
self.config = config
self.load_data()
self.merged_summaries = None
self.summary_writer = None
self.is_a_leaf = tf.placeholder(tf.bool, [None], name="is_a_leaf")
self.left_child = tf.placeholder(tf.int32, [None], name="lchild")
self.right_child = tf.placeholder(tf.int32, [None], name="rchild")
self.word_index = tf.placeholder(tf.int32, [None], name="word_index")
self.labelholder = tf.placeholder(tf.int32, [None], name="labels_holder")
self.add_model_vars()
self.tensor_array = tf.TensorArray(tf.float32,
size=0,
dynamic_size=True,
clear_after_read=False,
infer_shape=False)
#tensor array stores the vectors (embedded or composed)
self.tensor_array_op = None
self.prediction = None
self.logits = None
self.root_logits = None
self.root_predict = None
self.root_loss = None
self.full_loss = None
self.training_op = None
#tensor_array_op is the operation on the TensorArray
# private functions used to construct the graph.
def _embed_word(self, word_index):
with tf.variable_scope("Composition", reuse=True) as scope:
print(scope.name)
embedding = tf.get_variable("embedding")
print(embedding.name)
return tf.expand_dims(tf.gather(embedding, word_index), 0)
# private functions used to construct the graph.
def _combine_children(self, left_index, right_index):
left_tensor = self.tensor_array.read(left_index)
right_tensor = self.tensor_array.read(right_index)
with tf.variable_scope('Composition', reuse=True):
W1 = tf.get_variable('W1')
b1 = tf.get_variable('b1')
return tf.nn.relu(tf.matmul(tf.concat(1, [left_tensor, right_tensor]), W1) + b1)
# i is the index (over data stored in the placeholders)
# identical type[out] = type[in]; can be used in while_loop
# so first iteration -> puts left most leaf on the tensorarray (and increments i)
# next iteration -> puts next left most (leaf on stack) and increments i
# ....
# until all the leaves are on the stack in the correct order
# starts combining the leaves after and adding to the stack
def _loop_over_tree(self, tensor_array, i):
is_leaf = tf.gather(self.is_a_leaf, i)
word_idx = tf.gather(self.word_index, i)
left_child = tf.gather(self.left_child, i)
right_child = tf.gather(self.right_child, i)
node_tensor = tf.cond(is_leaf, lambda : self._embed_word(word_idx),
lambda : self._combine_children(left_child, right_child))
tensor_array = tensor_array.write(i, node_tensor)
i = tf.add(i,1)
return tensor_array, i
def construct_tensor_array(self):
loop_condition = lambda tensor_array, i: \
tf.less(i, tf.squeeze(tf.shape(self.is_a_leaf)))
#iterate over all leaves + composition
tensor_array_op = tf.while_loop(cond=loop_condition,
body=self._loop_over_tree,
loop_vars=[self.tensor_array, 0],
parallel_iterations=1)[0]
return tensor_array_op
def inference_op(self, predict_only_root=False):
if predict_only_root:
return self.root_logits_op()
return self.logits_op()
def load_data(self):
"""Loads train/dev/test data and builds vocabulary."""
self.train_data, self.dev_data, self.test_data = tr.simplified_data(700, 100, 200)
# build vocab from training data
self.vocab = Vocab()
train_sents = [t.get_words() for t in self.train_data]
self.vocab.construct(list(itertools.chain.from_iterable(train_sents)))
def add_model_vars(self):
'''
        Your model contains the following parameters:
embedding: tensor(vocab_size, embed_size)
W1: tensor(2* embed_size, embed_size)
b1: tensor(1, embed_size)
U: tensor(embed_size, output_size)
bs: tensor(1, output_size)
Hint: Add the tensorflow variables to the graph here and *reuse* them while building
        the computation graphs for composition and projection for each tree
Hint: Use a variable_scope "Composition" for the composition layer, and
"Projection") for the linear transformations preceding the softmax.
'''
with tf.variable_scope('Composition') as scope:
### YOUR CODE HERE
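            # Hedged sketch of one possible completion (this is not the
            # original assignment solution): create the shared parameters
            # listed in the docstring so the variable_scope(..., reuse=True)
            # lookups elsewhere in this class can find them. len(self.vocab)
            # assumes Vocab implements __len__; substitute whatever vocab-size
            # accessor your Vocab class provides.
            tf.get_variable('embedding',
                            [len(self.vocab), self.config.embed_size])
            tf.get_variable('W1', [2 * self.config.embed_size,
                                   self.config.embed_size])
            tf.get_variable('b1', [1, self.config.embed_size])
        with tf.variable_scope('Projection'):
            tf.get_variable('U', [self.config.embed_size,
                                  self.config.label_size])
            tf.get_variable('bs', [1, self.config.label_size])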
|
baroquebobcat/pants
|
tests/python/pants_test/backend/codegen/wire/java/test_wire_integration.py
|
Python
|
apache-2.0
| 4,420
| 0.009955
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import open_zip
from pants.util.process_handler import subprocess
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.testutils.file_test_util import exact_files
class WireIntegrationTest(PantsRunIntegrationTest):
def test_good(self):
# wire example should compile without warnings with correct wire files.
# force a compile to happen, we count on compile output in this test
self.assert_success(self.run_pants(['clean-all']))
with self.temporary_workdir() as workdir:
cmd = ['compile', 'examples/src/java/org/pantsbuild/example/wire/temperatureservice']
pants_run = self.run_pants_with_workdir(cmd, workdir)
self.assert_suc
|
cess(pants_run)
pattern = 'gen/wire/[^/]*/[^/]*/[^/]*/org/pantsbuild/example/temperature/Temperature.java'
files = exact_files(workdir)
self.assertTrue(any(re.match(pattern, f) is not None for f in files),
'Expected pattern: {} in {}'.format(pattern, files))
def test_bundle_wire_normal(self):
with self.pants_results(['bundle.jvm',
|
'--deployjar',
'examples/src/java/org/pantsbuild/example/wire/temperatureservice']
) as pants_run:
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist',
('examples.src.java.org.pantsbuild.example.wire.temperatureservice.'
'temperatureservice-bundle'))
args = ['java', '-cp', 'wire-temperature-example.jar',
'org.pantsbuild.example.wire.temperatureservice.WireTemperatureExample']
java_run = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=out_path)
java_retcode = java_run.wait()
java_out = java_run.stdout.read()
self.assertEquals(java_retcode, 0)
self.assertIn('19 degrees celsius', java_out)
def test_bundle_wire_dependent_targets(self):
with self.pants_results(['bundle.jvm',
'examples/src/java/org/pantsbuild/example/wire/element']
) as pants_run:
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist',
'examples.src.java.org.pantsbuild.example.wire.element.element-bundle')
java_run = subprocess.Popen(['java', '-cp', 'wire-element-example.jar',
'org.pantsbuild.example.wire.element.WireElementExample'],
stdout=subprocess.PIPE,
cwd=out_path)
java_retcode = java_run.wait()
java_out = java_run.stdout.read()
self.assertEquals(java_retcode, 0)
self.assertIn('Element{symbol=Hg, name=Mercury, atomic_number=80, '
'melting_point=Temperature{unit=celsius, number=-39}, '
'boiling_point=Temperature{unit=celsius, number=357}}', java_out)
self.assertIn('Compound{name=Water, primary_element=Element{symbol=O, name=Oxygen, '
'atomic_number=8}, secondary_element=Element{symbol=H, name=Hydrogen, '
'atomic_number=1}}', java_out)
def test_compile_wire_roots(self):
pants_run = self.run_pants(['binary.jvm',
'examples/src/java/org/pantsbuild/example/wire/roots'])
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist', 'wire-roots-example.jar')
with open_zip(out_path) as zipfile:
jar_entries = zipfile.namelist()
def is_relevant(entry):
return (entry.startswith('org/pantsbuild/example/roots/') and entry.endswith('.class')
and '$' not in entry)
expected_classes = {
'org/pantsbuild/example/roots/Bar.class',
'org/pantsbuild/example/roots/Foobar.class',
'org/pantsbuild/example/roots/Fooboo.class',
}
received_classes = {entry for entry in jar_entries if is_relevant(entry)}
self.assertEqual(expected_classes, received_classes)
|
evax/ansible-modules-core
|
cloud/amazon/rds.py
|
Python
|
gpl-3.0
| 42,330
| 0.005599
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
|
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created.
|
Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ fa
|
sidzan/netforce
|
netforce_mfg/netforce_mfg/models/bom.py
|
Python
|
mit
| 3,042
| 0.005588
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
from datetime import *
import time
|
class Bom(Model):
_name = "bom"
_string = "Bill of Material"
_name_field = "number"
_key = ["number"]
_fields = {
"number": fields.Char("Number", required=True, search=True),
"product_id": fields.Many2One("product", "Product", required=True, search=True),
"qty": fields.Decimal("Qty", required=True, scale=6),
"uom_id": fields.Many2One("uom", "UoM", required=True),
"location_id":
|
fields.Many2One("stock.location", "FG Warehouse"),
"routing_id": fields.Many2One("routing", "Routing"),
"lines": fields.One2Many("bom.line", "bom_id", "Lines"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"max_qty_loss": fields.Decimal("Max Qty Loss", scale=6),
"container": fields.Selection([["sale", "From Sales Order"]], "FG Container"),
"lot": fields.Selection([["production", "From Production Order"]], "FG Lot"),
"qc_tests": fields.Many2Many("qc.test", "QC Tests"),
}
def _get_number(self, context={}):
while 1:
num = get_model("sequence").get_number("bom")
if not num:
return None
res = self.search([["number", "=", num]])
if not res:
return num
get_model("sequence").increment("bom")
_defaults = {
"number": _get_number,
}
def onchange_product(self,context={}):
data=context['data']
path=context['path']
line=get_data_path(data,path,parent=True)
product_id=line['product_id']
if product_id:
product=get_model('product').browse(product_id)
line['uom_id']=product.uom_id.id
return data
Bom.register()
|
chasemp/sup
|
suplib/run.py
|
Python
|
mit
| 152
| 0.006579
|
import subprocess
def runBash(cmd):
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = p.stdout.read().strip()
return out
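# Usage sketch (added for illustration; not part of the original module): run a
# shell command and capture its stripped stdout.
if __name__ == '__main__':
    print(runBash("echo hello world"))  # prints the captured output (bytes on Python 3)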
|
frossigneux/blazar
|
climate/openstack/common/config/generator.py
|
Python
|
apache-2.0
| 10,412
| 0.000096
|
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from climate.openstack.common import gettextutils
from climate.openstack.common import importutils
gettextutils.install('climate')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
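# Illustrative sketch (added; not part of this module): a library exposing its
# options through the 'oslo.config.opts' entry point consumed above would
# register a function shaped roughly like this, returning (group, opts) pairs:
#
#     def list_opts():
#         return [
#             (None, [cfg.StrOpt('backend', help='Storage backend to use')]),
#             ('database', [cfg.IntOpt('max_retries', default=10)]),
#         ]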
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'climate'
elif value.strip() != value:
return '"%s"' % value
return value
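# Example behaviour (added note, not in the original source): for an option
# named like 'host' whose default equals the local hostname, the function
# returns 'climate'; the machine's detected IP becomes '10.0.0.1'; and defaults
# living under sys.prefix or BASEDIR are rewritten to generic /usr/... paths.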
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%
|
Sanqui/spline-pokedex
|
splinext/pokedex/controllers/pokedex_gadgets.py
|
Python
|
mit
| 30,941
| 0.004467
|
# encoding: utf8
from __future__ import absolute_import, division
from collections import defaultdict, namedtuple
import colorsys
import logging
import wtforms.validators
from wtforms import Form, ValidationError, fields
from wtforms.ext.sqlalchemy.fields import QuerySelectField
import pokedex.db
import pokedex.db.tables as tables
import pokedex.formulae
from pylons import config, request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from sqlalchemy import and_, or_, not_
from sqlalchemy.orm import aliased, contains_eager, eagerload, eagerload_all, join
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from spline import model
from spline.model import meta
from spline.lib import helpers as h
from spline.lib.base import BaseController, render
from spline.lib.forms import DuplicateField, QueryTextField
from splinext.pokedex import helpers as pokedex_helpers
import splinext.pokedex.db as db
from splinext.pokedex.forms import PokedexLookupField
log = logging.getLogger(__name__)
### Capture rate ("Pokéball performance") stuff
class OptionalLevelField(fields.IntegerField):
"""IntegerField subclass that requires either a number from 1 to 100, or
nothing.
Also overrides the usual IntegerField logic to default to an empty field.
Defaulting to 0 means the field can't be submitted from scratch.
"""
def __init__(self, label=u'', validators=[], **kwargs):
validators.extend([
wtforms.validators.NumberRange(min=1, max=100),
wtforms.validators.Optional(),
])
super(OptionalLevelField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return unicode(self.data or u'')
class CaptureRateForm(Form):
pokemon = PokedexLookupField(u'Wild Pokémon', valid_type='pokemon')
current_hp = fields.IntegerField(u'% HP left', [wtforms.validators.NumberRange(min=1, max=100)],
default=100)
status_ailment = fields.SelectField('Status ailment',
choices=[
('', u'—'),
('PAR', 'PAR'),
('SLP', 'SLP'),
('PSN', 'PSN'),
('BRN', 'BRN'),
('FRZ', 'FRZ'),
],
default=u'',
)
### Extras
level = OptionalLevelField(u'Wild Pokémon\'s level', default=u'')
your_level = OptionalLevelField(u'Your Pokémon\'s level', default=u'')
terrain = fields.SelectField(u'Terrain',
choices=[
('land', u'On land'),
('fishing', u'Fishing'),
('surfing', u'Surfing'),
],
default='land',
)
twitterpating = fields.BooleanField(u'Wild and your Pokémon are opposite genders AND the same species')
caught_before = fields.BooleanField(u'Wild Pokémon is in your Pokédex')
is_dark = fields.BooleanField(u'Nighttime or walking in a cave')
# ...
is_pokemon_master = fields.BooleanField(u'Holding Up+B')
def expected_attempts(catch_chance):
u"""Given the chance to catch a Pokémon, returns approximately the number
of attempts required to succeed.
"""
# Hey, this one's easy!
return 1 / catch_chance
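# Added illustration (not in the original source): a flat 25% capture chance
# therefore averages out to 1 / 0.25 = 4 throws.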
def expected_attempts_oh_no(partitions):
"""Horrible version of the above, used for Quick and Timer Balls.
Now there are a few finite partitions at the beginning. `partitions` looks
like:
[
(catch_chance, number_of_turns),
(catch_chance, number_of_turns),
...
]
For example, a Timer Ball might look like [(0.25, 10), (0.5, 10), ...].
The final `number_of_turns` must be None to indicate that the final
`catch_chance` lasts indefinitely.
"""
turn = 0 # current turn
p_got_here = 1 # probability that we HAVE NOT caught the Pokémon yet
expected_attempts = 0
# To keep this "simple", basically just count forwards each turn until the
# partitions are exhausted
for catch_chance, number_of_turns in partitions:
if number_of_turns is None:
# The rest of infinity is covered by the usual expected-value formula with
# the final catch chance, but factoring in the probability that the Pokémon
# is still uncaught, and that turns have already passed
expected_attempts += p_got_here * (1 / catch_chance + turn)
# Done!
break
for _ in range(number_of_turns):
# Add the contribution of possibly catching it this turn. That's
# the chance that we'll catch it this turn, times the turn number
# -- times the chance that we made it this long without catching
turn += 1
expected_attempts += p_got_here * catch_chance * turn
            # Probability that we get to the next turn is decreased by the
            # probability that we didn't catch it this turn
            p_got_here *= 1 - catch_chance
return expected_attempts
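# Added worked example with hypothetical numbers (not from the original
# source): a ball with a 25% chance for its first 10 turns and 50% afterwards,
# i.e. expected_attempts_oh_no([(0.25, 10), (0.5, None)]), comes out to roughly
# 3.9 expected throws -- slightly better than the flat 25% ball's 4.0 above.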
CaptureChance = namedtuple('CaptureChance', ['condition', 'is_active', 'chances'])
class StatCalculatorForm(Form):
pokemon = PokedexLookupField(u'Pokémon', valid_type='pokemon')
level = fields.IntegerField(u'Level', [wtforms.validators.NumberRange(min=1, max=100)],
default=100)
nature = QuerySelectField('Nature',
query_factory=lambda: db.pokedex_session.query(tables.Nature).order_by(tables.Nature.name),
get_pk=lambda _: _.name.lower(),
get_label=lambda _: _.name,
allow_blank=True,
)
def stat_graph_chunk_color(gene):
"""Returns a #rrggbb color, given a gene. Used for the pretty graph."""
hue = gene / 31
r, g, b = colorsys.hls_to_rgb(hue, 0.75, 0.75)
return "#%02x%02x%02x" % (r * 256, g * 256, b * 256)
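# Added note: gene 0 maps to hue 0.0 and gene 31 to hue 1.0, which colorsys
# wraps back to the same red, so the graph's colours sweep one full hue cycle.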
class PokedexGadgetsController(BaseController):
def capture_rate(self):
"""Find a page in the Pokédex given a name.
Also performs fuzzy search.
"""
c.javascripts.append(('pokedex', 'pokedex-gadgets'))
c.form = CaptureRateForm(request.params)
valid_form = False
if request.params:
valid_form = c.form.validate()
if valid_form:
c.results = {}
c.pokemon = c.form.pokemon.data
level = c.form.level.data
# Overrule a 'yes' for opposite genders if this Pokémon has no
# gender
if c.pokemon.gender_rate == -1:
c.form.twitterpating.data = False
percent_hp = c.form.current_hp.data / 100
status_bonus = 10
if c.form.status_ailment.data in ('PAR', 'BRN', 'PSN'):
status_bonus = 15
elif c.form.status_ailment.data in ('SLP', 'FRZ'):
status_bonus = 20
# Little wrapper around capture_chance...
def capture_chance(ball_bonus=10, **kwargs):
return pokedex.formulae.capture_chance(
percent_hp=percent_hp,
capture_rate=c.pokemon.capture_rate,
status_bonus=status_bonus,
ball_bonus=ball_bonus,
**kwargs
)
### Do some math!
# c.results is a dict of ball_name => chance_tuples.
# (It would be great, but way inconvenient, to use item objects.)
# chance_tuples is a list of (condition, is_active, chances):
# - condition: a string describing some mutually-exclusive
# condition the ball responds to
# - is_active: a boolean indicating whether this condition is
# currently met
# - chances: an iterable of chances as returned from capture_chance
# This is a teeny shortcut.
only = lambda _: [CaptureChance( '', True, _ )]
normal_chance = capture_chance()
# Gen I
c.results[u'Poké Ball'] = only(normal_chance)
c.results[u'Great Ball'] = only(capture_chance(15))
c.results[u'Ultra Ball'] =
|
endlessm/chromium-browser
|
third_party/catapult/dashboard/dashboard/oauth2_decorator.py
|
Python
|
bsd-3-clause
| 648
| 0
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides oauth2 decorators in a mockable way."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oauth2client.appengine import OAuth2Decorator
from dashboard.common import utils
DECORATOR = OAuth2Decorator(
client_id='425761728072.apps.googleusercontent.com',
client_secret='9g-XlmEFW8ROI01YY6nrQVKq',
scope=utils.EMAIL_SCOPE,
message='Oauth error occurred!',
callback_path='/oauth2callback')
|
bqlabs/horus
|
src/horus/gui/engine.py
|
Python
|
gpl-2.0
| 1,802
| 0.000555
|
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
from horus.engine.driver.driver import Driver
from horus.engine.scan.ciclop_scan import CiclopScan
from horus.engine.scan.current_video import CurrentVideo
from horus.engine.calibration.pattern import Pattern
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.calibration.camera_intrinsics import CameraIntrinsics
from horus.engine.calibration.autocheck import Autocheck
from horus.engine.calibration.laser_triangulation import LaserTriangulation
from horus.engine.calibration.platform_extrinsics import PlatformExtrinsics
from horus.engine.calibration.combo_calibration import ComboCalibration
from horus.engine.algorithms.image_capture import ImageCapture
from horus.engine.algorithms.image_detection import ImageDetection
from horus.engine.algorithms.laser_segmentation import LaserSegmentation
from horus.engine.algorithms.point_cloud_generation import PointCloudGeneration
from horus.engine.algorithms.point_cloud_roi import PointCloudROI
# Instances of engine modules
driver = Driver()
ciclop_scan = CiclopScan()
current_video = CurrentVideo()
pattern = Pattern()
calibration_data = CalibrationData()
camera_intrinsics = CameraIntrinsics()
scanner_autocheck = Autocheck()
laser_triangulation = LaserTriangulation()
platform_extrinsics = PlatformExtrinsics()
combo_calibration = ComboCalibration()
image_capture = ImageCapture()
image_detection = ImageDetection()
laser_segmentation = LaserSegmentation()
point_cloud_generation = PointCloudGeneration()
point_cloud_roi = PointCloudROI()
|
gajim/gajim
|
gajim/gtk/chat_list_stack.py
|
Python
|
gpl-3.0
| 11,132
| 0
|
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Dict
from typing import Optional
from typing import cast
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import GLib
from nbxmpp import JID
from gajim.common import app
from gajim.common import events
from . import structs
from .chat_filter import ChatFilter
from .chat_list import ChatList
from .chat_list import ChatRow
HANDLED_EVENTS = (
events.MessageReceived,
events.MamMessageReceived,
events.GcMessageReceived,
events.MessageUpdated,
events.PresenceReceived,
events.MessageSent,
events.JingleRequestReceived,
events.FileRequestReceivedEvent
)
class ChatListStack(Gtk.Stack):
__gsignals__ = {
'unread-count-changed': (GObject.SignalFlags.RUN_LAST,
None,
(str, int)),
'chat-selected': (GObject.SignalFlags.RUN_LAST,
None,
(str, str, object)),
'chat-unselected': (GObject.SignalFlags.RUN_LAST,
None,
()),
'chat-removed': (GObject.SignalFlags.RUN_LAST,
None,
(str, object, str)),
}
def __init__(self,
chat_filter: ChatFilter,
search_entry: Gtk.SearchEntry
) -> None:
Gtk.Stack.__init__(self)
self.set_hexpand(True)
self.set_vexpand(True)
self.set_vhomogeneous(False)
self._chat_lists: Dict[str, ChatList] = {}
self._last_visible_child_name: str = 'default'
self.add_named(Gtk.Box(), 'default')
self.connect('notify::visible-child-name', self._on_visible_child_name)
search_entry.connect('search-changed', self._on_search_changed)
chat_filter.connect('filter-changed', self._on_filter_changed)
self._add_actions()
self.show_all()
def _add_actions(self) -> None:
actions = [
('toggle-chat-pinned', 'as', self._toggle_chat_pinned),
('move-chat-to-workspace', 'a{sv}', self._move_chat_to_workspace),
('mark-as-read', 'as', self._mark_as_read),
]
for action in actions:
action_name, variant, func = action
if variant is not None:
variant = GLib.VariantType.new(variant)
act = Gio.SimpleAction.new(action_name, variant)
act.connect('activate', func)
app.window.add_action(act)
def _on_visible_child_name(self, _stack: Gtk.Stack, _param: str) -> None:
        if self._last_visible_child_name == self.get_visible_child_name():
return
if self._last_visible_child_name != 'default':
            chat_list = cast(
                ChatList,
self.get_child_by_name(self._last_visible_child_name))
chat_list.set_filter_text('')
last_child = self.get_visible_child_name() or 'default'
self._last_visible_child_name = last_child
def get_chatlist(self, workspace_id: str) -> ChatList:
return self._chat_lists[workspace_id]
def get_selected_chat(self) -> Optional[ChatRow]:
chat_list = self.get_current_chat_list()
if chat_list is None:
return None
return chat_list.get_selected_chat()
def get_current_chat_list(self) -> Optional[ChatList]:
workspace_id = self.get_visible_child_name()
if workspace_id == 'empty' or workspace_id is None:
return None
return self._chat_lists[workspace_id]
def is_chat_active(self, account: str, jid: JID) -> bool:
chat = self.get_selected_chat()
if chat is None:
return False
if chat.account != account or chat.jid != jid:
return False
return chat.is_active
def _on_filter_changed(self, _filter: ChatFilter, name: str) -> None:
chat_list = cast(ChatList, self.get_visible_child())
chat_list.set_filter(name)
def _on_search_changed(self, search_entry: Gtk.SearchEntry) -> None:
chat_list = cast(ChatList, self.get_visible_child())
chat_list.set_filter_text(search_entry.get_text())
def add_chat_list(self, workspace_id: str) -> ChatList:
chat_list = ChatList(workspace_id)
chat_list.connect('row-selected', self._on_row_selected)
self._chat_lists[workspace_id] = chat_list
self.add_named(chat_list, workspace_id)
return chat_list
def remove_chat_list(self, workspace_id: str) -> None:
chat_list = self._chat_lists[workspace_id]
self.remove(chat_list)
for account, jid, _, _ in chat_list.get_open_chats():
self.remove_chat(workspace_id, account, jid)
self._chat_lists.pop(workspace_id)
chat_list.destroy()
def _on_row_selected(self,
_chat_list: ChatList,
row: Optional[ChatRow]
) -> None:
if row is None:
self.emit('chat-unselected')
return
self.emit('chat-selected', row.workspace_id, row.account, row.jid)
def show_chat_list(self, workspace_id: str) -> None:
cur_workspace_id = self.get_visible_child_name()
if cur_workspace_id == workspace_id:
return
if cur_workspace_id != 'default' and cur_workspace_id is not None:
self._chat_lists[cur_workspace_id].unselect_all()
self.set_visible_child_name(workspace_id)
def add_chat(self, workspace_id: str, account: str, jid: JID, type_: str,
pinned: bool = False) -> None:
chat_list = self._chat_lists.get(workspace_id)
if chat_list is None:
chat_list = self.add_chat_list(workspace_id)
chat_list.add_chat(account, jid, type_, pinned)
def select_chat(self, account: str, jid: JID) -> None:
chat_list = self.find_chat(account, jid)
if chat_list is None:
return
self.show_chat_list(chat_list.workspace_id)
chat_list.select_chat(account, jid)
def store_open_chats(self, workspace_id: str) -> None:
chat_list = self._chat_lists[workspace_id]
open_chats = chat_list.get_open_chats()
app.settings.set_workspace_setting(
workspace_id, 'open_chats', open_chats)
def _toggle_chat_pinned(self,
_action: Gio.SimpleAction,
param: GLib.Variant
) -> None:
workspace_id, account, jid = param.unpack()
jid = JID.from_string(jid)
chat_list = self._chat_lists[workspace_id]
chat_list.toggle_chat_pinned(account, jid)
self.store_open_chats(workspace_id)
@structs.actionmethod
def _move_chat_to_workspace(self,
_action: Gio.SimpleAction,
params: structs.MoveChatToWorkspaceAP
) -> None:
current_chatlist = cast(ChatList, self.get_visible_child())
type_ = current_chatlist.get_chat_type(params.account, params.jid)
if type_ is None:
return
current_chatlist.remove_chat(params.account, params.jid)
new_chatlist = self.get_chatlist(params.workspace_id)
new_chatlist.add_chat(params.account, params.jid, type_)
self.store_open_chats(current_chatlist.workspace_id)
self.store_open_chats(params.workspace_id)
de
|
SpaceWars/spacewars
|
src/layers/base_layers.py
|
Python
|
gpl-3.0
| 1,054
| 0
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2015 Luiz Fernando Oliveira, Carlos Oliveira, Matheus Fernandes
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from cocos.layer import Layer
from pyglet import resource
from pyglet.gl import glPushMatrix, glPopMatrix
class BackgroundLayer(Layer):
""" A simple layer with a image background. """
def __init__(self, background):
super(BackgroundLayer, self).__init__()
self.image = resource.image(background)
def draw(self):
glPushMatrix()
self.transform()
self.image.blit(0, 0)
glPopMatrix()
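# Usage sketch (added; assumes cocos.director has been initialised and the
# asset name is hypothetical):
#     from cocos.scene import Scene
#     scene = Scene(BackgroundLayer('background.png'))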
|
QingChenmsft/azure-cli
|
src/command_modules/azure-cli-consumption/azure/cli/command_modules/consumption/__init__.py
|
Python
|
mit
| 704
| 0.005682
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.consumption._help # pylint: disable=unused-import
def load_params(_):
import azure.cli.command_modules.consumption._params # pylint: disable=redefined-outer-name, unused-variable
def load_commands():
import azure.cli.command_modules.consumption.commands # pylint: disable=redefined-outer-name, unused-variable
|
AlbertoPeon/invenio
|
modules/miscutil/lib/plotextractor_unit_tests.py
|
Python
|
gpl-2.0
| 15,778
| 0.006401
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the plotextract script."""
__revision__ = "$Id$"
import os
import unittest
from invenio.plotextractor import put_it_together, \
find_open_and_close_braces, \
intelligently_find_filenames, \
assemble_caption
from invenio.plotextractor_output_utils import remove_dups, \
get_converted_image_name
from invenio.config import CFG_TMPDIR, CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite
from invenio.shellutils import run_shell_command
class PutItTogetherTest(unittest.TestCase):
"""Test functions related to the put_it_together function."""
def setUp(self):
self.empty_images_and_captions = []
self.dummy_line_index = -1
self.empty_lines = []
self.tex_file = 'unimportant'
def test_with_singles(self):
"""plotextractor - put_it_together with singles"""
single_image = 'singleimage'
single_caption = 'singlecaption'
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, single_caption, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('singleimage', 'singlecaption', \
'singlelabel')], \
'failed to zip captions correctly')
def test_with_multiples_0(self):
"""plotextractor - put_it_together with multiples"""
no_main_two_subs = ['', ['img1', 'img2']]
single_caption = 'singlecaption'
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(no_main_two_subs, single_caption, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('img1', 'singlecaption', 'singlelabel'), \
('img2', 'singlecaption', 'singlelabel')], \
'didn\'t zip multiple images to one caption correctly')
def test_with_multiples_1(self):
"""plotextractor - put_it_together with multiples 1"""
no_main_two_subs = ['', ['sub1', 'sub2']]
main_and_two_sub_captions = ['main caption', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(no_main_two_subs, main_and_two_sub_captions, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('sub1', 'main caption : subcap1', \
'singlelabel'), \
('sub2', 'main caption : subcap2', \
'singlelabel')], \
'didn\'t zip multiple images to main and subcaps correctly')
def test_with_multiples_2(self):
"""plotextractor - put_it_together with multiples 2"""
main_and_two_sub_images = ['main', ['sub1', 'sub2']]
main_and_two_sub_captions = ['main caption', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
            put_it_together(main_and_two_sub_images,
                            main_and_two_sub_captions,
single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('main', 'main caption', 'singlelabel'),
                                        ('sub1', 'main caption : subcap1', 'singlelabel'), \
('sub2', 'main caption : subcap2', 'singlelabel')], \
'didn\'t zip {main,sub}{images,captions} together properly')
def test_with_multiples_3(self):
"""plotextractor - put_it_together with multiples 3"""
single_image = 'singleimage'
no_main_two_subcaptions = ['', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, no_main_two_subcaptions, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('singleimage', 'subcap1 : subcap2', \
'singlelabel')], \
'didn\'t zip a single image to multiple subcaps correctly')
def test_extract_caption(self):
"""plotextractor - put_it_together with extract caption"""
self.example_lines = ['{some caption}', '[something else]', 'unrelated']
single_image = 'singleimage'
no_caption = ''
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, no_caption, single_label,
self.empty_images_and_captions, 1,
self.example_lines)
self.assertTrue(images_and_captions == [('singleimage', 'some caption', 'singlelabel')], \
'didn\'t correctly extract the caption for zipping')
class TestFindOpenAndCloseBraces(unittest.TestCase):
def test_simple_test(self):
"""plotextractor - find_open_and_close_braces simple"""
simple_test_lines = ['{simple}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{', simple_test_lines)
self.assertTrue(start == 0, 'didn\'t identify start index')
self.assertTrue(start_line == 0, 'didn\'t identify start line')
self.assertTrue(end == 7, 'didn\'t identify end index')
self.assertTrue(end_line == 0, 'didn\'t identify end line')
def test_braces_start_on_next_line_test(self):
"""plotextractor - find_open_and_close_braces next line"""
start_on_next_line_lines = ['nothing here', 'chars{morestuff', 'last}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{',
start_on_next_line_lines)
self.assertTrue(start == 5, 'didn\'t identify start index')
self.assertTrue(start_line == 1, 'didn\'t identify start line')
self.assertTrue(end == 4, 'didn\'t identify end index')
self.assertTrue(end_line == 2, 'didn\'t identify end line')
def test_confounding_braces(self):
"""plotextractor - find_open_and_close_braces confounding"""
confounding_braces_lines = ['{brace{bracebrace}{}', 'brace{{brace}',
'brace}', '}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{',
confo
|
spencerkclark/aospy-obj-lib
|
aospy_user/variables/idealized_moist/dynamics.py
|
Python
|
gpl-3.0
| 3,317
| 0
|
from aospy import Var
from aospy_user import calcs, units
from aospy_user.variables.universal.energy_native import (swdn_sfc, olr,
lwdn_sfc, lwup_sfc)
from aospy_user.variables.idealized_moist.energy import flux_t, flux_lhe
# Model native (or self-coded) diagnostics
umse_vint = Var(
name='umse_vint',
domain='atmos',
description=('u*mse integrated vertically in the idealized model'),
units=units.m3_s3_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
vmse_vint = Var(
name='vmse_vint',
domain='atmos',
description=('v*mse integrated vertically in the idealized model'),
units=units.m3_s3_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
omega_mse_vint = Var(
name='omega_mse_vint',
domain='atmos',
description=('omega*mse integrated vertically in the idealized model'),
units=units.J_Pa_kg_s_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
umse = Var(
name='umse',
domain='atmos',
description=('u*mse in idealized model'),
units=units.m3_s3,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
vmse = Var(
name='vmse',
domain='atmos',
description=('v*mse in idealized model'),
units=units.m3_s3,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
omega_mse = Var(
name='omega_mse',
domain='atmos',
description=('omega*mse in idealized model'),
units=units.J_Pa_kg_s,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
# Computed variables
aht_im = Var(
name='aht_im',
domain='atmos',
description=('atmospheric heat transport'),
variables=(swdn_sfc, olr, lwdn_sfc, lwup_sfc, flux_t, flux_lhe),
def_time=True,
def_vert=False,
    def_lat=True,
    def_lon=False,
    func=calcs.idealized_moist.energy.aht,
units=units.W
)
# Continue supporting these?
dmv_dx_im = Var(
name='dmv_dx_im',
domain='atmos',
description=('Zonal flux divergence of mse.'),
variables=(umse,),
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_zonal_flux_divg_im,
units=units.W
)
dmv_dx_v_im = Var(
name='dmv_dx_v_im',
domain='atmos',
description=('Vertical integral of zonal flux divergence of mse.'),
variables=(umse_vint,),
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_zonal_flux_divg_v_im,
units=units.W
)
dmv_dy_v_im = Var(
name='dmv_dy_v_im',
domain='atmos',
description=('Vertical integral of meridional flux divergence of mse.'),
variables=(vmse_vint,),
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_merid_flux_divg_v_im,
units=units.W
)
dmv_dy_im = Var(
name='dmv_dy_im',
domain='atmos',
description=('Meridional flux divergence of mse.'),
variables=(vmse,),
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_merid_flux_divg_im,
units=units.W
)
|
jabbalaci/PrimCom
|
data/python/my_pickle.py
|
Python
|
gpl-2.0
| 793
| 0.012642
|
try:
import cPickle as pickle # faster
except:
import pickle
data1 = [ { 'a':'one', 'b':2, 'c':3.0 } ]
print 'DATA: ',
print(data1)
data1_string = pickle.dumps(data1)  # here: pickling
print 'PICKLE:', data1_string
data2 = pickle.loads(data1_string) # here: unpickling
print 'UNPICKLED:',
print(data2)
print 'SAME?:', (data1 is data2)
print 'EQUAL?:', (data1 == data2)
# * By default, the pickled byte stream contains ASCII characters only.
# * The pickle format is specific to Python.
# * Never unpickle data received from an untrusted or unauthenticated source.
# * Only the data for the instance is pickled, not the class definition, thus
# when you want to unpickle instances of a class, don’t forget to import
# the definition of this class!
|
taiwenko/python
|
acs/acs_cold_start.py
|
Python
|
mit
| 7,587
| 0.019639
|
#!/usr/bin/env python
from time import sleep
import twk_utils
import math
import sys
import xpf6020
import tools.utils as tools
import watlowf4
from tools import shell
from blessings import Terminal
t = Terminal()
franz_num = raw_input('How many Franz are you testing? [1,2,3,or 4]: ').strip()
cycle_num = raw_input('How many temp cycles would you like to run?: ').strip()
utils = twk_utils.Twk_utils()
print "Accessing the XPF6020 Power Supplies"
ps1_path = '/dev/serial/by-id/usb-Prolific_Technology_Inc._USB-Serial_Controller_D-if00-port0'
ps2_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A703PO3I-if00-port0'
pfc1_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1a-if01-port0'
pfc2_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2a-if01-port0'
pfc3_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1b-if01-port0'
pfc4_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2b-if01-port0'
print "Accessing the Temperature Chamber"
tchamber_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A603R0MG-if00-port0'
chamber = watlowf4.WatlowF4(tchamber_path)
chamber.conditioning_on(True)
def ps_measure_check(ch, current_min, current_max, voltage_min, voltage_max, tolerance, max_cycle):
cycle = 0
avg_volt = 0
avg_current = 0
while cycle != max_cycle:
if ch == '1':
[r_mppt_v, r_mppt_i] = ps1.measure('1')
elif ch == '2':
[r_mppt_v, r_mppt_i] = ps1.measure('2')
elif ch == '3':
[r_mppt_v, r_mppt_i] = ps2.measure('1')
elif ch == '4':
[r_mppt_v, r_mppt_i] = ps2.measure('2')
else:
print 'Unknown Input Channel'
volt = float(r_mppt_v.split("V")[0])
curr = float(r_mppt_i.split("A")[0])
avg_volt = avg_volt + volt
avg_current = avg_current + curr
cycle = cycle + 1
sleep(1)
r_mppt_v = avg_volt / cycle;
r_mppt_i = avg_current / cycle;
if float(r_mppt_i) > float(current_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_i) < float(current_min):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) > float(voltage_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) < float(voltage_min):
result = t.bold_red('FAILED')
result_count = 1
else:
result = t.bold_green('PASSED')
        result_count = 0
print 'Franz CH%s @ %sV, %sA....[%s]' %(ch, r_mppt_v, r_mppt_i, result)
print ''
return result_count
def config_acs(pfc_path):
sleep(5)
    tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power on acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
def clean_acs(pfc_path):
sleep(5)
tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power off acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
# Test starts here
offtime = 1 #15 #mins
offtime_sec = offtime * 60
run_count = 0
max_run_count = cycle_num
ch1result = 0
ch2result = 0
ch3result = 0
ch4result = 0
ts = utils.get_timestamp()
print '*** Franz test started @ %s***' % ts
batt_vin = 48
batt_iin = 20
ps1 = xpf6020.Xpf6020(ps1_path)
ps1.reset_ps()
ps2 = xpf6020.Xpf6020(ps2_path)
ps2.reset_ps()
ps1.set_voltage(1, batt_vin)
ps1.set_currentlimit(1, batt_iin)
if franz_num == '2':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
elif franz_num == '3':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
elif franz_num == '4':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
ps2.set_voltage(2,batt_vin)
ps2.set_currentlimit(2,batt_iin)
else:
if franz_num != '1':
print 'Unknown franz amount. Can only test up to 4 franz at a time.'
sys.exit()
# Setup chamber
cold_temp = 20 #-60
soak_time = 1 #45 # min
chamber.ramp_down(cold_temp)
chamber.soak_time(soak_time)
while True:
# Turn on power supplies
ps1.ind_output('1','on')
if franz_num == '2':
ps1.ind_output('2','on')
elif franz_num == '3':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
elif franz_num == '4':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
ps2.ind_output('2','on')
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn on ACS using PFC
config_acs(pfc1_path)
if franz_num == '2':
config_acs(pfc2_path)
elif franz_num == '3':
config_acs(pfc2_path)
config_acs(pfc3_path)
elif franz_num == '4':
config_acs(pfc2_path)
config_acs(pfc3_path)
config_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Measure current draw from PS
measurement_count = 5
print 'Averaging %d measurement...' % measurement_count
current = 0.12
voltage = 48
tolerance = 0.05
current_max = float(current) * (1 + tolerance)
current_min = float(current) * (1 - tolerance)
voltage_max = float(voltage) * (1 + tolerance)
voltage_min = float(voltage) * (1 - tolerance)
print 'Voltage Limits should be within %f to %fV' %(voltage_min, voltage_max)
print 'Current Limits should be within %f to %fA' %(current_min, current_max)
print ''
rc1 = ps_measure_check('1', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch1result = ch1result + rc1
if franz_num == '2':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
elif franz_num == '3':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
elif franz_num == '4':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
rc4 = ps_measure_check('4', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch4result = ch4result + rc4
else:
if franz_num != '1':
print 'Unknown franz amount.'
# Turn off ACS using PFC
clean_acs(pfc1_path)
if franz_num == '2':
clean_acs(pfc2_path)
elif franz_num == '3':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
elif franz_num == '4':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
clean_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn off power supplies
ps1.all_output('off')
ps2.all_output('off')
run_count = run_count + 1
if run_count == int(max_run_count):
break;
ts = utils.get_timestamp()
print 'Off for %s min started @ %s' % (offtime, ts)
sleep(offtime_sec)
hot_temp = 24
print 'Ramping up to 24C'
chamber.ramp_up(hot_temp)
ts = utils.get_timestamp()
msg = '*** ACS test completed @ %s***' % ts
msg = msg + ', CH1 failed %s out of %s cycles' % (ch1result, max_run_count)
msg = msg + ', CH2 failed %s out of %s cycles' % (ch2result, max_run_count)
msg = msg + ', CH3 failed %s out of %s cycles' % (ch3result, max_run_count)
msg = msg + ', CH4 failed %s out of %s cycles' % (ch4result, max_run_count)
print msg
utils.send_email('ACS Cold-Start', msg)
|
DarkSand/Sasila
|
sasila/system_normal/processor/car_processor.py
|
Python
|
apache-2.0
| 5,995
| 0.00184
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from bs4 import BeautifulSoup as bs
from sasila.system_normal.spider.spider_core import SpiderCore
from sasila.system_normal.pipeline.console_pipeline import ConsolePipeline
from sasila.system_normal.processor.base_processor import BaseProcessor
from sasila.system_normal.downloader.http.spider_request import Request
from sasila.system_normal.utils.decorator import checkResponse
import json
import time
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
class Car_Processor(BaseProcessor):
spider_id = 'car_spider'
spider_name = 'car_spider'
allowed_domains = ['che168.com']
start_requests = [Request(url='http://www.che168.com', priority=0)]
@checkResponse
def process(self, response):
soup = bs(response.m_response.content, 'lxml')
province_div_list = soup.select('div.city-list div.cap-city > div.fn-clear')
for province_div in province_div_list:
province_name = province_div.select('span.capital a')[0].text
city_list = province_div.select('div.city a')
for city in city_list:
city_name = city.text
pinyin = city['href'].strip('/').split('/')[0]
request = Request(
                    url='http://www.che168.com/handler/usedcarlistv5.ashx?action=brandlist&area=%s' % pinyin,
priority=1, callback=self.process_page_1)
request.meta['province'] = province_name
request.meta['city'] = city_name
yield request
@checkResponse
def process_page_1(self, response):
brand_list = list(json.loads(response.m_response.content.decode('gb2312')))
for brand in brand_list:
brand_dict = dict(brand)
            brand_name = brand_dict['name']
url = response.nice_join(brand_dict['url']) + '/'
request = Request(url=url, priority=2, callback=self.process_page_2)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = brand_name
yield request
@checkResponse
def process_page_2(self, response):
soup = bs(response.m_response.content, 'lxml')
cars_line_list = soup.select('div#series div.content-area dl.model-list dd a')
for cars_line in cars_line_list:
cars_line_name = cars_line.text
url = 'http://www.che168.com' + cars_line['href']
request = Request(url=url, priority=3, callback=self.process_page_3)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = cars_line_name
yield request
@checkResponse
def process_page_3(self, response):
soup = bs(response.m_response.content, 'lxml')
car_info_list = soup.select('div#a2 ul#viewlist_ul li a.carinfo')
for car_info in car_info_list:
url = 'http://www.che168.com' + car_info['href']
request = Request(url=url, priority=4, callback=self.process_page_4)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = response.request.meta['cars_line']
yield request
next_page = soup.find(lambda tag: tag.name == 'a' and '下一页' in tag.text)
if next_page:
url = 'http://www.che168.com' + next_page['href']
request = Request(url=url, priority=3, callback=self.process_page_3)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = response.request.meta['cars_line']
yield request
@checkResponse
def process_page_4(self, response):
soup = bs(response.m_response.content, 'lxml')
# <html><head><title>Object moved</title></head><body>
# <h2>Object moved to <a href="/CarDetail/wrong.aspx?errorcode=5&backurl=/&infoid=21415515">here</a>.</h2>
# </body></html>
if len(soup.select('div.car-title h2')) != 0:
car = soup.select('div.car-title h2')[0].text
detail_list = soup.select('div.details li')
if len(detail_list) == 0:
soup = bs(response.m_response.content, 'html5lib')
detail_list = soup.select('div.details li')
mileage = detail_list[0].select('span')[0].text.replace('万公里', '')
first_borad_date = detail_list[1].select('span')[0].text
gear = detail_list[2].select('span')[0].text.split('/')[0]
displacement = detail_list[2].select('span')[0].text.split('/')[1]
price = soup.select('div.car-price ins')[0].text.replace('¥', '')
crawl_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
item = dict()
item['car'] = car
item['mileage'] = mileage
item['first_borad_date'] = first_borad_date
item['gear'] = gear
item['displacement'] = displacement
item['price'] = price
item['crawl_date'] = crawl_date
item['province'] = response.request.meta['province']
item['city'] = response.request.meta['city']
item['brand'] = response.request.meta['brand']
item['cars_line'] = response.request.meta['cars_line']
yield item
if __name__ == '__main__':
SpiderCore(Car_Processor(), test=True).set_pipeline(ConsolePipeline()).start()
|
LePastis/pyload
|
module/web/cnl_app.py
|
Python
|
gpl-3.0
| 4,421
| 0.005202
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join
import re
from urllib import unquote
from base64 import standard_b64decode
from binascii import unhexlify
from bottle import route, request, HTTPError
from webinterface import PYLOAD, DL_ROOT, JS
try:
from Crypto.Cipher import AES
except:
pass
def local_check(function):
def _view(*args, **kwargs):
if request.environ.get('REMOTE_ADDR', "0") in ('127.0.0.1', 'localhost') \
or request.environ.get('HTTP_HOST','0') == '127.0.0.1:9666':
return function(*args, **kwargs)
else:
return HTTPError(403, "Forbidden")
return _view
@route("/flash")
@route("/flash/:id")
@route("/flash", method="POST")
@local_check
def flash(id="0"):
return "JDownloader\r\n"
@route("/flash/add", method="POST")
@local_check
def add(request):
    package = request.POST.get('referer', None)
    urls = filter(lambda x: x != "", request.POST['urls'].split("\n"))
if package:
PYLOAD.addPackage(package, urls, 0)
else:
PYLOAD.generateAndAddPackages(urls, 0)
return ""
@route("/flash/addcrypted", method="POST")
@local_check
def addcrypted():
package = request.forms.get('referer', 'ClickAndLoad Package')
dlc = request.forms['crypted'].replace(" ", "+")
dlc_path = join(DL_ROOT, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc")
dlc_file = open(dlc_path, "wb")
dlc_file.write(dlc)
dlc_file.close()
try:
PYLOAD.addPackage(package, [dlc_path], 0)
except:
return HTTPError()
else:
return "success\r\n"
@route("/flash/addcrypted2", method="POST")
@local_check
def addcrypted2():
package = request.forms.get("source", None)
crypted = request.forms["crypted"]
jk = request.forms["jk"]
crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
if JS:
jk = "%s f()" % jk
jk = JS.eval(jk)
else:
try:
jk = re.findall(r"return ('|\")(.+)('|\")", jk)[0][1]
except:
## Test for some known js functions to decode
if jk.find("dec") > -1 and jk.find("org") > -1:
org = re.findall(r"var org = ('|\")([^\"']+)", jk)[0][1]
jk = list(org)
jk.reverse()
jk = "".join(jk)
else:
print "Could not decrypt key, please install py-spidermonkey or ossp-js"
try:
Key = unhexlify(jk)
except:
print "Could not decrypt key, please install py-spidermonkey or ossp-js"
return "failed"
IV = Key
obj = AES.new(Key, AES.MODE_CBC, IV)
result = obj.decrypt(crypted).replace("\x00", "").replace("\r","").split("\n")
result = filter(lambda x: x != "", result)
try:
if package:
PYLOAD.addPackage(package, result, 0)
else:
PYLOAD.generateAndAddPackages(result, 0)
except:
return "failed can't add"
else:
return "success\r\n"
@route("/flashgot_pyload")
@route("/flashgot_pyload", method="POST")
@route("/flashgot")
@route("/flashgot", method="POST")
@local_check
def flashgot():
if request.environ['HTTP_REFERER'] != "http://localhost:9666/flashgot" and request.environ['HTTP_REFERER'] != "http://127.0.0.1:9666/flashgot":
return HTTPError()
autostart = int(request.forms.get('autostart', 0))
package = request.forms.get('package', None)
urls = filter(lambda x: x != "", request.forms['urls'].split("\n"))
folder = request.forms.get('dir', None)
if package:
PYLOAD.addPackage(package, urls, autostart)
else:
PYLOAD.generateAndAddPackages(urls, autostart)
return ""
@route("/crossdomain.xml")
@local_check
def crossdomain():
rep = "<?xml version=\"1.0\"?>\n"
rep += "<!DOCTYPE cross-domain-policy SYSTEM \"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\">\n"
rep += "<cross-domain-policy>\n"
rep += "<allow-access-from domain=\"*\" />\n"
rep += "</cross-domain-policy>"
return rep
@route("/flash/checkSupportForUrl")
@local_check
def checksupport():
url = request.GET.get("url")
res = PYLOAD.checkURLs([url])
supported = (not res[0][1] is None)
return str(supported).lower()
@route("/jdcheck.js")
@local_check
def jdcheck():
rep = "jdownloader=true;\n"
rep += "var version='9.581;'"
return rep
|
lxr0827/weChatOrder
|
weChatOrder/WeInterc/get_access_token.py
|
Python
|
gpl-2.0
| 1,465
| 0.007655
|
# -*- coding: utf-8 -*-
__author__ = 'lxr0827'
import pymysql,requests,json
import datetime
# Run this script on a schedule to fetch the access token and store it in the accesstoken module
#test
APPID = "wx243dd553e7ab9da7"
SECRET = "57f109fd1cce0913a76a1700f94c4e2d"
AccessTokenURL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=' + APPID + '&secret=' + SECRET
r = requests.get(AccessTokenURL)
if (r.status_code == requests.codes.ok): # @UndefinedVariable
res = json.loads(r.text)
if res.get('errcode') == None:
accessToken = res['access_token']
conn = pymysql.connect(host='localhost', user='root', passwd='5817802', db='wechatorderdb', port=3306, charset='utf8')
cur = conn.cursor()
nowTime = datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S")
count = cur.execute("select * from WeInterc_accesstoken limit 0,1")
if count == 0:
insertStr = "insert into WeInterc_accesstoken values(1,'%s','%s')" % (accessToken,nowTime)
print(insertStr)
cur.execute(insertStr)
conn.commit()
cur.close()
conn.close()
else:
result = cur.fetchone()
updateStr = "update WeInterc_accesstoken set accessToken = '%s',getTokenTime = '%s'where id = 1" % (accessToken, nowTime)
print(updateStr)
cur.execute(updateStr)
conn.commit()
cur.close()
conn.close()
|
nafitzgerald/allennlp
|
tests/modules/seq2vec_encoders/boe_encoder_test.py
|
Python
|
apache-2.0
| 3,106
| 0.002254
|
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfEmbeddingsEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=5)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
encoder = BagOfEmbeddingsEncoder(embedding_dim=12)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_can_construct_from_params(self):
params = Params({
'embedding_dim': 5,
})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
params = Params({
'embedding_dim': 12,
'averaged': True
})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_forward_does_correct_computation(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]], [[.5, .3], [1.4, 1.1], [.3, .9]]]))
mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0]]))
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[.7 + .1 + .3, .8 + 1.5 + .6], [.5 + 1.4, .3 + 1.1]]))
def test_forward_does_correct_computation_with_average(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]],
[[.5, .3], [1.4, 1.1], [.3, .9]],
[[.4, .3], [.4, .3], [1.4, 1.7]]]))
mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]]))
        encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[(.7 + .1 + .3)/3, (.8 + 1.5 + .6)/3],
[(.5 + 1.4)/2, (.3 + 1.1)/2],
[0., 0.]]))
def test_forward_does_correct_computation_with_average_no_mask(self):
        encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]], [[.5, .3], [1.4, 1.1], [.3, .9]]]))
encoder_output = encoder(input_tensor)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[(.7 + .1 + .3)/3, (.8 + 1.5 + .6)/3],
[(.5 + 1.4 + .3)/3, (.3 + 1.1 + .9)/3]]))
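# The two averaged tests above pin down the masked mean: padded positions add nothing to the
# sum and the divisor is the number of unmasked timesteps, with a fully masked row averaging
# to zero. A standalone sketch of that arithmetic follows, assuming a recent PyTorch where
# plain tensors replace Variable; this is not AllenNLP's implementation, only the computation
# the assertions check.
import torch
def masked_mean(embeddings, mask):
    # embeddings: (batch, timesteps, dim); mask: (batch, timesteps) of 0/1
    mask = mask.float().unsqueeze(-1)        # (batch, timesteps, 1)
    summed = (embeddings * mask).sum(dim=1)  # zero out padded positions, sum over time
    counts = mask.sum(dim=1).clamp(min=1.0)  # clamp so fully masked rows give 0, not NaN
    return summed / counts
x = torch.tensor([[[.7, .8], [.1, 1.5], [.3, .6]],
                  [[.5, .3], [1.4, 1.1], [.3, .9]]])
m = torch.tensor([[1., 1., 1.], [1., 1., 0.]])
print(masked_mean(x, m))  # [[0.3667, 0.9667], [0.9500, 0.7000]]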
|
pomegranited/edx-platform
|
common/djangoapps/third_party_auth/tests/specs/test_google.py
|
Python
|
agpl-3.0
| 6,324
| 0.003637
|
"""Integration tests for Google providers."""
import base64
import hashlib
import hmac
from django.conf import settings
from django.core.urlresolvers import reverse
import json
from mock import patch
from social.exceptions import AuthException
from student.tests.factories import UserFactory
from third_party_auth import pipeline
from third_party_auth.tests.specs import base
class GoogleOauth2IntegrationTest(base.Oauth2IntegrationTest):
"""Integration tests for provider.GoogleOauth2."""
def setUp(self):
super(GoogleOauth2IntegrationTest, self).setUp()
self.provider = self.configure_google_provider(
enabled=True,
key='google_oauth2_key',
secret='google_oauth2_secret',
)
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'expires_in': 'expires_in_value',
'id_token': 'id_token_value',
'token_type': 'token_type_value',
}
USER_RESPONSE_DATA = {
'email': 'email_value@example.com',
'family_name': 'family_name_value',
'given_name': 'given_name_value',
'id': 'id_value',
'link': 'link_value',
'locale': 'locale_value',
'name': 'name_value',
'picture': 'picture_value',
'verified_email': 'verified_email_value',
}
def get_username(self):
return self.get_response_data().get('email').split('@')[0]
def assert_redirect_to_provider_looks_correct(self, response):
super(GoogleOauth2IntegrationTest, self).assert_redirect_to_provider_looks_correct(response)
self.assertIn('google.com', response['Location'])
def test_custom_form(self):
"""
Use the Google provider to test the custom login/register form feature.
"""
# The pipeline starts by a user GETting /auth/login/google-oauth2/?auth_entry=custom1
# Synthesize that request and check that it redirects to the correct
# provider page.
auth_entry = 'custom1' # See definition in lms/envs/test.py
login_url = pipeline.get_login_url(self.provider.provider_id, auth_entry)
login_url += "&next=/misc/final-destination"
self.assert_redirect_to_provider_looks_correct(self.client.get(login_url))
def fake_auth_complete(inst, *args, **kwargs):
""" Mock the backend's auth_complete() method """
kwargs.update({'response': self.get_response_data(), 'backend': inst})
return inst.strategy.authenticate(*args, **kwargs)
# Next, the provider makes a request against /auth/complete/<provider>.
complete_url = pipeline.get_complete_url(self.provider.backend_name)
with patch.object(self.provider.backend_class, 'auth_complete', fake_auth_complete):
response = self.client.get(complete_url)
# This should redirect to the custom login/register form:
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/auth/custom_auth_entry')
response = self.client.get(response['Location'])
self.assertEqual(response.status_code, 200)
self.assertIn('action="/misc/my-custom-registration-form" method="post"', response.content)
data_decoded = base64.b64decode(response.context['data']) # pylint: disable=no-member
data_parsed = json.loads(data_decoded)
# The user's details get passed to the custom page as a base64 encoded query parameter:
self.assertEqual(data_parsed, {
'user_details': {
'username': 'email_value',
'email': 'email_value@example.com',
'fullname': 'name_value',
'first_name': 'given_name_value',
'last_name': 'family_name_value',
}
})
# Check the hash that is used to confirm the user's data in the GET parameter is correct
secret_key = settings.THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS['custom1']['secret_key']
hmac_expected = hmac.new(secret_key, msg=data_decoded, digestmod=hashlib.sha256).digest()
self.assertEqual(base64.b64decode(response.context['hmac']), hmac_expected) # pylint: disable=no-member
# Now our custom registration form creates or logs in the user:
email, password = data_parsed['user_details']['email'], 'random_password'
created_user = UserFactory(email=email, password=password)
login_response = self.client.post(reverse('login'), {'email': email, 'password': password})
self.assertEqual(login_response.status_code, 200)
# Now our custom login/registration page must resume the pipeline:
response = self.client.get(complete_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/misc/final-destination')
_, strategy = self.get_request_and_strategy()
self.assert_social_auth_exists_for_user(created_user, strategy)
def test_custom_form_error(self):
"""
Use the Google provider to test the custom login/register failure redirects.
"""
# The pipeline starts by a user GETting /auth/login/google-oauth2/?auth_entry=custom1
# Synthesize that request and check that it redirects to the correct
# provider page.
auth_entry = 'custom1' # See definition in lms/envs/test.py
login_url = pipeline.get_login_url(self.provider.provider_id, auth_entry)
login_url += "&next=/misc/final-destination"
        self.assert_redirect_to_provider_looks_correct(self.client.get(login_url))
def fake_auth_complete_error(_inst, *_args, **_kwargs):
""" Mock the backend's auth_complete() method """
raise AuthException("Mock login failed")
# Next, the provider makes a request against /auth/complete/<provider>.
complete_url = pipeline.get_complete_url(self.provider.backend_name)
with patch.object(self.provider.backend_class, 'auth_complete', fake_auth_complete_error):
response = self.client.get(complete_url)
# This should redirect to the custom error URL
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/misc/my-custom-sso-error-page')
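# test_custom_form above verifies an HMAC-SHA256 over the base64-encoded user details. The
# snippet below is a self-contained sketch of producing and checking that kind of signature;
# the secret and payload here are made up and are not the edx-platform settings.
import base64
import hashlib
import hmac
import json
secret_key = b'not-the-real-secret'  # assumption: any shared secret
payload = base64.b64encode(json.dumps({'email': 'email_value@example.com'}).encode())
digest = hmac.new(secret_key, msg=payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
# The receiving side recomputes the digest and compares in constant time.
expected = hmac.new(secret_key, msg=payload, digestmod=hashlib.sha256).digest()
assert hmac.compare_digest(base64.b64decode(signature), expected)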
|
us-ignite/us_ignite
|
us_ignite/hubs/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 25,457
| 0.007621
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("apps", "0001_initial"),
("organizations", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'HubRequest'
db.create_table(u'hubs_hubrequest', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'], null=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubRequest'])
# Adding model 'NetworkSpeed'
db.create_table(u'hubs_networkspeed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, unique=True, populate_from='name', overwrite=False)),
))
db.send_create_signal(u'hubs', ['NetworkSpeed'])
# Adding model 'Hub'
db.create_table(u'hubs_hub', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, unique=True, populate_from='name', overwrite=False)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('connections', self.gf('django.db.models.fields.TextField')(blank=True)),
('contact', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organizations.Organization'], null=True, on_delete=models.SET_NULL, blank=True)),
('network_speed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.NetworkSpeed'], null=True, on_delete=models.SET_NULL, blank=True)),
('is_advanced', self.gf('django.db.models.fields.BooleanField')(default=False)),
('experimentation', self.gf('django.db.models.fields.IntegerField')(default=2)),
('estimated_passes', self.gf('django.db.models.fields.TextField')(blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=500, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('position', self.gf('geoposition.fields.GeopositionField')(default='0,0', max_length=42, blank=True)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
('is_featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['Hub'])
# Adding M2M table for field applications on 'Hub'
m2m_table_name = db.shorten_name(u'hubs_hub_applications')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('hub', models.ForeignKey(orm[u'hubs.hub'], null=False)),
('application', models.ForeignKey(orm[u'apps.application'], null=False))
))
db.create_unique(m2m_table_name, ['hub_id', 'application_id'])
# Adding M2M table for field features on 'Hub'
m2m_table_name = db.shorten_name(u'hubs_hub_features')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('hub', models.ForeignKey(orm[u'hubs.hub'], null=False)),
('feature', models.ForeignKey(orm[u'apps.feature'], null=False))
))
db.create_unique(m2m_table_name, ['hub_id', 'feature_id'])
# Adding model 'HubActivity'
db.create_table(u'hubs_hubactivity', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubActivity'])
# Adding model 'HubMembership'
db.create_table(u'hubs_hubmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubMembership'])
# Adding model 'HubAppMembership'
db.create_table(u'hubs_hubappmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['apps.Application'])),
('is_featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubAppMembership'])
def backwards(self, orm):
# Deleting model 'HubRequest'
db.delete_table(u'hubs_hubrequest')
# Deleting model 'NetworkSpeed'
db.delete_table(u'hubs_networkspeed')
# Deleting model 'Hub'
db.delete_table(u'hubs_hub')
# Removing M2M table for field applications on 'Hub'
db.delete_table(db.shorten_name(u'hubs_hub_applications'))
# Removing M2M table for field features on 'Hub'
db.delete_table(db.shorten_name(u'hubs_hub_features'))
# Deleting model 'HubActivity'
db.delete_table(u'hubs_hubactivity')
# Deleting model 'HubMembership'
|
overfl0/Bulletproof-Arma-Launcher
|
dependencies/libtorrent/setup.py
|
Python
|
gpl-3.0
| 303
| 0.016502
|
from distutils.core import setup
# Dummy setup.py to install libtorrent for python 2.7 using pip
setup(
name='libtorrent',
version='1.0.9',
packages=['libtorrent',],
data_files=[('Lib', ['libtorrent/libtorrent.pyd']),],
)
# Install in "editable mode" for development:
# pip install -e .
|
dustincys/rednotebook
|
rednotebook/util/statistics.py
|
Python
|
gpl-2.0
| 3,670
| 0.000545
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
from __future__ import division
class Statistics(object):
def __init__(self, journal):
self.journal = journal
def get_number_of_words(self):
number_of_words = 0
for day in self.days:
number_of_words += day.get_number_of_words()
return number_of_words
def get_number_of_distinct_words(self):
return len(self.journal.get_word_count_dict())
def get_number_of_chars(self):
number_of_chars = 0
for day in self.days:
number_of_chars += len(day.text)
return number_of_chars
def get_number_of_usage_days(self):
'''Returns the timespan between the first and last entry'''
sorted_days = self.days
if len(sorted_days) <= 1:
return len(sorted_days)
first_day = sorted_days[0]
last_day = sorted_days[-1]
timespan = last_day.date - first_day.date
return abs(timespan.days) + 1
def get_number_of_entries(self):
return len(self.days)
def get_edit_percentage(self):
total = self.get_number_of_usage_days()
edited = self.get_number_of_entries()
if total == 0:
return 0
percent = round(100 * edited / total, 2)
return '%s%%' % percent
def get_average_number_of_words(self):
if self.get_number_of_entries() == 0:
return 0
return round(self.get_number_of_words() / self.get_number_of_entries(), 2)
@property
def overall_pairs(self):
return [
[_('Words'), self.get_number_of_words()],
[_('Distinct Words'), self.get_number_of_distinct_words()],
[_('Edited Days'), self.get_number_of_entries()],
[_('Letters'), self.get_number_of_chars()],
[_('Days between first and last Entry'), self.get_number_of_usage_days()],
[_('Average number of Words'), self.get_average_number_of_words()],
[_('Percentage of edited Days'), self.get_edit_percentage()],
]
@property
def day_pairs(self):
        day = self.journal.day
return [
[_('Words'), day.get_number_of_words()],
[_('Lines'), len(day.text.splitlines())],
[_('Letters'), len(day.text)],
]
def show_dialog(self, dialog):
self.journal.save_old_day()
        self.days = self.journal.days
day_store = dialog.day_list.get_model()
day_store.clear()
for pair in self.day_pairs:
day_store.append(pair)
overall_store = dialog.overall_list.get_model()
overall_store.clear()
for pair in self.overall_pairs:
overall_store.append(pair)
dialog.show_all()
dialog.run()
dialog.hide()
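# For reference, get_number_of_usage_days and get_edit_percentage above reduce to a small
# piece of date arithmetic: the inclusive span between first and last entry, divided into the
# number of edited days. A quick sketch with a made-up stand-in for RedNotebook's day objects:
# FakeDay is a hypothetical stub; only a .date attribute is needed here.
import datetime
class FakeDay(object):
    def __init__(self, date):
        self.date = date
days = [FakeDay(datetime.date(2024, 1, 1)),
        FakeDay(datetime.date(2024, 1, 4)),
        FakeDay(datetime.date(2024, 1, 10))]
usage_days = abs((days[-1].date - days[0].date).days) + 1  # 10: inclusive span
edited = len(days)                                         # 3 edited days
print('%s%%' % round(100.0 * edited / usage_days, 2))      # 30.0%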
|