repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
PetePriority/home-assistant | homeassistant/components/smartthings/__init__.py | Python | apache-2.0 | 8,575 | 0 | """SmartThings Cloud integration for Home Assistant."""
import asyncio
import logging
from typing import Iterable
from aiohttp.client_exceptions import (
ClientConnectionError, ClientResponseError)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .config_flow import SmartThingsFlowHandler # noqa
from .const import (
CONF_APP_ID, CONF_INSTALLED_APP_ID, DATA_BROKERS, DATA_MANAGER, DOMAIN,
EVENT_BUTTON, SIGNAL_SMARTTHINGS_UPDATE, SUPPORTED_PLATFORMS)
from .smartapp import (
setup_smartapp, setup_smartapp_endpoint, validate_installed_app)
REQUIREMENTS = ['pysmartapp==0.3.0', 'pysmartthings==0.6.0']
DEPENDENCIES = ['webhook']
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the SmartThings component from YAML configuration.

    Only registers the SmartApp webhook endpoint; the per-installation
    setup happens in ``async_setup_entry`` for each config entry.
    """
    await setup_smartapp_endpoint(hass)
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Initialize config entry which represents an installed SmartApp."""
    from pysmartthings import SmartThings

    # SmartThings delivers events over a webhook, which must be reachable
    # via HTTPS; refuse to set up otherwise.
    if not hass.config.api.base_url.lower().startswith('https://'):
        _LOGGER.warning("The 'base_url' of the 'http' component must be "
                        "configured and start with 'https://'")
        return False

    api = SmartThings(async_get_clientsession(hass),
                      entry.data[CONF_ACCESS_TOKEN])

    remove_entry = False
    try:
        # See if the app is already setup. This occurs when there are
        # installs in multiple SmartThings locations (valid use-case)
        manager = hass.data[DOMAIN][DATA_MANAGER]
        smart_app = manager.smartapps.get(entry.data[CONF_APP_ID])
        if not smart_app:
            # Validate and setup the app.
            app = await api.app(entry.data[CONF_APP_ID])
            smart_app = setup_smartapp(hass, app)
        # Validate and retrieve the installed app.
        installed_app = await validate_installed_app(
            api, entry.data[CONF_INSTALLED_APP_ID])
        # Get devices and their current status
        devices = await api.devices(
            location_ids=[installed_app.location_id])

        async def retrieve_device_status(device):
            # Devices whose status cannot be read are dropped from the
            # list rather than failing the whole entry setup.
            try:
                await device.status.refresh()
            except ClientResponseError:
                _LOGGER.debug("Unable to update status for device: %s (%s), "
                              "the device will be ignored",
                              device.label, device.device_id, exc_info=True)
                devices.remove(device)

        # Refresh all device statuses concurrently; iterate a copy because
        # the helper above may remove entries from ``devices``.
        await asyncio.gather(*[retrieve_device_status(d)
                               for d in devices.copy()])

        # Setup device broker
        broker = DeviceBroker(hass, devices,
                              installed_app.installed_app_id)
        broker.event_handler_disconnect = \
            smart_app.connect_event(broker.event_handler)
        hass.data[DOMAIN][DATA_BROKERS][entry.entry_id] = broker
    except ClientResponseError as ex:
        if ex.status in (401, 403):
            # Credentials are no longer valid: the entry cannot recover,
            # so it is removed below and the user must reconfigure.
            _LOGGER.exception("Unable to setup config entry '%s' - please "
                              "reconfigure the integration", entry.title)
            remove_entry = True
        else:
            _LOGGER.debug(ex, exc_info=True)
            raise ConfigEntryNotReady
    except (ClientConnectionError, RuntimeWarning) as ex:
        # Transient connectivity problem: ask Home Assistant to retry later.
        _LOGGER.debug(ex, exc_info=True)
        raise ConfigEntryNotReady

    if remove_entry:
        hass.async_create_task(
            hass.config_entries.async_remove(entry.entry_id))
        # only create new flow if there isn't a pending one for SmartThings.
        flows = hass.config_entries.flow.async_progress()
        if not [flow for flow in flows if flow['handler'] == DOMAIN]:
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN, context={'source': 'import'}))
        return False

    # Forward the entry to every supported platform so entities get created.
    for component in SUPPORTED_PLATFORMS:
        hass.async_create_task(hass.config_entries.async_forward_entry_setup(
            entry, component))
    return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Unload a config entry and disconnect its device broker."""
    brokers = hass.data[DOMAIN][DATA_BROKERS]
    broker = brokers.pop(entry.entry_id, None)
    if broker and broker.event_handler_disconnect:
        # Stop receiving SmartApp events for this installation.
        broker.event_handler_disconnect()
    # Unload every platform concurrently; the entry unloads cleanly only
    # if all platforms do.
    results = await asyncio.gather(
        *[hass.config_entries.async_forward_entry_unload(entry, component)
          for component in SUPPORTED_PLATFORMS])
    return all(results)
class DeviceBroker:
    """Manages an individual SmartThings config entry."""

    def __init__(self, hass: HomeAssistantType, devices: Iterable,
                 installed_app_id: str):
        """Create a new instance of the DeviceBroker."""
        self._hass = hass
        self._installed_app_id = installed_app_id
        # Index devices by SmartThings device id for O(1) lookup when
        # events arrive.
        self.devices = {device.device_id: device for device in devices}
        self.event_handler_disconnect = None

    async def event_handler(self, req, resp, app):
        """Broker for incoming events."""
        from pysmartapp.event import EVENT_TYPE_DEVICE
        from pysmartthings import Capability, Attribute

        # Do not process events received from a different installed app
        # under the same parent SmartApp (valid use-scenario)
        if req.installed_app_id != self._installed_app_id:
            return

        updated_devices = set()
        for event in req.events:
            if event.event_type != EVENT_TYPE_DEVICE:
                continue
            device = self.devices.get(event.device_id)
            if not device:
                continue
            device.status.apply_attribute_update(
                event.component_id, event.capability, event.attribute,
                event.value)

            # Fire events for buttons
            pressed_button = (event.capability == Capability.button
                              and event.attribute == Attribute.button)
            if pressed_button:
                data = {
                    'component_id': event.component_id,
                    'device_id': event.device_id,
                    'location_id': event.location_id,
                    'value': event.value,
                    'name': device.label
                }
                self._hass.bus.async_fire(EVENT_BUTTON, data)
                _LOGGER.debug("Fired button event: %s", data)
            updated_devices.add(device.device_id)

        _LOGGER.debug("Update received with %s events and updated %s devices",
                      len(req.events), len(updated_devices))
        # Notify entities so they refresh their state.
        async_dispatcher_send(self._hass, SIGNAL_SMARTTHINGS_UPDATE,
                              updated_devices)
class SmartThingsEntity(Entity):
"""Defines a SmartThings entity."""
def __init__(self, device):
"""Initialize the instance."""
self._device = device
self._dispatcher_remove = None
async def async_added_to_hass(self):
"""Device added to hass."""
async def async_update_state(devices):
"""Update device state."""
if self._device.device_id in devices:
await self.async_update_ha_state(True)
self._dispatcher_remove = async_dispatcher_connect(
self.hass, SIGNAL_SMARTTHINGS_UPDATE, async_update_state)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect the device when removed."""
if self._dispatcher_remove:
self._dispatcher_remove()
@property
def device_info(self):
"""Get attributes about the device."""
return {
'identifiers': {
(DOMAIN, self._device.device_id)
},
'name': self._device.label,
'model': self._device.device_type_name,
'manu |
Chilledheart/chromium | tools/telemetry/telemetry/internal/backends/chrome/ios_browser_backend.py | Python | bsd-3-clause | 4,612 | 0.00954 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import re
import urllib2
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.backends.chrome import system_info_backend
class IosBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """Telemetry backend for a browser running on iOS.

  Talks to ios_webkit_debug_proxy (HTTP on localhost:9221) to discover
  devices and their running UIWebViews; tab control and extensions are
  not supported on this platform.
  """

  _DEBUGGER_URL_BUILDER = 'ws://localhost:%i/devtools/page/%i'
  _DEBUGGER_URL_REGEX = r'ws://localhost:(\d+)/devtools/page/(\d+)'
  # Device-list endpoint exposed by ios_webkit_debug_proxy.
  _DEVICE_LIST_URL = 'http://localhost:9221/json'

  def __init__(self, ios_platform_backend, browser_options):
    super(IosBrowserBackend, self).__init__(
        ios_platform_backend,
        supports_tab_control=False,
        supports_extensions=False,
        browser_options=browser_options,
        output_profile_path=".",
        extensions_to_load=None)
    # Each entry is a [port, page_id] pair parsed from a debugger URL.
    self._webviews = []
    self._port = None
    self._page = None
    self._system_info_backend = None
    self.UpdateRunningBrowsersInfo()

  def UpdateRunningBrowsersInfo(self):
    """ Refresh to match current state of the running browser.
    """
    device_urls = self.GetDeviceUrls()
    urls = self.GetWebSocketDebuggerUrls(device_urls)
    for url in urls:
      m = re.match(self._DEBUGGER_URL_REGEX, url)
      if m:
        self._webviews.append([int(m.group(1)), int(m.group(2))])
      else:
        logging.error('Unexpected url format: %s' % url)
    # TODO(baxley): For now, grab first item from |_webviews|. Ideally, we'd
    # prefer to have the currently displayed tab, or something similar.
    if self._webviews:
      self._port = self._webviews[0][0]
      self._page = self._webviews[0][1]

  def GetDeviceUrls(self):
    """Return the parsed device list from the proxy, or [] on any error."""
    device_urls = []
    try:
      with contextlib.closing(
          urllib2.urlopen(self._DEVICE_LIST_URL)) as device_list:
        json_urls = device_list.read()
        device_urls = json.loads(json_urls)
        if not device_urls:
          logging.debug('No iOS devices found. Will not try searching for iOS '
                        'browsers.')
          return []
    except urllib2.URLError as e:
      logging.debug('Error communicating with iOS device.')
      logging.debug(str(e))
      return []
    return device_urls

  def GetWebSocketDebuggerUrls(self, device_urls):
    """ Get a list of the websocket debugger URLs to communicate with
    all running UIWebViews.
    """
    data = []
    # Loop through all devices.
    # NOTE(review): `data` is reassigned on every iteration, so only the
    # last device's pages survive the loop — looks like a latent
    # multi-device bug; confirm intended behavior.
    for d in device_urls:
      def GetData():
        try:
          with contextlib.closing(
              # pylint: disable=cell-var-from-loop
              urllib2.urlopen('http://%s/json' % d['url'])) as f:
            json_result = f.read()
            data = json.loads(json_result)
            return data
        except urllib2.URLError as e:
          logging.debug('Error communicating with iOS device.')
          logging.debug(e)
          return False
      try:
        # Retry a few times since it can take a few seconds for this API to be
        # ready, if ios_webkit_debug_proxy is just launched.
        data = util.WaitFor(GetData, 5)
      except exceptions.TimeoutException as e:
        logging.debug('Timeout retrieving data from iOS device')
        logging.debug(e)
        return []
    # Find all running UIWebViews.
    debug_urls = []
    for j in data:
      debug_urls.append(j['webSocketDebuggerUrl'])
    return debug_urls

  def GetSystemInfo(self):
    # Lazily bind the system-info backend to the first webview found.
    if self._system_info_backend is None:
      self._system_info_backend = system_info_backend.SystemInfoBackend(
          self._port, self._page)
    return self._system_info_backend.GetSystemInfo()

  def IsBrowserRunning(self):
    # The browser counts as running when at least one webview was found.
    return bool(self._webviews)

  #TODO(baxley): The following were stubbed out to get the sunspider benchmark
  # running. These should be implemented.
  @property
  def browser_directory(self):
    logging.warn('Not implemented')
    return None

  @property
  def profile_directory(self):
    logging.warn('Not implemented')
    return None

  def Start(self):
    logging.warn('Not implemented')

  def extension_backend(self):
    logging.warn('Not implemented')
    return None

  def GetBrowserStartupArgs(self):
    logging.warn('Not implemented')
    return None

  def HasBrowserFinishedLaunching(self):
    logging.warn('Not implemented')
    return False

  def GetStandardOutput(self):
    raise NotImplementedError()

  def GetStackTrace(self):
    raise NotImplementedError()
|
brunosmmm/hdltools | hdltools/patterns/__init__.py | Python | mit | 2,709 | 0.000369 | """Signal pattern matching."""
import re
from typing import Union
class PatternError(Exception):
    """Raised when a signal pattern fails validation."""
class Pattern:
    """Signal pattern representation.

    A pattern is a bit string over '0', '1' and the don't-care characters
    'x'/'X'.  It may be given as ``str``, ``bytes`` or a non-negative
    ``int``; string patterns additionally accept a leading ``0b`` or a
    trailing ``h`` (hexadecimal, each digit expanding to four bits).
    """

    # \Z anchors the match: previously the regex was unanchored, so a
    # pattern with trailing garbage (e.g. "10zz") was silently accepted.
    PATTERN_REGEX = re.compile(r"[01xX]+\Z")
    PATTERN_REGEX_BYTES = re.compile(rb"[01xX]+\Z")

    def __init__(self, pattern: Union[str, bytes, int]):
        """Initialize.

        Raises:
            TypeError: pattern is not str, bytes or int.
            PatternError: pattern contains invalid characters or is a
                negative integer.
        """
        if isinstance(pattern, str):
            # tolerate some variations
            if pattern.endswith("h"):
                pattern = self.hex_to_bin(pattern)
            if pattern.startswith("0b"):
                pattern = pattern[2:]
            match = self.PATTERN_REGEX.match(pattern)
        elif isinstance(pattern, bytes):
            match = self.PATTERN_REGEX_BYTES.match(pattern)
        elif isinstance(pattern, int):
            # BUG FIX: this branch was unreachable (the type check raised
            # TypeError for ints first) and used bin(), whose '0b' prefix
            # is not a valid pattern character.
            if pattern < 0:
                raise PatternError("integer pattern must be non-negative")
            self._pattern = format(pattern, "b")
            return
        else:
            raise TypeError("pattern must be a string, bytes or int")
        if match is None:
            raise PatternError(f"pattern is invalid: {pattern}")
        self._pattern = pattern

    @property
    def pattern(self):
        """Get pattern."""
        return self._pattern

    def __repr__(self):
        """Get representation."""
        if isinstance(self._pattern, bytes):
            # BUG FIX: returning bytes from __repr__ raises TypeError.
            return self._pattern.decode("ascii")
        return self._pattern

    def __len__(self):
        """Get length (number of bit positions)."""
        return len(self._pattern)

    def match(self, value: Union[str, bytes]) -> bool:
        """Match against value.

        Shorter operand is zero-extended on the left so both align at the
        least-significant bit.  'x'/'X' positions in the pattern match any
        value bit.
        """
        if not isinstance(value, (str, bytes)):
            raise TypeError(
                f"value must be string or bytes, got {type(value)}"
            )
        if isinstance(value, bytes) != isinstance(self._pattern, bytes):
            raise TypeError("incompatible types for value and pattern")
        # Normalize to str so don't-care comparison and zero-extension work
        # for bytes input too (BUG FIX: iterating bytes yields ints, which
        # never compared equal to the 'x'/'X' strings, and the old
        # zero-extension concatenated str with bytes).
        pattern = self._pattern
        if isinstance(pattern, bytes):
            pattern = pattern.decode("ascii")
            value = value.decode("ascii")
        width = max(len(value), len(pattern))
        value = value.zfill(width)
        pattern = pattern.zfill(width)
        for value_bit, expected_bit in zip(value, pattern):
            if expected_bit in ("x", "X"):
                # don't care
                continue
            if expected_bit != value_bit:
                return False
        return True

    @staticmethod
    def hex_to_bin(hexstr):
        """Convert hex to binary including don't cares.

        Each hex digit expands to exactly four bits; an 'x'/'X' digit
        expands to four don't-care bits.  (BUG FIX: the old implementation
        used bin(), leaking '0b' prefixes and dropping leading zeros, and
        miscounted consecutive don't-care digits.)
        """
        if hexstr.endswith("h"):
            hexstr = hexstr[:-1]
        groups = []
        for digit in hexstr:
            if digit in ("x", "X"):
                groups.append("xxxx")
            else:
                try:
                    groups.append(format(int(digit, 16), "04b"))
                except ValueError:
                    raise PatternError(
                        f"invalid hex digit in pattern: {digit}") from None
        return "".join(groups)
|
FulcronZ/NyuziProcessor | tests/misc/perf_counters/runtest.py | Python | apache-2.0 | 1,011 | 0.007913 | #!/usr/bin/env python
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
sys.path.insert(0, '../..')
import test_harness
def perf_counters_test(name):
    """Compile and run the perf_counters program under Verilator.

    Args:
        name: test name supplied by the harness (unused here).

    Raises:
        test_harness.TestException: the program output did not contain
            'PASS'.
    """
    test_harness.compile_test('perf_counters.c')
    result = test_harness.run_verilator()
    # The test program prints PASS on success; use the membership idiom
    # instead of comparing find() against -1.
    if 'PASS' not in result:
        raise test_harness.TestException(
            'test program did not indicate pass\n' + result)
# Register the test under the name 'perf_counters', then run everything
# that has been registered with the harness.
test_harness.register_tests(perf_counters_test, ['perf_counters'])
test_harness.execute_tests()
|
ampproject/amp-github-apps | project-metrics/metrics_service/server.py | Python | apache-2.0 | 6,263 | 0.009261 | #!/usr/bin/env python
"""Entry point to run app.
Used to launch the REST and Cron servers.
"""
import logging
import io
import os
import flask
from flask_api import status
from google.cloud import storage
from typing import Text
# Raise the root logger level before importing project modules that create
# loggers at import time.
logging.getLogger().setLevel(logging.INFO)

from metrics import base
import env
import metric_plot
import scrapers

app = flask.Flask(__name__)

# Number of days of history rendered in the metric-history plots.
HISTORY_DAYS = 180
# Badge colors indexed by metric score value (used in metric_badge);
# presumably 0 is unknown/worst and 5 is best — confirm against the
# metric score enum.
BADGE_COLORS = [
    '#EEEEEE',
    'indianred',
    'orange',
    'yellow',
    'green',
    'forestgreen',
]
def _get_cloud_blob(filename: Text) -> storage.Blob:
    """Return a blob handle for ``filename`` in the configured bucket."""
    bucket = storage.Client().get_bucket(env.get('CLOUD_STORAGE_BUCKET'))
    return storage.Blob(filename, bucket)
def _save_to_cloud(data: bytes, filename: Text, content_type: Text):
    """Saves data to a Google Cloud Storage blob.

    Args:
      data: byte-string to store
      filename: key under which to store the file in the Cloud Storage bucket
      content_type: content type of the file
    """
    blob = _get_cloud_blob(filename)
    blob.upload_from_string(data, content_type=content_type)
def _get_from_cloud(filename: Text) -> bytes:
    """Download data from a Google Cloud Storage blob.

    Args:
      filename: key under which the file in the Cloud Storage bucket is stored

    Returns:
      The blob data as a byte-string.
    """
    blob = _get_cloud_blob(filename)
    return blob.download_as_string()
@app.route('/_cron/scrape/<scrape_target>')
def scrape_latest(scrape_target: Text):
    """Cron-only endpoint: scrape the latest data for ``scrape_target``."""
    # GAE adds 'X-Appengine-Cron' to scheduler requests and strips it from
    # all external traffic, so its presence authenticates the caller. See
    # https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
    if not flask.request.headers.get('X-Appengine-Cron'):
        return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
    scrapers.scrape(scrape_target)
    message = 'Successfully scraped latest %s.' % scrape_target
    return message, status.HTTP_200_OK
@app.route('/_cron/recompute/<metric_cls_name>')
def recompute(metric_cls_name: Text):
    """Cron-only endpoint: recompute one metric from scratch."""
    # GAE adds 'X-Appengine-Cron' to scheduler requests and strips it from
    # all external traffic, so its presence authenticates the caller. See
    # https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
    if not flask.request.headers.get('X-Appengine-Cron'):
        return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
    try:
        metric_class = base.Metric.get_metric(metric_cls_name)
    except KeyError:
        logging.error('No active metric found for %s.', metric_cls_name)
        return ('No active metric found for %s.' % metric_cls_name,
                status.HTTP_404_NOT_FOUND)
    logging.info('Recomputing %s.', metric_cls_name)
    metric_class().recompute()
    return 'Successfully recomputed %s.' % metric_cls_name, status.HTTP_200_OK
@app.route(
    '/_cron/plot_metric_history', defaults={'history_days': HISTORY_DAYS})
@app.route('/_cron/plot_metric_history/<history_days>')
def render_metric_history_plot(history_days: Text):
    """Cron-only endpoint: re-render and store every metric's history plot."""
    # GAE adds 'X-Appengine-Cron' to scheduler requests and strips it from
    # all external traffic, so its presence authenticates the caller. See
    # https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
    if not flask.request.headers.get('X-Appengine-Cron'):
        return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
    days = int(history_days)
    logging.info('Rendering metric history plots for last %d days', days)
    for metric_cls in base.Metric.get_active_metrics():
        metric = metric_cls()
        plotter = metric_plot.MetricHistoryPlotter(metric, history_days=days)
        plot_buffer = plotter.plot_metric_history()
        # Store the rendered PNG under a name keyed by metric and window.
        _save_to_cloud(plot_buffer.read(),
                       '%s-history-%dd.png' % (metric.name, days),
                       'image/png')
    return 'History plots updated.', status.HTTP_200_OK
@app.route('/api/metrics')
def list_metrics():
    """Return the latest result of every active metric as JSON."""
    try:
        results = base.Metric.get_latest().values()
    except Exception as error:
        # Top-level API boundary: log and report the failure.
        # BUG FIX: Python 3 exceptions have no `.message` attribute and
        # flask_api.status defines HTTP_500_INTERNAL_SERVER_ERROR (not
        # HTTP_500_SERVER_ERROR); the old code raised AttributeError on
        # this path instead of returning a 500 response.
        logging.exception('Failed to fetch latest metrics.')
        return (flask.jsonify({'error': str(error)}),
                status.HTTP_500_INTERNAL_SERVER_ERROR)
    return flask.jsonify({'metrics': [metric.serializable for metric in results]
                         }), status.HTTP_200_OK
@app.route(
    '/api/plot/<metric_cls_name>.png', defaults={'history_days': HISTORY_DAYS})
@app.route('/api/plot/<history_days>/<metric_cls_name>.png')
def metric_history_plot(history_days: Text, metric_cls_name: Text):
    """Serve the pre-rendered history plot for one metric as a PNG."""
    try:
        # Only checks existence; the plot itself comes from Cloud Storage.
        base.Metric.get_metric(metric_cls_name)
    except KeyError:
        logging.error('No active metric found for %s.', metric_cls_name)
        return ('No active metric found for %s.' %
                metric_cls_name), status.HTTP_404_NOT_FOUND
    blob_name = '%s-history-%dd.png' % (metric_cls_name, int(history_days))
    plot_bytes = _get_from_cloud(blob_name)
    return flask.send_file(io.BytesIO(plot_bytes), mimetype='image/png')
@app.route('/api/badge/<metric_cls_name>')
def metric_badge(metric_cls_name: Text):
    """Provides a response for sheilds.io to render a badge for GitHub.

    See https://shields.io/endpoint.
    """
    # Placeholder badge served when the metric is unknown.
    response = {
        'schemaVersion': 1,
        'color': 'lightgray',
        'label': metric_cls_name,
        'message': '?',
    }
    try:
        metric = base.Metric.get_latest()[metric_cls_name]
    except KeyError:
        # Unknown metric: fall through and serve the placeholder badge.
        logging.error('No active metric found for %s.', metric_cls_name)
    else:
        response['color'] = BADGE_COLORS[metric.score.value]
        response['label'] = metric.label
        response['message'] = metric.formatted_result
    # BUG FIX: the return statement used to live in a `finally` block,
    # which silently swallowed *any* exception raised in the `try` body
    # (not just the handled KeyError) and could serve a half-filled badge.
    return flask.jsonify(response), status.HTTP_200_OK
@app.route('/')
def show_metrics():
    """Render the dashboard listing the latest value of every metric."""
    latest = base.Metric.get_latest().values()
    return flask.render_template(
        'show_metrics.html',
        github_repo=env.get('GITHUB_REPO'),
        metrics=latest)
@app.route('/history', defaults={'history_days': HISTORY_DAYS})
@app.route('/history/<history_days>')
def show_metric_history(history_days: Text):
    """Render the page embedding each metric's history plot."""
    names = [cls.__name__ for cls in base.Metric.get_active_metrics()]
    return flask.render_template(
        'show_metric_history.html',
        github_repo=env.get('GITHUB_REPO'),
        metric_names=names,
        history_days=int(history_days))
if __name__ == '__main__':
    # Development entry point: run the Flask dev server directly.
    # BUG FIX: environment variables are strings, so a configured PORT must
    # be cast to int before being handed to the server.
    app.run(port=int(os.environ.get('PORT', 8080)), debug=True)
|
openstack/trove | trove/tests/unittests/api/common/test_extensions.py | Python | apache-2.0 | 3,517 | 0 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import os
import pkg_resources
from unittest import mock
import trove
from trove.common import extensions
from trove.extensions.routes.mgmt import Mgmt
from trove.extensions.routes.mysql import Mysql
from trove.tests.unittests import trove_testtools
# Expected classes, per alias, for the extensions loaded from the default
# entry points; each loaded extension must be an instance of all of them.
DEFAULT_EXTENSION_MAP = {
    'Mgmt': [Mgmt, extensions.ExtensionDescriptor],
    'MYSQL': [Mysql, extensions.ExtensionDescriptor]
}

# Simulated entry-point text: two valid extensions plus one invalid class
# used to exercise the loader's rejection path.
EP_TEXT = '''
mgmt = trove.extensions.routes.mgmt:Mgmt
mysql = trove.extensions.routes.mysql:Mysql
invalid = trove.tests.unittests.api.common.test_extensions:InvalidExtension
'''
class InvalidExtension(object):
    """Extension stub that does not derive from ExtensionDescriptor.

    Referenced by EP_TEXT so the loader tests can verify that entry points
    resolving to a non-descriptor class are rejected.
    """

    _ALIAS = "Invalid"

    def get_name(self):
        return self._ALIAS

    def get_description(self):
        return "Invalid Extension"

    def get_alias(self):
        return self._ALIAS

    def get_namespace(self):
        return "http://TBD"

    def get_updated(self):
        return "2014-08-14T13:25:27-06:00"

    def get_resources(self):
        return []
class TestExtensionLoading(trove_testtools.TestCase):
    """Tests for loading trove API extensions via pkg_resources entry points."""

    def setUp(self):
        super(TestExtensionLoading, self).setUp()

    def tearDown(self):
        super(TestExtensionLoading, self).tearDown()

    def _assert_default_extensions(self, ext_list):
        # Every loaded extension must be an instance of all classes listed
        # for its alias in DEFAULT_EXTENSION_MAP.
        for alias, ext in ext_list.items():
            for clazz in DEFAULT_EXTENSION_MAP[alias]:
                self.assertIsInstance(ext, clazz, "Improper extension class")

    @mock.patch("pkg_resources.iter_entry_points")
    def test_default_extensions(self, mock_iter_eps):
        # Feed the manager the real entry points parsed from setup.cfg so
        # the test also works from a source tree without installation.
        trove_base = os.path.abspath(os.path.join(
            os.path.dirname(trove.__file__), ".."))
        setup_path = "%s/setup.cfg" % trove_base
        # check if we are running as unit test without module installed
        if os.path.isfile(setup_path):
            parser = configparser.ConfigParser()
            parser.read(setup_path)
            entry_points = parser.get(
                'entry_points', extensions.ExtensionManager.EXT_NAMESPACE)
            eps = pkg_resources.EntryPoint.parse_group('plugins', entry_points)
            mock_iter_eps.return_value = eps.values()
            extension_mgr = extensions.ExtensionManager()
            self.assertEqual(sorted(DEFAULT_EXTENSION_MAP.keys()),
                             sorted(extension_mgr.extensions.keys()),
                             "Invalid extension names")
            self._assert_default_extensions(extension_mgr.extensions)

    @mock.patch("pkg_resources.iter_entry_points")
    def test_invalid_extension(self, mock_iter_eps):
        # EP_TEXT contains one invalid entry point; the manager must load
        # only the two valid extensions and skip the invalid one.
        eps = pkg_resources.EntryPoint.parse_group('mock', EP_TEXT)
        mock_iter_eps.return_value = eps.values()
        extension_mgr = extensions.ExtensionManager()
        self.assertEqual(len(DEFAULT_EXTENSION_MAP.keys()),
                         len(extension_mgr.extensions),
                         "Loaded invalid extensions")
        self._assert_default_extensions(extension_mgr.extensions)
|
hwroitzsch/BikersLifeSaver | src/bike_module/processor/CameraDataProcessor.py | Python | mit | 5,814 | 0.023769 | __author__ = 'Hans-Werner Roitzsch'
from datetime import datetime
import sched
import cv2 as opencv
import numpy as np
from config import *
from processor.SensorDataProcessor import SensorDataProcessor
from model.ProcessedCameraData import ProcessedCameraData
from writer.ImageFileWriter import ImageFileWriter
class CameraDataProcessor(SensorDataProcessor):
    """Detects a direction indicator (blinker) light in camera frames.

    Pipeline per frame: mean filter -> HSV conversion -> color range
    segmentation -> morphological closing -> border + erosion -> test for
    any surviving white pixel.
    """

    def __init__(self):
        self.image_file_writer = ImageFileWriter()
        # Count of frames processed so far (used for debug image names).
        self.processed_image_counter = 0
        print('using OpenCV version:', opencv.__version__)
        # self.lower_blinker_hsv = np.uint8([80, 150, 220])
        # self.upper_blinker_hsv = np.uint8([100, 220, 255])
        # NOTE(review): np.uint8 wraps values above 255, so 260 and 280 are
        # actually stored as 4 and 24; OpenCV hue is also limited to 0-179,
        # not 0-360.  Values kept as-is to preserve current behavior —
        # confirm the intended hue window against the camera calibration.
        self.lower_blinker_hsv = np.uint8([260, 150, 220])  # 360° - 80°
        self.upper_blinker_hsv = np.uint8([280, 220, 255])  # 360° - 100°

    def create_kernel(self, rows=3, cols=3):
        """Return a rows x cols morphology kernel of integer ones.

        BUG FIX: ``dtype=np.int`` raises AttributeError on NumPy >= 1.24
        (the alias was removed); the builtin ``int`` is the documented
        replacement.
        """
        return np.ones((rows, cols), dtype=int)

    def print_rows_cols(self, image):
        """Debug helper: print the image's shape tuple."""
        rows_and_cols = image.shape
        print('Rows and cols:', rows_and_cols)

    def add_border(self, image, top, bottom, left, right, color=0):
        """Pad the image with a constant-color border of the given widths."""
        return opencv.copyMakeBorder(
            image,
            top, bottom, left, right,
            opencv.BORDER_CONSTANT,
            value=color
        )

    def remove_border(self, image, top, bottom, left, right):
        """Crop a border of the given widths off the image."""
        rows, cols = image.shape[:2]
        return image[top:rows - bottom, left:cols - right]

    def process_data(self, camera_data):
        """Run the blinker-detection pipeline on one frame.

        Args:
            camera_data: carrier object whose ``data`` attribute holds the
                camera image (numpy array, treated as BGR by cvtColor).

        Returns:
            ProcessedCameraData with ``result`` True when blinker-colored
            pixels survive the pipeline, or None when the frame has no
            shape.
        """
        image = camera_data.data
        if image.shape is None:
            return None

        # (label, start, end) triples for the development timing report.
        timings = []

        # Mean filter to reduce noise.
        t_start = datetime.now()
        kernel = np.ones((6, 6), dtype=np.float32) / 36
        mean_filtered = opencv.filter2D(image, -1, kernel)
        timings.append(('TIME MEAN FILTERING: ', t_start, datetime.now()))

        # Convert to HSV for hue-based segmentation.
        t_start = datetime.now()
        hsv_image = opencv.cvtColor(mean_filtered, opencv.COLOR_BGR2HSV)
        timings.append(('TIME HSV CONVERSION: ', t_start, datetime.now()))

        # Keep only the pixels whose HSV falls in the configured range.
        t_start = datetime.now()
        mask_image = opencv.inRange(
            hsv_image, self.lower_blinker_hsv, self.upper_blinker_hsv)
        timings.append(('TIME MASKING: ', t_start, datetime.now()))

        # Closing to make segments compact.
        t_start = datetime.now()
        closing_image = opencv.morphologyEx(
            mask_image, opencv.MORPH_CLOSE,
            self.create_kernel(rows=40, cols=40))
        timings.append(('TIME CLOSING: ', t_start, datetime.now()))

        # Border so edge pixels get "fair" treatment during erosion.
        t_start = datetime.now()
        border = 3
        bordered_image = self.add_border(
            closing_image, border, border, border, border)
        timings.append(('TIME BORDERING: ', t_start, datetime.now()))

        # Erode to remove remaining noise, then crop the border again so the
        # result aligns with the original image.
        t_start = datetime.now()
        eroded_image = opencv.erode(
            bordered_image, kernel=self.create_kernel(rows=3, cols=3),
            iterations=3)
        result_image = self.remove_border(
            eroded_image, border, border, border, border)
        timings.append(('TIME ERODING: ', t_start, datetime.now()))

        self.processed_image_counter += 1
        if config_development_mode:
            print(self.processed_image_counter, 'images processed')
            # Persist intermediate images for offline inspection.
            counter = self.processed_image_counter
            self.image_file_writer.write_images(
                'test_image_%d.PNG' % counter, image,
                'test_image_hsv_%d.PNG' % counter, hsv_image,
                'test_image_eroded_%d.PNG' % counter, result_image
            )

        # Any white (255) pixel left means a blinker-colored area was found.
        t_start = datetime.now()
        found = bool(np.any(result_image == 255))
        timings.append(('TIME SEARCH FOR LABELS: ', t_start, datetime.now()))

        if config_development_mode:
            print('TIMINGS FOR PROCESSING:')
            for label, t_begin, t_end in timings:
                print(label, calculate_time_diff(t_begin, t_end), 's', sep='')
            print('found direction indicator' if found
                  else 'no direction indicator found')
        return ProcessedCameraData(probability=100.0, result=found)
def calculate_time_diff(t1, t2):
    """Return the elapsed time from ``t1`` to ``t2`` in fractional seconds.

    BUG FIX: the previous ``microseconds / 1e6 + seconds`` form ignored the
    ``days`` component of the timedelta, so spans of a day or more (or
    negative spans) were reported wrongly; ``total_seconds`` accounts for
    every component.
    """
    return (t2 - t1).total_seconds()
|
ruthger/Archipel | ArchipelAgent/archipel-agent-virtualmachine-storage/setup.py | Python | agpl-3.0 | 3,255 | 0.006452 | #
# setup.py
#
# Copyright (C) 2010 Antoine Mercadal <antoine.mercadal@inframonde.eu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
VERSION = '0.6.0'
AUTHOR = 'Antoine Mercadal'
MAIL = 'antoine.mercadal@archipelproject.org'
URL = 'http://archipelproject.org'
LICENSE = 'AGPL'
NAME = 'archipel- | agent-virtualmachine-storage'
SHORTDESCRIPTION = "Manage virtual machine storages"
LONGDESCRIPTION = ""
ENTRY_POINTS = { 'archipel.plugin.virtualmachine': [
'factory=archipelagentvirtualmachinestorage:make_archipel_plugin'],
'archipel.plugin' : [
'version=archipelagentvirtualmachinestorage:version']}
RPM_REQUIRED_DEPS = "archipel-core"
RPM_POST_INSTALL = "%post\narchipel-initinstall -m { | 0}\n".format(NAME)
## HACK FOR DEPS IN RPMS
from setuptools.command.bdist_rpm import bdist_rpm

def custom_make_spec_file(self):
    """Extend the generated RPM spec with dependencies and a %post script."""
    spec = self._original_make_spec_file()
    # Insert the requires line just before the %description section.
    description_index = spec.index("%description")
    spec.insert(description_index - 1, "requires: %s" % RPM_REQUIRED_DEPS)
    spec.append(RPM_POST_INSTALL)
    return spec

# Monkey-patch bdist_rpm so every generated spec gets the extra lines.
bdist_rpm._original_make_spec_file = bdist_rpm._make_spec_file
bdist_rpm._make_spec_file = custom_make_spec_file
## END OF HACK
# Register the package; the metadata comes from the constants above.
setup(name=NAME,
      version=VERSION,
      description=SHORTDESCRIPTION,
      long_description=LONGDESCRIPTION,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Environment :: No Input/Output (Daemon)',
          'Intended Audience :: Developers',
          'Intended Audience :: Education',
          'Intended Audience :: End Users/Desktop',
          'Intended Audience :: Science/Research',
          'Intended Audience :: System Administrators',
          'Intended Audience :: Telecommunications Industry',
          'License :: OSI Approved :: GNU Affero General Public License v3',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Topic :: Internet',
          'Topic :: System :: Emulators',
          'Topic :: System :: Operating System'],
      keywords='archipel, virtualization, libvirt, orchestration',
      author=AUTHOR,
      author_email=MAIL,
      url=URL,
      license=LICENSE,
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "archipel-core>=0.6.0beta"
      ],
      entry_points=ENTRY_POINTS
      )
|
nacc/autotest | client/tests/kvm/kvm.py | Python | gpl-2.0 | 663 | 0 | from autotest.client.virt import virt_test
class kvm(virt_test.virt_test):
    """
    Suite of KVM virtualization functional tests.
    Contains tests for testing both KVM kernel code and userspace code.
    @copyright: Red Hat 2008-2009
    @author: Uri Lublin (uril@redhat.com)
    @author: Dror Russo (drusso@redhat.com)
    @author: Michael Goldish (mgoldish@redhat.com)
    @author: David Huff (dhuff@redhat.com)
    @author: Alexey Eromenko (aeromenk@redhat.com)
    @author: Mike Burns (mburns@redhat.com)
    @see: http://www.linux-kvm.org/page/KVM-Autotest/Client_Install
            (Online doc - Getting started with KVM testing)
    """
    pass
|
infobeisel/polyvr | extras/blender_scripts/remove_double_vertices_and_faces.py | Python | gpl-3.0 | 2,067 | 0.013062 | #-----------------------------------------------------------------------------
#remove duplicates v1.3
#best way to remove duplicates: select the objects you want the duplicates removed from, then run this script
import bpy
for obj in bpy.context.selected_objects:
    if obj.type == 'MESH':
        bpy.data.scenes[0].objects.active = obj # make obj active to do operations on it
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # set 3D View to Object Mode (probably redundant)
        bpy.ops.object.mode_set(mode='EDIT', toggle=False) # set 3D View to Edit Mode
        bpy.context.tool_settings.mesh_select_mode = [False, False, True] # set to face select in 3D View Editor
        bpy.ops.mesh.select_all(action='SELECT') # make sure all faces in mesh are selected
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # very silly, you have to be in object mode to select faces!!
        found = set([]) # set of found sorted vertices pairs
        for face in obj.data.polygons:
            facevertsorted = sorted(face.vertices[:]) # sort vertices of the face to compare later
            if str(facevertsorted) not in found: # if sorted vertices are not in the set
                found.add(str(facevertsorted)) # add them in the set
                obj.data.polygons[face.index].select = False # deselect faces i want to keep
        bpy.ops.object.mode_set(mode='EDIT', toggle=False) # set to Edit Mode AGAIN
        bpy.ops.mesh.delete(type='FACE') # delete double faces
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.normals_make_consistent(inside=False) # recalculate normals
        bpy.ops.mesh.remove_doubles(threshold=0.0001, use_unselected=False) # remove doubles (keyword was corrupted in source)
        bpy.ops.mesh.normals_make_consistent(inside=False) # recalculate normals (this one or two lines above is redundant)
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # set to Object Mode AGAIN
|
elerno/cascaBell | normalizeFiles.py | Python | agpl-3.0 | 1,436 | 0.026462 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Normalize soundfiles in a folder, and write them to a new folder
# called normalized/
# Import Python modules
import contextlib
import os
import shutil
import sys
import wave
# Import user modules
def normalize():
    """Normalize every .wav file found in the global ``folderToSort``.

    Normalized copies are written (via the external ``sox`` tool) into a
    ``normalized/`` subdirectory, which is created on demand.
    Returns 1 on completion.
    """
    out_dir = folderToSort + 'normalized/'
    # Create the output directory; tolerate it already existing.
    try:
        os.makedirs(out_dir)
    except OSError:
        if not os.path.exists(out_dir):
            raise
    for name in os.listdir(folderToSort):
        # Only .wav files are processed.
        if not name.endswith('.wav'):
            continue
        cmd = 'sox --norm={0} {1} {2}'.format(
            normalizeTo, folderToSort + name, out_dir + name)
        os.system(cmd)
    return 1
def inputCheck(argValues):
    """Validate the command-line parameters.

    argValues -->> list of the script's command-line arguments.
    Currently a stub: always reports success by returning 1.
    """
    return 1
# Check that the input parameters are valid. Get the name of the folder
# that contains the sound files and the sort type from the command-line
# arguments.
argValues = sys.argv
inputCheck(argValues)
folderToSort = argValues[1]
# Optional second argument: target level in dB; defaults to -3 dB.
try:
    normalizeTo = argValues[2]
except IndexError:
    normalizeTo = -3
    print 'Normalizing to -3dB'
# Execute the script.
normalize()
|
gklyne/annalist | src/annalist_root/annalist_site/settings/devel.py | Python | mit | 1,731 | 0.008666 | """
Development settings
Data is kept within the project directory
(initialize as required, e.g. by copying initial testdata).
Service configuration is kept under personal home directory to
protect secret keys, etc.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
ANNALIST_VERSION_MSG = "Annalist version %s (development configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_site"
BASE_DATA_DIR = SITE_SRC_ROOT+"/devel"
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
# Service configuration (secret keys, etc.) lives under the user's home directory.
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = BASE_SITE_DIR+"/"
# "None" strings: development logs go to the console, not to files.
ANNALIST_LOG_PATH = "None (output to console)"
ACCESS_LOG_PATH = "None"
ERROR_LOG_PATH = "None"
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# import logging
# log = logging.getLogger(__name__)
# log.info("Annalist version %s (development configuration)"%(ANNALIST_VERSION))
# log.info(ANNALIST_VERSION_MSG)
# log.info("SETTINGS_MODULE: "+SETTINGS_MODULE)
# log.info("BASE_DATA_DIR: "+BASE_DATA_DIR)
# log.info("CONFIG_BASE: "+CONFIG_BASE)
# log.info("DJANGO_ROOT: "+DJANGO_ROOT)
# log.info("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
# log.info("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
# log.info("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
# log.info("DB PATH: "+DATABASES['default']['NAME'])
# End.
|
dbmi-pitt/dbmi-annotator | translation/mp-evidence-base-ETL/deprecated/mpEvidenceQry.py | Python | apache-2.0 | 12,990 | 0.032333 | # Copyright 2016-2017 University of Pittsburgh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http:www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, uuid, datetime
from sets import Set
from model.micropublication import Annotation, DataMaterialRow, DMItem, DataRatioItem, MaterialDoseItem, MaterialParticipants, MaterialPhenotypeItem, DataReviewer, DataDips
######################### QUERY MP Annotation ##########################
# query all mp annotations
# return annotations with claim, data and material
def queryAllMpAnnotation(conn):
	"""Query all MP annotations from the database.

	For every claim, attach its data rows and then its material rows,
	and return the completed annotations as a list.
	"""
	results = []
	for claimId, claimAnn in queryAllMpClaim(conn).items():
		withData = queryMpData(conn, claimAnn, claimId)
		results.append(queryMpMaterial(conn, withData, claimId))
	return results
# query all mp annotations
# return annotations with claim, data and material
def queryMpAnnotationByUrn(conn, annotationUrn):
	"""Query one MP annotation by its URN.

	Returns the claim Annotation with data and material attached.
	(Reconstructed: the third argument of queryMpMaterial was corrupted
	by a stray " | " marker in the source.)
	"""
	claimAnn = queryMpClaimByUrn(conn, annotationUrn)
	claimDataAnn = queryMpData(conn, claimAnn, claimAnn.claimid)
	claimDataMatAnn = queryMpMaterial(conn, claimDataAnn, claimAnn.claimid)
	return claimDataMatAnn
######################### QUERY MP Claim ##########################
## query all claim annotation by document URL
## return {id-1: Ann-1, id-2: Ann-2, ...}
def queryAllMpClaim(conn):
	"""Query all claim annotations.

	Returns {claim_id: Annotation} with qualifiers, selector, method,
	rejection info and negation populated from the joined rows.
	NOTE(review): this is Python 2 code (print statement) — keep it so.
	"""
	annotations = {} # key: id, value obj Annotation
	cur = conn.cursor()
	qry = """
	select cann.id, t.has_source, cann.creator, cann.date_created, s.exact, s.prefix, s.suffix, cbody.label, qualifierrole(q.subject, q.predicate, q.object) as qtype, qvalue, cann.rejected_statement, cann.rejected_statement_reason, cann.rejected_statement_comment, met.entered_value, cann.negation, q.enantiomer, q.metabolite
	from mp_claim_annotation cann join oa_claim_body cbody on cann.has_body = cbody.id
	join qualifier q on cbody.id = q.claim_body_id
	join method met on cann.id = met.mp_claim_id
	join oa_target t on cann.has_target = t.id
	join oa_selector s on t.has_selector = s.id;
	"""
	cur.execute(qry)
	# One result row per qualifier; rows for the same claim id accumulate
	# into a single Annotation object.
	for row in cur.fetchall():
		id = row[0]
		if id not in annotations: ## first row for this claim id: create its Annotation
			annotation = Annotation()
			annotations[id] = annotation
		else:
			annotation = annotations[id]
		drugPC = "" ## define parent compound string
		if row[15] and not row[16]:
			drugPC = "enantiomer|"
		elif row[16] and not row[15]:
			drugPC = "|metabolite"
		elif row[15] and row[16]:
			drugPC = "enantiomer|metabolite"
		## claim qualifiers
		if row[8] == "subject":
			annotation.csubject = row[9]
			annotation.setSubjectPC(drugPC) # parent compound for subject
		elif row[8] == "predicate":
			annotation.cpredicate = row[9]
		elif row[8] == "object":
			annotation.cobject = row[9]
			annotation.setObjectPC(drugPC) # parent compound for object
		elif row[8] == "qualifer":
			annotation.qualifier = row[9]
			annotation.setQualifierPC(drugPC) # parent compound for qualifier
		else:
			print "[ERROR] qualifier role unidentified qvalue: %s (claimid %s)" % (row[8], id)
		## claim source and label
		if annotation.source == None:
			annotation.source = row[1]
		if annotation.label == None:
			annotation.label = row[7]
		## claim text selector
		if annotation.exact == None:
			annotation.setOaSelector(row[5], row[4], row[6])
		## user entered method
		if annotation.method == None:
			annotation.method = row[13]
		## rejected reason
		if annotation.rejected == None and row[10] == True:
			annotation.rejected = row[11] + "|" + row[12]
		## assertion negation
		if annotation.negation == None and row[14] != None:
			annotation.negation = row[14]
	return annotations
def queryMpClaimByUrn(conn, urn):
	"""
	query claim annotation by annotation URN
	return Annotation
	NOTE(review): `urn` is interpolated directly into the SQL string —
	potential SQL injection; consider a parameterized query.
	"""
	cur = conn.cursor()
	qry = """
	select cann.id, t.has_source, cann.creator, cann.date_created, s.exact, s.prefix, s.suffix, cbody.label, qualifierrole(q.subject, q.predicate, q.object) as qtype, qvalue, cann.rejected_statement, cann.rejected_statement_reason, cann.rejected_statement_comment, met.entered_value, cann.negation, q.enantiomer, q.metabolite
	from mp_claim_annotation cann join oa_claim_body cbody on cann.has_body = cbody.id
	join qualifier q on cbody.id = q.claim_body_id
	join method met on cann.id = met.mp_claim_id
	join oa_target t on cann.has_target = t.id
	join oa_selector s on t.has_selector = s.id
	where cann.urn = '%s'; """ % (urn)
	cur.execute(qry)
	annotation = Annotation()
	# One result row per qualifier; all rows describe the same claim.
	for row in cur.fetchall():
		annotation.claimid = row[0]
		annotation.urn = urn
		drugPC = "" ## define parent compound string
		if row[15] and not row[16]:
			drugPC = "enantiomer|"
		elif row[16] and not row[15]:
			drugPC = "|metabolite"
		elif row[15] and row[16]:
			drugPC = "enantiomer|metabolite"
		## claim qualifiers
		if row[8] == "subject":
			annotation.csubject = row[9]
			annotation.setSubjectPC(drugPC) # parent compound for subject
		elif row[8] == "predicate":
			annotation.cpredicate = row[9]
		elif row[8] == "object":
			annotation.cobject = row[9]
			annotation.setObjectPC(drugPC) # parent compound for object
		elif row[8] == "qualifer":
			annotation.qualifier = row[9]
			annotation.setQualifierPC(drugPC) # parent compound for qualifier
		else:
			print "[ERROR] qualifier role unidentified qvalue: %s (claimid %s)" % (row[8], annotation.claimid)
		## claim source and label
		if annotation.source == None:
			annotation.source = row[1]
		if annotation.label == None:
			annotation.label = row[7]
		## claim text selector
		if annotation.exact == None:
			annotation.setOaSelector(row[5], row[4], row[6])
		## rejected reason
		if annotation.rejected == None and row[10] == True:
			annotation.rejected = row[11] + "|" + row[12]
		## user entered method
		if annotation.method == None:
			annotation.method = row[13]
		## assertion negation
		if annotation.negation == None and row[14] != None:
			annotation.negation = row[14]
	return annotation
######################### QUERY MP Data ##########################
# query data items for claim annotation
# return list of annotation with data items attached
def queryMpData(conn, annotation, claimid):
qry = """
select dann.type, df.data_field_type, df.value_as_string, df.value_as_number, s.exact, s.prefix, s.suffix, dann.mp_data_index, dann.ev_supports, dann.rejected, dann.rejected_reason, dann.rejected_comment, met.entered_value, met.inferred_value, eq.question, eq.value_as_string
from mp_data_annotation dann
join oa_data_body dbody on dann.has_body = dbody.id
join data_field df on df.data_body_id = dbody.id
left join oa_target t on dann.has_target = t.id
left join oa_selector s on t.has_selector = s.id
join method met on dann.mp_claim_id = met.mp_claim_id and met.mp_data_index = dann.mp_data_index
left join evidence_question eq on met.id = eq.method_id
where dann.mp_claim_id = %s
""" % (claimid)
cur = conn.cursor()
cur.execute(qry)
for row in cur.fetchall():
dType = row[0] # data type
dfType = row[1] # data field
exact = row[4]; value = str(row[2] or row[3]) # value as string or number
index = row[7] # data index
evRelationship = row[8] # EV supports or refutes
dmRow = None
if annotation.getSpecificDataMaterial(index) == None:
dmRow = DataMaterialRow() # create new row of data & material
annotation.setSpecificDataMaterial(dmRow, index)
else: # current row of data & material exists
dmRow = annotation.getSpecificDataMaterial(index)
if dType in ["auc", "cmax" , "clearance", "halflife"]:
if dmRow.getDataRatioItemInRow(dType): # DataRatioItem exists
dataRatioItem = dmRow.getDataRatioItemInRow(dType)
else: # create new |
icoxfog417/pykintone | pykintone/user_api/export.py | Python | apache-2.0 | 1,411 | 0.002835 | from pykintone.base_api import BaseAPI
import pykintone.user_api.user_api_result as ur
class Export(BaseAPI):
    """Read-only endpoints of the cybozu.com User API (users, orgs, groups).

    (Reconstructed: two lines in get_users were corrupted by stray " | "
    markers in the source.)
    """
    def __init__(self, account, requests_options=()):
        super(Export, self).__init__(account=account, requests_options=requests_options)
    def get_users(self, ids=(), codes=(), offset=-1, size=0):
        """Fetch users, optionally filtered by ids/codes, with paging.

        Only parameters explicitly set by the caller are sent to the API.
        """
        url = "https://{0}.cybozu.com/v1/users.json".format(self.account.domain)
        params = {}
        if len(ids) > 0:
            params["ids"] = ids
        if len(codes) > 0:
            params["codes"] = codes
        if offset > -1:
            params["offset"] = offset
        if size > 0:
            params["size"] = size
        resp = self._request("GET", url, params_or_data=params)
        r = ur.GetUsersResult(resp)
        return r
    def get_user_organization_titles(self, code):
        """Fetch the organizations (and titles) of the user identified by *code*."""
        url = "https://{0}.cybozu.com/v1/user/organizations.json".format(self.account.domain)
        params = {
            "code": code
        }
        resp = self._request("GET", url, params_or_data=params)
        r = ur.UserOrganizationTitlesResult(resp)
        return r
    def get_user_groups(self, code):
        """Fetch the groups of the user identified by *code*."""
        url = "https://{0}.cybozu.com/v1/user/groups.json".format(self.account.domain)
        params = {
            "code": code
        }
        resp = self._request("GET", url, params_or_data=params)
        r = ur.GetUserGroupsResult(resp)
        return r
|
PearsonIOKI/compose-forum | askbot/const/message_keys.py | Python | gpl-3.0 | 1,683 | 0.004753 | '''
This file must hold keys for translatable messages
that are used as variables
it is important that a dummy _() function is used here
this way message key will be pulled into django.po
and can still be used as a variable in python files. |
'''
_ = lambda v:v
#NOTE: all strings must be explicitly put into this dictionary,
#because you don't want to import _ from here with import *
__all__ = []
#messages loaded in the templates via direct _ calls
_('most relevant questions')
_('click to see most relevant questions')
_('by relevance')
_('click to see the oldest questions')
_('by date')
_('click to see the newest questions')
_('click to see the leas | t recently updated questions')
_('by activity')
_('click to see the most recently updated questions')
_('click to see the least answered questions')
_('by answers')
_('click to see the most answered questions')
_('click to see least voted questions')
_('by votes')
_('click to see most voted questions')
_('interesting')
_('ignored')
_('subscribed')
TAGS_ARE_REQUIRED_MESSAGE = _('tags are required')
TAG_WRONG_CHARS_MESSAGE = _(
'please use letters, numbers and characters "-+.#"'
)
TAG_WRONG_FIRST_CHAR_MESSAGE = _(
'# is not a valid character at the beginning of tags, use only letters and numbers'
)
ACCOUNT_CANNOT_PERFORM_ACTION = _(
'Sorry, you cannot %(perform_action)s because %(your_account_is)s'
)
MIN_REP_REQUIRED_TO_PERFORM_ACTION = _('>%(min_rep)s points required to %(perform_action)s')
CANNOT_PERFORM_ACTION_UNTIL = _('Sorry, you will be able to %(perform_action)s after %(until)s')
MODERATORS_OR_AUTHOR_CAN_PEFROM_ACTION = _(
'Sorry, only moderators or the %(post_author)s %(perform_action)s'
)
|
typesupply/glyph-nanny | source/code/glyphNanny/__init__.py | Python | mit | 201 | 0.004975 | from . imp | ort tests
from . import tests
from . import defaults
from .scripting import (
    registeredTests,
    testGlyph,
    testLayer,
    testFont,
    formatGlyphReport,
    formatLayerReport,
    formatFontReport
)
jsubpy/jsub | jsub/log.py | Python | mit | 1,326 | 0.004525 | import time
import logging
def time_zone(t):
	"""Return (offset, name) strings for the zone of struct_time *t*.

	*offset* is formatted '+HHMM'/'-HHMM'; the sign is inverted because
	POSIX tz offsets are positive west of UTC.
	"""
	if t.tm_isdst == 1 and time.daylight == 1:
		seconds, name = time.altzone, time.tzname[1]
	else:
		seconds, name = time.timezone, time.tzname[0]
	sign = '-' if seconds > 0 else '+'
	offset = '%s%02d%02d' % (sign, abs(seconds) // 3600, abs(seconds // 60) % 60)
	return (offset, name)
class JsubFormatter(logging.Formatter):
	"""logging.Formatter whose default asctime includes milliseconds plus
	the numeric time-zone offset and zone name."""
	# Add this method in order to display time zone offset correctly under python 2.x
	def formatTime(self, record, datefmt=None):
		"""Format the record's creation time.

		With *datefmt* behaves like strftime; otherwise produces
		'YYYY-MM-DD HH:MM:SS.mmm +HHMM ZONE' via the module-level time_zone().
		"""
		ct = time.localtime(record.created)
		if datefmt:
			s = time.strftime(datefmt, ct)
		else:
			t = time.strftime('%Y-%m-%d %H:%M:%S', ct)
			# (removed an unused local 'ms'; %03d truncates record.msecs itself)
			tz_offset, tz_name = time_zone(ct)
			s = '%s.%03d %s %s' % (t, record.msecs, tz_offset, tz_name)
		return s
# Shared formatter for all JSUB handlers (the '|' inside the format string is a literal separator).
_FORMATTER = JsubFormatter('[%(asctime)s][%(name)s|%(levelname)s]: %(message)s')
#_FORMATTER = logging.Formatter('[%(asctime)s](%(name)s:%(levelname)s) %(message)s', '%Y-%m-%d %H:%M:%S')
def add_stream_logger(level):
	"""Attach a console (stream) handler using the shared formatter to the
	'JSUB' logger and set that logger's level."""
	jsub_logger = logging.getLogger('JSUB')
	jsub_logger.setLevel(level)
	handler = logging.StreamHandler()
	handler.setFormatter(_FORMATTER)
	jsub_logger.addHandler(handler)
|
def is_prime(n):
    """Return True if *n* is a prime number, False otherwise.

    Replaces a stub that returned True unconditionally; uses trial
    division by 2 and by odd numbers up to sqrt(n).
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0:
        return False
    d = 3
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True
|
csebastian2/study | study/__init__.py | Python | gpl-3.0 | 886 | 0 | """
Study Project
Copyright (C) 2015 Study Project Authors and Contributors
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hop | e that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ imp | ort absolute_import
__license__ = "GNU General Public License Version 3"
__authors__ = [
"Tomasz Kajtoch",
]
VERSION = (1, 0, 0, 'alpha')
__version__ = "1.0.0alpha"
|
sysadminmatmoz/odoo-clearcorp | TODO-7.0/hr_payroll_sort_employees/__init__.py | Python | agpl-3.0 | 1,063 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TO | DAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public Lic | ense as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_sort_employees
|
Garrett-R/scikit-learn | sklearn/gaussian_process/gaussian_process.py | Python | bsd-3-clause | 34,404 | 0.000116 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_consistent_length
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.
    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)
    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.
    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # dtype=int: the deprecated np.int alias was removed in NumPy >= 1.24
    # and was always identical to the builtin int.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    # Fill D and ij block by block: rows for pairs (k, k+1..n-1).
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
    return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of ob | servations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
| corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared |
Sutto/cloud-custodian | tests/test_redshift.py | Python | apache-2.0 | 20,641 | 0.001502 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
class TestRedshift(BaseTest):
def test_redshift_security_group_filter(self):
factory = self.replay_flight_data("test_redshift_security_group_filter")
p = self.load_policy(
{
"name": "redshift-query",
"resource": "redshift",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterIdentifier"], "dev-test")
def test_redshift_subnet_filter(self):
factory = self.replay_flight_data("test_redshift_subnet_filter")
p = self.load_policy(
{
"name": "redshift-query",
"resource": "redshift",
"filters": [
{"type": "subnet", "key": "MapPublicIpOnLaunch", "value": True}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ClusterIdentifier"], "dev-test")
def test_redshift_query(self):
factory = self.replay_flight_data("test_redshift_query")
p = self.load_policy(
{"name": "redshift-query", "resource": "redshift"}, session_factory=factory
)
resources = p.run()
self.assertEqual(resources, [])
def test_redshift_parameter(self):
factory = self.replay_flight_data("test_redshift_parameter")
p = self.load_policy(
{
"name": "redshift-ssl",
"resource": "redshift",
"filters": [{"type": "param", "key": "require_ssl", "value": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_redshift_simple_tag_filter(self):
factory = self.replay_flight_data("test_redshift_tag_filter")
client = factory().client("redshift")
p = self.load_policy(
{
"name": "redshift-tag-filter",
"resource": "redshift",
"filters": [{"tag:maid_status": "not-null"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["ClusterIdentifier"])
tags = client.describe_tags(ResourceName=arn)["TaggedResources"]
tag_map = {t["Tag"]["Key"] for t in tags}
self.assertTrue("maid_status" in tag_map)
def test_redshift_cluster_mark(self):
factory = self.replay_flight_data("test_redshift_cluster_mark")
client = factory().client("redshift")
p = self.load_policy(
{
"name": "redshift-cluster-mark",
"resource": "redshift",
"filters": [
{"type": "value", "key": "ClusterIdentifier", "value": "c7n"}
],
"actions": [{"type": "mark-for-op", "days": 30, "op": "delete"}],
},
session_factory=factory,
)
resources = p | .run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["ClusterIdentifier"])
tags = client.describe_tags(ResourceName=arn)["TaggedResources"]
tag_map = {t["Tag"]["Key"] for t in tags}
self.assertTrue("maid_status" in tag_map)
def test_redshift_cluster_unmark(self):
factory = self.replay_flight_data("test_redshift_cluster_unmark")
client = factory().client("redshift")
p = self.load_p | olicy(
{
"name": "redshift-cluster-unmark",
"resource": "redshift",
"filters": [
{"type": "value", "key": "ClusterIdentifier", "value": "c7n"}
],
"actions": [{"type": "unmark"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["ClusterIdentifier"])
tags = client.describe_tags(ResourceName=arn)["TaggedResources"]
tag_map = {t["Tag"]["Key"] for t in tags}
self.assertFalse("maid_status" in tag_map)
def test_redshift_delete(self):
factory = self.replay_flight_data("test_redshift_delete")
p = self.load_policy(
{
"name": "redshift-ssl",
"resource": "redshift",
"filters": [{"ClusterIdentifier": "c7n-test"}],
"actions": [{"type": "delete", "skip-snapshot": True}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_redshift_default_vpc(self):
session_factory = self.replay_flight_data("test_redshift_default_vpc")
p = self.load_policy(
{
"name": "redshift-default-filters",
"resource": "redshift",
"filters": [{"type": "default-vpc"}],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_redshift_retention(self):
session_factory = self.replay_flight_data("test_redshift_retention")
p = self.load_policy(
{
"name": "redshift-retention",
"resource": "redshift",
"filters": [
{"type": "value", "key": "ClusterIdentifier", "value": "aaa"}
],
"actions": [{"type": "retention", "days": 21}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_redshift_snapshot(self):
factory = self.replay_flight_data("test_redshift_snapshot")
client = factory().client("redshift")
cluster_tags = []
p = self.load_policy(
{
"name": "redshift-snapshot",
"resource": "redshift",
"filters": [
{
"type": "value",
"key": "ClusterIdentifier",
"value": "test-cluster",
"op": "eq",
}
],
"actions": [{"type": "snapshot"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
cluster = client.describe_clusters(
ClusterIdentifier=resources[0]["ClusterIdentifier"]
)
id_cluster = cluster.get("Clusters")[0].get("ClusterIdentifier")
snapshot = client.describe_cluster_snapshots(
SnapshotIdentifier="backup-test-cluster-2017-01-12"
)
get_snapshots = snapshot.get("Snapshots")
id_snapshot = get_snapshots[0].get("ClusterIdentifier")
tag_snapshot = get_snapshots[0].get("Tags")
self.assertEqual(id_cluster, id_snapshot)
arn = p.resource_manager.generate_arn(resources[0]["ClusterIdentifier"])
cluster_tags_array = client.describe_t |
mrachinskiy/blender-addon-jewelcraft | ops_utils/scene_ops.py | Python | mit | 1,477 | 0 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from bpy.types import Operator
class SCENE_OT_jewelcraft_scene_units_set(Operator):
    bl_label = "Set Units"
    bl_description = "Set optimal unit settings for jewelry modelling"
    bl_idname = "scene.jewelcraft_scene_units_set"
    bl_options = {"REGISTER", "UNDO", "INTERNAL"}

    def execute(self, context):
        """Switch the scene to metric millimeters at 0.001 scale and match
        the viewport grid, which suits jewelry-sized modelling."""
        unit_settings = context.scene.unit_settings
        unit_settings.system = "METRIC"
        unit_settings.length_unit = "MILLIMETERS"
        unit_settings.scale_length = 0.001
        context.space_data.overlay.grid_scale = 0.001
        self.report({"INFO"}, "Optimal unit settings are in use")
        return {"FINISHED"}
|
CiwPython/Ciw | ciw/data_record.py | Python | mit | 371 | 0.002695 | from collections import namedtuple
# One completed customer journey through a node: arrival, wait, service,
# blocking, and exit, plus the queue sizes observed at arrival/departure.
DataRecord = namedtuple('Record', [
    'id_number',
    'customer_class',
    'node',
    'arrival_date',
    'waiting_time',
    'service_start_date',
    'service_time',
    'service_end_date',
    'time_blocked',
    'exit_date',
    'destination',
    'queue_size_at_arrival',
    'queue_size_at_departure',
    'server_id'
])
justinpotts/mozillians | mozillians/users/migrations/0039_auto__add_externalaccount.py | Python | bsd-3-clause | 12,687 | 0.008276 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ExternalAccount'
db.create_table('users_externalaccount', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.UserProfile'])),
('username', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('type', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('privacy', self.gf('django.db.models.fields.PositiveIntegerField')(default=3)),
))
db.send_create_signal('users', ['ExternalAccount'])
def backwards(self, orm):
# Deleting model 'ExternalAccount'
db.delete_table('users_externalaccount')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'steward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'users.externalaccount': {
'Meta': {'object_name': 'ExternalAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user': (' | django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': | 'True'})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.model |
40423118/2017springcd_hw | w10/appdiv.py | Python | agpl-3.0 | 78 | 0.051282 | impor | t sys
sys.path.append("./m1")  # make the local module directory importable
import div

# Use a distinct name instead of shadowing the builtin ``sum``.
result = div.div(1, 200000)
print(result)
City-of-Helsinki/kore | schools/api.py | Python | agpl-3.0 | 24,286 | 0.002512 | from rest_framework import routers, serializers, viewsets, mixins, filters, relations
from munigeo.api import GeoModelSerializer
from rest_framework.serializers import ListSerializer, LIST_SERIALIZER_KWARGS
import datetime
from .models import *
import django_filters
from django import forms
from rest_framework.exceptions import ParseError
YEARS_OF_PRIVACY = 100
# for censoring principals in related instances
class CensoredManyRelatedField(relations.ManyRelatedField):
    """
    Handles view permissions for related field listings with Principal or Employership instances.
    """
    def to_representation(self, iterable):
        # Only expose records whose employment ended more than
        # YEARS_OF_PRIVACY years ago.
        cutoff_year = datetime.datetime.now().year - YEARS_OF_PRIVACY
        if iterable.model is Employership:
            iterable = iterable.filter(end_year__lt=cutoff_year)
        # Fixed: was ``iterable.mode`` (AttributeError) and the filtered
        # queryset was never assigned back (cf. CensoredListSerializer).
        if iterable.model is Principal:
            iterable = iterable.filter(employers__end_year__lt=cutoff_year)
        return super().to_representation(iterable)
class CensoredListSerializer(serializers.ListSerializer):
    """
    Handles view permissions for list serializers with Principal or Employership instances.
    """
    def to_representation(self, data):
        """List of object instances -> list of dicts of primitive datatypes."""
        # Nested relationships may hand us a Manager; resolve it to a queryset.
        if isinstance(data, models.Manager):
            queryset = data.all()
        else:
            queryset = data
        cutoff_year = datetime.datetime.now().year - YEARS_OF_PRIVACY
        if queryset.model is Employership:
            queryset = queryset.filter(end_year__lt=cutoff_year)
        if queryset.model is Principal:
            queryset = queryset.filter(employers__end_year__lt=cutoff_year)
        return [self.child.to_representation(item) for item in queryset]
class CensoredHyperlinkedRelatedField(relations.HyperlinkedRelatedField):
    """
    Handles view permissions for related field listings with Principal or Employership instances.
    """
    @classmethod
    def many_init(cls, *args, **kwargs):
        # The correct arguments must be passed on to the parent: the child
        # relation gets everything, the list wrapper only the keyword
        # arguments DRF supports for many-relations.
        list_kwargs = {'child_relation': cls(*args, **kwargs)}
        for key in kwargs.keys():
            if key in relations.MANY_RELATION_KWARGS:
                list_kwargs[key] = kwargs[key]
        return CensoredManyRelatedField(**list_kwargs)
class CensoredHyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
    """
    Handles view permissions for related field listings with Principal or Employership instances.
    """
    # Route every auto-generated related field through the censoring variant
    # so the privacy filtering above is applied uniformly.
    serializer_related_field = CensoredHyperlinkedRelatedField
# the actual serializers
class SchoolNameSerializer(serializers.ModelSerializer):
official_name = serializers.CharField(allow_null=True, source='get_official_name')
other_names = serializers.ListField(
source='get_other_names',
child=serializers.DictField(child=serializers.CharField())
)
class Meta:
model = SchoolName
exclude = ('school',)
class SchoolLanguageSerializer(serializers.ModelSerializer):
language = serializers.CharField(source='language.name')
class Meta:
model = SchoolLanguage
exclude = ('school',)
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = '__all__'
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Language.objects.all()
serializer_class = LanguageSerializer
class SchoolTypeNameSerializer(serializers.ModelSerializer):
class Meta:
model = SchoolTypeName
fields = '__all__'
class SchoolTypeNameViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SchoolTypeName.objects.all()
serializer_class = SchoolTypeNameSerializer
paginate_by = 50
class SchoolTypeSerializer(serializers.ModelSerializer):
type = SchoolTypeNameSerializer()
class Meta:
model = SchoolType
exclude = ('school',)
class SchoolFieldNameSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='description')
class Meta:
model = SchoolFieldName
exclude = ('description',)
class SchoolFieldNameViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SchoolFieldName.objects.all()
serializer_class = SchoolFieldNameSerializer
class SchoolFieldSerializer(serializers.ModelSerializer):
field = SchoolFieldNameSerializer()
class Meta:
model = SchoolField
exclude = ('school',)
class SchoolGenderSerializer(serializers.ModelSerializer):
class Meta:
model = SchoolGender
exclude = ('school',)
class SchoolNumberOfGradesSerializer(serializers.ModelSerializer):
class Meta:
model = NumberOfGrades
exclude = ('school',)
class NeighborhoodSerializer(serializers.ModelSerializer):
class Meta:
model = Neighborhood
fields = '__all__'
class AddressLocationSerializer(GeoModelSerializer):
class Meta:
model = AddressLocation
exclude = ('id', 'address')
class AddressSerializer(serializers.ModelSerializer):
    # Optional nested location; flattened in to_representation so the API
    # exposes the geometry directly instead of a wrapper object.
    location = AddressLocationSerializer(required=False)

    def to_representation(self, obj):
        ret = super(AddressSerializer, self).to_representation(obj)
        # Collapse {'location': {'location': <geom>}} to {'location': <geom>}.
        if ret['location']:
            ret['location'] = ret['location']['location']
        return ret

    class Meta:
        model = Address
        fields = '__all__'
class DataTypeSerializer(serializers.ModelSerializer):
class Meta:
model = DataType
fields = '__all__'
class ArchiveDataSerializer(serializers.ModelSerializer):
url = serializers.URLField(source='link.url')
data_type = DataTypeSerializer()
class Meta:
model = ArchiveData
exclude = ('id',)
class OwnerFounderSerializer(serializers.ModelSerializer):
type = serializers.CharField(source='type.description')
class Meta:
model = OwnerFounder
fields = '__all__'
class SchoolOwnershipSerializer(serializers.ModelSerializer):
owner = OwnerFounderSerializer()
class Meta:
model = SchoolOwnership
exclude = ('school',)
class SchoolFounderSerializer(serializers.ModelSerializer):
founder = OwnerFounderSerializer()
class Meta:
model = SchoolFounder
exclude = ('school',)
class BuildingOwnershipSerializer(serializers.ModelSerializer):
owner = OwnerFounderSerializer()
class Meta:
model = BuildingOwnership
exclude = ('building',)
class SchoolBuildingPhotoSerializer(serializers.ModelSerializer):
    """Serializes building photos, rewriting Finna record URLs into direct
    cover-image URLs."""

    def to_representation(self, instance):
        # we have to reformat the URL representation so that our API serves the corresponding photo URL
        # this method will have to be updated whenever Finna API changes!
        representation = super(SchoolBuildingPhotoSerializer, self).to_representation(instance)
        representation['url'] = representation['url'].replace(
            '.finna.fi/Record/',
            '.finna.fi/Cover/Show?id='
        ) + '&w=1200&h=1200'
        return representation

    class Meta:
        model = SchoolBuildingPhoto
        exclude = ('school_building',)
class BuildingForSchoolSerializer(serializers.ModelSerializer):
neighborhood = serializers.CharField(source='neighborhood.name')
addresses = AddressSerializer(many=True)
owners = BuildingOwnershipSerializer(many=True)
photos = serializers.ListField(
source='get_photos',
child=SchoolBuildingPhotoSerializer()
)
class Meta:
model = Building
# fields must be declared here to get both id and url
fields = ('url', 'id', 'neighborhood', 'addresses', 'construction_year',
'architect', 'architect_firm', 'property_number', 'sliced',
'comment', 'reference', 'approx', 'owners', 'photos')
class PrincipalForSchoolSerializer(serializers.ModelSerializer):
"""
This class is needed for the School endpoint
"""
class Meta:
model = Principal
list_serializer_class = CensoredListSerialize |
MaartenGr/BERTopic | bertopic/plotting/__init__.py | Python | mit | 616 | 0 | from ._topics import visualize_topics
from ._heatmap import visualize_heatmap
from ._barchart import visualize_barchart
from ._term_ra | nk import visualize_term_rank
from ._hierarchy import visualize_hierarchy
from ._distribution import visualize_distribution
from ._topics_over_time import visualize_topi | cs_over_time
from ._topics_per_class import visualize_topics_per_class
__all__ = [
"visualize_topics",
"visualize_heatmap",
"visualize_barchart",
"visualize_term_rank",
"visualize_hierarchy",
"visualize_distribution",
"visualize_topics_over_time",
"visualize_topics_per_class"
]
|
capitalk/system_k | configuration_server/configuration_server.py | Python | agpl-3.0 | 4,240 | 0.014387 | import zmq
import sys
import ConfigParser
import os.path
import proto_objs.venue_configuration_pb2
import daemon
import signal
import lockfile
from optparse import OptionParser
import datetime
full_config = proto_objs.venue_configuration_pb2.configuration()
bind_addr="tcp://127.0.0.1:11111"
def parse(filename):
config = ConfigParser.ConfigParser()
config.read(filename)
sections = config.sections()
full_config.Clear()
i = 0
for s in sections:
if s == 'global':
full_config.trade_serialization_addr = config.get(s, 'trade_serialization_addr')
full_config.recovery_listener_addr = config.get(s, 'recovery_listener_addr')
full_config.aggregated_bbo_book_addr = config.get(s, 'aggregated_bbo_book_addr')
full_config.aggregated_bbo_book_id = config.getint(s, 'aggregated_bbo_book_id')
else:
i+=1
print ("Adding venue: %d " % i)
single_venue_config = full_config.configs.add()
make_protobuf(s, config, single_venue_config)
print full_config.__str__()
return True
def make_protobuf(section, config, single_venue_config):
    """Populate *single_venue_config* from one venue ``section`` of *config*."""
    single_venue_config.venue_id = config.getint(section, 'venue_id')
    # Plain string settings copied one-to-one, in the original order.
    for attr in ('mic_name', 'order_interface_addr', 'order_ping_addr',
                 'market_data_broadcast_addr'):
        setattr(single_venue_config, attr, config.get(section, attr))
    # Optional flag: only set when present so the protobuf default applies.
    if config.has_option(section, 'use_synthetic_cancel_replace'):
        single_venue_config.use_synthetic_cancel_replace = config.getboolean(
            section, 'use_synthetic_cancel_replace')
def run(config_filename):
    """Serve configuration requests over a ZeroMQ REP socket (blocking loop).

    Protocol: 'R' re-parses *config_filename* and replies ["REFRESH", status];
    'C' replies ["CONFIG", serialized protobuf]; anything else replies
    ["ERROR", "unknown message"].
    """
    # Create context and connect
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.setsockopt(zmq.LINGER, 0)  # do not block on close with unsent msgs
    print "Binding to: ", bind_addr
    socket.bind(bind_addr)
    while True:
        contents = socket.recv()
        print datetime.datetime.now(), "Received msg:<", contents, ">"
        if contents == 'R':
            print "Refresh request"
            refresh_ret = parse(config_filename)
            if (refresh_ret == True):
                refresh_status = "OK"
            else:
                refresh_status = "ERROR"
            socket.send_multipart(["REFRESH", refresh_status])
        elif contents == 'C':
            print "Config request"
            socket.send_multipart(["CONFIG", full_config.SerializeToString()])
        else:
            print "Unknown request - ERROR"
            socket.send_multipart(["ERROR", "unknown message"])
def terminate():
    # NOTE(review): ``socket`` and ``context`` are locals of run() and are not
    # in scope here, so calling this raises NameError. Appears unused (the
    # signal_map wiring in main() is commented out) -- confirm before use.
    print "Terminate"
    socket.close()
    context.close()
def main():
parser = OptionParser(usage="usage: %prog [options] <config_filename>")
parser.add_option("-D", "--daemon",
dest="runAsDaemon",
help="Run configuration server as daemon",
action="store_true",
default=False)
(options, args) = parser.parse_args();
if len(args) < 1:
parser.error("Missing arguments")
config_filename = args[0]
log_filename = "configuration_server.log"
log = open(log_filename, 'w+ | ')
print "Using config file: ", config_filename
if os.path.exists(config_filename) == False:
print "Config file: ", config_filename, " does not exist"
raise Exception("Con | fig file: ", config_filename, " does not exist")
if options.runAsDaemon == True:
context = daemon.DaemonContext(
working_directory='.',
umask=0o002,
#pidfile=lockfile.FileLock('./configuration_server.pid'),
stdout=log,
stderr=log)
#context.signal_map = {
#signal.SIGTERM: 'terminate',
#signal.SIGHUP: 'terminate',
#signal.SIGUSR1: 'terminate',
#}
#with daemon.DaemonContext():
with context:
parse(config_filename)
run(config_filename)
else:
parse(config_filename)
run(config_filename)
if __name__ == "__main__":
main()
|
houssine78/addons | website_product_subscription/__init__.py | Python | agpl-3.0 | 25 | 0.04 | from . i | mport | controllers |
Furzoom/learnpython | app/test/TsTclntSS.py | Python | mit | 455 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket

HOST = 'localhost'
PORT = 1234
ADDR = (HOST, PORT)
BUFSIZE = 1024

# One-shot client: a fresh TCP connection is opened per line of user input;
# an empty input line or an empty server reply ends the loop.
while True:
    data = raw_input('> ')
    if not data:
        break
    tcpCliSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpCliSock.connect(ADDR)
    tcpCliSock.send('{}\r\n'.format(data))
    reply = tcpCliSock.recv(BUFSIZE)
    # Close every per-request connection (the original leaked all but the last).
    tcpCliSock.close()
    if not reply:
        break
    print(reply.strip())
cieplak/msgr | setup.py | Python | apache-2.0 | 678 | 0 | #!/usr/bin/env python
from setuptools import setup

requirements = [
    'kombu',
    'pika',
    'gevent',
    'click',
    'coid',
    'click',  # NOTE(review): duplicate entry; harmless to pip -- confirm intent
    'kafka-python',  # should use pykafka
    'kazoo',
    'pyes',
    'redis',
    'sqlalchemy',
    'psycopg2',
    'pilo',
]

# Read the license text via a context manager (the original left the file
# handle open with open('LICENSE').read()).
with open('LICENSE') as license_file:
    license_text = license_file.read()

setup(
    name='msgr',
    version='0.0.1',
    url='https://www.github.com/cieplak/msgr',
    author='patrick cieplak',
    author_email='patrick.cieplak@gmail.com',
    description='broker agnostic messaging library',
    packages=['msgr'],
    license=license_text,
    include_package_data=True,
    install_requires=requirements,
    tests_require=['nose'],
    scripts=['bin/msgr']
)
|
PublicShawn/eye | util.py | Python | mit | 2,148 | 0.006052 | import glob
import subprocess
from threading import Timer
import logging
import distutils.dir_util
import sys
import multiprocessing
import os
from shutil import copy2
import eyetype
import shutil
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def runWithTimeOutByDaemon(cmd, working_dir, timeout_sec, verbose, idn, shell=False):
    """Run *cmd* in *working_dir*, killing it after *timeout_sec* seconds.

    Returns ``(idn, returncode, stdout, stderr)``; on spawn failure the
    returncode is -1 and stderr carries the error text. When *verbose* is
    true the child inherits this process's stdout/stderr instead of pipes.
    """
    outpipe = sys.stdout if verbose else subprocess.PIPE
    errpipe = sys.stderr if verbose else subprocess.PIPE
    try:
        proc = subprocess.Popen(cmd, cwd=working_dir, stdout=outpipe,
                                stderr=errpipe, shell=shell)
    except Exception as e:
        # TODO may clean up
        return idn, -1, "", str(e)
    kill_proc = lambda p: p.kill()
    timer = Timer(timeout_sec, kill_proc, [proc])
    try:
        timer.start()
        stdout, stderr = proc.communicate()
    finally:
        # Always cancel the kill timer, even if communicate() raises.
        timer.cancel()
    return idn, proc.returncode, stdout, stderr
def parallel_startDeamon(workerNum, deamonProcess, argsList, callback=eyetype.noop):
    """Run ``deamonProcess(*args)`` for every args tuple in *argsList* on a
    pool of *workerNum* worker processes; return the results in input order."""
    pool = multiprocessing.Pool(workerNum)
    pending = []
    for anarg in argsList:
        pending.append(pool.apply_async(deamonProcess, anarg, callback=callback))
    pool.close()
    pool.join()
    return [task.get() for task in pending]
def copytree(src, dst):
    # Recursive directory copy; distutils' copy_tree allows dst to already
    # exist (unlike shutil.copytree), creating missing directories as needed.
    distutils.dir_util.copy_tree(src, dst)
log_level_mapping = {'info': logging.INFO,
'warn': logging.WARN,
'debug': logging.DEBUG}
def drawProgressBar(percent, suffix, barLen = 50):
    """Render an in-place progress bar on stdout.

    *percent* is a fraction in [0, 1]; *suffix* is appended after the
    percentage. A newline is emitted once *percent* reaches 1.
    """
    sys.stdout.write("\r")
    filled = max(0, min(barLen, int(barLen * percent)))
    bar = "=" * filled + " " * (barLen - filled)
    sys.stdout.write("[ %s ] %.2f%% %s" % (bar, percent * 100, suffix))
    # erase to the end of the line (leftovers from a longer previous render)
    sys.stdout.write("\033[K")
    if percent >= 1:
        sys.stdout.write("\n")
    sys.stdout.flush()
def findAllFilePathUnderFolder(dirname, filename):
    """Return the paths of every file named *filename* under *dirname*,
    searched recursively."""
    # Fixed: the original loop variable shadowed the ``filename`` parameter.
    pattern = dirname + '/**/' + filename
    return glob.glob(pattern, recursive=True)
def copySingleFile(src, dst):
    # copy2 preserves file metadata (timestamps/permissions) and returns the
    # destination path.
    return copy2(src, dst)
|
herove/dotfiles | sublime/Packages/Package Control/package_control/upgraders/hg_upgrader.py | Python | mit | 2,518 | 0 | import os
from ..cache import set_cache, get_cache
from ..show_error import show_error
from .vcs_upgrader import VcsUpgrader
class HgUpgrader(VcsUpgrader):
    """
    Allows upgrading a local mercurial-repository-based package
    """

    cli_name = 'hg'

    def retrieve_binary(self):
        """
        Returns the path to the hg executable

        :return: The string path to the executable or False on error
        """
        name = 'hg'
        if os.name == 'nt':
            name += '.exe'
        binary = self.find_binary(name)
        if not binary:
            show_error(
                u'''
                Unable to find %s.

                Please set the "hg_binary" setting by accessing the
                Preferences > Package Settings > Package Control > Settings
                \u2013 User menu entry.

                The Settings \u2013 Default entry can be used for reference,
                but changes to that will be overwritten upon next upgrade.
                ''',
                name
            )
            return False
        return binary

    def run(self):
        """
        Updates the repository with remote changes

        :return: False or error, or True on success
        """
        binary = self.retrieve_binary()
        if not binary:
            return False
        args = [binary]
        args.extend(self.update_command)
        # Pull/update from the conventional "default" remote path.
        args.append('default')
        self.execute(args, self.working_copy, meaningful_output=True)
        return True

    def incoming(self):
        """:return: bool if remote revisions are available"""
        cache_key = self.working_copy + '.incoming'
        incoming = get_cache(cache_key)
        if incoming is not None:
            return incoming
        binary = self.retrieve_binary()
        if not binary:
            return False
        args = [binary, 'in', '-q', 'default']
        output = self.execute(args, self.working_copy, meaningful_output=True)
        if output is False:
            return False
        # Any output from "hg in -q" means there are incoming revisions.
        incoming = len(output) > 0
        set_cache(cache_key, incoming, self.cache_length)
        return incoming

    def latest_commit(self):
        """
        :return:
            The latest commit hash
        """
        binary = self.retrieve_binary()
        if not binary:
            return False
        args = [binary, 'id', '-i']
        output = self.execute(args, self.working_copy)
        if output is False:
            return False
        return output.strip()
|
dreglad/telesurvideos | telesurvideos/migrations/0007_remove_videolistpluginmodel_tiempo.py | Python | mit | 413 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-10 23:56
from __future__ import unicode_literals
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('telesurvideos', '0006_auto_20170110_1710'),
    ]

    operations = [
        # Drop the obsolete ``tiempo`` field from VideoListPluginModel.
        migrations.RemoveField(
            model_name='videolistpluginmodel',
            name='tiempo',
        ),
    ]
|
msfrank/terane | terane/bier/ql/__init__.py | Python | gpl-3.0 | 2,731 | 0.005492 | # Copyright 2010,2011,2012 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of Terane.
#
# Terane is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Terane is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Terane. If not, see <http://www.gnu.org/licenses/>.
import datetime
from pyparsing import ParseBaseException
from terane.bier.matching import Every
from terane.bier.searching import Period
from terane.bier.ql.queries import iterQuery, tailQuery
from terane.loggers import getLogger
logger = getLogger('terane.bier.ql')
class QuerySyntaxError(BaseException):
    """
    There was an error parsing the query syntax.
    """
    def __init__(self, exc, qstring):
        self._exc = exc
        # Best-effort extraction of the offending tokens; never let message
        # formatting itself raise (was a bare ``except:``).
        try:
            tokens = qstring[exc.col-1:].splitlines()[0]
        except Exception:
            tokens = ''
        if exc.msg != '':
            self._message = "Syntax error starting at '%s' (line %i, col %i): %s" % (
                tokens, exc.lineno, exc.col, exc.msg)
        else:
            self._message = "Syntax error starting at '%s' (line %i, col %i)" % (
                tokens, exc.lineno, exc.col)

    def __str__(self):
        return self._message
@logger.tracedfunc
def parseIterQuery(string):
"""
Parse the specified iter query.
:param string: The query string.
:type string: unicode
:returns: A (query,period) tuple.
:rtype: tuple
"""
try:
query,where = iterQuery.parseString(string, parseAll=True).asList()[0]
if where == None:
utcnow = datetime.datetime.utcnow()
onehourago = utcnow - datetime.timedelta(hours=1)
where = {'dateFrom': onehourago, 'dateTo': utcnow, 'fromExcl': False, 'toExcl': False}
return query, Period(w | here['dateFrom'], where['dateTo'], where['fromExcl'], where['toE | xcl'])
except ParseBaseException, e:
raise QuerySyntaxError(e, string)
@logger.tracedfunc
def parseTailQuery(string):
    """
    Parse the specified tail query.

    :param string: The query string.
    :type string: unicode
    :returns: A (query,period) tuple.
    :rtype: tuple
    """
    try:
        # pyparsing returns a one-element list wrapping the parsed result.
        return tailQuery.parseString(string, parseAll=True).asList()[0]
    except ParseBaseException, e:
        raise QuerySyntaxError(e, string)
|
CCI-MOC/GUI-Backend | api/tests/v2/test_reporting.py | Python | apache-2.0 | 4,602 | 0.001956 | import json
from unittest import skip, skipUnless
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory
from api.v2.views import ReportingViewSet
from core.models import AtmosphereUser
def contains_user(username):
    """
    Test if the username exists
    """
    try:
        AtmosphereUser.objects.get_by_natural_key(username=username)
    except AtmosphereUser.DoesNotExist:
        return False
    return True
class ReportingTests(APITestCase):
    def setUp(self):
        # One anonymous and one regular user, plus the reporting list view.
        self.anonymous_user = AnonymousUserFactory()
        self.user = UserFactory.create()
        self.view = ReportingViewSet.as_view({'get': 'list'})
# @skipUnless(contains_user('test-julianp'), 'The database does not contain the user test-julianp')
@skip('skip for now')
def test_a_sanity_check(self):
"""Will only work with a correct database.
TODO: Create providers and fixtures necessary to get working.
"""
factory = APIRequestFactory()
url = '/api/v2/reporting?start_date=2016-01-01&end_date=2016-10-25&provider_id=1&provider_id=2&provider_id=3&' \
'provider_id=4&provider_id=5&provider_id=6'
request = factory.get(url)
sanity_user = AtmosphereUser.objects.get_by_natural_key('test-julianp')
f | orce_authenticate(request, user=sanity_user)
response = self.view(request)
self.assertEquals(response.status_code, 200)
self.assertEquals(len(response.data), 1)
received_data = json.loads(json.dumps(response.data, indent=2))
expected_data_json = '''[
{
"id": 29792,
"instance_id": "57259394-a1d2-4318-a0c0-5764f42db4be",
"username": "test-julianp", |
"staff_user": "False",
"provider": "iPlant Workshop Cloud - Tucson",
"image_name": "Ubuntu 14.04.2 XFCE Base",
"version_name": "1.0",
"is_featured_image": true,
"hit_aborted": false,
"hit_active_or_aborted": 1,
"hit_active_or_aborted_or_error": 1,
"hit_active": true,
"hit_deploy_error": false,
"hit_error": false,
"size": {
"id": 105,
"uuid": "60fccf16-d0ba-488d-b8a5-46a9752dc2ca",
"url": "http://testserver/api/v2/sizes/60fccf16-d0ba-488d-b8a5-46a9752dc2ca",
"alias": "1",
"name": "tiny1",
"cpu": 1,
"disk": 0,
"mem": 4096,
"active": true,
"start_date": "2014-06-06T20:50:08.387646Z",
"end_date": null
},
"start_date": "09/20/16 17:36:57",
"end_date": null
}
]'''
expected_data = json.loads(expected_data_json)
dict_eq_(self, received_data, expected_data)
def test_is_not_public(self):
factory = APIRequestFactory()
url = reverse('api:v2:reporting-list')
request = factory.get(url)
force_authenticate(request, user=self.anonymous_user)
response = self.view(request)
self.assertEquals(response.status_code, 403)
def test_no_query_params(self):
factory = APIRequestFactory()
url = reverse('api:v2:reporting-list')
request = factory.get(url)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 400)
self.assertEqual(response.data['errors'][0]['message'],
"The reporting API should be accessed via the query parameters:"
" ['start_date', 'end_date', 'provider_id']")
def test_invalid_query_params(self):
factory = APIRequestFactory()
invalid_urls = [
'api/v2/reporting?start_date=3077-10-29&end_date=1901-10-29&provider_id=some_provider',
'api/v2/reporting?start_date=blah&end_date=1901-10-29&provider_id=1',
'api/v2/reporting?start_date=3077-10-29&end_date=blah&provider_id=1'
]
for url in invalid_urls:
request = factory.get(url)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 400)
self.assertEqual(response.data['errors'][0]['message'], 'Invalid filter parameters')
@skip('skip for now')
def test_access_invalid_provider(self):
raise NotImplementedError
@skip('skip for now')
def test_access_not_allowed_provider(self):
raise NotImplementedError
|
gtimelog/gtimelog | setup.py | Python | gpl-2.0 | 2,464 | 0.000406 | #!/usr/bin/env python
import ast
import os
import re
import sys
from setuptools import find_packages, setup
here = os.path.dirname(__file__)


def read(filename):
    """Return the text of *filename*, resolved relative to this setup script."""
    path = os.path.join(here, filename)
    with open(path, encoding='utf-8') as fp:
        return fp.read()
# Pull dunder metadata (version, author, ...) straight out of the package
# source without importing it.
metadata = {
    k: ast.literal_eval(v)
    for k, v in re.findall(
        '^(__version__|__author__|__url__|__licence__) = (.*)$',
        read('src/gtimelog/__init__.py'),
        flags=re.MULTILINE,
    )
}
version = metadata['__version__']

# Only the three most recent changelog sections go into the long description.
changes = read('CHANGES.rst').split('\n\n\n')
changes_in_latest_versions = '\n\n\n'.join(changes[:3])
older_changes = '''
Older versions
~~~~~~~~~~~~~~
See the `full changelog`_.
.. _full changelog: https://github.com/gtimelog/gtimelog/blob/master/CHANGES.rst
'''

short_description = 'A Gtk+ time tracking application'
long_description = ''.join([
    read('README.rst'),
    '\n\n',
    changes_in_latest_versions,
    '\n\n',
    older_changes,
])

tests_require = ['freezegun']

if sys.version_info < (3, 6, 0):
    # Fix: the exit message was corrupted ("min | imum").
    sys.exit("Python 3.6 is the minimum required version")
setup(
    name='gtimelog',
    version=version,
    author='Marius Gedminas',
    author_email='marius@gedmin.as',
    url='https://gtimelog.org/',
    description=short_description,
    long_description=long_description,
    license='GPL',
    keywords='time log logging timesheets gnome gtk',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: X11 Applications :: GTK',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        # Fix: this classifier line carried a stray " | " artifact.
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Office/Business',
    ],
    python_requires='>= 3.6',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    package_data={'': ['locale/*/LC_MESSAGES/gtimelog.mo']},
    test_suite='gtimelog.tests',
    tests_require=tests_require,
    extras_require={
        'test': [
            'freezegun',
        ],
    },
    zip_safe=False,
    entry_points="""
    [gui_scripts]
    gtimelog = gtimelog.main:main
    """,
    install_requires=['PyGObject'],
)
|
phracek/devassistant | devassistant/__init__.py | Python | gpl-2.0 | 63 | 0 | """PEP-396 compliant package version"""
# Fix: line was corrupted by a stray " | " between the name and the value.
__version__ = '0.10.1'
|
centricular/meson | test cases/common/16 configure file/generator.py | Python | apache-2.0 | 223 | 0.004484 | #!/usr/bin/env python3
import sys, os
# Usage: generator.py <input-file> <output-file>
if len(sys.argv) != 3:
    print("Wrong amount of parameters.")
    # Fix: previously the script kept running after the message and then
    # crashed (or misbehaved) on the missing argv entries.
    sys.exit(1)

# The input file is only checked for existence, never read.
assert os.path.exists(sys.argv[1])

# Fix: this line was corrupted ("sys. | argv" / "as | ofile").
with open(sys.argv[2], 'w') as ofile:
    ofile.write("#define ZERO_RESULT 0\n")
|
Sunhick/hacker_rank | Algorithms/Bit Manipulation/Maximizing-Xor.py | Python | mit | 418 | 0.007177 | #!/usr/bin/python3
def maxXor(l, r):
    """Return the maximum value of i ^ j over all pairs with l <= i, j <= r.

    Returns 0 for an empty range (l > r), matching the original while-loop
    behaviour.  (Fix: the original body contained corrupted tokens
    "max_so_f | ar" and "i ^ | j"; the manual nested while loops are also
    replaced with the idiomatic range/max form.)
    """
    return max(
        (i ^ j for i in range(l, r + 1) for j in range(l, r + 1)),
        default=0,
    )
if __name__ == '__main__':
    # Read the inclusive range bounds from stdin, one integer per line.
    l = int(input())
    r = int(input())
    print(maxXor(l, r))
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/wkssvc/NetWkstaInfo1059.py | Python | gpl-2.0 | 675 | 0.007407 | # encoding: utf-8
# module samba.dcerpc.wkssvc
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/wkssvc.so
# by generator 1.135
""" wkssvc DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class NetWkstaInfo1059(__talloc.Object):
    # Auto-generated DCE/RPC stub; only the corrupted "__i | nit__" token
    # has been repaired, the generated shape is otherwise preserved.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    buf_read_only_files = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
|
saltstack/salt | tests/unit/modules/test_drbd.py | Python | apache-2.0 | 6,104 | 0.000328 | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import salt.modules.drbd as drbd
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class DrbdTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.modules.drbd
    """

    def setup_loader_modules(self):
        return {drbd: {}}

    # 'overview' function tests: 1

    def test_overview(self):
        """
        Test if it shows status of the DRBD devices
        """
        ret = {
            "connection state": "True",
            "device": "Stack",
            "fs": "None",
            "local disk state": "UpToDate",
            "local role": "master",
            "minor number": "Salt",
            "mountpoint": "True",
            "partner disk state": "UpToDate",
            "partner role": "minion",
            "percent": "888",
            "remains": "666",
            "total size": "50",
            "used": "50",
        }
        mock = MagicMock(
            return_value=(
                "Salt:Stack True master/minion UpToDate/UpToDate True None 50 50 666 888"
            )
        )
        with patch.dict(drbd.__salt__, {"cmd.run": mock}):
            self.assertDictEqual(drbd.overview(), ret)

        # Second shape: device still synchronising (sync bar instead of fs info).
        ret = {
            "connection state": "True",
            "device": "Stack",
            "local disk state": "UpToDate",
            "local role": "master",
            "minor number": "Salt",
            "partner disk state": "partner",
            "partner role": "minion",
            "synched": "5050",
            "synchronisation: ": "syncbar",
        }
        mock = MagicMock(
            return_value=(
                "Salt:Stack True master/minion UpToDate/partner syncbar None 50 50"
            )
        )
        with patch.dict(drbd.__salt__, {"cmd.run": mock}):
            self.assertDictEqual(drbd.overview(), ret)

    def test_status(self):
        """
        Test if it shows status of the DRBD resources via drbdadm
        """
        ret = [
            {
                "local role": "Primary",
                "local volumes": [{"disk": "UpToDate"}],
                "peer nodes": [
                    {
                        "peer volumes": [
                            {
                                "done": "96.47",
                                "peer-disk": "Inconsistent",
                                # Fix: this line carried a stray " | " artifact.
                                "replication": "SyncSource",
                            }
                        ],
                        "peernode name": "opensuse-node2",
                        "role": "Secondary",
                    }
                ],
                "resource name": "single",
            }
        ]

        # NOTE(review): the drbdadm-status parser is indentation-sensitive;
        # the leading whitespace inside these mock strings was reconstructed
        # (2 spaces per nesting level) -- confirm against salt upstream.
        mock = MagicMock(
            return_value="""
single role:Primary
  disk:UpToDate
  opensuse-node2 role:Secondary
    replication:SyncSource peer-disk:Inconsistent done:96.47
"""
        )
        with patch.dict(drbd.__salt__, {"cmd.run": mock}):
            try:  # python2
                self.assertItemsEqual(drbd.status(), ret)
            except AttributeError:  # python3
                self.assertCountEqual(drbd.status(), ret)

        ret = [
            {
                "local role": "Primary",
                "local volumes": [
                    {"disk": "UpToDate", "volume": "0"},
                    {"disk": "UpToDate", "volume": "1"},
                ],
                "peer nodes": [
                    {
                        "peer volumes": [
                            {"peer-disk": "UpToDate", "volume": "0"},
                            {"peer-disk": "UpToDate", "volume": "1"},
                        ],
                        "peernode name": "node2",
                        "role": "Secondary",
                    },
                    {
                        "peer volumes": [
                            {"peer-disk": "UpToDate", "volume": "0"},
                            {"peer-disk": "UpToDate", "volume": "1"},
                        ],
                        "peernode name": "node3",
                        "role": "Secondary",
                    },
                ],
                "resource name": "test",
            },
            {
                "local role": "Primary",
                "local volumes": [
                    {"disk": "UpToDate", "volume": "0"},
                    {"disk": "UpToDate", "volume": "1"},
                ],
                "peer nodes": [
                    {
                        "peer volumes": [
                            {"peer-disk": "UpToDate", "volume": "0"},
                            {"peer-disk": "UpToDate", "volume": "1"},
                        ],
                        "peernode name": "node2",
                        "role": "Secondary",
                    },
                    {
                        "peer volumes": [
                            {"peer-disk": "UpToDate", "volume": "0"},
                            {"peer-disk": "UpToDate", "volume": "1"},
                        ],
                        "peernode name": "node3",
                        "role": "Secondary",
                    },
                ],
                "resource name": "res",
            },
        ]
        mock = MagicMock(
            return_value="""
res role:Primary
  volume:0 disk:UpToDate
  volume:1 disk:UpToDate
  node2 role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
  node3 role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
test role:Primary
  volume:0 disk:UpToDate
  volume:1 disk:UpToDate
  node2 role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
  node3 role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
"""
        )
        with patch.dict(drbd.__salt__, {"cmd.run": mock}):
            try:  # python2
                self.assertItemsEqual(drbd.status(), ret)
            except AttributeError:  # python3
                self.assertCountEqual(drbd.status(), ret)
|
ragibson/Steganography | tests/test_wavsteg.py | Python | mit | 3,422 | 0.001461 | import numpy as np
import os
from random import choice
from stego_lsb.WavSteg import hide_data, recover_data
import string
import unittest
import wave
class TestWavSteg(unittest.TestCase):
    """Round-trip hide/recover tests for WavSteg over random WAV payloads."""

    def write_random_wav(self, filename, num_channels, sample_width, framerate, num_frames):
        if sample_width != 1 and sample_width != 2:
            # WavSteg doesn't support higher sample widths
            raise ValueError("File has an unsupported bit-depth")

        # Fix: the wave file was never closed; the context manager guarantees
        # the header and frames are flushed to disk.
        with wave.open(filename, "w") as wav_file:
            wav_file.setnchannels(num_channels)
            wav_file.setsampwidth(sample_width)
            wav_file.setframerate(framerate)

            dtype = np.uint8 if sample_width == 1 else np.uint16
            data = np.random.randint(
                0, 2 ** (8 * sample_width), dtype=dtype, size=num_frames * num_channels
            )
            wav_file.writeframes(data)

    def write_random_file(self, filename, num_bytes):
        with open(filename, "wb") as file:
            file.write(os.urandom(num_bytes))

    def check_random_interleaving(self, byte_depth=1, num_trials=256, filename_length=5):
        # Random (but per-run unique) base name for all temporary files.
        filename = "".join(
            choice(string.ascii_lowercase) for _ in range(filename_length)
        )
        wav_input_filename = filename + ".wav"
        payload_input_filename = filename + ".txt"
        wav_output_filename = filename + "_steg.wav"
        payload_output_filename = filename + "_recovered.txt"

        np.random.seed(0)  # deterministic trials
        for _ in range(num_trials):
            num_channels = np.random.randint(1, 64)
            num_frames = np.random.randint(1, 16384)
            num_lsb = np.random.randint(1, 8 * byte_depth + 1)
            # Largest payload that fits in the chosen LSB capacity.
            payload_len = (num_frames * num_lsb * num_channels) // 8

            self.write_random_wav(
                wav_input_filename,
                num_channels=num_channels,
                sample_width=byte_depth,
                framerate=44100,
                num_frames=num_frames,
            )
            self.write_random_file(payload_input_filename, num_bytes=payload_len)

            try:
                hide_data(
                    wav_input_filename,
                    payload_input_filename,
                    wav_output_filename,
                    num_lsb,
                )
                recover_data(
                    wav_output_filename, payload_output_filename, num_lsb, payload_len
                )
            except ValueError as e:
                # Clean up the temporary files before propagating the failure.
                os.remove(wav_input_filename)
                os.remove(payload_input_filename)
                os.remove(wav_output_filename)
                os.remove(payload_output_filename)
                raise e

            with open(payload_input_filename, "rb") as input_file, open(
                payload_output_filename, "rb"
            ) as output_file:
                input_payload_data = input_file.read()
                output_payload_data = output_file.read()

            os.remove(wav_input_filename)
            os.remove(payload_input_filename)
            os.remove(wav_output_filename)
            os.remove(payload_output_filename)

            self.assertEqual(input_payload_data, output_payload_data)

    def test_consistency_8bit(self):
        # Fix: corrupted call "byte_depth=1 | )".
        self.check_random_interleaving(byte_depth=1)

    def test_consistency_16bit(self):
        # Fix: corrupted call "byte_depth | =2".
        self.check_random_interleaving(byte_depth=2)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
rivasd/djPsych | djstim/migrations/0006_auto_20161130_1641.py | Python | gpl-3.0 | 625 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-30 21:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens both name fields of MicroComponentPair to 128 characters.
    # (Fix: repaired corrupted tokens "model_ | name" and "models.C | harField".)

    dependencies = [
        ('djstim', '0005_auto_20160414_2205'),
    ]

    operations = [
        migrations.AlterField(
            model_name='microcomponentpair',
            name='first',
            field=models.CharField(max_length=128),
        ),
        migrations.AlterField(
            model_name='microcomponentpair',
            name='second',
            field=models.CharField(max_length=128),
        ),
    ]
|
ioGrow/iogrowCRM | crm/iomodels/casestatuses.py | Python | agpl-3.0 | 1,555 | 0.019293 | from endpoints_proto_datastore.ndb import EndpointsModel
from google.appengine.api import search
from google.appengine.ext import ndb
from protorpc import messages
class CaseStatusSchema(messages.Message):
    # protorpc message describing a case status and when it last changed.
    name = messages.StringField(1)
    status_changed_at = messages.StringField(2)
class Casestatus(EndpointsModel):
    """Datastore model for a case status, mirrored into the search index."""

    _message_fields_schema = ('id', 'entityKey', 'created_at', 'updated_at', 'status', 'owner', 'organization')

    owner = ndb.StringProperty()
    # Fix: property name was corrupted ("organiza | tion").
    organization = ndb.KeyProperty()
    created_at = ndb.DateTimeProperty(auto_now_add=True)
    updated_at = ndb.DateTimeProperty(auto_now=True)
    status = ndb.StringProperty()
    created_by = ndb.KeyProperty()
    last_modified_by = ndb.KeyProperty()

    def put(self, **kwargs):
        # Persist the entity, then refresh the search index so queries see it.
        # Fix: the call was corrupted ("ndb.Model.put | (self, ...)").
        ndb.Model.put(self, **kwargs)
        self.put_index()

    def put_index(self):
        """Index this entity in the global search index."""
        def empty_string(x):
            # Search fields reject None; substitute an empty string.
            return x if x else ""

        organization = str(self.organization.id())
        my_document = search.Document(
            doc_id=str(self.key.id()),
            fields=[
                search.TextField(name=u'type', value=u'Casestatus'),
                search.TextField(name='organization', value=empty_string(organization)),
                search.TextField(name='owner', value=empty_string(self.owner)),
                search.TextField(name='title', value=empty_string(self.status)),
                search.DateField(name='created_at', value=self.created_at),
            ])
        my_index = search.Index(name="GlobalIndex")
        my_index.put(my_document)
|
hairychris/energenie-test | src/mihome_energy_monitor.py | Python | mit | 1,609 | 0.003729 | # mihome_energy_monitor.py 28/05/2016 D.J.Whale
#
# A simple demo of monitoring and logging energy usage of mihome devices
#
# Logs all messages to screen and to a file energenie.csv
# Any device that has a switch, it toggles it every 2 seconds.
# Any device that offers a power reading, it displays it.
import energenie
import Logger
import time
APP_DELAY = 2  # seconds to sleep between monitoring iterations
switch_state = False  # current on/off state applied to switchable devices
def energy_monitor_loop():
    """One monitoring pass: pump the radio, toggle switches, report power."""
    global switch_state
    # Process any received messages from the real radio
    energenie.loop()

    # For all devices in the registry, if they have a switch, toggle it
    for d in energenie.registry.devices():
        if d.has_switch():
            d.set_switch(switch_state)
    # NOTE(review): indentation was lost in transit; this toggle is placed
    # after the loop so all devices flip together each pass -- confirm.
    switch_state = not switch_state

    # For all devices in the registry, if they have a get_power(), call it
    # Fix: the printed string was corrupted ("statu | s").
    print("Checking device status")
    for d in energenie.registry.devices():
        print(d)
        try:
            p = d.get_power()
            print("Power: %s" % str(p))
        except Exception:
            # Fix: bare "except:" also swallowed KeyboardInterrupt/SystemExit.
            pass  # Ignore it if can't provide a power

    time.sleep(APP_DELAY)
if __name__ == "__main__":
    print("Starting energy monitor example")
    # Fix: the call was corrupted ("energeni | e.init()").
    energenie.init()

    # provide a default incoming message handler, useful for logging every message
    def incoming(address, message):
        print("\nIncoming from %s" % str(address))
        Logger.logMessage(message)
    energenie.fsk_router.when_incoming(incoming)

    print("Logging to file:%s" % Logger.LOG_FILENAME)
    try:
        while True:
            energy_monitor_loop()
    finally:
        # Always release the radio, even on Ctrl-C.
        energenie.finished()
# END
|
renatopp/marioai | agents/__init__.py | Python | mit | 27 | 0.037037 | from .random_agent import *
mzdaniel/oh-mainline | vendor/packages/kombu/kombu/transport/beanstalk.py | Python | agpl-3.0 | 3,201 | 0.00125 | """
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2011 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
import socket
from Queue import Empty
from anyjson import serialize, deserialize
from beanstalkc import Connection, BeanstalkcException, SocketError
from kombu.transport import virtual
DEFAULT_PORT = 11300  # beanstalkd's standard TCP port

__author__ = "David Ziegler <david.ziegler@gmail.com>"
class Channel(virtual.Channel):
    """Virtual AMQP channel backed by beanstalkd tubes."""

    _client = None  # lazily-created beanstalkc connection

    def _parse_job(self, job):
        """Return ``(payload, tube_name)`` for *job*.

        Unparseable jobs are buried for later inspection; parsed jobs are
        deleted.  Raises ``Empty`` when *job* is falsy.
        """
        item, dest = None, None
        if job:
            try:
                item = deserialize(job.body)
                dest = job.stats()["tube"]
            except Exception:
                # Keep the broken job around for debugging instead of losing it.
                job.bury()
            else:
                job.delete()
        else:
            raise Empty()
        return item, dest

    def _put(self, queue, message, **kwargs):
        # Map AMQP priority onto beanstalk's job priority.
        priority = message["properties"]["delivery_info"]["priority"]
        self.client.use(queue)
        self.client.put(serialize(message), priority=priority)

    def _get(self, queue):
        # Watch only the requested tube.
        # (Fix: corrupted tokens repaired; side-effect list comprehensions
        # replaced with plain loops.)
        if queue not in self.client.watching():
            self.client.watch(queue)
        for active in self.client.watching():
            if active != queue:
                self.client.ignore(active)

        job = self.client.reserve(timeout=1)
        item, dest = self._parse_job(job)
        return item

    def _get_many(self, queues, timeout=1):
        # timeout of None will cause beanstalk to timeout waiting
        # for a new request
        if timeout is None:
            timeout = 1

        watching = self.client.watching()
        for active in queues:
            if active not in watching:
                self.client.watch(active)

        job = self.client.reserve(timeout=timeout)
        return self._parse_job(job)

    def _purge(self, queue):
        # Watch only the tube being purged.
        if queue not in self.client.watching():
            self.client.watch(queue)
        for active in self.client.watching():
            if active != queue:
                self.client.ignore(active)

        # Drain and delete jobs until the tube is empty.
        count = 0
        while 1:
            job = self.client.reserve(timeout=1)
            if job:
                job.delete()
                count += 1
            else:
                break
        return count

    def _size(self, queue):
        # Size is unknown for beanstalk; always report 0.
        return 0

    def _open(self):
        conninfo = self.connection.client
        port = conninfo.port or DEFAULT_PORT
        conn = Connection(host=conninfo.hostname, port=port)
        conn.connect()
        return conn

    def close(self):
        # NOTE(review): when a client exists this returns before calling
        # super().close(), preserved from the original -- confirm intended.
        if self._client is not None:
            return self._client.close()
        super(Channel, self).close()

    @property
    def client(self):
        # Connect on first use.
        if self._client is None:
            self._client = self._open()
        return self._client
class Transport(virtual.Transport):
    # Channel class used by this transport.
    Channel = Channel

    interval = 1  # polling interval in seconds
    default_port = DEFAULT_PORT
    # Errors that indicate a lost/failed connection vs. a channel problem.
    connection_errors = (socket.error,
                         SocketError,
                         IOError)
    channel_errors = (socket.error,
                      IOError,
                      SocketError,
                      BeanstalkcException)
|
BorgERP/borg-erp-6of3 | verticals/garage61/acy_mrp_procurement_virtual_stock/procurement.py | Python | agpl-3.0 | 4,154 | 0.005296 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Acysos S.L. (http://acysos.com) All Rights Reserved.
# Ignacio Ibeas <ignacio@acysos.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this prog | ram. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from osv import fields
from osv import osv
from tools.translate import _
import ir
import netsvc
import time
class procurement_order(osv.osv):
    _inherit = 'procurement.order'

    def make_mo(self, cr, uid, ids, context=None):
        """ Make Manufacturing(production) order from procurement
        @return: New created Production Orders procurement wise

        Difference from the stock behaviour: a production order is only
        created when the product's virtual (forecast) stock is negative;
        otherwise the procurement is satisfied directly from stock.
        """
        res = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context).company_id
        production_obj = self.pool.get('mrp.production')
        move_obj = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        procurement_obj = self.pool.get('procurement.order')
        produce_ids = []
        for procurement in procurement_obj.browse(cr, uid, ids, context=context):
            res_id = procurement.move_id.id
            loc_id = procurement.location_id.id
            # Planned date minus the product's manufacturing delay and the
            # company-wide manufacturing lead time.
            newdate = datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.product_id.product_tmpl_id.produce_delay or 0.0)
            newdate = newdate - relativedelta(days=company.manufacturing_lead)
            if procurement.product_id.virtual_available < 0:
                # Forecast stock is short: produce exactly the missing quantity.
                produce_id = production_obj.create(cr, uid, {
                    'origin': procurement.origin,
                    'product_id': procurement.product_id.id,
                    'product_qty': - procurement.product_id.virtual_available,
                    'product_uom': procurement.product_uom.id,
                    'product_uos_qty': procurement.product_uos and procurement.product_uos_qty or False,
                    'product_uos': procurement.product_uos and procurement.product_uos.id or False,
                    'location_src_id': procurement.location_id.id,
                    'location_dest_id': procurement.location_id.id,
                    'bom_id': procurement.bom_id and procurement.bom_id.id or False,
                    'date_planned': newdate.strftime('%Y-%m-%d %H:%M:%S'),
                    'move_prod_id': res_id,
                    'company_id': procurement.company_id.id,
                })
                res[procurement.id] = produce_id
                produce_ids.append(produce_id)
                # Explode the BoM with the procurement's properties.
                bom_result = production_obj.action_compute(cr, uid,
                    [produce_id], properties=[x.id for x in procurement.property_ids])
                self.write(cr, uid, [procurement.id], {'state': 'running'})
            else:
                # Enough forecast stock: satisfy from stock and assign the move.
                self.write(cr, uid, [procurement.id], {'state': 'ready','message':'from stock: products assigned.','procure_method':'make_to_stock'})
                move_obj.write(cr, uid, [res_id],
                    {'state': 'assigned'})
                move_obj.write(cr, uid, [res_id],
                    {'location_id': procurement.location_id.id})
        # Confirm every production order created above via the workflow engine.
        for produce_id in produce_ids:
            wf_service.trg_validate(uid, 'mrp.production', produce_id, 'button_confirm', cr)
        return res
# Register the model (old OpenERP 6 convention).
# Fix: the line carried a trailing " |" artifact.
procurement_order()
philippks/OSMNames | tests/prepare_data/test_merge_corresponding_linestrings.py | Python | gpl-2.0 | 7,141 | 0.003781 | from geoalchemy2.elements import WKTElement
from osmnames.prepare_data.prepare_data import merge_corresponding_linestrings
def test_touching_linestrings_with_same_name_and_parent_id_get_merged(session, tables):
    # Two street segments that share an endpoint, a name and a parent must be
    # merged into one merged linestring recording both member ids.
    session.add(
        tables.osm_linestring(
            id=1,
            osm_id=1111,
            name="Rigiweg",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944848.776557897 5985402.86960293,
                944850.474743831 5985427.66032806,944850.064193386
                5985444.35251452)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=2,
            osm_id=2222,
            name="Rigiweg",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944850.064193386 5985444.35251452,
                944841.125390515 5985474.18953402,944830.553716556 5985520.36149253,
                944826.821439784 5985550.17127335)""", srid=3857)
        )
    )
    session.commit()
    merge_corresponding_linestrings()
    # Both source rows point at the osm_id of the surviving merged row.
    assert session.query(tables.osm_merged_linestring).get(1).member_ids == [1, 2]
    assert session.query(tables.osm_linestring).get(1).merged_into == 1111
    assert session.query(tables.osm_linestring).get(2).merged_into == 1111
def test_multiple_touching_linestrings_with_same_name_and_parent_id_get_merged(session, tables):
    # A chain of four touching segments with the same name/parent must all
    # merge into one linestring.
    # (Fix: repaired corrupted tokens "5985815.76 | 883825" and "133 | 7".)
    # following geometries are simplified from the osm linestring with the osm_id 35901448
    session.add(
        tables.osm_linestring(
            id=1,
            osm_id=1111,
            name="Dorfstrasse",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(945262.014242162 5985606.22988835,
                945125.963423109 5985669.20516832,944921.48130943 5985680.63151807,
                944732.478813664 5985815.76883825,
                944577.598658291 5985883.07702847)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=2,
            osm_id=2222,
            name="Dorfstrasse",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944410.8312014 5985761.48265348,
                944216.360920161 5985861.25509228)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=3,
            osm_id=3333,
            name="Dorfstrasse",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944410.8312014 5985761.48265348,
                944577.598658291 5985883.07702847)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=4,
            osm_id=4444,
            name="Dorfstrasse",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(945286.283371876 5985592.46613797,
                945284.781130476 5985609.66739185,945262.014242162 5985606.22988835,
                945266.045101078 5985588.14864235,
                945286.283371876 5985592.46613797)""", srid=3857)
        )
    )
    session.commit()
    merge_corresponding_linestrings()
    # All four segments collapse into the merged row for osm_id 1111.
    assert session.query(tables.osm_merged_linestring).get(1).member_ids == [1, 2, 3, 4]
    assert session.query(tables.osm_linestring).get(1).merged_into == 1111
    assert session.query(tables.osm_linestring).get(2).merged_into == 1111
    assert session.query(tables.osm_linestring).get(3).merged_into == 1111
    assert session.query(tables.osm_linestring).get(4).merged_into == 1111
def test_almost_touching_linestrings_with_same_name_and_parent_id_get_merged(session, tables):
    # Segments that do not touch exactly but lie within the merge tolerance
    # must still be merged.
    # the following geometries do not touch directly but has to be merged
    session.add(
        tables.osm_linestring(
            id=1,
            name="Oberseestrasse",
            parent_id=1337,
            osm_id=24055427,
            geometry=WKTElement("""LINESTRING(981453.976751762
                5978726.11248254,981467.114366002 5978716.22031828,981491.02892942
                5978722.30674579,981536.264123906 5978726.22239555)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=2,
            name="Oberseestrasse",
            parent_id=1337,
            osm_id=308577271,
            geometry=WKTElement("""LINESTRING(981558.359202398
                5978726.38726504,981674.610293174 5978708.37529047)""", srid=3857)
        )
    )
    session.commit()
    merge_corresponding_linestrings()
    # Both rows merge into the linestring with the lower id's osm_id.
    assert session.query(tables.osm_merged_linestring).get(1).member_ids == [1, 2]
    assert session.query(tables.osm_linestring).get(1).merged_into == 24055427
    assert session.query(tables.osm_linestring).get(2).merged_into == 24055427
def test_touching_linestrings_with_same_name_but_different_parent_id_dont_get_merged(session, tables):
    # Same name but different parents (e.g. different towns): no merge.
    session.add(
        tables.osm_linestring(
            id=1,
            name="Rigiweg",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944848.776557897 5985402.86960293,
                944850.474743831 5985427.66032806,944850.064193386
                5985444.35251452)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=2,
            name="Rigiweg",
            parent_id=9999,
            geometry=WKTElement("""LINESTRING(944850.064193386 5985444.35251452,
                944841.125390515 5985474.18953402,944830.553716556 5985520.36149253,
                944826.821439784 5985550.17127335)""", srid=3857)
        )
    )
    session.commit()
    merge_corresponding_linestrings()
    assert session.query(tables.osm_linestring).get(1).merged_into is None
    assert session.query(tables.osm_linestring).get(2).merged_into is None
def test_touching_linestrings_with_same_parent_id_but_different_name_dont_get_merged(session, tables):
    # Same parent but different street names: no merge.
    session.add(
        tables.osm_linestring(
            id=1,
            name="Rigiweg",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944848.776557897 5985402.86960293,
                944850.474743831 5985427.66032806,944850.064193386
                5985444.35251452)""", srid=3857)
        )
    )
    session.add(
        tables.osm_linestring(
            id=2,
            name="Zueristrasse",
            parent_id=1337,
            geometry=WKTElement("""LINESTRING(944850.064193386 5985444.35251452,
                944841.125390515 5985474.18953402,944830.553716556 5985520.36149253,
                944826.821439784 5985550.17127335)""", srid=3857)
        )
    )
    session.commit()
    merge_corresponding_linestrings()
    assert session.query(tables.osm_linestring).get(1).merged_into is None
    assert session.query(tables.osm_linestring).get(2).merged_into is None
|
ryjmacdonell/geomtools | setup.py | Python | mit | 1,588 | 0 | """
Setup script for the Gimbal package
"""
from setuptools import setup
from setuptools import find_packages
def readme():
    """Returns the contents of the README without the header image.

    The first line of README.rst (the image) is skipped and replaced by a
    plain reST title.  (Fix: repaired corrupted tokens "Ret | urns" and
    "=== | ===" in the docstring and header string.)
    """
    header = '======\nGimbal\n======\n'
    with open('README.rst', 'r') as f:
        f.readline()  # drop the first line (the header image)
        return header + f.read()
def requirements():
    """Return the stripped requirement entries from requirements.txt."""
    with open('requirements.txt', 'r') as fp:
        return [entry.strip() for entry in fp]
# read the current version number
# NOTE(review): exec of a local file is the classic single-file-version
# pattern; acceptable here because the input is the package's own source,
# but never use exec on untrusted input.
exec(open('gimbal/_version.py').read())

setup(
    name='gimbal',
    version=__version__,
    description=('Tools for importing, creating, editing and querying ' +
                 'molecular geometries'),
    long_description=readme(),
    long_description_content_type='text/x-rst',
    keywords='gimbal molecule geometry displacement transformation 3D',
    url='https://github.com/ryjmacdonell/gimbal',
    author='Ryan J. MacDonell',
    author_email='rmacd054@uottawa.ca',
    license='MIT',
    packages=find_packages(),
    scripts=['bin/convgeom', 'bin/measure', 'bin/nudge', 'bin/subst'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Chemistry'
    ],
    install_requires=requirements()
)
|
adviti/melange | thirdparty/google_appengine/lib/webapp2/tests/extras_appengine_sessions_memcache_test.py | Python | apache-2.0 | 4,072 | 0.000491 | # -*- coding: utf-8 -*-
import webapp2
from webapp2_extras import sessions
from webapp2_extras import sessions_memcache
import test_base
# Minimal WSGI app: only the session secret is configured; the tests build
# requests/responses directly and never route through handlers.
app = webapp2.WSGIApplication(config={
    'webapp2_extras.sessions': {
        'secret_key': 'my-super-secret',
    },
})
class TestMemcacheSession(test_base.BaseTestCase):
    """Round-trip tests for the memcache session backend across requests."""

    #factory = sessions_memcache.MemcacheSessionFactory

    def test_get_save_session(self):
        # Round 1 -------------------------------------------------------------
        req = webapp2.Request.blank('/')
        req.app = app
        store = sessions.SessionStore(req)

        session = store.get_session(backend='memcache')

        rsp = webapp2.Response()
        # Nothing changed, we want to test anyway.
        store.save_sessions(rsp)

        session['a'] = 'b'
        session['c'] = 'd'
        session['e'] = 'f'
        store.save_sessions(rsp)

        # Round 2 -------------------------------------------------------------
        cookies = rsp.headers.get('Set-Cookie')
        req = webapp2.Request.blank('/', headers=[('Cookie', cookies)])
        req.app = app
        # Fix: this line carried a stray "| " artifact.
        store = sessions.SessionStore(req)

        session = store.get_session(backend='memcache')
        self.assertEqual(session['a'], 'b')
        self.assertEqual(session['c'], 'd')
        self.assertEqual(session['e'], 'f')

        session['g'] = 'h'
        rsp = webapp2.Response()
        store.save_sessions(rsp)

        # Round 3 -------------------------------------------------------------
        cookies = rsp.headers.get('Set-Cookie')
        req = webapp2.Request.blank('/', headers=[('Cookie', cookies)])
        req.app = app
        store = sessions.SessionStore(req)

        session = store.get_session(backend='memcache')
        self.assertEqual(session['a'], 'b')
        self.assertEqual(session['c'], 'd')
        self.assertEqual(session['e'], 'f')
        self.assertEqual(session['g'], 'h')

    def test_flashes(self):
        # Round 1 -------------------------------------------------------------
        req = webapp2.Request.blank('/')
        req.app = app
        store = sessions.SessionStore(req)
        session = store.get_session(backend='memcache')

        # Flashes start out empty and are consumed on read.
        flashes = session.get_flashes()
        self.assertEqual(flashes, [])
        session.add_flash('foo')

        rsp = webapp2.Response()
        store.save_sessions(rsp)

        # Round 2 -------------------------------------------------------------
        cookies = rsp.headers.get('Set-Cookie')
        req = webapp2.Request.blank('/', headers=[('Cookie', cookies)])
        req.app = app
        store = sessions.SessionStore(req)
        session = store.get_session(backend='memcache')

        flashes = session.get_flashes()
        self.assertEqual(flashes, [(u'foo', None)])

        # Reading flashes clears them.
        flashes = session.get_flashes()
        self.assertEqual(flashes, [])

        session.add_flash('bar')
        session.add_flash('baz', 'important')

        # Fix: this line was corrupted ("we | bapp2").
        rsp = webapp2.Response()
        store.save_sessions(rsp)

        # Round 3 -------------------------------------------------------------
        cookies = rsp.headers.get('Set-Cookie')
        req = webapp2.Request.blank('/', headers=[('Cookie', cookies)])
        req.app = app
        store = sessions.SessionStore(req)
        session = store.get_session(backend='memcache')

        flashes = session.get_flashes()
        self.assertEqual(flashes, [(u'bar', None), (u'baz', 'important')])

        flashes = session.get_flashes()
        self.assertEqual(flashes, [])

        rsp = webapp2.Response()
        store.save_sessions(rsp)

        # Round 4 -------------------------------------------------------------
        cookies = rsp.headers.get('Set-Cookie')
        req = webapp2.Request.blank('/', headers=[('Cookie', cookies)])
        req.app = app
        store = sessions.SessionStore(req)
        session = store.get_session(backend='memcache')

        flashes = session.get_flashes()
        self.assertEqual(flashes, [])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_base.main()
|
PG-TUe/tpot | tpot/config/regressor.py | Python | lgpl-3.0 | 5,754 | 0.000348 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
# Check the TPOT documentation for information on the structure of config dicts
# Maps each operator's fully-qualified class name to the hyperparameter
# search space TPOT may draw from when assembling regression pipelines.
regressor_config_dict = {
    # Regressors
    'sklearn.linear_model.ElasticNetCV': {
        'l1_ratio': np.arange(0.0, 1.01, 0.05),
        'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
    },
    'sklearn.ensemble.ExtraTreesRegressor': {
        'n_estimators': [100],
        'max_features': np.arange(0.05, 1.01, 0.05),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'bootstrap': [True, False]
    },
    'sklearn.ensemble.GradientBoostingRegressor': {
        'n_estimators': [100],
        'loss': ["ls", "lad", "huber", "quantile"],
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'max_depth': range(1, 11),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'subsample': np.arange(0.05, 1.01, 0.05),
        'max_features': np.arange(0.05, 1.01, 0.05),
        'alpha': [0.75, 0.8, 0.85, 0.9, 0.95, 0.99]
    },
    'sklearn.ensemble.AdaBoostRegressor': {
        'n_estimators': [100],
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'loss': ["linear", "square", "exponential"],
        'max_depth': range(1, 11)
    },
    'sklearn.tree.DecisionTreeRegressor': {
        'max_depth': range(1, 11),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21)
    },
    'sklearn.neighbors.KNeighborsRegressor': {
        'n_neighbors': range(1, 101),
        'weights': ["uniform", "distance"],
        'p': [1, 2]
    },
    'sklearn.linear_model.LassoLarsCV': {
        'normalize': [True, False]
    },
    'sklearn.svm.LinearSVR': {
        'loss': ["epsilon_insensitive", "squared_epsilon_insensitive"],
        'dual': [True, False],
        'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
        'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
        'epsilon': [1e-4, 1e-3, 1e-2, 1e-1, 1.]
    },
    'sklearn.ensemble.RandomForestRegressor': {
        'n_estimators': [100],
        'max_features': np.arange(0.05, 1.01, 0.05),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'bootstrap': [True, False]
    },
    'sklearn.linear_model.RidgeCV': {
    },
    'xgboost.XGBRegressor': {
        'n_estimators': [100],
        'max_depth': range(1, 11),
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'subsample': np.arange(0.05, 1.01, 0.05),
        'min_child_weight': range(1, 21),
        'nthread': [1]
    },
    # Preprocessors
    'sklearn.preprocessing.Binarizer': {
        'threshold': np.arange(0.0, 1.01, 0.05)
    },
    'sklearn.decomposition.FastICA': {
        'tol': np.arange(0.0, 1.01, 0.05)
    },
    'sklearn.cluster.FeatureAgglomeration': {
        'linkage': ['ward', 'complete', 'average'],
        'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine', 'precomputed']
    },
    'sklearn.preprocessing.MaxAbsScaler': {
    },
    'sklearn.preprocessing.MinMaxScaler': {
    },
    'sklearn.preprocessing.Normalizer': {
        'norm': ['l1', 'l2', 'max']
    },
    'sklearn.kernel_approximation.Nystroem': {
        'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
        'gamma': np.arange(0.0, 1.01, 0.05),
        'n_components': range(1, 11)
    },
    'sklearn.decomposition.PCA': {
        'svd_solver': ['randomized'],
        'iterated_power': range(1, 11)
    },
    'sklearn.preprocessing.PolynomialFeatures': {
        'degree': [2],
        'include_bias': [False],
        'interaction_only': [False]
    },
    'sklearn.kernel_approximation.RBFSampler': {
        'gamma': np.arange(0.0, 1.01, 0.05)
    },
    'sklearn.preprocessing.RobustScaler': {
    },
    'sklearn.preprocessing.StandardScaler': {
    },
    'tpot.builtins.ZeroCount': {
    },
    'tpot.builtins.OneHotEncoder': {
        'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25],
        'sparse': [False]
    },
    # Selectors
    'sklearn.feature_selection.SelectFwe': {
        'alpha': np.arange(0, 0.05, 0.001),
        'score_func': {
            'sklearn.feature_selection.f_regression': None
        }
    },
    'sklearn.feature_selection.SelectPercentile': {
        'percentile': range(1, 100),
        'score_func': {
            'sklearn.feature_selection.f_regression': None
        }
    },
    'sklearn.feature_selection.VarianceThreshold': {
        'threshold': np.arange(0.05, 1.01, 0.05)
    },
    'sklearn.feature_selection.SelectFromModel': {
        'threshold': np.arange(0, 1.01, 0.05),
        'estimator': {
            'sklearn.ensemble.ExtraTreesRegressor': {
                'n_estimators': [100],
                'max_features': np.arange(0.05, 1.01, 0.05)
            }
        }
    }
}
|
ceramos/micropython | tests/basics/builtin_property.py | Python | mit | 1,132 | 0.0053 | # test builtin property
# create a property object explicitly (with and without accessor arguments)
property()
property(1, 2, 3)
# use its accessor methods; each returns a new property object
p = property()
p.getter(1)
p.setter(2)
p.deleter(3)
# basic use as a decorator
class A:
    """Read-only attribute exposed via the bare @property decorator."""

    def __init__(self, x):
        self._x = x

    @property
    def x(self):
        print("x get")
        return self._x
# Reading works; assignment must fail because A defines no setter.
a = A(1)
print(a.x)
try:
    a.x = 2
except AttributeError:
    print("AttributeError")
# explicit use within a class
class B:
    """Property built explicitly from getter/setter/deleter functions."""

    def __init__(self, x):
        self._x = x

    def xget(self):
        print("x get")
        return self._x

    def xset(self, value):
        print("x set")
        self._x = value

    def xdel(self):
        print("x del")

    # Explicit (non-decorator) property construction.
    x = property(xget, xset, xdel)
# Exercise getter, setter and deleter of the explicitly-built property.
b = B(3)
print(b.x)
b.x = 4
print(b.x)
del b.x
# full use as a decorator
class C:
    """Property defined entirely with decorators: @property, @x.setter, @x.deleter."""
    def __init__(self, x):
        self._x = x
    @property
    def x(self):
        print("x get")
        return self._x
    @x.setter
    def x(self, value):
        print("x set")
        self._x = value
    @x.deleter
    def x(self):
        print("x del")
# Exercise getter, setter and deleter of the fully-decorated property.
c = C(5)
print(c.x)
c.x = 6
print(c.x)
del c.x
|
django-salesforce/django-salesforce | tests/test_app_label/settings.py | Python | mit | 439 | 0.004556 | """Test that app config can override a directory name conflict (.e.g. "salesforce")"""
from salesforce.testrunner.settings import * # NOQA pylint: disable=unused-wildcard-import,wildcard | -import
from salesforce.testrunner.settings import INSTALLED_APPS
# Drop the example app inherited from the test-runner settings so this test
# package's own AppConfig (below) is the one Django registers.
INSTALLED_APPS = [x for x in INSTALLED_APPS if x != 'salesforce.testrunner.example']
INSTALLED_APPS += ['tests.test_app_label.salesforce.apps.TestSalesForceConfig']
ROOT_URLCONF = None
|
consultit/Ely | setup.py | Python | lgpl-3.0 | 1,423 | 0.009136 | #!/usr/bin/env python
'''
Created on Jan 6, 2018
@author: consultit
'''
from panda3d.core import Filename
import sys, os
from subprocess import call
### NOTE: currently this script works only on GNU/Linux
# Directory containing this script (sys.path[0] is the script's location).
currdir = os.path.abspath(sys.path[0])
# NOTE(review): os.path.join(currdir, '/ely/') returns just '/ely/' because the
# second argument is absolute, so currdir is discarded -- confirm intended.
builddir = Filename.from_os_specific(os.path.join(currdir, '/ely/')).get_fullpath()
elydir = Filename.fromOsSpecific(os.path.join(currdir, '/ely/')).getFullpath()
# Library file-name parts: lib prefix, module prefix and shared-object suffix.
lpref = ''
mpref = ''
lsuff = '.so'
###
tools = 'libtools'
# Modules built after (and linked against) the shared tools library.
modules = ['ai', 'audio', 'control', 'physics']
if __name__ == '__main__':
    # Work from inside the 'ely' source tree.
    os.chdir(currdir + builddir)
    # Build the shared 'tools' library first; every module links against it.
    libtools = lpref + tools + lsuff
    print('building "' + libtools + '" ...')
    toolsdir = '..' + elydir + tools
    args = ['build.py', '--dir', toolsdir, '--clean']
    call(['/usr/bin/python'] + args)
    #print('installing "' + libtools + '" ...')
    #args = [libtools, toolsdir]
    #call(['/usr/bin/install'] + args)
    # Build each module, linking against the tools library built above.
    for module in modules:
        modulelib = mpref + module + lsuff
        print('building "' + modulelib + '" ...')
        moduledir = '..' + elydir + module
        args = ['build.py', '--dir', moduledir, '--libs', libtools, '--libs_src',
                toolsdir, '--clean']
        call(['/usr/bin/python'] + args)
        #print('installing "' + modulelib + '" ...')
        #args = [modulelib, moduledir]
        #call(['/usr/bin/install'] + args)
|
MHenderson/graph-visualisations | lib/lombardi/LombardiSpirograph.py | Python | mit | 18,845 | 0.005678 | """LombardiSpirograph - draw rotationally symmetric drawings in Lombardi style
David Eppstein, UC Irvine, March 2010
For usage information type "python LombardiSpirography.py"
without any additional arguments.
"""
from pyx import canvas,path,color
from optparse import OptionParser
from math import *
import sys
# ============================================================
# Pre-determined graphs by name
# ============================================================
# Built-in graphs: maps a human-friendly name to the spirograph descriptor
# string (symmetry order followed by per-layer words) parsed further below.
namedGraphs = {
    "andrasfai4": "11-ad",
    "andrasfai5": "14-gad",
    "andrasfai6": "17-dag",
    "andrasfai7": "20-jdag",
    "antihole7": "7-cb",
    "antihole8": "8-dab",
    "antihole9": "9-cab",
    "antiprism4": "4-a1-a",
    "antiprism5": "5-a1-a",
    "antiprism6": "6-a1-a",
    "brinkmann": "7-c2-1-b",
    "c5xc6": "15-c5-c",
    "cogwheel3": "3-x-1-0",
    "cogwheel4": "4-x-1-0",
    "cogwheel5": "5-x-1-0",
    "cogwheel6": "6-x-1-0",
    "complete5": "5-ab",
    "complete6-a": "6-cab",
    "complete6-b": "5-x-ab",
    "complete6-c": "3-a02-a",   # minimum-crossing drawing
    "complete7": "7-bac",
    "complete8-a": "8-dcab",
    "complete8-b": "7-x-bac",
    "crown5": "10-ac",
    "crown6": "6-a04-a",
    "crown7": "14-cae",
    "cube": "4-a-a",
    "cuboctahedron": "4-a1-1-a",
    "dodecahedron-a": "5-a-1-0-a",
    "dodecahedron-b": "10-a-b",
    "desargues": "10-a-c",
    "durer": "6-a-b",
    "dyck": "8-a-2-0-c",
    "f40": "10-a-4-0-a",
    "grotzsch": "5-x-1-b",
    "hypercube": "8-c2-a",
    "icosahedron": "3-a01-01-1-a",
    "icosidodecahedron": "10-a1-2-b",
    "k33": "6-ca",
    "k44": "8-ca",
    "k55": "10-eac",
    "k66": "12-eac",
    "mobiuskantor": "8-a-c",
    "nauru": "12-a-e",
    "octahedron": "3-a1-a",
    "paley13": "13-dac",
    "pappus": "6-c2-0-a",
    "prism3": "3-a-a",
    "prism5": "5-a-a",
    "prism6": "6-a-a",
    "petersen": "5-a-b",
#    "shrikhande": "8-ba1-bc", # requires arcs to pass through vertices
    "sun3": "3-a1-0",
    "sun4": "4-ba1-0",
    "sun5": "5-ba1-0",
    "sun6": "6-cba1-0",
    "tetrahedron-a": "3-x-a",
    "tetrahedron-b": "4-ba",
    "utility": "6-ca",
    "wagner": "8-da",
    "wheel4": "4-x-a",
    "wheel5": "5-x-a",
    "wheel6": "6-x-a",
    }
# ============================================================
# Command-line options
# ============================================================
parser = OptionParser()
parser.add_option("-f","--format", dest="show_format", action="store_true",
help = "describe the graph input format and exit")
parser.add_option("-n","--names", dest="show_names", action="store_true",
help = "show a description of graph names and exit")
parser.add_option("-s","--scale", dest="scale", action="store",
type="float", default="1.0",
help = "size of overall drawing relative to default")
parser.add_option("-r","--radius",dest="radius", action="store",
type="float", default="1.0",
help = "radius of vertices relative to default")
parser.add_option("-c","--color",dest="color", action="store",
type="string", default="red",
help = "vertex color (e.g. blue or 76B3DF)")
parser.add_option("-o","--outline", dest="outline", action="store_true",
help = "avoid drawing outlines around vertices")
options,args = parser.parse_args()
def abort(message):
    # Print an error to stderr and exit with failure status (Python 2 print syntax).
    print >>sys.stderr,message
    sys.exit(-1)
graphName = "-".join(args).lower()
if options.show_format:
if graphName:
abort("--format option does not take any arguments")
print '''The graph should be described as a sequence of alphanumeric words,
separated either by spaces or by blank lines. The first word gives the
order of symmetry of the drawing (the number of vertices in each
concentric layer) and each subsequent word describes the vertices in
a single layer of the graph.
Each word after the first takes the form of a (possibly empty) sequence
of letters followed by a (possibly empty) number. The letters describe
edges connecting two vertices in the same layer: "a" for a connection
between consecutive vertices in the same layer, "b" for a connection
between vertices two steps away from each other, etc. The letters should
be listed in the order the connections should appear at the vertex,
starting from the edge closest to the center of the drawing and
progressing outwards. Only connections that span less than half the circle
are possible, except that the first layer may have connections spanning
exactly half the circle.
The numeric part of a word describes the connection from one layer to the
next layer. If this number is zero, then vertices in the inner layer are
connected to vertices in the next layer radially by straight line segments.
Otherwise, pairs of vertices from the inner layer, the given number of
steps apart, are connected to single vertices in the outer layer. A nonzero number written with a leading zero (e.g. "01" in place of "1") indicates that, as well as connections with the given number of steps, there should also be a radial connection from the inner layer to the next layer that has vertices
aligned with it; this may not necessarily be the layer immediately outward.
In the innermost layer, the special word "x" may be used to indicate that
the layer consists of a single vertex at the center of the drawing. "x0" indicates that this central vertex is connected both to every vertex in
the adjacent layer and also to every vertex in the next layer that is
staggered with respect to the inner two layers.
'''
sys.exit(0)
if options.show_names:
if graphName:
if graphName not in namedGraphs:
print '''Graph name''',graphName,'''is not recognized.
Run
python LombardiSpirograph --names
without any command line arguments to get a list of recognized names.'''
else:
print graphName,"is equivalent to",namedGraphs[graphName]
sys.exit(0)
print '''This program has built into it a set of graph names that may
be used as the command-line argument to specify the graph to be
drawn. They are:
'''
graphs = namedGraphs.items()
graphs.sort()
graphs = [("Name","Description"),("====","===========")] + graphs
for name,description in graphs:
print " " + name + " "*(20-len(name)) + description
sys.exit(0)
if not graphName:
print '''This program draws rotationally-symmetric graphs in Lombardi style:
the edges are circular arcs that meet at equal angles at each vertex.
To use it, type
python LombardiSpirograph.py [graph] >output.svg
to a command line, where [graph] is replaced by a name or description
of the graph to be drawn.
For a list of available graph names, type
python LombardiSpirograph.py --names
For help with the input format for graph descriptions, type
python LombardiSpirograph.py --format
For a list of other command line options, type
python LombardiSpirograph.py --help
'''
sys.exit(0)
# ============================================================
# Command line parsing
# ============================================================
if graphName in namedGraphs:
graphName = namedGraphs[graphName]
try:
# Split command line argument into symmetry and level descriptors
nameComponents = graphName.split("-")
symmetry = int(nameComponents[0])
vertexDescriptors = nameComponents[1:]
levels = len(vertexDescriptors)
# Parse out the X for the descriptor at the inner level
central = [False]*levels
radialzero = False
if vertexDescriptors[0] == "x":
vertexDescriptors[0] = ""
central[0] = True
elif vertexDescriptors[0] == "x0":
vertexDescriptors[0] = ""
central[0] = True
| radialzero = True
# Parse out the letters for the circulant at each level
circulant = [None]*levels
for i in range(levels):
circulant[i] = [ord(x) - ord('a') + 1 for x in vertexDescriptors | [i]
if x >= "a" and x < "x"]
vertexDescriptors[i] = vertexDescriptors[i][len(circulant[i]):]
# Parse out the numbers for which other |
smrmkt/project_euler | problem_029.py | Python | mit | 657 | 0.004566 | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Project Euler problem 29 (distinct powers):

Consider all integer combinations of a**b for 2 <= a <= 5 and
2 <= b <= 5. Collecting the results and removing repeats leaves
15 distinct terms.

How many distinct terms are in the sequence generated by a**b
for 2 <= a <= 100 and 2 <= b <= 100?
'''
import timeit
def calc(a, b):
    """Count distinct values of i**j for 2 <= i <= a and 2 <= j <= b.

    Project Euler 29 asks for calc(100, 100).
    """
    # A set comprehension de-duplicates the powers directly.
    return len({i ** j for i in range(2, a + 1) for j in range(2, b + 1)})
# Python 2 entry point: print the answer, then time 10 repeated runs.
if __name__ == '__main__':
    print calc(100, 100)
    print timeit.Timer('problem_029.calc(100, 100)', 'import problem_029').timeit(10)
|
Xelaadryth/Xelabot | quest_bot/quest_player_manager.py | Python | mit | 11,668 | 0.0024 | from copy import deepcopy
import settings
from twitch.player_manager import PlayerManager
class QuestPlayerManager(PlayerManager):
"""
Functions like add_gold perform a raw store action and then save. __add_gold is the raw store action in this case.
Properties of raw store actions:
- Call username.lower()
- Touch self.players with that name
- Do not save to file
Properties of store actions:
- Do nothing other than call a raw action and then save
Some actions can also take a list of elements. These are all of the form:
def foo(username **kwargs):
if not (isinstance(username), str):
for user in username:
foo(username, **kwargs)
else:
ORIGINAL FUNCTION BODY
Note that both store actions and raw store actions qualify for this.
"""
default_player = deepcopy(PlayerManager.default_player)
default_player.update({
'exp': 0,
'prestige': 0,
'gold': 0,
'items': {}
})
def __add_gold(self, username, gold, prestige_benefits=True):
"""
Gives gold to the specified player.
:param username: str - The player who you are modifying
:param gold: float - How much gold to give that player
:param prestige_benefits: bool - Whether this gold increase is affected by prestige bonuses
"""
# Don't magnify negative amounts of gold
if prestige_benefits and gold > 0:
gold *= 1 + self.players[username]['prestige'] * settings.PRESTIGE_GOLD_AMP
self.players[username]['gold'] += gold
if self.players[username]['gold'] < 0:
self.players[username]['gold'] = 0
    def add_gold(self, username, gold, prestige_benefits=True):
        """
        Store action: gives gold to the specified player, then saves to file.

        :param username: str - The player who you are modifying
        :param gold: float - How much gold to give that player
        :param prestige_benefits: bool - Whether this gold increase is affected by prestige bonuses
        """
        self.__add_gold(username, gold, prestige_benefits=prestige_benefits)
        self.save_player(username)
    def __add_exp(self, username, exp):
        """
        Raw store action: gives exp to the specified player without saving.

        :param username: str - The player who you are modifying
        :param exp: float - How much exp to give that player
        """
        self.players[username]['exp'] += exp
    def add_exp(self, username, exp):
        """
        Store action: gives exp to the specified player, then saves to file.

        :param username: str - The player who you are modifying
        :param exp: float - How much exp to give that player
        """
        self.__add_exp(username, exp)
        self.save_player(username)
def __add_item(self, username, item):
"""
Item to give to the specified player.
:param username: str - The player who you are modifying
:param item: str or list<str> - The name of the item(s) we are giving to the player
"""
if not isinstance(item, str):
# We must be a list of items
for single_item in item:
self.__add_item(username, single_item)
else:
if item not in self.players[username]['items']:
self.players[username]['items'][item] = 1
else:
self.players[username]['items'][item] += 1
    def add_item(self, username, item):
        """
        Store action: gives item(s) to the specified player, then saves to file.

        :param username: str - The player who you are modifying
        :param item: str or list<str> - The name of the item(s) we are giving to the player
        """
        self.__add_item(username, item)
        self.save_player(username)
    def __remove_item(self, username, item):
        """
        Raw store action: takes item(s) from the specified player without saving.

        Missing items are ignored; an item's entry is deleted once its count
        reaches zero.

        :param username: str - The player who you are modifying
        :param item: str or list<str> - The name of the item(s) we are taking from the player
        """
        if not isinstance(item, str):
            # We must be a list of items
            for single_item in item:
                self.__remove_item(username, single_item)
        else:
            # If we don't have the item, do nothing
            if item in self.players[username]['items']:
                self.players[username]['items'][item] -= 1
                if self.players[username]['items'][item] <= 0:
                    del self.players[username]['items'][item]
    def remove_item(self, username, item):
        """
        Store action: takes item(s) from the specified player, then saves to file.

        :param username: str - The player who you are modifying
        :param item: str or list<str> - The name of the item(s) we are taking from the player
        """
        self.__remove_item(username, item)
        self.save_player(username)
    def __reward(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
        """
        Raw store action: gives gold, exp and/or item(s) without saving.

        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to give that player
        :param exp: float - How much exp to give that player
        :param item: str or list<str> - item(s) to give, if any
        :param prestige_benefits: bool - Whether prestige bonuses amplify the gold
        """
        if not isinstance(username, str):
            # We must be a list of users
            for user in username:
                self.__reward(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
        else:
            self.__add_gold(username, gold, prestige_benefits=prestige_benefits)
            self.__add_exp(username, exp)
            if item:
                self.__add_item(username, item)
    def reward(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
        """
        Store action: gives gold, exp and/or item(s) to the player(s) and saves.

        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to give that player
        :param exp: float - How much exp to give that player
        :param item: str or list<str> - item(s) to give, if any
        :param prestige_benefits: bool - Whether prestige bonuses amplify the gold
        """
        if not isinstance(username, str):
            # We must be a list of users
            for user in username:
                self.reward(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
        else:
            self.__reward(username, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
            self.save_player(username)
    def __penalize(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
        """
        Raw store action: takes gold, exp and/or item(s) from the player(s)
        without saving. (Implemented as a negated reward plus item removal.)

        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to take from that player
        :param exp: float - How much exp to take from that player
        :param item: str or list<str> - item(s) to remove, if any
        :param prestige_benefits: bool - Forwarded to the gold adjustment
            (negative gold is never amplified by prestige)
        """
        if not isinstance(username, str):
            # We must be a list of users
            for user in username:
                self.__penalize(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
        else:
            self.__reward(username, gold=-gold, exp=-exp, item=None, prestige_benefits=prestige_benefits)
            if item:
                self.__remove_item(username, item)
    def penalize(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
        """
        Store action: takes gold, exp and/or item(s) from the player(s) and saves.

        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to take from that player
        :param exp: float - How much exp to take from that player
        :param item: str or list<str> - item(s) to remove, if any
        """
        if not isinstance(username, str):
            # We must be a list of users
            for user in username:
                self.penalize(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
        else:
            self.__penalize(username, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
            self.save_player(username)
    def get_gold(self, username):
        """
        Returns how much gold the given player has (read-only).

        :param username: str - The player being queried
        """
        return self.players[username]['gold']
def get_exp(self, username):
|
NicovincX2/Python-3.5 | Représentations graphiques/Bitmap/bitmap.py | Python | gpl-3.0 | 2,087 | 0.001437 | # -*- coding: utf-8 -*-
import os
from collections import namedtuple
from copy import copy
Colour = namedtuple('Colour', 'r,g,b')
# Give Colour a copy() method so cells can be duplicated uniformly.
Colour.copy = lambda self: copy(self)

black = Colour(0, 0, 0)
white = Colour(255, 255, 255)  # Colour ranges are not enforced.


class Bitmap():
    """A simple RGB raster stored as a 2-D list of Colour cells.

    The origin (0, 0) is the lower left; x grows rightwards, y upwards.
    """

    def __init__(self, width=40, height=40, background=white):
        assert width > 0 and height > 0 and type(background) == Colour
        self.width = width
        self.height = height
        self.background = background
        # map[y][x] holds the Colour of the pixel at (x, y).
        self.map = [[background.copy() for w in range(width)]
                    for h in range(height)]

    def fillrect(self, x, y, width, height, colour=black):
        """Fill the axis-aligned rectangle whose lower-left corner is (x, y)."""
        assert x >= 0 and y >= 0 and width > 0 and height > 0 and type(
            colour) == Colour
        for h in range(height):
            for w in range(width):
                self.map[y + h][x + w] = colour.copy()

    def chardisplay(self):
        """Print an ASCII rendering: '@' for non-background cells, boxed."""
        txt = [''.join(' ' if bit == self.background else '@'
                       for bit in row)
               for row in self.map]
        # Boxing; rows are reversed so y grows upwards on screen.
        txt = ['|' + row + '|' for row in txt]
        txt.insert(0, '+' + '-' * self.width + '+')
        txt.append('+' + '-' * self.width + '+')
        print('\n'.join(reversed(txt)))

    def set(self, x, y, colour=black):
        """Set the single pixel at (x, y)."""
        assert type(colour) == Colour
        self.map[y][x] = colour

    def get(self, x, y):
        """Return the Colour at (x, y)."""
        return self.map[y][x]
bitmap = Bitmap(20, 10)
bitmap.fillrect(4, 5, 6, 3)
assert bitmap.get(5, 5) == black
assert bitmap.get(0, 1) == white
bitmap.set(0, 1, black)
assert bitmap.get(0, 1) == black
bitmap.chardisplay()
'''
The origin, 0,0; is the lower left, with x increasing to the right,
and Y increasing upwards.
The program above produces the following display :
+--------------------+
| |
| |
| @@@@@@ |
| @@@@@@ |
| @@@@@@ |
| |
| |
| |
|@ |
| |
+--------------------+
'''
os.system("pause")
|
cdcf/time_tracker | app/auth/forms.py | Python | bsd-3-clause | 1,942 | 0.006179 | __author__ = 'Cedric Da Costa Faro'
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, validators
from wtforms.validators import Required, Length, Email, Reg | exp, EqualTo
from wtfor | ms import ValidationError
from ..models import User
# We allow here a user to be created within the app, however this user will NOT be an admin user.
# Users have to insert both an e-mail address which can be only unique and a username that we also want to be unique.
# We allow a user to register within the app; this user will NOT be an admin.
# Both the e-mail address and the username must be unique.
class RegistrationForm(Form):
    """Sign-up form: unique email + username, password entered twice."""
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    username = StringField('Username', validators=[Required(), Length(1, 64),
                                                   Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                          'Username must have only letters, '
                                                          'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[Required(), validators.Length(min=6, message=
        ('Please choose a password with at least 6 characters')), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        # Called automatically by WTForms for the 'email' field.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        # Called automatically by WTForms for the 'username' field.
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already registered.')
# We allow here an registered user to login into the app
# We allow a registered user to log into the app.
class LoginForm(Form):
    """Login form: email, password and a remember-me checkbox."""
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
|
pi19404/robosub-1 | src/movement/fuzzification/test/xbox_controller.py | Python | gpl-3.0 | 2,612 | 0.011103 | from xboxdrv_parser import Controller
from time import sleep
import argparse
import os
import sys
sys.path.append(os.path.abspath("../../.."))
from util.communication.grapevine import Communicator
from robosub_settings import settings
def main (args):
com = Communicator (args.module_name)
controller = Controller (["X1", "Y1", "X2", "Y2", "R2", "L2"], ["right/left", "forward/backward", "yaw", "pitch", "up", "down"], (0, 255), (-1, 1))
while True:
control_packet = controller.get_values ()
try:
outgoing_packet = {"right/left": 0.0, "forward/backward": 0.0, "yaw": 0.0, "pitch": 0.0, "up/down": 0.0, "roll": 0.0}
# Further parse controller values here
# Controller's sticks Y axis are switched
control_packet["forward/backward"] = -control_packet["forward/backward"]
control_packet["pitch"] = -control_packet["pitch"]
# Up and Down are not -1 to 1. Just 0 - 1
control_packet["up"] = controller.map_range(control_packet["up"], -1, 1, 0, 1)
control_packet["down"] = controller.map_range(control_packet["down"], -1, 1, 0, -1)
# Transferring to outgoing packet
outgoing_packet["forward/backward"] = control_packet["forward/backward"]
outgoing_packet["right/left"] = control_packet["right/left"]
outgoing_packet["up/down"] = control_packet["up"] + control_packet["down"]
outgoing_packet["yaw"] = control_packet["yaw"]
outgoing_packet["pitch"] = control_packet["pitch"]
#outgoing_packet["roll"] = control_packet["roll"]
outgoing_packet["roll"] = 0.0
# Controller sticks are not centered very well.
# TODO: Find a better way to do this (short of getting a new controller)
for key in outgoing_packet.keys ():
if abs (outgoing_packet[key]) < .10: outgoing_packet[key] = 0.0
print outgoing_packet
Fuzzy_Sets = {"Fuzzy | _Sets": outgoing_packet}
com.publish_message (Fuzzy_Sets)
except KeyError as i:
pass
sleep (args.epoch)
def commandline():
    """Parse command-line options.

    -e/--epoch: seconds to sleep per cycle (default 0.1).
    -m/--module_name: grapevine module name (default 'movement/fuzzification').
    """
    parser = argparse.ArgumentParser(description='Mock module.')
    parser.add_argument('-e', '--epoch', type=float,
                        default=0.1,
                        help='Sleep time per cycle.')
    parser.add_argument('-m', '--module_name', type=str,
                        default='movement/fuzzification',
                        help='Module name.')
    return parser.parse_args()
# Entry point: parse options, then publish controller packets forever.
if __name__ == '__main__':
    args = commandline()
    main(args)
|
containers-ftw/apps | tests/circle_urls.py | Python | bsd-3-clause | 812 | 0.013547 | #!/usr/bin/env python
'''
circle_urls.py will rename all url files to not have extension .html
'''
import sys
import os
from glob import glob
site_dir = os.path.abspath(sys.argv[1])
print("Using site directory %s" %(site_dir))
files = glob("%s/*.html" %(site_dir))
# For each f | ile, we need to replace all links to have correct .html extension
search_names = [os.path.basename(f).replace('.html','') for f in files]
for html_file in files:
with open(html_file,'r') as filey:
content = filey.read()
for search_name in search_names:
content = content.replace('%s"' %(search_name),'%s.html"' %(search_name))
content = content.replace('/images/logo/logo.png','https:// | sci-f.github.io/apps/assets/img/logo/logo.png')
with open(html_file,'w') as filey:
filey.write(content)
|
cwmartin/rez | src/rez/tests/test_copy_package.py | Python | lgpl-3.0 | 10,779 | 0 | """
test package copying
"""
import shutil
import time
import os.path
import os
from rez.system import system
from rez.build_process_ import create_build_process
from rez.build_system import create_build_system
from rez.resolved_context import ResolvedContext
from rez.packages_ import get_latest_package
from rez.package_copy import copy_package
from rez.vendor.version.version import VersionRange
from rez.tests.util import TestBase, TempdirMixin
class TestCopyPackage(TestBase, TempdirMixin):
    @classmethod
    def setUpClass(cls):
        """Copy the fixture package sources into a temp dir and set config."""
        TempdirMixin.setUpClass()

        path = os.path.dirname(__file__)
        packages_path = os.path.join(path, "data", "builds", "packages")

        cls.src_root = os.path.join(cls.root, "src", "packages")
        cls.install_root = os.path.join(cls.root, "packages")
        shutil.copytree(packages_path, cls.src_root)

        # repo we will copy packages into
        cls.dest_install_root = os.path.join(cls.root, "dest_packages")

        # include modules
        pypath = os.path.join(path, "data", "python", "late_bind")

        # Rez settings applied for every test in this class.
        cls.settings = dict(
            packages_path=[cls.install_root],
            package_filter=None,
            package_definition_python_path=pypath,
            resolve_caching=False,
            warn_untimestamped=False,
            warn_old_commands=False,
            implicit_packages=[])
    @classmethod
    def tearDownClass(cls):
        """Remove the temporary directories created in setUpClass."""
        TempdirMixin.tearDownClass()
    def setup_once(self):
        """Build the fixture packages shared by every test in this class."""
        # build packages used by this test
        self._build_package("build_util", "1")
        self._build_package("floob")
        self._build_package("foo", "1.0.0")
        self._build_package("foo", "1.1.0")
        self._build_package("bah", "2.1")
    @classmethod
    def _create_builder(cls, working_dir):
        """Return a local build process for the package in `working_dir`."""
        buildsys = create_build_system(working_dir)
        return create_build_process(process_type="local",
                                    working_dir=working_dir,
                                    build_system=buildsys)
    @classmethod
    def _build_package(cls, name, version=None):
        """Build the named (optionally versioned) source package and install it."""
        # create the builder
        working_dir = os.path.join(cls.src_root, name)
        if version:
            working_dir = os.path.join(working_dir, version)
        builder = cls._create_builder(working_dir)
        builder.build(install_path=cls.install_root, install=True, clean=True)
    def _reset_dest_repository(self):
        """Wipe and recreate the destination repository so each test starts clean."""
        system.clear_caches()
        if os.path.exists(self.dest_install_root):
            shutil.rmtree(self.dest_install_root)
        os.makedirs(self.dest_install_root)
    def _get_src_pkg(self, name, version):
        """Return the exact-version package from the source (install) repo."""
        return get_latest_package(
            name,
            range_=VersionRange("==" + version),
            paths=[self.install_root],
            error=True
        )
    def _get_dest_pkg(self, name, version):
        """Return the exact-version package from the destination repo."""
        return get_latest_package(
            name,
            range_=VersionRange("==" + version),
            paths=[self.dest_install_root],
            error=True
        )
    def _assert_copied(self, result, copied, skipped):
        """Assert a copy_package result has the expected copied/skipped counts."""
        self.assertEqual(len(result["copied"]), copied)
        self.assertEqual(len(result["skipped"]), skipped)
    def test_1(self):
        """Simple package copy, no variants, no overwrite."""
        self._reset_dest_repository()
        # make a copy of a package
        src_pkg = self._get_src_pkg("floob", "1.2.0")
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root
        )
        self._assert_copied(result, 1, 0)
        # check the copied package exists and matches
        dest_pkg = self._get_dest_pkg("floob", "1.2.0")
        result_variant = result["copied"][0][1]
        dest_variant = dest_pkg.iter_variants().next()
        self.assertEqual(dest_variant.handle, result_variant.handle)
        # record the payload ctime so we can detect unwanted overwrites below
        pyfile = os.path.join(dest_pkg.base, "python")
        ctime = os.stat(pyfile).st_ctime
        # copy again but with overwrite=False; should do nothing
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root
        )
        self._assert_copied(result, 0, 1)
        # check that package payload wasn't overwritten
        self.assertEqual(os.stat(pyfile).st_ctime, ctime)
    def test_2(self):
        """Package copy, no variants, overwrite."""
        self._reset_dest_repository()
        # make a copy of a package
        src_pkg = self._get_src_pkg("floob", "1.2.0")
        copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root
        )
        # record the payload ctime so we can prove the overwrite happened
        dest_pkg = self._get_dest_pkg("floob", "1.2.0")
        pyfile = os.path.join(dest_pkg.base, "python")
        ctime = os.stat(pyfile).st_ctime
        # overwrite same package copy
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root,
            overwrite=True
        )
        self._assert_copied(result, 1, 0)
        # check that package payload was overwritten
        self.assertNotEqual(os.stat(pyfile).st_ctime, ctime)
    def test_3(self):
        """Package copy, variants, overwrite and non-overwrite."""
        self._reset_dest_repository()
        # make a copy of a varianted package
        src_pkg = self._get_src_pkg("bah", "2.1")
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root
        )
        self._assert_copied(result, 2, 0)  # 2 variants
        # check the copied variants exist and match
        dest_pkg = self._get_dest_pkg("bah", "2.1")
        ctimes = []
        for index in (0, 1):
            result_variant = result["copied"][index][1]
            dest_variant = dest_pkg.get_variant(index)
            self.assertEqual(dest_variant.handle, result_variant.handle)
            # record each variant payload's ctime for the overwrite checks below
            pyfile = os.path.join(dest_variant.root, "python")
            ctime = os.stat(pyfile).st_ctime
            ctimes.append(ctime)
        # copy variant with no overwrite, should do nothing
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root,
            variants=[1]
        )
        self._assert_copied(result, 0, 1)
        # copy variant with overwrite
        result = copy_package(
            package=src_pkg,
            dest_repository=self.dest_install_root,
            variants=[1],
            overwrite=True
        )
        self._assert_copied(result, 1, 0)
        # check copied variant is the one we expect
        dest_pkg = self._get_dest_pkg("bah", "2.1")
        result_variant = result["copied"][0][1]
        dest_variant = dest_pkg.get_variant(1)
        self.assertEqual(dest_variant.handle, result_variant.handle)
        # check copied variant payload was overwritten
        pyfile = os.path.join(dest_variant.root, "python")
        self.assertNotEqual(os.stat(pyfile).st_ctime, ctimes[1])
        # check non-copied variant payload was not written
        skipped_variant = dest_pkg.get_variant(0)
        pyfile = os.path.join(skipped_variant.root, "python")
        self.assertEqual(os.stat(pyfile).st_ctime, ctimes[0])
def test_4(self):
"""Package copy with rename, reversion."""
self._reset_dest_repository()
# copy a package to a different name and version
src_pkg = self._get_src_pkg("floob", "1.2.0")
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
dest_name="flaab",
dest_version="5.4.1"
)
self._assert_copied(result, 1, 0)
# check copied variant is the one we expect
dest_pkg = sel | f._get_dest_pkg("flaab", "5.4.1")
result_variant = result["copied"][0][1]
dest_va | riant = dest_pkg.iter_variants().next()
self.assertEqual(dest_variant.handle, result_variant.handle)
def test_5(self):
"""Package copy with standard, new timestamp."""
self._reset_dest_repository()
# wait 1 second to guarantee newer timestamp in copied pkg
time.sleep(1)
# copy |
apache/incubator-airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_extraction.py | Python | apache-2.0 | 3,542 | 0.000847 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TEXT_BUCKET = os.environ.get(
"GCP_AUTOML_TEXT_BUCKET", "gs://INVALID BUCKET NAME/NL-entity/dataset.csv"
)
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_extraction_model_metadata": {},
}
# Example dataset
DATASET = {"display_name": "test_text_dataset", "text_extraction_dataset_metadata": {}}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TEXT_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Entities Extraction
with models.DAG(
    "example_automl_text",
    schedule_interval=None,  # Override to match your needs
    start_date=days_ago(1),
    user_defined_macros={"extract_object_id": extract_object_id},
    tags=['example'],
) as example_dag:
    # Create the AutoML dataset (degarbled operator name).
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
    )
    dataset_id = create_dataset_task.output['dataset_id']
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )
    MODEL["dataset_id"] = dataset_id
    create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
    model_id = create_model.output['model_id']
    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    import_dataset_task >> create_model
    delete_model_task >> delete_datasets_task
    # Task dependencies created via `XComArgs`:
    # create_dataset_task >> import_dataset_task
    # create_dataset_task >> create_model
    # create_model >> delete_model_task
    # create_dataset_task >> delete_datasets_task
|
LeotomasMC/TheDerpyMemeBot | Blank Bot/commands.py | Python | gpl-3.0 | 2,467 | 0.008918 | #commands.py
#diode's custom commands
# How to make a custom commands:
#
# In the function commands, type:
#
# if msg.startswith("<command>"):
#
# replace <command> with whatever you want to trigger the command.
#
#
#
# A list of functions:
# bot.chat.shrug(<message>)
# bot.chat.chat(<message>)
# bot.chat.ban(<username>)
# bot.chat.timeout(<username>, [time in seconds])
# bot.chat.purge(<username>)
# bot.chat.color(<TwitchDefaultColor|#<HEX color code>>) You can only use a hex color if your bots account has Twitch Prime or Twitch Turbo
# bot.p(<text>)
# A list of variables you might need:
# cfg.NICK # the bots name
#
if __name__ == "__main__":
import run
import sys
import socket
import time
#from config import debug as debug
#from config import admins as admins
import config as cfg
import re
from json import loads
from urllib.request import urlopen
#from bot import chat
import bot
true = True
false = False
version = "1.2"
afkList = []
def commands(user, message):
    """Dispatch custom chat commands found in *message* sent by *user*."""
    global all
    username = user
    msg = message.lower()
    # Only the first line of the message is parsed into space-separated args.
    arguments = message.split("\r\n")
    arguments = arguments[0].split(" ")
    if msg.startswith("!commandsversion"):
        bot.chat.chat("Advanced Custom Commands File Version: " + str(version))
    if bot.debug == true:  # DEBUG ONLY COMMANDS!!!
        if msg.startswith("blab"):
            bot.p("blab")
    # NORMAL COMMANDS
    if "blab" in msg:
        pass
    if msg.startswith("!wave"):
        try:
            # Wave at the named target; fall back to the sender if no arg given.
            bot.chat.chat("Lets all wave at " + arguments[1] + " ! Hi, " + arguments[1] + " !")
        except IndexError:
            bot.chat.chat("Lets all wave at " + username + " ! Hi, " + username + " !")
##        if str(arguments[1]).startswith("@"):
##            bot.chat("Lets all wave hello at " + arguments[1] + " ! Hello, " + arguments[1] + " !")
##        else:
##            bot.chat("Lets all wave hello at @" + arguments[1] + " ! Hello, @" + arguments[1] + " !")
    if msg.startswith("!afk"):
        # Toggle AFK status for the sender.
        if username in afkList:
            try:
                afkList.remove(username)
                bot.chat.chat(username + " is now back! Welcome back!")
            except ValueError:
                pass
        else:
            afkList.append(username)
            bot.chat.chat(username + " is now AFK. See you soon!")
|
ondergetekende/python-panavatar | panavatar/color_scheme.py | Python | mit | 5,222 | 0.000383 | import colorsys
def to_rgb(hsv):
    """Convert an HSV triple to a hexadecimal RGB string.

    Components are nominally in 0..1 (hue wraps around); the output is a
    six-digit hex value as used by CSS, HTML and SVG.
    """
    def to_byte(component):
        # Scale to 0..256 and clamp into the representable byte range.
        return int(min(255, max(0, component * 256)))

    red, green, blue = colorsys.hsv_to_rgb(*hsv)
    return "%02x%02x%02x" % (to_byte(red), to_byte(green), to_byte(blue))
def get_color_scheme(params):
    """Build a sampler function ``sample(coord, scheme=0) -> hex RGB string``
    from a randomly chosen base palette plus optional spatial filters."""
    # Choose a basic scheme.
    base_scheme = params.weighted_choice([(75, Monochrome),
                                          (5, Complement),
                                          (20, Adjacent)],
                                         "color_scheme")
    the_scheme = base_scheme(params)
    # Choose a spatial manipulation for the color.
    # Note: 'noise_vignette' deliberately matches both filters below.
    color_filter = params.weighted_choice([(10, 'noise'),
                                           (4, 'noise_vignette'),
                                           (2, 'vignette'),
                                           (1, 'radial_hue')],
                                          'color_filter')
    if "noise" in color_filter:
        the_scheme = ColorNoise(params, the_scheme)
    if "vignette" in color_filter:
        the_scheme = RadialDarken(params, the_scheme)
    if "radial_hue" in color_filter:
        the_scheme = RadialHue(params, the_scheme)
    def sample(coord, scheme=0):
        return to_rgb(the_scheme.color_at(coord, scheme))
    return sample
class BaseColorScheme():
    """Common lookup behaviour for palette-based colour schemes.

    Subclasses populate ``self.colors`` with HSV tuples; ``coord`` is
    accepted for interface compatibility with the spatial filters but
    is ignored here.
    """
    def color_at(self, coord, scheme):
        # Wrap the scheme index so any integer selects a palette entry.
        index = scheme % len(self.colors)
        return self.colors[index]
class Monochrome(BaseColorScheme):
    """A palette built from one base colour plus value (brightness)
    variations of it."""
    def __init__(self, params):
        hue = params.uniform("hue")
        saturation = params.uniform("saturation", .5, 1.)
        value = params.uniform("value", .3, 1.)
        # Base colour first, then lighter/darker value variations.
        self.colors = [(hue, saturation, value)]
        for delta in (.3, -.3, .1, -.1):
            self.colors.append((hue, saturation, value + delta))
class Complement(BaseColorScheme):
    """A palette of a base colour, its complement, and value variations."""
    def __init__(self, params):
        hue = params.uniform("hue")
        saturation = params.uniform("saturation", .5, 1.)
        value = params.uniform("value", .3, 1.)
        self.colors = [
            (hue, saturation, value),
            # Opposite side of the hue wheel (hue wraps in to_rgb).
            (hue + .5, saturation, value),
            (hue, saturation, value + .1),
            (hue, saturation, value - .1),
        ]
class Adjacent(BaseColorScheme):
    """A palette of a base colour, two adjacent hues, and value variations."""
    def __init__(self, params):
        base_color = (params.uniform("hue"),
                      params.uniform("saturation", .5, 1.),
                      params.uniform("value", .3, 1.))
        # Degarbled: "self.colors = | [" extraction artifact.
        self.colors = [
            base_color,
            (base_color[0] - .2, base_color[1], base_color[2]),
            (base_color[0] + .2, base_color[1], base_color[2]),
            (base_color[0], base_color[1], base_color[2] - .3),
            (base_color[0], base_color[1], base_color[2] + .1),
        ]
class ColorNoise:
    """Wrap a colour scheme, adding spatial perlin-noise variation to each
    HSV component."""
    def __init__(self, params, parent):
        self.parent = parent
        # Maximum amplitude of the per-component variation
        # (degarbled: "0, | .05" extraction artifact).
        hue = params.uniform("hue_variation", 0, .05)
        saturation = params.uniform("saturation_variation", 0, .2)
        value = params.uniform("value_variation", 0, .5)
        base_scale = .1 * abs(params.size)
        # One perlin sampler per HSV component, each in [-amplitude, +amplitude].
        self.samplers = [
            params.perlin("hue_variation_spatial", size=base_scale,
                          min_value=-hue, max_value=hue, octaves=1),
            params.perlin("saturation_variation_spatial", size=base_scale,
                          min_value=-saturation, max_value=saturation, octaves=1),
            params.perlin("value_variation_spatial", size=base_scale,
                          min_value=-value, max_value=value, octaves=1),
        ]
    def color_at(self, coord, scheme):
        # Returns a generator of perturbed components; callers unpack it.
        base_color = self.parent.color_at(coord, scheme)
        return (component + sampler(coord)
                for (component, sampler)
                in zip(base_color, self.samplers))
class RadialDarken:
    """Darken the value channel toward the image edges (vignette effect)."""
    def __init__(self, params, parent):
        self.parent = parent
        self.params = params
        # Strength of the vignette; larger means darker edges.
        self.edge_amount = params.uniform("radial_darkness", .2, .7)
    def color_at(self, coord, scheme):
        hue, saturation, value = list(self.parent.color_at(coord, scheme))
        # Normalised distance from the image centre, 0 at centre, 1 at corners.
        center = self.params.size / 2.0
        distance = 2 * abs(coord - center) / abs(self.params.size)
        fade = 1.0 - min(1.0, distance * self.edge_amount)
        return (hue, saturation, value * fade)
class RadialHue:
    """Shift the hue channel progressively toward the image edges."""
    def __init__(self, params, parent):
        self.parent = parent
        self.params = params
        # NOTE(review): reuses the "radial_darkness" parameter key, same as
        # RadialDarken — possibly intended to be "radial_hue"; confirm before
        # changing, since the key seeds the generated value.
        self.edge_amount = params.uniform("radial_darkness", .1, .5)
    def color_at(self, coord, scheme):
        hue, saturation, value = list(self.parent.color_at(coord, scheme))
        # Normalised distance from the image centre, 0 at centre, 1 at corners.
        center = self.params.size / 2.0
        distance = 2 * abs(coord - center) / abs(self.params.size)
        fade = distance * self.edge_amount
        return (hue + fade, saturation, value)
|
frankchin/mezzanine | mezzanine/generic/views.py | Python | bsd-2-clause | 6,198 | 0 | from __future__ import unicode_literals
from future.builtins import str
from json import dumps
from string import punctuation
from django.apps import apps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.messages import error
from django.core.urlresolvers import reverse
from django.db.models import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.generic.forms import ThreadedCommentForm, RatingForm
from mezzanine.generic.models import Keyword
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.views import render, set_cookie, is_spam
from mezzanine.utils.importing import import_dotted_path
@staff_member_required
def admin_keywords_submit(request):
    """
    Adds any new given keywords from the custom keywords field in the
    admin, and returns their IDs for use when saving a model with a
    keywords field.

    Response body is "id1,id2|title1, title2".
    """
    keyword_ids, titles = [], []
    remove = punctuation.replace("-", "")  # Strip punctuation, allow dashes.
    for title in request.POST.get("text_keywords", "").split(","):
        title = "".join([c for c in title if c not in remove]).strip()
        if title:
            # Degarbled: "title=title | )" extraction artifact.
            kw, created = Keyword.objects.get_or_create_iexact(title=title)
            keyword_id = str(kw.id)
            # Avoid duplicates when the same keyword is given twice.
            if keyword_id not in keyword_ids:
                keyword_ids.append(keyword_id)
                titles.append(title)
    return HttpResponse("%s|%s" % (",".join(keyword_ids), ", ".join(titles)))
def initial_validation(request, prefix):
    """
    Returns the related model instance and post data to use in the
    comment/rating views below.
    Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``
    setting. If this is ``True`` and the user is unauthenticated, we
    store their post data in their session, and redirect to login with
    the view's url (also defined by the prefix arg) as the ``next``
    param. We can then check the session data once they log in,
    and complete the action authenticated.
    On successful post, we pass the related object and post data back,
    which may have come from the session, for each of the comments and
    ratings view functions to deal with as needed.
    """
    post_data = request.POST
    login_required_setting_name = prefix.upper() + "S_ACCOUNT_REQUIRED"
    posted_session_key = "unauthenticated_" + prefix
    redirect_url = ""
    if getattr(settings, login_required_setting_name, False):
        if not request.user.is_authenticated():
            # Stash the post data so the action can complete after login.
            request.session[posted_session_key] = request.POST
            error(request, _("You must be logged in. Please log in or "
                             "sign up to complete this action."))
            redirect_url = "%s?next=%s" % (settings.LOGIN_URL, reverse(prefix))
        elif posted_session_key in request.session:
            # User logged in since posting; recover the stashed post data.
            post_data = request.session.pop(posted_session_key)
    if not redirect_url:
        # Resolve the target object from "app_label.model" + object_pk.
        model_data = post_data.get("content_type", "").split(".", 1)
        if len(model_data) != 2:
            return HttpResponseBadRequest()
        try:
            model = apps.get_model(*model_data)
            obj = model.objects.get(id=post_data.get("object_pk", None))
        except (TypeError, ObjectDoesNotExist, LookupError):
            redirect_url = "/"
    if redirect_url:
        if request.is_ajax():
            return HttpResponse(dumps({"location": redirect_url}))
        else:
            return redirect(redirect_url)
    return obj, post_data
def comment(request, template="generic/comments.html", extra_context=None):
    """
    Handle a ``ThreadedCommentForm`` submission and redirect back to its
    related object.
    """
    response = initial_validation(request, "comment")
    if isinstance(response, HttpResponse):
        # A redirect/bad-request response was produced by validation.
        return response
    obj, post_data = response
    form_class = import_dotted_path(settings.COMMENT_FORM_CLASS)
    form = form_class(request, obj, post_data)
    if form.is_valid():
        url = obj.get_absolute_url()
        if is_spam(request, form, url):
            # Silently discard spam: redirect without saving the comment.
            return redirect(url)
        comment = form.save(request)
        response = redirect(add_cache_bypass(comment.get_absolute_url()))
        # Store commenter's details in a cookie for 90 days.
        for field in ThreadedCommentForm.cookie_fields:
            cookie_name = ThreadedCommentForm.cookie_prefix + field
            cookie_value = post_data.get(field, "")
            set_cookie(response, cookie_name, cookie_value)
        return response
    elif request.is_ajax() and form.errors:
        return HttpResponse(dumps({"errors": form.errors}))
    # Show errors with stand-alone comment form.
    context = {"obj": obj, "posted_comment_form": form}
    context.update(extra_context or {})
    response = render(request, template, context)
    return response
def rating(request):
    """
    Handle a ``RatingForm`` submission and redirect back to its
    related object.
    """
    response = initial_validation(request, "rating")
    if isinstance(response, HttpResponse):
        # A redirect/bad-request response was produced by validation.
        return response
    obj, post_data = response
    url = add_cache_bypass(obj.get_absolute_url().split("#")[0])
    response = redirect(url + "#rating-%s" % obj.id)
    rating_form = RatingForm(request, obj, post_data)
    if rating_form.is_valid():
        rating_form.save()
        if request.is_ajax():
            # Reload the object and return the rating fields as json.
            obj = obj.__class__.objects.get(id=obj.id)
            rating_name = obj.get_ratingfield_name()
            json = {}
            for f in ("average", "count", "sum"):
                json["rating_" + f] = getattr(obj, "%s_%s" % (rating_name, f))
            response = HttpResponse(dumps(json))
        # Track the user's ratings in a cookie so they can be undone.
        if rating_form.undoing:
            ratings = set(rating_form.previous) ^ set([rating_form.current])
        else:
            ratings = rating_form.previous + [rating_form.current]
        set_cookie(response, "mezzanine-rating", ",".join(ratings))
    return response
|
shouldmakemusic/yaas | helper/TrackHelper.py | Python | gpl-2.0 | 5,564 | 0.009166 | # Copyright (C) 2015 Manuel Hirschauer (manuel@hirschauer.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Manuel Hirschauer <manuel@hirschauer.net>
"""
TrackHelper provides easy access to the track functions
"""
from YaasHelper import *
class TrackHelper(YaasHelper):
    """
    TrackHelper provides easy access to the track functions of a
    single Live track (clips, devices, sends).
    """
    def __init__(self, yaas, track):
        YaasHelper.__init__(self, yaas)
        self.last_played_clip = None
        # if the track given is a number
        if isinstance(track, int):
            if track == CURRENT:
                # get the current track
                self._track = self.song_helper().get_selected_track()
            else:
                # or the track with this index
                self._track = self.song_helper().get_track(track)
        else:
            # we got the track object
            self._track = track
        # get the index of this track (matched by name in the full track list)
        all_tracks = self.song_helper().get_all_tracks_including_return_and_master()
        self.log.verbose('looking for track ' + self._track.name)
        for i in range(len(all_tracks)):
            if all_tracks[i].name == self._track.name:
                self._track_index = i
                return
    def get_track(self):
        """Return the wrapped Live track object."""
        return self._track
    def get_name(self):
        """Return the track name."""
        return self._track.name
    def get_track_index(self):
        """Return the index of this track in the full track list."""
        return self._track_index
    def get_devices(self):
        """Return all devices on this track."""
        return self._track.devices
    def get_device(self, name):
        """Return the (last) device with the given name, or None."""
        device = None
        for i in range(len(self._track.devices)):
            dname = self._track.devices[i].name
            if dname == name:
                device = self._track.devices[i]
        return device
    def get_device_for_id(self, id):
        """Return the device at the given index, or None if out of range.

        Degarbled: "all_track | s" extraction artifact.
        """
        all_tracks = self.song_helper().get_all_tracks()
        trackIndex = self._track_index
        if id < len(all_tracks[trackIndex].devices):
            return all_tracks[trackIndex].devices[id]
        return None
    def get_selected_device(self):
        """Return the currently selected device on this track."""
        device = self._track.view.selected_device
        return device
    def get_send_value(self, index):
        """Return the value of send `index`, or None if index is None."""
        if index is None:
            return
        return self._track.mixer_device.sends[index].value
    def set_send_value(self, index, value):
        """Set send `index` to `value`, clamped to the send's allowed range.

        Degarbled: "set_sen | d_value" extraction artifact in the method name.
        """
        if index is None:
            return
        value = float(value)
        max_value = self._track.mixer_device.sends[index].max
        min_value = self._track.mixer_device.sends[index].min
        if value > max_value:
            value = max_value
        if value < min_value:
            value = min_value
        self._track.mixer_device.sends[index].value = value
    def fire(self, clip_number):
        """
        Fires clip with the given number
        """
        if clip_number < len(self._track.clip_slots):
            self._track.clip_slots[clip_number].fire()
    def get_playing_clip(self):
        """
        Returns the currently playing (or recording/triggered) clip
        """
        track = self._track
        for i in range(len(track.clip_slots)):
            if track.clip_slots[i].is_playing or track.clip_slots[i].is_recording or track.clip_slots[i].is_triggered:
                return track.clip_slots[i].clip
        return None
    def stop_or_restart_clip(self):
        """
        If a clip is playing in this track - stop it and remember it
        If this method is called again and no clip is playing - start it again
        """
        self.log.verbose("Stopping clips for track " + self._track.name)
        track = self._track
        # before stopping - is some clip currently playing?
        was_playing = False
        for i in range(len(track.clip_slots)):
            if track.clip_slots[i].is_playing or track.clip_slots[i].is_recording or track.clip_slots[i].is_triggered:
                # remember clip slot number
                self.last_played_clip = i
                was_playing = True
        if was_playing:
            # stop
            track.stop_all_clips()
            # if track was used in looper - free it
            # TODO: what was that again??????
            #if str(track_index) in self._looper_helper.emulatedLoopClip:
            #    del self._looper_helper.emulatedLoopClip[str(track_index)]
            #self.last_played_clip = None
        else:
            # if there is a remembered clip - play it
            if self.last_played_clip is not None:
                track.clip_slots[self.last_played_clip].fire()
|
toladata/TolaTables | silo/migrations/0019_auto_20170719_1146.py | Python | gpl-2.0 | 642 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-07-19 18:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add autopull/autopush expiration timestamps to the Read model."""

    dependencies = [
        ('silo', '0018_auto_20170710_1146'),
    ]

    operations = [
        # Degarbled: "model_name='re | ad'" extraction artifact.
        migrations.AddField(
            model_name='read',
            name='autopull_expiration',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Degarbled: "null=T | rue" extraction artifact.
        migrations.AddField(
            model_name='read',
            name='autopush_expiration',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
jawilson/Flexget | flexget/plugins/sites/frenchtorrentdb.py | Python | mit | 2,131 | 0.001408 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
log = logging.getLogger('FTDB')
class UrlRewriteFTDB(object):
    """FTDB RSS url_rewrite"""

    def url_rewritable(self, task, entry):
        """Return True for frenchtorrentdb.com RSS download urls."""
        if re.match(r'^http://www\.frenchtorrentdb\.com/[^/]+(?!/)[^/]+&rss=1', entry['url']):
            return True
        return False

    def url_rewrite(self, task, entry):
        """Rewrite the RSS DOWNLOAD url of `entry` to the real torrent url."""
        old_url = entry['url']
        # The INFOS page for the entry holds the real download link.
        page_url = old_url.replace('DOWNLOAD', 'INFOS')
        page_url = page_url.replace('&rss=1', '')
        new_url = self.parse_download_page(page_url, task.requests)
        log.debug('PAGE URL NEEDED : %s' % page_url)
        log.debug('%s OLD is rewrited to NEW %s' % (old_url, new_url))
        entry['url'] = new_url

    def parse_download_page(self, page_url, requests):
        """Fetch the INFOS page and extract the torrent download url.

        Raises UrlRewritingError when the page cannot be parsed, the user
        is not logged in, or the daily download quota is reached.
        """
        page = requests.get(page_url)
        try:
            soup = get_soup(page.text)
        except Exception as e:
            # Degarbled: "UrlRe | writingError" extraction artifact.
            raise UrlRewritingError(e)
        tag_a = soup.find("a", {"class": "dl_link"})
        if not tag_a:
            # Distinguish "not logged in" from "download quota reached".
            if soup.findAll(text="Connexion ?"):
                raise UrlRewritingError('You are not logged in, check if your '
                                        'cookie for authentication is up to date')
            else:
                raise UrlRewritingError('You have reached your download limit '
                                        'per 24hours, so I cannot get the torrent')
        torrent_url = ("http://www.frenchtorrentdb.com" + tag_a.get('href') + "&js=1")
        log.debug('TORRENT URL is : %s' % torrent_url)
        return torrent_url
@event('plugin.register')
def register_plugin():
    # Registered as a urlrewriter so FlexGet applies it to matching entries.
    plugin.register(UrlRewriteFTDB, 'frenchtorrentdb', interfaces=['urlrewriter'], api_ver=2)
|
tacaswell/dataportal | dataportal/muxer/data_muxer.py | Python | bsd-3-clause | 38,457 | 0.000364 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import namedtuple, deque
import logging
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
import pandas.core.groupby # to get custom exception
logger = logging.getLogger(__name__)
__all__ = ['DataMuxer', 'dataframe_to_dict']
class BinningError(Exception):
    """
    An exception to raise if there are insufficient sampling rules to
    upsample or downsample a data column into specified bins.
    """
    pass
class BadDownsamplerError(Exception):
    """
    An exception to raise if a downsampler produces unexpected output
    (e.g. the wrong number of rows for the requested bins).
    """
    pass
class ColSpec(namedtuple(
        'ColSpec', ['name', 'ndim', 'shape', 'upsample', 'downsample'])):
    """
    Named-tuple sub-class to validate the column specifications for the
    DataMuxer
    Parameters
    ----------
    name : hashable
    ndim : uint
        Dimensionality of the data stored in the column
    shape : tuple or None
        like ndarray.shape, where 0 or None are scalar
    upsample : {None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'ffill', 'bfill'}
        None means that each time bin must have at least one value.
        The names refer to kinds of scipy.interpolator. See documentation
        link below.
    downsample : None or a function
        None if the data cannot be downsampled (reduced). Otherwise,
        any callable that reduces multiple data points (of whatever dimension)
        to a single data point.
    References
    ----------
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
    """
    # These reflect the 'method' argument of pandas.DataFrame.fillna
    upsampling_methods = {'None', 'linear', 'nearest', 'zero', 'slinear',
                          'quadratic', 'cubic', 'ffill', 'bfill'}
    downsampling_methods = {'None', 'last', 'first', 'median', 'mean', 'sum',
                            'min', 'max'}
    # Map of string aliases to reducers; each reduces along axis 0.
    _downsample_mapping = {'last': lambda x: x[-1],
                           'first': lambda x: x[0],
                           # new in np 1.9
                           'median': lambda x: np.median(x, 0),
                           'mean': lambda x: np.mean(x, 0),
                           'sum': lambda x: np.sum(x, 0),
                           'min': lambda x: np.min(x, 0),
                           'max': lambda x: np.max(x, 0)}
    # No per-instance __dict__; all data lives in the namedtuple fields.
    __slots__ = ()
    def __new__(cls, name, ndim, shape, upsample, downsample):
        # Validations
        upsample = _validate_upsample(upsample)
        downsample = _validate_downsample(downsample)
        if int(ndim) < 0:
            raise ValueError("ndim must be positive not {}".format(ndim))
        if shape is not None:
            shape = tuple(shape)
        return super(ColSpec, cls).__new__(
            cls, name, int(ndim), shape, upsample, downsample)
def _validate_upsample(input):
# TODO The upsampling method could be any callable.
if input is None or input == 'None':
return 'None'
if not (input in ColSpec.upsampling_methods):
raise ValueError("{} is not a valid upsampling method. It "
"must be one of {}".format(
input, ColSpec.upsampling_methods))
return input.lower()
def _validate_downsample(input):
# TODO The downsampling methods could have string aliases like 'mean'.
if (input is not None) and (not (callable(input) or
input in ColSpec.downsampling_methods)):
raise ValueError("The downsampling method must be a callable, None, "
"or one of {}.".format(ColSpec.downsampling_methods))
if input is None:
return 'None'
re | turn input
class DataMuxer(object):
"""
This class provides a wrapper layer of signals and slots
around a pandas DataFrame to make plugging stuff in for live
view easier.
The data collection/event model being used is all measurements
(that is values that come off of the hardware) are time stamped
to ring time.
    The language being used throughout is that of pandas data frames.
The data model is that of a sparse table keyed on time stamps which
is 'densified' on demand by propagating measurements forwards. Not
all measurements (ex images) can be filled. This behavior is controlled
by the `col_info` tuple.
Parameters
----------
events : list
list of Events (any object with the expected attributes will do)
"""
class Planner(object):
        def __init__(self, dm):
            # Keep a reference to the owning DataMuxer so plans can read
            # its column metadata.
            self.dm = dm
def determine_upsample(self, interpolation=None, use_cols=None):
"Resolve (and if necessary validate) upsampling rules."
if interpolation is None:
interpolation = dict()
if use_cols is None:
use_cols = self.dm.columns
rules = dict()
for name in use_cols:
col_info = self.dm.col_info[name]
rule = _validate_upsample(
interpolation.get(name, col_info.upsample))
rule = _normalize_string_none(rule)
if (rule is not None) and (col_info.ndim > 0):
raise NotImplementedError(
"Only scalar data can be upsampled. "
"The {0}-dimensional source {1} was given the "
"upsampling rule {2}.".format(
col_info.ndim, name, rule))
rules[name] = rule
|
goblin/p2pool | p2pool/util/forest.py | Python | gpl-3.0 | 11,137 | 0.007542 | '''
forest data structure
'''
import itertools
import weakref
from p2pool.util import skiplist, variable
class TrackerSkipList(skiplist.SkipList):
    # Skip list over the shares held by a Tracker; entries are forgotten
    # automatically when the tracker reports a share as removed.
    def __init__(self, tracker):
        skiplist.SkipList.__init__(self)
        self.tracker = tracker
        # Register the removal watcher through a weakref so this skip list
        # is not kept alive by the tracker; when it is collected, the
        # weakref callback unregisters the watch.
        self_ref = weakref.ref(self, lambda _: tracker.removed.unwatch(watch_id))
        watch_id = self.tracker.removed.watch(lambda share: self_ref().forget_item(share.hash))
    def previous(self, element):
        # Hash of the element that precedes `element` in the chain
        # (the tail of its delta).
        return self.tracker.delta_type.from_element(self.tracker.shares[element]).tail
class DistanceSkipList(TrackerSkipList):
    # Answers "which hash is n steps back from `start`?" by composing
    # (from_hash, distance, to_hash) deltas. Python 2 only: these methods
    # use tuple-unpacking parameters.
    def get_delta(self, element):
        return element, 1, self.previous(element)
    def combine_deltas(self, (from_hash1, dist1, to_hash1), (from_hash2, dist2, to_hash2)):
        # Two deltas compose only when they meet end-to-end.
        if to_hash1 != from_hash2:
            raise AssertionError()
        return from_hash1, dist1 + dist2, to_hash2
    def initial_solution(self, start, (n,)):
        return 0, start
    def apply_delta(self, (dist1, to_hash1), (from_hash2, dist2, to_hash2), (n,)):
        if to_hash1 != from_hash2:
            raise AssertionError()
        return dist1 + dist2, to_hash2
    def judge(self, (dist, hash), (n,)):
        # 1: overshot the target distance, 0: exactly there, -1: keep going.
        if dist > n:
            return 1
        elif dist == n:
            return 0
        else:
            return -1
    def finalize(self, (dist, hash), (n,)):
        assert dist == n
        return hash
def get_attributedelta_type(attrs): # attrs: {name: func}
    # Build a delta class that accumulates the given named attributes
    # (each computed from an element by its func) over a chain segment
    # running from `head` down to `tail`.
    class ProtoAttributeDelta(object):
        __slots__ = ['head', 'tail'] + attrs.keys()
        @classmethod
        def get_none(cls, element_id):
            # Identity delta: a zero-length segment anchored at element_id.
            return cls(element_id, element_id, **dict((k, 0) for k in attrs))
        @classmethod
        def from_element(cls, share):
            # Single-element delta covering just this share.
            return cls(share.hash, share.previous_hash, **dict((k, v(share)) for k, v in attrs.iteritems()))
        def __init__(self, head, tail, **kwargs):
            self.head, self.tail = head, tail
            for k, v in kwargs.iteritems():
                setattr(self, k, v)
        def __add__(self, other):
            # Concatenate two adjacent segments.
            assert self.tail == other.head
            return self.__class__(self.head, other.tail, **dict((k, getattr(self, k) + getattr(other, k)) for k in attrs))
        def __sub__(self, other):
            # Cut away a shared prefix (same head) or suffix (same tail).
            if self.head == other.head:
                return self.__class__(other.tail, self.tail, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
            elif self.tail == other.tail:
                return self.__class__(self.head, other.head, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
            else:
                raise AssertionError()
        def __repr__(self):
            return '%s(%r, %r%s)' % (self.__class__, self.head, self.tail, ''.join(', %s=%r' % (k, getattr(self, k)) for k in attrs))
    ProtoAttributeDelta.attrs = attrs
    return ProtoAttributeDelta
# Default delta type: only tracks chain height (1 per element).
AttributeDelta = get_attributedelta_type(dict(
    height=lambda item: 1,
))
class Tracker(object):
    def __init__(self, shares=[], delta_type=AttributeDelta):
        # NOTE: the mutable default is safe here — `shares` is only iterated,
        # never mutated or stored.
        self.shares = {} # hash -> share
        self.reverse_shares = {} # delta.tail -> set of share_hashes
        self.heads = {} # head hash -> tail_hash
        self.tails = {} # tail hash -> set of head hashes
        self.deltas = {} # share_hash -> delta, ref
        self.reverse_deltas = {} # ref -> set of share_hashes
        self.ref_generator = itertools.count()
        self.delta_refs = {} # ref -> delta
        self.reverse_delta_refs = {} # delta.tail -> ref
        self.added = variable.Event()
        self.removed = variable.Event()
        # Skip list providing fast "n-th parent" lookups over this tracker.
        self.get_nth_parent_hash = DistanceSkipList(self)
        self.delta_type = delta_type
        for share in shares:
            self.add(share)
    def add(self, share):
        # Insert a share, merging or extending the head/tail chain maps.
        assert not isinstance(share, (int, long, type(None)))
        delta = self.delta_type.from_element(share)
        if delta.head in self.shares:
            raise ValueError('share already present')
        # If the new share sits below existing chains, adopt their heads;
        # otherwise it starts a new head of its own.
        if delta.head in self.tails:
            heads = self.tails.pop(delta.head)
        else:
            heads = set([delta.head])
        # If it extends an existing head, inherit that chain's tail;
        # otherwise walk down to find the last known ancestor.
        if delta.tail in self.heads:
            tail = self.heads.pop(delta.tail)
        else:
            tail = self.get_last(delta.tail)
        self.shares[delta.head] = share
        self.reverse_shares.setdefault(delta.tail, set()).add(delta.head)
        self.tails.setdefault(tail, set()).update(heads)
        if delta.tail in self.tails[tail]:
            self.tails[tail].remove(delta.tail)
        for head in heads:
            self.heads[head] = tail
        self.added.happened(share)
    def remove(self, share_hash):
        # Remove the share with `share_hash`, repairing the head/tail maps
        # and relocating any delta refs that pointed through it.
        assert isinstance(share_hash, (int, long, type(None)))
        if share_hash not in self.shares:
            raise KeyError()
        share = self.shares[share_hash]
        del share_hash
        delta = self.delta_type.from_element(share)
        children = self.reverse_shares.get(delta.head, set())
        # Case: the share is a registered head and its parent is a tail.
        if delta.head in self.heads and delta.tail in self.tails:
            tail = self.heads.pop(delta.head)
            self.tails[tail].remove(delta.head)
            if not self.tails[delta.tail]:
                self.tails.pop(delta.tail)
        # Case: the share is a head (parent not a tail).
        elif delta.head in self.heads:
            tail = self.heads.pop(delta.head)
            self.tails[tail].remove(delta.head)
            if self.reverse_shares[delta.tail] != set([delta.head]):
                pass # has sibling
            else:
                # Parent becomes the new head of this chain.
                self.tails[tail].add(delta.tail)
                self.heads[delta.tail] = tail
        # Case: the share is a tail with at most one child chain above it.
        elif delta.tail in self.tails and len(self.reverse_shares[delta.tail]) <= 1:
            # move delta refs referencing children down to this, so they can be moved up in one step
            if delta.tail in self.reverse_delta_refs:
                for x in list(self.reverse_deltas.get(self.reverse_delta_refs.get(delta.head, object()), set())):
                    self.get_last(x)
                assert delta.head not in self.reverse_delta_refs, list(self.reverse_deltas.get(self.reverse_delta_refs.get(delta.head, None), set()))
            heads = self.tails.pop(delta.tail)
            for head in heads:
                self.heads[head] = delta.head
            self.tails[delta.head] = set(heads)
            # move ref pointing to this up
            if delta.tail in self.reverse_delta_refs:
                assert delta.head not in self.reverse_delta_refs, list(self.reverse_deltas.get(self.reverse_delta_refs.get(delta.head, object()), set()))
                ref = self.reverse_delta_refs[delta.tail]
                cur_delta = self.delta_refs[ref]
                assert cur_delta.tail == delta.tail
                # Shrink the referenced delta so it now ends at this share's head.
                self.delta_refs[ref] = cur_delta - self.delta_type.from_element(share)
                assert self.delta_refs[ref].tail == delta.head
                del self.reverse_delta_refs[delta.tail]
                self.reverse_delta_refs[delta.head] = ref
        else:
            raise NotImplementedError()
        # delete delta entry and ref if it is empty
        if delta.head in self.deltas:
            delta1, ref = self.deltas.pop(delta.head)
            self.reverse_deltas[ref].remove(delta.head)
            if not self.reverse_deltas[ref]:
                del self.reverse_deltas[ref]
                delta2 = self.delta_refs.pop(ref)
                del self.reverse_delta_refs[delta2.tail]
        self.shares.pop(delta.head)
        self.reverse_shares[delta.tail].remove(delta.head)
        if not self.reverse_shares[delta.tail]:
            self.reverse_shares.pop(delta.tail)
        self.removed.happened(share)
    def get_height(self, share_hash):
        # Number of elements in the chain ending at share_hash.
        return self.get_delta(share_hash).height
    def get_work(self, share_hash):
        # NOTE(review): requires a delta_type whose attrs include 'work';
        # the default AttributeDelta only provides 'height'.
        return self.get_delta(share_hash).work
def get_ |
DIRACGrid/DIRAC | src/DIRAC/Core/DISET/private/InnerRPCClient.py | Python | gpl-3.0 | 2,922 | 0.000342 | """ This module hosts the logic for executing an RPC call.
"""
from DIRAC.Core.DISET.private.BaseClient import BaseClient
from DIRAC.Core.Utilities.ReturnValues import S_OK
from DIRAC.Core.Utilities.DErrno import cmpError, ENOAUTH
class InnerRPCClient(BaseClient):
    """This class instruments the BaseClient to perform RPC calls.
    At every RPC call, this class:
    * connects
    * proposes the action
    * sends the method parameters
    * retrieve the result
    * disconnect
    """
    # Number of times we retry the call.
    # The connection retry is handled by BaseClient
    # NOTE(review): this counter is incremented on network errors and never
    # reset after a success, so each instance retries at most 3 times over
    # its whole lifetime — confirm this is intended.
    __retry = 0
    def executeRPC(self, functionName, args):
        """Perform the RPC call, connect before and disconnect after.
        :param functionName: name of the function
        :param args: arguments to the function
        :return: in case of success, the return of the server call. In any case
            we add the connection stub to it.
        """
        retVal = self._connect()
        # Generate the stub which contains all the connection and call options
        # JSON: cast args to list for serialization purposes
        stub = [self._getBaseStub(), functionName, list(args)]
        if not retVal["OK"]:
            retVal["rpcStub"] = stub
            return retVal
        # Get the transport connection ID as well as the Transport object
        trid, transport = retVal["Value"]
        try:
            # Handshake to perform the RPC call for functionName
            retVal = self._proposeAction(transport, ("RPC", functionName))
            if not retVal["OK"]:
                if cmpError(retVal, ENOAUTH): # This query is unauthorized
                    retVal["rpcStub"] = stub
                    return retVal
                else: # we have network problem or the service is not responding
                    if self.__retry < 3:
                        self.__retry += 1
                        # Recursive retry performs a full reconnect.
                        return self.executeRPC(functionName, args)
                    else:
                        retVal["rpcStub"] = stub
                        return retVal
            # Send the arguments to the function
            # Note: we need to convert the arguments to list
            # We do not need to deseralize it because variadic functions
            # can work with list too
            retVal = transport.sendData(S_OK(list(args)))
            if not retVal["OK"]:
                return retVal
            # Get the result of the call and append the stub to it
            # Note that the RPC timeout basically ticks here, since
            # the client waits for data for as long as the server side
            # processes the request.
            receivedData = transport.receiveData()
            if isinstance(receivedData, dict):
                receivedData["rpcStub"] = stub
            return receivedData
        finally:
            self._disconnect(trid)
|
fblupi/master_informatica-SSBW | tarea8_bis/sitio_web/restaurantes/views.py | Python | gpl-3.0 | 4,124 | 0.005096 | from django.shortcuts import render, HttpResponse, redirect
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from .models import restaurants, addr
from .forms import RestaurantForm
from lxml import etree
import datetime
import os.path
import logging
logger = logging.getLogger(__name__)
def index(request):
    """Render the site home page and log the visit."""
    timestamp = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(timestamp + " - se ha consultado la página de inicio")
    return render(request, 'restaurantes/index.html')
def list(request):
    """Show every restaurant, or a dedicated page when none exist yet."""
    # NOTE: the view name shadows the builtin list(); kept for URLconf
    # compatibility.
    queryset = restaurants.objects
    context = {"resta": queryset, "menu": "list"}
    timestamp = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(timestamp + " - se ha consultado la lista de restaurantes")
    template = 'restaurantes/list.html' if queryset else 'restaurantes/list_empty.html'
    return render(request, template, context)
def search(request):
    """List restaurants whose cuisine contains the requested term."""
    cuisine_term = request.GET.get('cocina')
    matches = restaurants.objects(cuisine__icontains=cuisine_term)
    timestamp = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(timestamp + " - se han consultado los restaurantes con cocina " + cuisine_term)
    return render(request, 'restaurantes/list.html', {"resta": matches})
def restaurant(request, id):
    """Show the detail page for the restaurant with the given id."""
    selected = restaurants.objects(restaurant_id=id)[0]
    context = {"resta": selected, "host": request.get_host()}
    timestamp = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(timestamp + " - se ha consultado el restaurante con id " + str(selected.restaurant_id))
    return render(request, 'restaurantes/restaurant.html', context)
@login_required
def add(request):
    # Create a new restaurant. GET renders the empty form; a valid POST
    # geocodes the restaurant through the Google Maps XML API and saves it.
    if request.method == "POST":
        form = RestaurantForm(request.POST, request.FILES)
        if form.is_valid():
            name = form.cleaned_data['name']
            city = form.cleaned_data['city']
            cuisine = form.cleaned_data['cuisine']
            borough = form.cleaned_data['borough']
            api_base_url = 'http://maps.googleapis.com/maps/api/geocode/xml?address='
            req = api_base_url + name + city
            # lxml fetches the URL directly and parses the XML response.
            tree = etree.parse(req)
            addressXML = tree.xpath('//address_component')
            locationXML = tree.xpath('//location')
            # NOTE(review): these '//'-anchored XPaths search the whole
            # document (not the component node) and compensate with matching
            # indices — fragile if the geocoder changes component order.
            buildingXML = addressXML[0].xpath('//long_name/text()')[0]
            streetXML = addressXML[1].xpath('//long_name/text()')[1]
            cityXML = addressXML[2].xpath('//long_name/text()')[2]
            zipcodeXML = int(addressXML[6].xpath('//long_name/text()')[6])
            coordXML = [float(locationXML[0].xpath('//lat/text()')[0]), float(locationXML[0].xpath('//lng/text()')[0])]
            a = addr(building=buildingXML, street=streetXML, city=cityXML, zipcode=zipcodeXML, coord=coordXML)
            r = restaurants()
            r.name = name
            # Sequential id derived from the current document count.
            r.restaurant_id = restaurants.objects.count() + 1
            r.cuisine = cuisine
            r.borough = borough
            r.address = a
            if len(request.FILES) != 0:
                r.photo.put(request.FILES['photo'], content_type = request.FILES['photo'].content_type)
            r.save()
            logger.info(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') + " - se ha añadido el restaurante con id " + str(r.restaurant_id))
            return redirect('list')
    else:
        logger.info(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') + " - se ha consultado la página para agregar un restaurante")
        form = RestaurantForm();
    # Reached on GET and on an invalid POST (bound form re-rendered).
    context = {
        "form": form,
        "menu": "add"
    }
    return render(request, 'restaurantes/add.html', context)
def show_image(request, id):
    """Serve the stored photo for restaurant `id` with its image MIME type."""
    selected = restaurants.objects(restaurant_id=id)[0]
    payload = selected.photo.read()
    timestamp = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(timestamp + " - se ha consultado la imagen del restaurante con id " + str(selected.restaurant_id))
    return HttpResponse(payload, content_type="image/" + selected.photo.format)
def number_of_restaurants(request):
    """Return the restaurant count as JSON: {"n": <count>}."""
    return JsonResponse({'n': restaurants.objects.count()})
|
mulkieran/bytesize | src/bytesize/_size.py | Python | gpl-2.0 | 18,819 | 0.000531 | # Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
""" Size class, for creating instances of Size objects.
Contains a few documented methods and a number of __*__ methods
implementing arithmetic operations. Precise numeric types
such as int and Decimal may also occur in some arithmetic expressions,
but all occurrances of floating point numbers in arithmetic expressions
will cause an exception to be raised.
"""
from fractions import Fraction
import six
from ._config import SizeConfig
from ._errors import SizeFractionalResultError
from ._errors import SizeNonsensicalBinOpError
from ._errors import SizeNonsensicalBinOpValueError
from ._errors import SizePowerResultError
from ._errors import SizeValueError
from ._constants import B
from ._constants import BinaryUnits
from ._constants import DecimalUnits
from ._constants import PRECISE_NUMERIC_TYPES
from ._util.math_util import round_fraction
from ._util.misc import get_decimal_info
from ._util.misc import get_string_info
_BYTES_SYMBOL = "B"
class Size(object):
""" Class for instantiating Size objects. """
_FMT_STR = "".join([
"%(approx)s",
"%(sign)s",
"%(left)s",
"%(radix)s",
"%(right)s",
| " ",
"%(units)s",
"%(bytes)s"
])
def __init__(self, value=0, units | =None):
""" Initialize a new Size object.
:param value: a size value, default is 0
:type value: Size, or any finite numeric type (possibly as str)
:param units: the units of the size, default is None
:type units: any of the publicly defined units constants or a Size
:raises SizeValueError: on bad parameters
Must pass None as units argument if value has type Size.
The units number must be a precise numeric type.
"""
if isinstance(value, six.string_types) or \
isinstance(value, PRECISE_NUMERIC_TYPES):
try:
units = B if units is None else units
factor = getattr(units, 'magnitude', None) or int(units)
magnitude = Fraction(value) * factor
except (ValueError, TypeError):
raise SizeValueError(value, "value")
elif isinstance(value, Size):
if units is not None:
raise SizeValueError(
units,
"units",
"meaningless when Size value is passed"
)
magnitude = value.magnitude # pylint: disable=no-member
else:
raise SizeValueError(value, "value")
if SizeConfig.STRICT is True and magnitude.denominator != 1:
raise SizeFractionalResultError()
self._magnitude = magnitude
    @property
    def magnitude(self):
        """
        :returns: the number of bytes
        :rtype: Fraction
        """
        return self._magnitude
    def getDecimalInfo(self, config):
        """
        Get information for the decimal representation of ``self``.
        :param StrConfig config: the display configuration
        :returns: a tuple representing the value
        :rtype: tuple of int * int * list of int * list of int
        Components:
        1. sign, -1 if negative else 1
        2. portion on the left of the decimal point
        3. non-repeating portion to the right of the decimal point
        4. repeating portion to the right of the decimal point
        5. units specifier
        """
        # components() presumably scales the byte count into (value, unit)
        # per config — defined elsewhere in this class.
        (magnitude, units) = self.components(config)
        radix_num = get_decimal_info(magnitude)
        return (
            radix_num.sign,
            radix_num.left,
            radix_num.non_repeating,
            radix_num.repeating,
            units
        )
    def getStringInfo(self, config):
        """
        Return a representation of the size.
        :param :class:`StrConfig` config: representation configuration
        :returns: a tuple representing the string to display
        :rtype: tuple of bool * int * str * str * unit
        Components are:
        1. If true, the value is approximate
        2. -1 for a negative number, 1 for a positive
        3. a string with the decimal digits to the left of the decimal point
        4. a string with the decimal digits to the right of the decimal point
        5. a unit specifier
        """
        (magnitude, units) = self.components(config)
        # Rounding to max_places may make the displayed value approximate.
        (exact, sign, left, right) = get_string_info(
            magnitude,
            places=config.max_places
        )
        return (not exact, sign, left, right, units)
    def getString(self, config, display):
        """ Return a string representation of the size.
        :param :class:`StrConfig` config: representation configuration
        :param DisplayConfig display: configuration for display
        :returns: a string representation
        :rtype: str
        """
        (approx, sign, left, right, units) = self.getStringInfo(config)
        approx_str = display.approx_symbol \
           if approx and display.show_approx_str else ""
        if display.strip:
            right = right.rstrip('0')
        # The radix point is shown only if fractional digits survive
        # stripping; _FMT_STR assembles the final string.
        result = {
            'approx' : approx_str,
            'sign': "-" if sign == -1 else "",
            'left': left,
            'radix': '.' if right else "",
            'right' : right,
            'units' : units.abbr,
            'bytes' : _BYTES_SYMBOL
        }
        return self._FMT_STR % result
    def __str__(self):
        # Human-readable form using the globally configured display options.
        return self.getString(SizeConfig.STR_CONFIG, SizeConfig.DISPLAY_CONFIG)
    def __repr__(self):
        # Exact form; a repeating decimal is rendered like "Size(0.(3))".
        radix_num = get_decimal_info(self._magnitude)
        sign = "-" if radix_num.sign == -1 else ""
        if radix_num.non_repeating == [] and radix_num.repeating == []:
            return "Size(%s%s)" % (sign, radix_num.left)
        non_repeating = "".join(str(x) for x in radix_num.non_repeating)
        if radix_num.repeating == []:
            return "Size(%s%s.%s)" % (sign, radix_num.left, non_repeating)
        repeating = "".join(str(x) for x in radix_num.repeating)
        return "Size(%s%s.%s(%s))" % \
           (sign, radix_num.left, non_repeating, repeating)
    def __deepcopy__(self, memo):
        # pylint: disable=unused-argument
        # A fresh instance around the same Fraction is a sufficient copy.
        return Size(self._magnitude)
    def __nonzero__(self):
        # Python 2 truth value; __bool__ below delegates here.
        return self._magnitude != 0
    def __int__(self):
        return int(self._magnitude)
    __trunc__ = __int__
    def __hash__(self):
        return hash(self._magnitude)
    def __bool__(self):
        return self.__nonzero__()
    # UNARY OPERATIONS
    def __abs__(self):
        return Size(abs(self._magnitude))
    def __neg__(self):
        return Size(-(self._magnitude))
    def __pos__(self):
        return Size(self._magnitude)
    # BINARY OPERATIONS
    def __add__(self, other):
        # Only Size + Size is meaningful; adding plain numbers is rejected.
        if not isinstance(other, Size):
            raise SizeNonsensicalBinOpError("+", other)
        return Size(self._magnitude + other.magnitude)
    __radd__ = __add__
def __divmod__(self, other):
# other * div + rem = self
# Therefore, T(rem) = T(self) = Size
# T(div) = Size, if T(other) is numeric
# |
Flyingfox646/flyingfox | src/chunks/migrations/0007_spanish.py | Python | mit | 669 | 0.00299 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-29 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Spanish ('_es') variants of the chunk
    # title and content fields, following 0006 which added the French ones.
    dependencies = [
        ('chunks', '0006_french'),
    ]
    operations = [
        migrations.AddField(
            model_name='chunk',
            name='content_es',
            field=models.TextField(blank=True, null=True, verbose_name='content'),
        ),
        migrations.AddField(
            model_name='chunk',
            name='title_es',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='title'),
        ),
    ]
garbersc/keras-galaxies | output_corr.py | Python | bsd-3-clause | 1,543 | 0.051847 | # TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
TARGET_PATH = "data/solutions_train.npy"
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from termcolor import colored
import load_data
# Compute the pairwise correlation between the 37 galaxy-classification
# outputs of a saved prediction set, print the strongly correlated pairs
# (>0.7 highlighted in green, >0.3 plain) and save a heatmap image.
# Python 2 script (print statements, xrange).
output_names=["smooth","featureOrdisk","NoGalaxy","EdgeOnYes","EdgeOnNo","BarYes","BarNo","SpiralYes","SpiralNo","BulgeNo","BulgeJust","BulgeObvious","BulgDominant","OddYes","OddNo","RoundCompletly","RoundBetween","RoundCigar","Ring","Lense","Disturbed","Irregular","Other","Merger","DustLane","BulgeRound","BlulgeBoxy","BulgeNo2","SpiralTight","SpiralMedium","SpiralLoose","Spiral1Arm","Spiral2Arm","Spiral3Arm","Spiral4Arm","SpiralMoreArms","SpiralCantTell"]
#d = pd.read_csv(TRAIN_LABELS_PATH)
#targets = d.as_matrix()[1:, 1:].astype('float32')
targets=load_data.load_gz('predictions/final/augmented/valid/try_convent_continueAt0p02_next.npy.gz')
# Transpose so each row is one output across all samples.
targets=targets.T
output_corr=np.zeros((37,37))
print targets.shape
# Only the upper triangle is filled; the matrix is symmetric.
for i in xrange(0,37):
	for j in xrange(i,37):
		output_corr[i][j]=np.corrcoef(targets[i],targets[j])[0][1]
		if i!=j and np.abs(output_corr[i][j])>0.3:
			if np.abs(output_corr[i][j])>0.7: print colored("%s, %s: %s" %(output_names[i],output_names[j],output_corr[i][j]),'green')
			else: print("%s, %s: %s" %(output_names[i],output_names[j],output_corr[i][j]))
plt.imshow(output_corr, interpolation='none')
plt.colorbar()
plt.savefig("targetsCorrelation_valid.jpeg")
#print "Saving %s" % TARGET_PATH
#np.save(TARGET_PATH, targets)
|
willemneal/CallMe | callMe.py | Python | mit | 2,823 | 0.005668 | '''
The following is from http://stackoverflow.com/a/2022629
Thanks to Longpoke
'''
import types
class Event(list):
    """Event subscription.

    A list of callable objects. Calling an instance of this will cause a
    call to each item in the list in ascending order by index. If the
    event was constructed with ``repeat=False``, all handlers are
    discarded after the first invocation.

    Example usage::

        e = Event()
        e.append(handler)
        e(123)          # calls handler(123)
        e.remove(handler)
        e += (f, g)     # list concatenation registers several at once
        del e[0]        # handlers can also be dropped by index
    """
    def __init__(self, repeat=True):
        """:param repeat: if False, handlers fire once and are then cleared"""
        super(Event, self).__init__()
        self.repeat = repeat

    def __call__(self, *args, **kwargs):
        for handler in self:
            handler(*args, **kwargs)
        if not self.repeat:
            # BUG FIX: the original used map(lambda f: self.remove(f), self),
            # which mutates the list while iterating it (skipping entries)
            # and on Python 3 never runs at all, because map() is lazy and
            # its result was discarded. Clearing in place is the intent.
            del self[:]

    def remove(self, func):
        # Tolerant removal: silently ignore handlers that are not registered.
        if func in self:
            list.remove(self, func)

    def __repr__(self):
        # Show function/method names instead of their default reprs.
        items = [item.__name__
                 if isinstance(item, (types.FunctionType, types.MethodType))
                 else item
                 for item in self]
        return "Event %s" % list.__repr__(items)
class Listener(dict):
    # Maps event names -> Event lists of callbacks; also supports global
    # subscribers under the reserved key 'listeners'.
    def addSub(self, name, callback,repeat=True):
        '''sets self[name] to Event() if there is no key name.
        Either way self[name] is returned and callback is appended'''
        self.setdefault(name, Event(repeat)).append(callback)
    def removeSub(self, name, callback):
        # Drop a callback; the Event entry itself is removed once empty.
        if name in self:
            self[name].remove(callback)
            if len(self[name]) == 0:
                del self[name]
    def listen(self, event, repeat=True):
        # Decorator form of addSub: @listener.listen('event')
        def wrap(f):
            self.addSub(event, f,repeat)
            return f
        return wrap
    def trigger(self, event):
        # Decorator: fire `event` with the wrapped function's result after
        # every call.
        def wrap(f):
            def newFunc(*args, **kwargs):
                res = f(*args, **kwargs)
                self(event, res)
                return res
            return newFunc
        return wrap
    def __call__(self, event, *args, **kwargs):
        if event in self:
            self[event](*args, **kwargs)
            # A non-repeating Event empties itself after firing; prune it.
            if len(self[event])==0:
                self.removeSub(event,self[event])
        # Global subscribers receive every event, with its name prepended.
        if "listeners" in self:
            self['listeners'](event, *args, **kwargs)
    def __add__(self, listener):
        # NOTE(review): unconventional operator use — registers a global
        # listener and returns None, so `a + b` mutates `a`.
        self.addSub('listeners', listener)
    def __sub__(self, listener):
        self.removeSub('listeners', listener)
    def __repr__(self):
        return "Listener %s"% dict.__repr__(self)
    def getListeners(self):
        if "listeners" in self:
            return self['listeners']
        return None
    def isListener(self, listener):
        # NOTE(review): raises TypeError when no global listeners are
        # registered (getListeners() returns None) — confirm intended.
        return listener in self.getListeners()
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/washtenaw/configurations/jobs_event_model_configuration_creator.py | Python | gpl-2.0 | 2,232 | 0.010753 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.configuration import Configuration
class JobsEventModelConfigurationCreator(object):
    """Builds the Configuration dictionary that wires the washtenaw
    AgentEventModel to a jobs event set."""

    _model_name = 'agent_event_model'

    def __init__(self,
                 location_set = 'gridcell',
                 agent_set = 'job',
                 agent_event_set = 'jobs_event'):
        self.location_set = location_set
        self.agent_event_set = agent_event_set
        self.agent_set = agent_set

    def execute(self):
        """Return the assembled model configuration."""
        run_arguments = {
            'location_set': self.location_set,
            'agent_event_set': self.agent_event_set,
            'agent_set': self.agent_set,
            'current_year': 'year',
            'dataset_pool': 'dataset_pool'
        }
        return Configuration({
            'import': {
                'washtenaw.models.%s' % self._model_name: 'AgentEventModel'
            },
            'init': {'name': 'AgentEventModel'},
            'run': {'arguments': run_arguments}
        })
from opus_core.tests import opus_unittest
class TestDeletionEventModelConfigurationCreator(opus_unittest.OpusTestCase):
    # NOTE(review): class name says "Deletion" but it tests the Jobs
    # creator — looks like a copy-paste name; renaming would change test
    # discovery, so it is only flagged here.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_defaults(self):
        # The default-constructed creator must produce exactly this config.
        creator = JobsEventModelConfigurationCreator()
        expected = Configuration({
            'import': {
                'washtenaw.models.agent_event_model': 'AgentEventModel'
                },
            'init': {'name': 'AgentEventModel'},
            'run': {
                'arguments': {
                    'location_set': 'gridcell',
                    'agent_event_set': 'jobs_event',
                    'agent_set':'job',
                    'current_year': 'year',
                    'dataset_pool': 'dataset_pool'
                    }
                }
            })
        result = creator.execute()
        self.assertDictsEqual(result, expected)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    opus_unittest.main()
|
idrogeno/IdroMips | lib/python/Screens/ScanSetup.py | Python | gpl-2.0 | 71,927 | 0.029725 | from Screen import Screen
from ServiceScan import ServiceScan
from Components.config import config, ConfigSubsection, ConfigSelection, \
ConfigYesNo, ConfigInteger, getConfigListEntry, ConfigSlider, ConfigEnableDisable
from Components.ActionMap import NumberActionMap, ActionMap
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
from Components.ConfigList import ConfigListScreen
from Components.NimManager import nimmanager, getConfigSatlist
from Components.Label import Label
from Tools.HardwareInfo import HardwareInfo
from Tools.Transponder import getChannelNumber, supportedChannels, channel2frequency
from Screens.InfoBar import InfoBar
from Screens.MessageBox import MessageBox
from enigma import eTimer, eDVBFrontendParametersSatellite, eComponentScan, \
eDVBSatelliteEquipmentControl, eDVBFrontendParametersTerrestrial, \
eDVBFrontendParametersCable, eConsoleAppContainer, eDVBResourceManager
def buildTerTransponder(frequency,
	inversion=2, bandwidth = 7000000, fechigh = 6, feclow = 6,
	modulation = 2, transmission = 2, guard = 4,
	hierarchy = 4, system = 0, plp_id = 0):
	"""Create an eDVBFrontendParametersTerrestrial populated with the
	given DVB-T/T2 tuning values."""
	parm = eDVBFrontendParametersTerrestrial()
	for attr, value in (
			('frequency', frequency),
			('inversion', inversion),
			('bandwidth', bandwidth),
			('code_rate_HP', fechigh),
			('code_rate_LP', feclow),
			('modulation', modulation),
			('transmission_mode', transmission),
			('guard_interval', guard),
			('hierarchy', hierarchy),
			('system', system),
			('plp_id', plp_id)):
		setattr(parm, attr, value)
	return parm
def getInitialTransponderList(tlist, pos):
	# Append all known satellite transponders for orbital position `pos`
	# (as eDVBFrontendParametersSatellite) to tlist.
	list = nimmanager.getTransponders(pos)
	for x in list:
		if x[0] == 0: #SAT
			parm = eDVBFrontendParametersSatellite()
			parm.frequency = x[1]
			parm.symbol_rate = x[2]
			parm.polarisation = x[3]
			parm.fec = x[4]
			parm.inversion = x[7]
			parm.orbital_position = pos
			parm.system = x[5]
			parm.modulation = x[6]
			parm.rolloff = x[8]
			parm.pilot = x[9]
			tlist.append(parm)
def getInitialCableTransponderList(tlist, nim):
	# Append all configured cable transponders for tuner slot `nim`
	# (as eDVBFrontendParametersCable) to tlist.
	list = nimmanager.getTranspondersCable(nim)
	for x in list:
		if x[0] == 1: #CABLE
			parm = eDVBFrontendParametersCable()
			parm.frequency = x[1]
			parm.symbol_rate = x[2]
			parm.modulation = x[3]
			parm.fec_inner = x[4]
			parm.inversion = x[5]
			parm.system = x[6]
			tlist.append(parm)
def getInitialTerrestrialTransponderList(tlist, region, tsystem = eDVBFrontendParametersTerrestrial.System_DVB_T_T2, skip_t2 = False):
	# Append the region's terrestrial transponders to tlist, filtered by
	# the requested delivery system (DVB-T, DVB-T2 or both).
	list = nimmanager.getTranspondersTerrestrial(region)
	#self.transponders[self.parsedTer].append((2,freq,bw,const,crh,crl,guard,transm,hierarchy,inv))
	#def buildTerTransponder(frequency, inversion = 2, bandwidth = 3, fechigh = 6, feclow = 6,
	#modulation = 2, transmission = 2, guard = 4, hierarchy = 4):
	for x in list:
		if x[0] == 2: #TERRESTRIAL
			if skip_t2 and x[10] == eDVBFrontendParametersTerrestrial.System_DVB_T2:
				# Should be searching on TerrestrialTransponderSearchSupport.
				continue
			if tsystem == eDVBFrontendParametersTerrestrial.System_DVB_T_T2:
				# Keep the transponder's own system/plp settings.
				parm = buildTerTransponder(x[1], x[9], x[2], x[4], x[5], x[3], x[7], x[6], x[8], x[10], x[11])
			elif x[10] == eDVBFrontendParametersTerrestrial.System_DVB_T_T2 or x[10] == tsystem:
				# Force the requested system for T/T2-agnostic entries.
				parm = buildTerTransponder(x[1], x[9], x[2], x[4], x[5], x[3], x[7], x[6], x[8], tsystem, x[11])
			else:
				continue
			tlist.append(parm)
# Bitmask flags for the cable frequency bands understood by the external
# cable scan helpers.
cable_bands = {
	"DVBC_BAND_EU_VHF_I" : 1 << 0,
	"DVBC_BAND_EU_MID" : 1 << 1,
	"DVBC_BAND_EU_VHF_III" : 1 << 2,
	"DVBC_BAND_EU_SUPER" : 1 << 3,
	"DVBC_BAND_EU_HYPER" : 1 << 4,
	"DVBC_BAND_EU_UHF_IV" : 1 << 5,
	"DVBC_BAND_EU_UHF_V" : 1 << 6,
	"DVBC_BAND_US_LO" : 1 << 7,
	"DVBC_BAND_US_MID" : 1 << 8,
	"DVBC_BAND_US_HI" : 1 << 9,
	"DVBC_BAND_US_SUPER" : 1 << 10,
	"DVBC_BAND_US_HYPER" : 1 << 11,
}
# Map tuner hardware names to the external autoscan helper for cable...
cable_autoscan_nimtype = {
	'SSH108' : 'ssh108',
	'TT3L10' : 'tt3l10',
	'TURBO' : 'vuplus_turbo_c'
}
# ...and for terrestrial (DVB-T2-capable) scans.
terrestrial_autoscan_nimtype = {
	'SSH108' : 'ssh108_t2_scan',
	'TT3L10' : 'tt3l10_t2_scan',
	'TURBO' : 'vuplus_turbo_t'
}
def GetDeviceId(filter, nim_idx):
tuners={}
device_id = 0
socket_id = 0
for nim in nimmanager.nim_slots:
name_token = nim.description.split(' ')
name = name_token[-1][4:-1]
if name == filter:
if socket_id == nim_idx:
break
if device_id:
device_id = 0
else:
device_id = 1
socket_id += 1
return device_id
class CableTransponderSearchSupport:
def __init__(self):
pass
def tryGetRawFrontend(self, feid):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
raw_channel = res_mgr.allocateRawChannel(self.feid)
if raw_channel:
frontend = raw_channel.getFrontend()
if frontend:
frontend.closeFrontend() # immediate close...
del frontend
del raw_channel
return True
return False
def cableTransponderSearchSessionClosed(self, *val):
print "cableTransponderSearchSessionClosed, val", val
self.cable_search_container.appClosed.remove(self.cableTransponderSearchClosed)
self.cable_search_container.dataAvail.remove(self.getCableTransponderData)
if val and len(val):
if val[0]:
self.setCableTransponderSearchResult(self.__tlist)
else:
self.cable_search_container.sendCtrlC()
self.setCableTransponderSearchResult(None)
self.cable_search_container = None
self.cable_search_session = None
self.__tlist = None
self.cableTransponderSearchFinished()
def cableTransponderSearchClosed(self, retval):
print "cableTransponderSearch finished", retval
self.cable_search_session.close(True)
def getCableTransponderData(self, str):
#prepend any remaining data from the previous call
str = self.remainingdata + str
#split in lines
lines = str.split('\n')
#'str' should end with '\n', so when splitting, the last line should be empty. If this is not the case, we received an incomplete line
if len(lines[-1]):
#remember this data for next time
self.remainingdata = lines[-1]
lines = lines[0:-1]
else:
self.remainingdata = ""
for line in lines:
data = line.split()
if len(data):
if data[0] == 'OK' and data[4] != 'NOT_IMPLEMENTED':
print str
parm = eDVBFrontendParametersCable()
qam = { "QAM16" : parm.Modulation_QAM16,
"QAM32" : parm.Modulation_QAM32,
"QAM64" : parm.Modulation_QAM64,
"QAM128" : parm.Modulation_QAM128,
"QAM256" : parm.Modulation_QAM256 }
inv = { "INVERSION_OFF" : parm.Inversion_Off,
"INVERSION_ON" : parm.Inversion_On,
"INVERSION_AUTO" : parm.Inversion_Unknown }
fec = { "FEC_AUTO" : parm.FEC_Auto,
"FEC_1_2" : parm.FEC_1_2,
"FEC_2_3" : parm.FEC_2_3,
"FEC_3_4" : parm.FEC_3_4,
"FEC_5_6" : parm.FEC_5_6,
"FEC_7_8" : parm.FEC_7_8,
"FEC_8_9" : parm.FEC_8_9,
"FEC_3_5" : parm.FEC_3_5,
"FEC_4_5" : parm.FEC_4_5,
"FEC_9_10" : parm.FEC_9_10,
"FEC_NONE" : parm.FEC_None }
parm.frequency = int(data[1])
parm.symbol_rate = int(data[2])
parm.fec_inner = fec[data[3]]
parm.modulation = qam[data[4]]
parm.inversion = inv[data[5]]
self.__tlist.append(parm)
tmpstr = _("Try to find used transponders in cable network.. please wait...")
tmpstr += "\n\n"
tmpstr += data[1].isdigit() and "%s MHz " % (int(data[1]) / 1000.) or data[1]
tmpstr += data[0]
self.cable_search_session["text"].setText(tmpstr)
def startCableTransponderSearch(self, nim_idx):
def GetCommand(nim_idx):
global cable_autoscan_nimtype
try:
nim_name = nimmanager.getNimName(nim_idx)
if nim_name is not None and nim_name != "":
device_id = ""
nim_name = nim_name.split(' ')[-1][4:-1]
if nim_name == 'TT3L10':
try:
device_id = GetDeviceId('TT3L10', nim_idx)
device_id = "--device=%s" % (device_id)
except Exception, err:
print "GetCommand ->", err
device_id = "--device=0"
command = "%s %s" % (cable_autoscan_nimtype[nim_name], device_id)
return command
except Exception, err:
print "GetCommand ->", err
return "tda1002x"
if not self.tryGetRawFrontend(nim_idx):
self.session.na |
thefactory/marathon-python | marathon/models/events.py | Python | mit | 7,466 | 0.001339 | """
This module is used to translate Events from Marathon's EventBus system.
See:
* https://mesosphere.github.io/marathon/docs/event-bus.html
* https://github.com/mesosphere/marathon/blob/master/src/main/scala/mesosphere/marathon/core/event/Events.scala
"""
from marathon.models.base import MarathonObject
from marathon.models.app import MarathonHealthCheck
from marathon.models.task import MarathonIpAddress
from marathon.models.deployment import MarathonDeploymentPlan
from marathon.exceptions import MarathonError
class MarathonEvent(MarathonObject):
"""
The MarathonEvent base class handles the translation of Event objects sent by the
Marathon server into library MarathonObjects.
"""
KNOWN_ATTRIBUTES = []
attribute_name_to_marathon_object = { # Allows embedding of MarathonObjects inside events.
'health_check': MarathonHealthCheck,
'plan': MarathonDeploymentPlan,
'ip_address': MarathonIpAddress,
}
seq_name_to_singular = {
'ip_addresses': 'ip_address',
}
def __init__(self, event_type, timestamp, **kwargs):
self.event_type = event_type # All events have these two attributes
self.timestamp = timestamp
for attribute in self.KNOWN_ATTRIBUTES:
self._set(attribute, kwargs.get(attribute))
def __to_marathon_object(self, attribute_name, attribute):
if attribute_name in self.attribute_name_to_marathon_object:
clazz = self.attribute_name_to_marathon_object[attribute_name]
# If this attribute already has a Marathon object instantiate it.
attribute = clazz.from_json(attribute)
return attribute
def _set(self, attribute_name, attribute):
if not attribute:
return
# Special handling for lists...
if isinstance(attribute, list):
name = self.seq_name_to_singular.get(attribute_name)
attribute = [
self.__to_marathon_object(name, v)
for v in attribute
]
else:
attribute = self.__to_marathon_object(attribute_name, attribute)
setattr(self, attribute_name, attribute)
class MarathonApiPostEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'app_definition', 'uri']
class MarathonStatusUpdateEvent(MarathonEvent):
KNOWN_ATTRIBUTES = [
'slave_id', 'task_id', 'task_status', 'app_id', 'host', 'ports', 'version', 'message', 'ip_addresses']
class MarathonFrameworkMessageEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['slave_id', 'executor_id', 'message']
class MarathonSubscribeEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'callback_url']
class MarathonUnsubscribeEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'callback_url']
class MarathonAddHealthCheckEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'health_check', 'version']
class MarathonRemoveHealthCheckEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'health_check']
class MarathonFailedHealthCheckEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'health_check', 'task_id', 'instance_id']
class MarathonHealthStatusChangedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'health_check', 'task_id', 'instance_id', 'alive']
class MarathonGroupChangeSuccess(MarathonEvent):
KNOWN_ATTRIBUTES = ['group_id', 'version']
class MarathonGroupChangeFailed(MarathonEvent):
KNOWN_ATTRIBUTES = ['group_id', 'version', 'reason']
class MarathonDeploymentSuccess(MarathonEvent):
KNOWN_ATTRIBUTES = ['id']
class MarathonDeploymentFailed(MarathonEvent):
KNOWN_ATTRIBUTES = ['id']
class MarathonDeploymentInfo(MarathonEvent):
KNOWN_ATTRIBUTES = ['plan', 'current_step']
class MarathonDeploymentStepSuccess(MarathonEvent):
KNOWN_ATTRIBUTES = ['plan']
class MarathonDeploymentStepFailure(MarathonEvent):
KNOWN_ATTRIBUTES = ['plan']
class MarathonEventStreamAttached(MarathonEvent):
KNOWN_ATTRIBUTES = ['remote_address']
class MarathonEventStreamDetached(MarathonEvent):
KNOWN_ATTRIBUTES = ['remote_address']
class MarathonUnhealthyTaskKillEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'task_id', 'instance_id', 'version', 'reason']
class MarathonAppTerminatedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id']
class MarathonInstanceChangedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['instance_id', 'slave_id', 'condition', 'host', 'run_spec_id', 'run_spec_version']
class MarathonUnknownInstanceTerminated(MarathonEvent):
KNOWN_ATTRIBUTES = ['instance_id', 'run_spec_id', 'condition']
class MarathonInstanceHealthChangedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['instance_id', 'run_spec_id', 'run_spec_version', 'healthy']
class MarathonPodCreatedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'uri']
class MarathonPodUpdatedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'uri']
class MarathonPodDeletedEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['client_ip', 'uri']
class MarathonUnhealthyInstanceKillEvent(MarathonEvent):
KNOWN_ATTRIBUTES = ['app_id', 'task_id', 'instance_id', 'version', 'reason', 'host', 'slave_id']
class EventFactory:
"""
Handle an event emitted from the Marathon EventBus
See: https://mesosphere.github.io/marathon/docs/event-bus.html
"""
def __init | __(self):
pass
event_to_class = {
'api_post_event': MarathonApiPostEvent,
'status_update_event': MarathonStatusUpdateEvent,
'framework_message_event': MarathonFrameworkMessageEvent,
'subscribe_event': MarathonSubscribeEvent,
'unsubscribe_event': MarathonUnsubscribeEvent,
'add_health_check_event': MarathonAddHea | lthCheckEvent,
'remove_health_check_event': MarathonRemoveHealthCheckEvent,
'failed_health_check_event': MarathonFailedHealthCheckEvent,
'health_status_changed_event': MarathonHealthStatusChangedEvent,
'unhealthy_task_kill_event': MarathonUnhealthyTaskKillEvent,
'group_change_success': MarathonGroupChangeSuccess,
'group_change_failed': MarathonGroupChangeFailed,
'deployment_success': MarathonDeploymentSuccess,
'deployment_failed': MarathonDeploymentFailed,
'deployment_info': MarathonDeploymentInfo,
'deployment_step_success': MarathonDeploymentStepSuccess,
'deployment_step_failure': MarathonDeploymentStepFailure,
'event_stream_attached': MarathonEventStreamAttached,
'event_stream_detached': MarathonEventStreamDetached,
'app_terminated_event': MarathonAppTerminatedEvent,
'instance_changed_event': MarathonInstanceChangedEvent,
'unknown_instance_terminated_event': MarathonUnknownInstanceTerminated,
'unhealthy_instance_kill_event': MarathonUnhealthyInstanceKillEvent,
'instance_health_changed_event': MarathonInstanceHealthChangedEvent,
'pod_created_event': MarathonPodCreatedEvent,
'pod_updated_event': MarathonPodUpdatedEvent,
'pod_deleted_event': MarathonPodDeletedEvent,
}
class_to_event = {v: k for k, v in event_to_class.items()}
def process(self, event):
event_type = event['eventType']
if event_type in self.event_to_class:
clazz = self.event_to_class[event_type]
return clazz.from_json(event)
else:
raise MarathonError(f'Unknown event_type: {event_type}, data: {event}')
|
pwndbg/pwndbg | pwndbg/which.py | Python | mit | 3,111 | 0.001607 | # This license covers everything within this project, except for a few pieces
# of code that we either did not write ourselves or which we derived from code
# that we did not write ourselves. These few pieces have their license specified
# in a header, or by a file called LICENSE.txt, which will explain exactly what
# it covers. The few relevant pieces of code are all contained inside these
# directories:
#
# - pwnlib/constants/
# - pwnlib/data/
#
#
# Copyright (c) 2015 Gallopsled and Zach Riggle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import stat
def which(name, all = False):
"""which(name, flags = os.X_OK, all = False) -> str or str set
Works as the system command ``which``; searches $PATH for ``name`` and
returns a full path if found.
If `all` is :const:`True` the set of all found locations is returned, else
the first occurrence or :const:`None` is returned.
Arguments:
`name` (str): The file to search for.
`all` (bool): Whether to return all locations where `name` was found.
Returns:
If `all` is :const:`True` the set of all locations where `name` was found | ,
else the first location or :const:`None` if not found.
Example:
>>> which('sh')
'/bin/sh'
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep i | n name:
return name
isroot = os.getuid() == 0
out = set()
try:
path = os.environ['PATH']
except KeyError:
log.exception('Environment variable $PATH is not set')
for p in path.split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, os.X_OK):
st = os.stat(p)
if not stat.S_ISREG(st.st_mode):
continue
# work around this issue: https://bugs.python.org/issue9311
if isroot and not \
st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
continue
if all:
out.add(p)
else:
return p
if all:
return out
else:
return None
|
unhammer/gt-CorpusTools | corpustools/test/test_analyser.py | Python | gpl-3.0 | 8,941 | 0.000338 | # -*- coding: utf-8 -*-
#
# This file contains a class to analyse text in giellatekno xml format
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2013-2014 Børre Gaup <borre.gaup@uit.no>
#
from __future__ import unicode_literals
import os
import unittest
import doctest
from lxml import etree
from lxml import doctestcompare
from corpustools import analyser
from corpustools import parallelize
from corpustools import util
here = os.path.dirname(__file__)
class TestAnalyser(unittest.TestCase):
def setUp(self):
self.a = analyser.Analyser(
'sme',
'xfst',
fst_file=os.path.join(here, 'analyser.xfst'),
disambiguation_analysis_file=os.path.join(here,
'disambiguation.cg3'),
function_analysis_file=os.path.join(here, 'functions.cg3'),
dependency_analysis_file=os.path.join(here, 'dependency.cg3'))
self.a.xml_file = parallelize.CorpusXMLFile(
os.path.join(here, 'smefile.xml'))
def assertXmlEqual(self, got, want):
"""Check if two stringified xml snippets are equal
"""
checker = doctestcompare.LXMLOutputChecker()
if not checker.check_output(want, got, 0):
message = checker.output_difference(
doctest.Example("", want), got, 0).encode('utf-8')
raise AssertionError(message)
def test_raise_on_None_file(self):
with self.assertRaises(TypeError):
analyser.Analyser('sme', 'xfst', None, None, None, None)
def test_raise_on_bad_file(self):
with self.assertRaises(util.ArgumentError):
analyser.Analyser('sme',
'xfst',
fst_file=os.path.join(here, 'notafile'),
disambiguation_analysis_file=os.path.join(here,
'notafile'),
function_analysis_file=os.path.join(here, 'notafile'),
dependency_analysis_file=os.path.join(here, 'notafile'))
def test_sme_ccat_output(self):
"""Test if the ccat output is what we expect it to be
"""
got = self.a.ccat()
want = (
'Muhto gaskkohagaid, ja erenoamážit dalle go lei buolaš, '
'de aggregáhta billánii. ¶\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_preprocess_output(self):
"""Test if the preprocess output is what we expect it to be
"""
got = self.a.preprocess()
want = (
'Muhto\ngaskkohagaid\n,\nja\nerenoamážit\ndalle go\nlei\n'
'buolaš\n,\nde\naggregáhta\nbillánii\n.\n¶\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_disambiguation_output(self):
"""Check if disambiguation analysis gives the expected output
"""
self.a.disambiguation_analysis()
got = self.a.get_disambiguation()
want = (
'"<Muhto>"\n\t"muhto" CC <sme> @CVP\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv <sme>\n"<,>"\n\t"," CLB\n"<ja>"\n'
'\t"ja" CC <sme> @CNP\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv <sme>\n"<dalle_go>"\n'
'\t"dalle_go" MWE CS <sme> @CVP\n"<lei>"\n'
'\t"leat" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<buolaš>"\n'
'\t"buolaš" Sem/Wthr N <sme> Sg Nom\n"<,>"\n'
'\t"," CLB\n"<de>"\n\t"de" Adv <sme>\n"<aggregáhta>"\n'
'\t"aggregáhta" N <sme> Sg Nom\n"<billánii>"\n'
'\t"billánit" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<.>"\n'
'\t"." CLB\n\n"<¶>"\n\t"¶" CLB\n\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_dependency_output(self):
"""Check if disambiguation analysis gives the expected output
"""
self.a.dependency_analysis()
got = self.a.get_dependency()
want = (
'"<Muhto>"\n\t"muhto" CC @CVP #1->1\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv @ADVL> #2->12\n"<,>"\n'
'\t"," CLB #3->4\n"<ja>"\n\t"ja" CC @CNP #4->2\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv @ADVL> #5->12\n"<dalle_go>"\n'
'\t"dalle_go" CS @CVP #6->7\n"<lei>"\n'
'\t"leat" V IV Ind Prt Sg3 @FS-ADVL> #7->12\n"<buolaš>"\n'
'\t"buolaš" N Sg Nom @<SPRED #8->7\n"<,>"\n'
'\t"," CLB #9->6\n"<de>"\n'
'\t"de" Adv @ADVL> #10->12\n"<aggregáhta>"\n'
'\t"aggregáhta" N Sg Nom @SUBJ> #11->12\n"<billánii>"\n'
'\t"billánit" V IV Ind Prt Sg3 @FS-ADVL> #12->0\n"<.>"\n'
'\t"." CLB #13->12\n\n"<¶>"\n\t"¶" CLB #1->1\n\n')
self.assertEqual(got, want.encode('utf8'))
def test_analysisXml(self):
"""Check if the xml is what it is supposed to be
"""
self.a.dependency_analysis()
self.a.get_analysis_xml()
got = self.a.xml_file.get_etree()
want = (
'<document xml:lang="sme" id="no_id">\n'
' <header>\n'
' <title>Internáhtta sosiálalaš giliguovddážin</title>\n'
' <genre code="facta"/>\n'
' <author>\n'
' <person firstname="Abba" lastname="Abbamar" sex="m" '
'born="1900" nationality="nor"/>\n'
| ' </author>\n'
' <translator>\n'
' <person firstname="Ibba" lastname="Ibbamar" sex="unknown" '
'born="" nationality=""/>\n'
| ' </translator>\n'
' <translated_from xml:lang="nob"/>\n'
' <year>2005</year>\n'
' <publChannel>\n'
' <publication>\n'
' <publisher>Almmuheaddji OS</publisher>\n'
' </publication>\n'
' </publChannel>\n'
' <wordcount>10</wordcount>\n'
' <availability>\n'
' <free/>\n'
' </availability>\n'
' <submitter name="Børre Gaup" '
'email="boerre.gaup@samediggi.no"/>\n'
' <multilingual>\n'
' <language xml:lang="nob"/>\n'
' </multilingual>\n'
' <origFileName>aarseth_s.htm</origFileName>\n'
' <metadata>\n'
' <uncomplete/>\n'
' </metadata>\n'
' <version>XSLtemplate 1.9 ; file-specific xsl '
'$Revision: 1.3 $; common.xsl $Revision$; </version>\n'
' </header>\n'
' <body><disambiguation><![CDATA["<Muhto>"\n'
'\t"muhto" CC <sme> @CVP\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv <sme>\n"<,>"\n\t"," CLB\n"<ja>"\n'
'\t"ja" CC <sme> @CNP\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv <sme>\n"<dalle_go>"\n'
'\t"dalle_go" MWE CS <sme> @CVP\n"<lei>"\n'
'\t"leat" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<buolaš>"\n'
'\t"buolaš" Sem/Wthr N <sme> Sg Nom\n"<,>"\n'
'\t"," CLB\n"<de>"\n\t"de" Adv <sme>\n"<aggregáhta>"\n'
'\t"aggregáhta" N <sme> Sg Nom\n"<billánii>"\n'
'\t"billánit" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<.>"\n'
'\t"." CLB\n\n"<¶>"\n\t"¶" CLB\n\n]]></disambiguation>'
'<dependency><![CDATA["<Muhto>"\n'
'\t"muhto" CC @CVP #1->1\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv @ADVL> #2->12\n"<,>"\n'
'\t"," CLB #3->4\n"<ja>"\n\t"ja" CC @CNP #4->2\n"<erenoamážit>"\n' |
nimblecode/nimblecode | server/library/prompts/py/09-jsonDumps.py | Python | mit | 79 | 0.037975 | import json
| a = [{'first':'Alan', 'last':'Turing'}]
j = | json.dumps(a)
print(j) |
52ai/django-ccsds | tests/auth_tests/test_views.py | Python | bsd-3-clause | 40,812 | 0.001323 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the follow test cases.
"""
fixtures = ['authtestdata.json']
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
| "Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
| ('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@ignore_warnings(category=RemovedInDjango110Warning)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://adminsite.com", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
|
kain88-de/mdanalysis | package/MDAnalysis/visualization/streamlines_3D.py | Python | gpl-2.0 | 25,088 | 0.006138 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
'''
Multicore 3D streamplot Python library for MDAnalysis --- :mod:`MDAnalysis.visualization.streamlines_3D`
=========================================================================================================
:Authors: Tyler Reddy and Matthieu Chavent
:Year: 2014
:Copyright: GNU Public License v3
:Citation: [Chavent2014]_
.. autofunction:: generate_streamlines_3d
'''
from __future__ import division, absolute_import
import six
from six.moves import range
import MDAnalysis
import multiprocessing
import numpy as np
import numpy.testing
import scipy
import scipy.spatial.distance
def determine_container_limits(coordinate_file_path, trajectory_file_path, buffer_value):
'''A function for the parent process which should take the input trajectory and calculate the limits of the
container for the system and return these limits.'''
universe_object = MDAnalysis.Universe(coordinate_file_path, trajectory_file_path)
all_atom_selection = universe_object.select_atoms('all') # select all particles
all_atom_coordinate_array = all_atom_selection.positions
x_min, x_max, y_min, y_max, z_min, z_max = [
all_atom_coordinate_array[..., 0].min(),
all_atom_coordinate_array[..., 0].max(), all_atom_coordinate_array[..., 1].min(),
all_atom_coordinate_array[..., 1].max(), all_atom_coordinate_array[..., 2].min(),
all_atom_coordinate_array[..., 2].max()]
tuple_of_limits = \
(
x_min - buffer_value,
x_max + buffer_value, y_min - buffer_value, y_max + buffer_value, z_min - buffer_value,
z_max + buffer_value) # using buffer_value to catch particles near edges
return tuple_of_limits
def produce_grid(tuple_of_limits, grid_spacing):
'''Produce a grid for the simulation system based on the tuple of Cartesian Coordinate limits calculated in an
earlier step.'''
x_min, x_max, y_min, y_max, z_min, z_max = tuple_of_limits
grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing, z_min:z_max:grid_spacing]
return grid
def split_grid(grid, num_cores):
'''Take the overall grid for the system and split it into lists of cube vertices that can be distributed to each
core.'''
# unpack the x,y,z mgrid arrays
x, y, z = grid
num_z_values = z.shape[-1]
num_sheets = z.shape[0]
delta_array_shape = tuple(
[n - 1 for n in x.shape]) # the final target shape for return delta arrays is n-1 in each dimension
ordered_list_per_sheet_x_values = []
for x_sheet in x: # each x_sheet should have shape (25,23) and the same x value in each element
array_all_x_values_current_sheet = x_sheet.flatten()
ordered_list_per_sheet_x_values.append(array_all_x_values_current_sheet)
ordered_list_per_sheet_y_values = []
for y_columns in y:
array_all_y_values_current_sheet = y_columns.flatten()
ordered_list_per_sheet_y_values.append(array_all_y_values_current_sheet)
ordered_list_per_sheet_z_values = []
for z_slices in z:
array_all_z_values_current_sheet = z_slices.flatten()
ordered_list_per_sheet_z_values.append(array_all_z_values_current_sheet)
ordered_list_cartesian_coordinates_per_sheet = []
for x_sheet_coords, y_sheet_coords, z_sheet_coords in zip(ordered_list_per_sheet_x_values,
ordered_list_per_sheet_y_values,
ordered_list_per_sheet_z_values):
ordered_list_cartesian_coordinates_per_sheet.append(zip(x_sheet_coords, y_sheet_coords, z_sheet_coords))
array_ordered_cartesian_coords_per_sheet = np.array(ordered_list_cartesian_coordinates_per_sheet)
#now I'm going to want to build cubes in an ordered fashion, and in such a way that I can track the index /
# centroid of each cube for domain decomposition / reconstruction and mayavi mlab.flow() input
#cubes will be formed from N - 1 base sheets combined with subsequent sheets
current_base_sheet = 0
dictionary_cubes_centroids_indices = {}
cube_counter = 0
while current_base_sheet < num_sheets - 1:
current_base_sheet_array = array_ordered_cartesian_coords_per_sheet[current_base_sheet]
current_top_sheet_array = array_ordered_cartesian_coords_per_sheet[
current_base_sheet + 1] # the points of the sheet 'to the right' in the grid
current_index = 0
while current_index < current_base_sheet_array.shape[0] - num_z_values:
# iterate through all the indices in each of the sheet arrays (careful to avoid extra
# points not needed for cubes)
column_z_level = 0 # start at the bottom of a given 4-point column and work up
while column_z_level < num_z_values - 1:
current_list_cube_vertices = []
first_two_vertices_base_sheet = current_base_sheet_array[current_index:current_index + 2, ...].tolist()
first_two_vertices_top_sheet = current_top_sheet_array[current_index:current_index + 2, ...].tolist()
next_two_vertices_base_sheet = current_base_sheet_array[current_index +
num_z_values: 2 +
num_z_values + current_index, ...].tolist()
next_two_vertices_top_sheet = current_top_sheet_array[current_index +
num_z_values: 2 +
num_z_values + current_index, ...].tolist()
for vertex_set in [
first_two_vertices_base_sheet, first_two_vertices_top_sheet,
next_two_vertices_base_sheet, next_two_vertices_top_sheet
]:
current_list_cube_vertices.extend(vertex | _set)
vertex_array = np.array(current_list_cube_vertices)
assert vertex_array.shape == (8, 3), "vertex_array has incorrect shape"
cube_centroid = np.average(np.array(current_list_cube_vertices), axis=0)
dictionary_cubes_centroids_indices[cube_counter] = {
| 'centroid': cube_centroid,
'vertex_list': current_list_cube_vertices}
cube_counter += 1
current_index += 1
column_z_level += 1
if column_z_level == num_z_values - 1: # the loop will break but I should also increment the
# current_index
current_index += 1
current_base_sheet += 1
total_cubes = len(dictionary_cubes_centroids_indices)
#produce an array of pseudo cube indices (actually the dictionary keys which are cube numbers in string format):
pseudo_cube_indices = np.arange(0, total_cubes)
sublist_of_cube_indices_per_core = np.array_split(pseudo_cube_indices, num_cores)
#now, the split of pseudoindices seems to work well, and the above sublist_of_cube_indices_per_core is a list of
# arrays of cube n |
mblayman/markwiki | docs/conf.py | Python | bsd-2-clause | 8,182 | 0.006478 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Make the project root importable so the package version can be read
# straight from setup.py instead of being duplicated in this config.
import sys
import os
sys.path.append(os.path.abspath('..'))
from setup import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# No extra Sphinx extensions are needed to build these docs.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MarkWiki'
copyright = u'2016, Matt Layman and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (kept in sync with setup.py via the import above).
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Never pick up the Sphinx output directory itself as a source.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = [] |
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MarkWikidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'MarkWiki.tex', u'MarkWiki Documentation',
   u'Matt Layman and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'markwiki', u'MarkWiki Documentation',
     [u'Matt Layman and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'MarkWiki', u'MarkWiki Documentation',
   u'Matt Layman and contributors', 'MarkWiki', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = Fals |
Arno-Nymous/pyload | module/plugins/hoster/FilejokerNet.py | Python | gpl-3.0 | 1,357 | 0.003685 | # -*- coding: utf-8 -*-
from ..internal.XFSHoster import XFSHoster
clas | s FilejokerNet(XFSHoster):
__name__ = "FilejokerNet"
__type__ = "ho | ster"
__version__ = "0.04"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?filejoker\.net/\w{12}'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Filejoker.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
PLUGIN_DOMAIN = "filejoker.net"
WAIT_PATTERN = r'Please [Ww]ait (?:<span id="count" class="alert-success">)?([\w ]+?)(?:</span> seconds</p>| until the next download)'
ERROR_PATTERN = r'Wrong Captcha'
PREMIUM_ONLY_PATTERN = 'Free Members can download files no bigger'
INFO_PATTERN = r'<div class="name-size">(?P<N>.+?) <small>\((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)</small></div>'
SIZE_REPLACEMENTS = [('Kb', 'KB'), ('Mb', 'MB'), ('Gb', 'GB')]
LINK_PATTERN = r'<div class="premium-download">\s+<a href="(.+?)"'
|
kiahosseini/django-form-validation | form_validation/forms.py | Python | mit | 5,956 | 0.004533 | from django.conf import settings
from django import forms
from django.utils.translation import get_language
from copy import deepcopy
class ValidateForm(object):
    """Mixin that mirrors a form's server-side validation rules as
    jQuery-Validate attributes on the rendered widgets.

    On instantiation it walks ``self.fields`` and copies constraints
    (required, max/min length, e-mail, URL, digits, number, file
    extensions, ...) into ``widget.attrs`` so the bundled
    jquery.validate scripts can enforce them client-side as well.
    """

    @property
    def media(self):
        # class Media cannot be used here because the messages file path
        # depends on the language of the *current* request.
        return super(ValidateForm, self).media + forms.Media(
            js=('{}form_validation/jquery/jquery.validate.min.js'.format(settings.STATIC_URL),
                '{}form_validation/jquery/jquery.validate.messages.{}.js'.format(settings.STATIC_URL, get_language()),
                '{}form_validation/js/our_base_form.js'.format(settings.STATIC_URL),
                ),
            # BUGFIX: this value must be an iterable of paths. Without the
            # trailing comma it was a plain string, which Django iterates
            # character by character, emitting one broken <link> per char.
            css={'all': ('{}form_validation/jquery/jquery.validate.css'.format(settings.STATIC_URL),)})

    # Optional per-form overrides; merged with what is auto-discovered
    # from the form fields in get_ready_for_validation().
    validation_required_fields = []
    validation_max_length_fields = {}
    validation_min_length_fields = {}
    validation_equals_fields = {}
    validation_regex_fields = []
    validation_email_fields = []
    validation_url_fields = []
    validation_digits_fields = []
    validation_number_fields = []
    validation_date_fields = []
    required_css_class = 'required'

    def __init__(self, *args, **kwargs):
        super(ValidateForm, self).__init__(*args, **kwargs)
        # Some subclasses may not have built `fields` yet.
        if hasattr(self, 'fields'):
            ValidateForm.get_ready_for_validation(form_instance=self)

    @staticmethod
    def get_ready_for_validation(form_instance):
        """Collect validation constraints from *form_instance* and write
        them into the widgets' HTML attributes.
        """
        required_fields = deepcopy(form_instance.validation_required_fields)
        max_length_fields = deepcopy(form_instance.validation_max_length_fields)
        equals_fields = deepcopy(form_instance.validation_equals_fields)
        min_length_fields = deepcopy(form_instance.validation_min_length_fields)
        regex_fields = deepcopy(form_instance.validation_regex_fields)
        email_fields = deepcopy(form_instance.validation_email_fields)
        url_fields = deepcopy(form_instance.validation_url_fields)
        digits_fields = deepcopy(form_instance.validation_digits_fields)
        number_fields = deepcopy(form_instance.validation_number_fields)
        date_fields = deepcopy(form_instance.validation_date_fields)
        # Pass 1: discover constraints declared on the Django fields.
        for f in form_instance.fields.keys():
            fld = form_instance.fields[f]
            if hasattr(form_instance, 'Meta') and hasattr(form_instance.Meta, 'blank_fields'):
                if f not in form_instance.Meta.blank_fields and fld.required:
                    required_fields += [f]
            else:
                if fld.required:
                    required_fields += [f]
            if getattr(fld, 'max_length', False) and f not in max_length_fields:
                max_length_fields[f] = fld.max_length
            if getattr(fld, 'min_length', False) and f not in min_length_fields:
                min_length_fields[f] = fld.min_length
            if forms.EmailField in fld.__class__.__bases__ or isinstance(fld, forms.EmailField):
                email_fields += [f]
            if forms.URLField in fld.__class__.__bases__ or isinstance(fld, forms.URLField):
                url_fields += [f]
            if getattr(fld, 'digits', False):
                digits_fields += [f]
            if forms.FloatField in fld.__class__.__bases__ \
                    or forms.IntegerField in fld.__class__.__bases__ \
                    or isinstance(fld, (forms.IntegerField, forms.FloatField)) \
                    or getattr(fld, 'number', False):
                number_fields += [f]
        # Pass 2: write the collected constraints into the widget attrs.
        for f in form_instance.fields.keys():
            fld = form_instance.fields[f]
            if f in required_fields:
                fld.required = True
                fld.widget.attrs['required'] = True
            if f in max_length_fields.keys():
                fld.widget.attrs['maxlength'] = max_length_fields.get(f)
            if f in min_length_fields.keys():
                fld.widget.attrs['minlength'] = min_length_fields.get(f)
            if f in equals_fields.keys():
                # NOTE(review): this builds ids like '#id_<prefix>_<field>';
                # Django's default auto ids use '<prefix>-<field>' -- confirm
                # prefixed forms are handled correctly on the JS side.
                fld.widget.attrs['equalTo'] = '#id%s_%s' % (
                    ('_%s' % form_instance.prefix) if form_instance.prefix else '',
                    equals_fields.get(f))
            if f in email_fields:
                fld.widget.attrs['email'] = True
            if f in url_fields:
                fld.help_text += 'http://www.example.com'
            if f in digits_fields:
                fld.widget.attrs['digits'] = True
            if f in number_fields:
                fld.widget.attrs['number'] = True
            if forms.FileField in fld.__class__.__bases__ \
                    or isinstance(fld, forms.FileField) \
                    or getattr(fld, 'content_types', False) \
                    or getattr(fld, 'ext_whitelist', False):
                fld.widget.attrs['extension'] = ','.join(getattr(fld, 'ext_whitelist', []))
            # An already-populated model attribute satisfies "required".
            if f in required_fields and (
                    hasattr(form_instance, 'instance') and getattr(form_instance.instance, f, False)):
                del fld.widget.attrs['required']
            if forms.ImageField in fld.__class__.__bases__ or isinstance(fld, forms.ImageField):
                fld.widget.attrs['accept'] = 'image/*'
                fld.widget.attrs['extension'] = 'jpg,jpeg,gif,png'
        ValidateForm.clear_lists(form_instance)

    @staticmethod
    def clear_lists(form_instance):
        """Reset the per-instance validation registries.

        BUGFIX: the original chained assignments (a) bound
        ``validation_equals_fields`` to a throw-away *local* variable
        instead of the instance attribute and (b) aliased several
        attributes to the very same dict/list object. Every attribute now
        gets its own fresh container.
        """
        form_instance.validation_max_length_fields = {}
        form_instance.validation_min_length_fields = {}
        form_instance.validation_equals_fields = {}
        form_instance.validation_required_fields = []
        form_instance.validation_regex_fields = []
        form_instance.validation_email_fields = []
        form_instance.validation_url_fields = []
        form_instance.validation_digits_fields = []
        # Also reset the two registries the original forgot so stale values
        # cannot leak into a later validation pass.
        form_instance.validation_number_fields = []
        form_instance.validation_date_fields = []
class BaseForm(ValidateForm, forms.Form):
    """Plain (non-model) Django form with the jQuery-Validate mixin applied."""
    pass
class BaseModelForm(ValidateForm, forms.ModelForm):
    """Model-backed Django form with the jQuery-Validate mixin applied."""
    pass
|
zejn/prometapi | prometapi/encoders.py | Python | agpl-3.0 | 2,113 | 0.007099 | # coding: ut | f-8
import math
# Python 2/3 compatibility shim: ``long`` does not exist on Python 3,
# where plain ``int`` is already arbitrary precision.
try:
    long_int = long
except NameError:
    long_int = int
def rshift(val, n):
    """Logical (unsigned) 32-bit right shift of *val* by *n* bits.

    Python's ``>>`` is an arithmetic shift, so a negative value would keep
    its sign.  Mapping negatives into the unsigned 32-bit range first
    reproduces JavaScript's ``>>>`` operator, which this cipher port needs.
    """
    if val < 0:
        val += 0x100000000
    return val >> n
def str_to_longs(e):
    """Pack string *e* into a list of little-endian 32-bit words.

    The string is NUL-padded up to a multiple of four characters; every
    group of four character codes then forms one word, least significant
    byte first.  Inverse of :func:`longs_to_str` for Latin-1 range input.
    """
    leftover = len(e) % 4
    if leftover:
        e = e + '\x00' * (4 - leftover)
    words = []
    for start in range(0, len(e), 4):
        words.append(ord(e[start]) +
                     (ord(e[start + 1]) << 8) +
                     (ord(e[start + 2]) << 16) +
                     (ord(e[start + 3]) << 24))
    return words
def longs_to_str(e):
    """Unpack a list of 32-bit words back into a string.

    Each word contributes four characters, least significant byte first;
    this is the inverse of the packing done by ``str_to_longs``.
    """
    quads = [chr(word & 255) +
             chr((word >> 8) & 255) +
             chr((word >> 16) & 255) +
             chr((word >> 24) & 255)
             for word in e]
    return ''.join(quads)
def encrypt(e, t):
    """Encrypt string *e* with key string *t*.

    This looks like a straight port of a JavaScript implementation in the
    Corrected Block TEA ("XXTEA") family: data and key are packed into
    little-endian 32-bit words, mixed for ``floor(6 + 52 / l)`` rounds,
    and the words are serialized back into a byte string.

    NOTE(review): ``e`` must be non-empty (``r[0]`` would raise IndexError
    otherwise) and the key should pack into at least 4 words (>= 13
    characters), since ``a[3 & p ^ n]`` indexes words 0-3 -- confirm with
    callers.
    """
    r = str_to_longs(e)
    a = str_to_longs(t)
    if len(r) <= 1:
        # The mixing loop needs at least two data words; pad with a zero.
        r = [r[0], 0]
    l = len(r)
    g = r[l - 1]
    s = r[0]
    i = 2654435769  # 0x9E3779B9, the TEA family's golden-ratio delta
    c = math.floor(6 + 52 / l)
    u = 0
    while c > 0:
        u += i
        n = rshift(u, 2) & 3
        p = 0
        while l > p:
            s = r[(p + 1) % l]  # the word "after" r[p], wrapping around
            o = (rshift(g, 5) ^ ((s << 2) & 0xffffffff) ) + (rshift(s, 3) ^ ((g << 4) & 0xffffffff)) ^ (u ^ s) + (a[3 & p ^ n] ^ g)
            g = r[p] = (r[p] + o) & 0xffffffff  # keep every word in 32 bits
            p += 1
        c -= 1
    f = longs_to_str(r)
    return f
def decrypt(e, t):
    """Decrypt a string produced by :func:`encrypt` with the same key *t*.

    Runs the encryption rounds in reverse: the round accumulator ``u``
    starts at ``c * delta`` and is stepped back down to zero while the
    data words are unmixed from last to first.

    NOTE(review): the result keeps any NUL padding that encrypt() added
    when the plaintext length was not a multiple of 4 -- presumably
    callers strip it; confirm.
    """
    o = str_to_longs(e)
    n = str_to_longs(t)
    l = len(o)
    g = o[l - 1]
    s = o[0]
    i = 2654435769  # 0x9E3779B9, the TEA family's golden-ratio delta
    c = long_int(math.floor(6 + 52 / l))
    u = c * i
    while u:
        a = rshift(u, 2) & 3
        p = l - 1
        while p >= 0:
            if p > 0:
                g = o[p-1]  # the word "before" o[p]
            else:
                g = o[l-1]  # wrap around at the front of the block
            r = (rshift(g, 5) ^ ((s << 2) & 0xffffffff)) + \
                (rshift(s, 3) ^ ((g << 4) & 0xffffffff)) ^ \
                (u ^ s) + \
                (n[3 & p ^ a] ^ g)
            s = o[p] = (o[p] - r) & 0xffffffff  # undo the addition, mod 2**32
            p -= 1
        u -= i
    f = longs_to_str(o)
    return f
|
dajohnso/cfme_tests | cfme/tests/cloud/test_cloud_events.py | Python | gpl-2.0 | 3,400 | 0.002059 | # -*- coding: utf-8 -*-
"""This module tests only cloud specific events"""
import pytest
import yaml
from cfme.common.vm import VM
from cfme.cloud.provider.azure import AzureProvider
from utils import testgen
from utils.generators import random_vm_name
# Module-wide pytest marks: every test in this module runs at tier 3.
pytestmark = [
    pytest.mark.tier(3)
]
# Parametrize all tests here over the configured Azure providers.
pytest_generate_tests = testgen.generate([AzureProvider], scope='module')
def test_manage_nsg_group(provider, setup_provider, register_event):
    """Verify CFME receives and parses the Azure events emitted when a
    network security group is created and then deleted.
    """
    group_name = random_vm_name(context='nsg')
    resource_group = provider.data['provisioning']['resource_group']

    # Many Azure events are not parsed by CFME yet, so both matchers have
    # to inspect the raw event payload instead of parsed attributes.
    def created_matcher(_, raw_event):
        payload = yaml.load(raw_event)
        return (payload['resourceId'].endswith(group_name) and
                payload['status']['value'] == 'Accepted' and
                payload['subStatus']['value'] == 'Created')

    # Expect the "network security group created" event.
    register_event({'full_data': 'will be ignored', 'cmp_func': created_matcher},
                   source=provider.type.upper(),
                   event_type='networkSecurityGroups_write_EndRequest')

    def deleted_matcher(_, raw_event):
        payload = yaml.load(raw_event)
        return (payload['resourceId'].endswith(group_name) and
                payload['status']['value'] == 'Succeeded' and
                len(payload['subStatus']['value']) == 0)

    # Expect the "network security group deleted" event.
    register_event({'full_data': 'will be ignored', 'cmp_func': deleted_matcher},
                   source=provider.type.upper(),
                   event_type='networkSecurityGroups_delete_EndRequest')

    # Trigger both events on the provider side.
    provider.mgmt.create_netsec_group(group_name, resource_group)
    provider.mgmt.remove_netsec_group(group_name, resource_group)
def test_vm_capture(request, provide | r, setup_provider, register_event):
"""
tests that generalize and capture vm azure events are received and parsed by CFME
"""
mgmt = provider.mgmt
vm = VM.factory(random_vm_name(context='capture'), provider)
if not mgmt.does_vm_exist(vm.name):
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
vm.refresh_relationships()
# # deferred delete vm
request.addfinalizer(vm.delete_from_provider)
def cmp_function(_, y):
data = yaml.load(y)
return data['resourceId'].endswith(vm.name) and data['status']['value'] == 'Succeeded'
full_data_attr = {'full_data': 'will be ignored',
'cmp_func': cmp_function}
# generalize event
register_event(full_data_attr, source='AZURE',
event_type='virtualMachines_generalize_EndRequest')
# capture event
register_event(full_data_attr, source='AZURE', event_type='virtualMachines_capture_EndRequest')
# capture vm
image_name = vm.name
resource_group = provider.data['provisioning']['resource_group']
mgmt.capture_vm(vm.name, resource_group, 'templates', image_name)
# delete remaining image
container = 'system'
blob_images = mgmt.list_blob_images(container)
# removing both json and vhd files
test_image = [img for img in blob_images if image_name in img][-1]
mgmt.remove_blob_image(test_image, container)
|
photoninger/ansible | lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py | Python | gpl-3.0 | 5,651 | 0.001947 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters on Cisco ACI fabrics (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
- More information from the internal APIC class I(vz:RsSubjFiltAtt) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
- The M(aci_tenant), M(aci_contract) | , M(aci_contract_subject), and M(aci_filter) modules can be used for these.
options:
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be se | t to log.
- The APIC defaults new Subject to Filter bindings to C(none).
choices: [ log, none ]
aliases: [ directive ]
default: none
subject:
description:
- The name of the Contract Subject.
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_subject_filter_binding:
hostname: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
tenant: '{{ tenant }}'
contract: '{{ contract }}'
subject: '{{ subject }}'
filter: '{{ filter }}'
log: '{{ log }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point.

    Builds the ACI request URL for the vzRsSubjFiltAtt object and then
    creates, deletes or queries the Subject-to-Filter binding according
    to the ``state`` parameter.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        contract=dict(type='str', aliases=['contract_name']),
        filter=dict(type='str', aliases=['filter_name']),
        # BUGFIX: the keyword was misspelled as 'tyep', so the 'log' option
        # declared no type and the intended 'str' handling never applied.
        log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
        subject=dict(type='str', aliases=['contract_subject', 'subject_name']),
        tenant=dict(type='str', aliases=['tenant_name']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Both create and delete need the full object path.
        required_if=[
            ['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
            ['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
        ],
    )
    contract = module.params['contract']
    filter_name = module.params['filter']
    log = module.params['log']
    subject = module.params['subject']
    tenant = module.params['tenant']
    state = module.params['state']
    # Add subject_filter key to module.params for building the URL.
    module.params['subject_filter'] = filter_name
    # Convert log to an empty string if 'none', as that is what the API
    # expects; an empty string is not a good option to present to the user.
    if log == 'none':
        log = ''
    aci = ACIModule(module)
    # tenant -> contract -> subject -> subject-to-filter binding.
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='vzBrCP',
            aci_rn='brc-{0}'.format(contract),
            filter_target='eq(vzBrCP.name, "{0}")'.format(contract),
            module_object=contract,
        ),
        subclass_2=dict(
            aci_class='vzSubj',
            aci_rn='subj-{0}'.format(subject),
            filter_target='eq(vzSubj.name, "{0}")'.format(subject),
            module_object=subject,
        ),
        subclass_3=dict(
            aci_class='vzRsSubjFiltAtt',
            aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
            filter_target='eq(vzRsSubjFiltAtt.tnVzFilterName, "{0}")'.format(filter_name),
            module_object=filter_name,
        ),
    )
    aci.get_existing()
    if state == 'present':
        # Filter out module parameters with null values.
        aci.payload(
            aci_class='vzRsSubjFiltAtt',
            class_config=dict(
                tnVzFilterName=filter_name,
                directives=log,
            ),
        )
        # Generate config diff which will be used as POST request body.
        aci.get_diff(aci_class='vzRsSubjFiltAtt')
        # Submit changes if not in check_mode and proposed differs from existing.
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    # Remove the subject_filter key used only to build the URL.
    module.params.pop('subject_filter')
    module.exit_json(**aci.result)


if __name__ == "__main__":
    main()
|
zenodo/invenio | invenio/legacy/bibclassify/ontology_reader.py | Python | gpl-2.0 | 44,242 | 0.000294 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibClassify ontology reader.
The ontology reader reads currently either a RDF/SKOS taxonomy or a
simple controlled vocabulary file (1 word per line). The first role of
this module is to manage the cached version of the ontology file. The
second role is to hold all methods responsible for the creation of
regular expressions. These methods are grammatically related as we take
care of different forms of the same words. The grammatical rules can be
configured via the configuration file.
The main method from this module is get_regular_expressions.
"""
from __future__ import print_function
from datetime import datetime, timedelta
from six import iteritems
from six.moves import cPickle
import os
import re
import sys
import tempfile
import time
import urllib2
import traceback
import xml.sax
import thread
import rdflib
from invenio.legacy.bibclassify import config as bconfig
from invenio.modules.classifier.errors import TaxonomyError
log = bconfig.get_logger("bibclassify.ontology_reader")
from invenio import config
from invenio.modules.classifier.registry import taxonomies
# only if not running in a stanalone mode
if bconfig.STANDALONE:
dbquery = None
from urllib2 import urlopen
else:
from invenio.legacy import dbquery
from invenio.utils.url import make_invenio_opener
urlopen = make_invenio_opener('BibClassify').open
# Pre-compiled helpers used when generating keyword-matching expressions.
_contains_digit = re.compile("\d")
# (?i) makes these case-insensitive, e.g. "Nonlinear", "antimatter".
_starts_with_non = re.compile("(?i)^non[a-z]")
_starts_with_anti = re.compile("(?i)^anti[a-z]")
_split_by_punctuation = re.compile("(\W+)")
# In-memory taxonomy cache: taxonomy_id -> (load time, compiled taxonomy).
_CACHE = {}
def get_cache(taxonomy_id):
    """Return thread-safe cache for the given taxonomy id.

    :param taxonomy_id: identifier of the taxonomy
    :type taxonomy_id: str
    :return: dictionary object (empty if no taxonomy_id
        is found), you must not change anything inside it.
        Create a new dictionary and use set_cache if you want
        to update the cache!
    """
    # Because of a standalone mode, we don't use the
    # invenio.data_cacher.DataCacher, but it has no effect
    # on proper functionality.
    if taxonomy_id in _CACHE:
        ctime, taxonomy = _CACHE[taxonomy_id]
        # check it is fresh version
        onto_name, onto_path, onto_url = _get_ontology(taxonomy_id)
        cache_path = _get_cache_path(onto_name)
        # if source exists and is newer than the in-memory copy
        # BUGFIX: the two log messages below were swapped relative to the
        # conditions they describe.
        if os.path.isfile(onto_path) and os.path.getmtime(onto_path) > ctime:
            log.info('Forcing taxonomy rebuild as source'
                     ' file is newer/updated.')
            return {}  # force cache rebuild
        # if on-disk cache exists and is newer than the in-memory copy
        if os.path.isfile(cache_path) and os.path.getmtime(cache_path) > ctime:
            log.info('Forcing taxonomy rebuild as cached'
                     ' version is newer/updated.')
            return {}
        log.info('Taxonomy retrieved from cache')
        return taxonomy
    return {}
# Module-level lock shared by every call to set_cache(). BUGFIX: the
# original allocated a brand-new lock inside the function, which no other
# thread could ever be holding -- i.e. it provided no mutual exclusion.
_CACHE_LOCK = thread.allocate_lock()


def set_cache(taxonomy_id, contents):
    """Update the in-memory taxonomy cache in a thread-safe manner.

    :param taxonomy_id: identifier of the taxonomy
    :param contents: compiled taxonomy data to store alongside a timestamp
    """
    _CACHE_LOCK.acquire()
    try:
        _CACHE[taxonomy_id] = (time.time(), contents)
    finally:
        _CACHE_LOCK.release()
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False):
    """Return a list of patterns compiled from the RDF/SKOS ontology.

    Uses the on-disk cache if it exists and the taxonomy hasn't changed.

    :param taxonomy_name: name of the taxonomy to load
    :param rebuild: when True, force regeneration of the cache
    :param no_cache: when True, bypass reading/writing the cache
    :raises TaxonomyError: if neither the taxonomy source nor a usable
        cache can be found
    """
    # Translate the ontology name into a local path. Check if the name
    # relates to an existing ontology.
    onto_name, onto_path, onto_url = _get_ontology(taxonomy_name)
    if not onto_path:
        raise TaxonomyError("Unable to locate the taxonomy: '%s'."
                            % taxonomy_name)
    cache_path = _get_cache_path(onto_name)
    log.debug('Taxonomy discovered, now we load it '
              '(from cache: %s, onto_path: %s, cache_path: %s)'
              % (not no_cache, onto_path, cache_path))
    if os.access(cache_path, os.R_OK):
        if os.access(onto_path, os.R_OK):
            if rebuild or no_cache:
                log.debug("Cache generation was manually forced.")
                return _build_cache(onto_path, skip_cache=no_cache)
        else:
            # ontology file not found. Use the cache instead.
            log.warning("The ontology couldn't be located. However "
                        "a cached version of it is available. Using it as a "
                        "reference.")
            return _get_cache(cache_path, source_file=onto_path)
        if (os.path.getmtime(cache_path) >
                os.path.getmtime(onto_path)):
            # Cache is more recent than the ontology: use cache.
            # BUGFIX: the debug message used to claim the opposite
            # ("cache is older") of what this branch actually checks.
            log.debug("Normal situation, cache is newer than ontology,"
                      " so we load it from cache")
            return _get_cache(cache_path, source_file=onto_path)
        else:
            # Ontology is more recent than the cache: rebuild cache.
            log.warning("Cache '%s' is older than '%s'. "
                        "We will rebuild the cache" %
                        (cache_path, onto_path))
            return _build_cache(onto_path, skip_cache=no_cache)
    elif os.access(onto_path, os.R_OK):
        if not no_cache and\
                os.path.exists(cache_path) and\
                not os.access(cache_path, os.W_OK):
            raise TaxonomyError('We cannot read/write into: %s. '
                                'Aborting!' % cache_path)
        elif not no_cache and os.path.exists(cache_path):
            log.warning('Cache %s exists, but is not readable!' % cache_path)
        log.info("Cache not available. Building it now: %s" % onto_path)
        return _build_cache(onto_path, skip_cache=no_cache)
    else:
        raise TaxonomyError("We miss both source and cache"
                            " of the taxonomy: %s" % taxonomy_name)
def _get_remote_ontology(onto_url, time_difference=None):
"""Check if the online ontology is more recent than the local ontology.
If yes, try to download and store it in Invenio's cache directory.
Return a boolean describing the success of the operation.
:return: path to the downloaded ontology.
"""
if onto_url is None:
return False
dl_dir = ((config.CFG_CACHEDIR or tempfile.gettempdir()) + os.sep +
"bibclassify" + os.sep)
if not os.path.exists(dl_dir):
os.mkdir(dl_dir)
local_file = dl_dir + os.path.basename(onto_url)
remote_modif_time = _get_last_modification_date(onto_url)
try:
local_modif_seconds = os.path.getmtime(local_file)
except OSError:
# The local file does not exist. Download the ontology.
download = True
log.info("The local ontology could not be found.")
else:
local_modif_time = datetime(*time.gmtime(local_modif_seconds)[0:6])
# Let's set a time delta of 1 hour and 10 minutes.
time_difference = time_difference or timedelta(hours=1, minutes=10)
download = remote_modif_time > local_modif_time + time_difference
if download:
log.info("The remote ontology '%s' is more recent "
"than the local ontology." % onto_url)
if download:
if not _download_ontology(onto_url, local_file):
log.warning("Error downloading the ontology fro |
n054/totp-cgi | contrib/gitolite/command.py | Python | gpl-2.0 | 22,349 | 0.002058 | #!/usr/bin/python -tt
__author__ = 'mricon'
import logging
import os
import sys
import anyjson
import totpcgi
import totpcgi.backends
import totpcgi.backends.file
import totpcgi.utils
import datetime
import dateutil
import dateutil.parser
import dateutil.tz
from string import Template
import syslog
#--------------- CHANGE ME TO REFLECT YOUR ENVIRONMENT -------------------
# You need to change this to reflect your environment
GL_2FA_COMMAND = 'ssh git@example.com 2fa'
HELP_DOC_LINK = 'https://example.com'
# Set to False to disallow yubikey (HOTP) enrolment
ALLOW_YUBIKEY = True
# This will allow anyone to use "override" as the 2-factor token
# Obviously, this should only be used during initial debugging
# and testing and then set to false.
ALLOW_BYPASS_OVERRIDE = False
# In the TOTP case, the window size is the time drift between the user's device
# and the server. A window size of 17 means 17*10 seconds, or in other words,
# we'll accept any tokencodes that were valid within 170 seconds before now, and
# 170 seconds after now.
# In the HOTP case, discrepancy between the counter on the device and the counter
# on the server is virtually guaranteed (accidental button presses on the yubikey,
# authentication failures, etc), so the window size indicates how many tokens we will
# try in addition to the current one. The setting of 30 is sane and is not likely to
# lock someone out.
TOTP_WINDOW_SIZE = 17
HOTP_WINDOW_SIZE = 30
# First value is the number of times. Second value is the number of seconds.
# So, "3, 30" means "3 falures within 30 seconds"
RATE_LIMIT = (3, 30)
# Google Authenticator and other devices default to key length of 80 bits, while
# for yubikeys the length must be 160 bits. I suggest you leave these as-is.
TOTP_KEY_LENGTH = 80
HOTP_KEY_LENGTH = 160
# This identifies the token in the user's TOTP app
TOTP_USER_MASK = '$username@example.com'
# GeoIP-city database location.
# This is only currently used as a sort of a reminder to the users, so when they list
# their current validations using list-val, it can help them figure out where they
# previously authorized from.
# You can download the City database from
# http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.xz and put
# into GL_ADMIN_BASE/2fa/ (uncompress first). If the code doesn't find it, it'll
# try to use the basic GeoIP country information. If that fails, it'll just
# quitely omit GeoIP data.
GEOIP_CITY_DB = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/GeoLiteCity.dat')
# Identify ourselves in syslog as "gl-2fa"
syslog.openlog('gl-2fa', syslog.LOG_PID, syslog.LOG_AUTH)
#-------------------------------------------------------------------------
# default basic logger. We override it later.
logger = logging.getLogger(__name__)
def print_help_link():
    """Print a short footer pointing the user at the 2fa help documentation."""
    footer = (
        '',
        'If you need more help, please see the following link:',
        ' %s' % HELP_DOC_LINK,
        '',
    )
    for line in footer:
        print(line)
def get_geoip_crc(ipaddr):
    """Return a human-readable GeoIP location string for ipaddr, or None.

    Uses the city-level database when GEOIP_CITY_DB exists on disk and
    falls back to the basic country database otherwise. Returns a unicode
    string like u'City, Region, CC' (city DB) or just the country code.
    """
    import GeoIP
    # Prefer the detailed city database when it has been downloaded.
    if os.path.exists(GEOIP_CITY_DB):
        logger.debug('Opening geoip db in %s' % GEOIP_CITY_DB)
        gi = GeoIP.open(GEOIP_CITY_DB, GeoIP.GEOIP_STANDARD)
    else:
        logger.debug('%s does not exist, using basic geoip db' % GEOIP_CITY_DB)
        gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)

    ginfo = gi.record_by_addr(ipaddr)
    if ginfo is None:
        # No city-level record available; fall back to the country code.
        crc = gi.country_code_by_addr(ipaddr)
        if not crc:
            return None
        return unicode(crc, 'iso-8859-1')

    # The C library hands back latin-1 byte strings; decode each field,
    # substituting 'Unknown' for missing values.
    parts = []
    for key in ('city', 'region_name', 'country_code'):
        raw = ginfo[key]
        parts.append(unicode(raw, 'iso-8859-1') if raw is not None else 'Unknown')
    return u'%s, %s, %s' % tuple(parts)
def load_authorized_ips():
    """Load the per-user validated-IP map from disk.

    The authorized ips file has the following structure:
    {
      'IP_ADDR': {
         'added': RFC_8601_DATETIME,
         'expires': RFC_8601_DATETIME,
         'whois': whois information about the IP at the time of recording,
         'geoip': geoip information about the IP at the time of recording,
      }
    }

    It is stored in GL_ADMIN_BASE/2fa/validations/GL_USER.js.

    :returns: dict of validations; empty dict when no readable/parsable
              validations file exists (the validations directory is created
              on first use).
    """
    user = os.environ['GL_USER']
    val_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/validations')
    if not os.path.exists(val_dir):
        # 0o700: validations contain per-user security data, keep private.
        os.makedirs(val_dir, 0o700)
        logger.debug('Created val_dir in %s' % val_dir)

    valfile = os.path.join(val_dir, '%s.js' % user)
    logger.debug('Loading authorized ips from %s' % valfile)
    valdata = {}
    if os.access(valfile, os.R_OK):
        try:
            # 'with' guarantees the handle is closed even if read() fails.
            with open(valfile, 'r') as fh:
                jdata = fh.read()
            valdata = anyjson.deserialize(jdata)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; any read/parse error starts fresh.
            logger.critical('Validations file exists, but could not be parsed!')
            logger.critical('All previous validations have been lost, starting fresh.')
    return valdata
def store_authorized_ips(valdata):
    """Serialize the validated-IP map for the current gitolite user to disk.

    Writes JSON to GL_ADMIN_BASE/2fa/validations/GL_USER.js (the directory
    is created by load_authorized_ips(), which callers invoke first).

    :param valdata: dict mapping IP address -> validation metadata, as
                    produced by load_authorized_ips()/store_validation().
    """
    user = os.environ['GL_USER']
    val_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/validations')
    valfile = os.path.join(val_dir, '%s.js' % user)
    jdata = anyjson.serialize(valdata)
    # 'with' ensures the handle is closed even if write() fails
    # (the original open/write/close leaked the handle on error).
    with open(valfile, 'w') as fh:
        fh.write(jdata)
    logger.debug('Wrote new validations file in %s' % valfile)
def store_validation(remote_ip, hours):
    """Record remote_ip as validated for the next `hours` hours.

    Adds/overwrites the entry for remote_ip in the per-user validations
    file with 'added' and 'expires' UTC timestamps, and best-effort
    enriches it with whois (via cymruwhois, if installed) and GeoIP data.

    :param remote_ip: IP address to authorize.
    :param hours: validity period in hours from now.
    """
    valdata = load_authorized_ips()

    utc = dateutil.tz.tzutc()
    now_time = datetime.datetime.now(utc).replace(microsecond=0)
    expires = now_time + datetime.timedelta(hours=hours)
    logger.info('Adding IP address %s until %s' % (remote_ip, expires.strftime('%c %Z')))
    valdata[remote_ip] = {
        'added': now_time.isoformat(sep=' '),
        'expires': expires.isoformat(sep=' '),
    }

    # Try to lookup whois info if cymruwhois is available
    try:
        import cymruwhois
        cym = cymruwhois.Client()
        res = cym.lookup(remote_ip)
        if res.owner and res.cc:
            whois = "%s/%s\n" % (res.owner, res.cc)
            valdata[remote_ip]['whois'] = whois
            logger.info('Whois information for %s: %s' % (remote_ip, whois))
    except Exception:
        # Best-effort enrichment only; narrowed from a bare 'except:' so
        # KeyboardInterrupt/SystemExit still propagate.
        pass

    try:
        geoip = get_geoip_crc(remote_ip)
        if geoip is not None:
            valdata[remote_ip]['geoip'] = geoip
            logger.info('GeoIP information for %s: %s' % (remote_ip, geoip))
    except Exception:
        # Best-effort enrichment only, same narrowing as above.
        pass

    store_authorized_ips(valdata)
def generate_user_token(backends, mode):
if mode == 'totp':
gaus = totpcgi.utils.generate_secret(
RATE_LIMIT, TOTP_WINDOW_SIZE, 5, bs=TOTP_KEY_LENGTH)
else:
gaus = totpcgi.utils.generate_secret(
RATE_LIMIT, HOTP_WINDOW_SIZE, 5, bs=HOTP_KEY_LENGTH)
gaus.set_hotp(0)
user = os.environ['GL_USER']
backends.secret_backend.save_user_secret(user, gaus, None)
# purge all old state, as it's now obsolete
backends.state_backend.delete_user_state(user)
logger.info('New token generated for user %s' % user)
remote_ip = os.environ['SSH_CONNECTION'].split()[0]
syslog.syslog(
syslog.LOG_NOTICE,
'Enrolled: user=%s, host=%s, mode=%s' % (user, remote_ip, mode)
)
if mode == 'totp':
# generate provisioning URI
tpt = Template(TOTP_USER_MASK)
totp_user = tpt.safe_substitute(username=user)
qr_uri = gaus.otp.provisioning_uri(totp_user)
import urllib
print('')
print('Please make sure "qrencode" is installed.')
print('Run the following commands to display your QR code:')
print(' unset HISTFILE')
print(' qrencode -tANSI -m1 -o- "%s"' % qr_uri)
print('')
print('If that does not work or if you do not have access to')
print('qrencode or a similar QR encoding tool, then you may')
print('open an INCOGNITO/PRIVATE MODE window in your browser')
print('and paste the following URL:')
print |
DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_PQSC.py | Python | mit | 9,278 | 0.00388 | """
Driver for PQSC V1
Author: Michael Kerschbaum
Date: 2019/09
"""
import time
import sys
import os
import logging
import numpy as np
import pycqed
import json
import copy
import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument as zibase
log = logging.getLogger(__name__)
##########################################################################
# Exceptions
##########################################################################
##########################################################################
# Module level functions
##########################################################################
##########################################################################
# Class
##########################################################################
class ZI_PQSC(zibase.ZI_base_instrument):
"""
This is the frist version of the PycQED driver for the Zurich Instruments
PQSC.
"""
# Put in correct minimum required revisions
#FIXME: put correct version
MIN_FWREVISION = 63210
MIN_FPGAREVISION = 63133
##########################################################################
# 'public' functions: device control
##########################################################################
def __init__(self,
name,
device: str,
interface: str = 'USB',
port: int = 8004,
server: str = '',
**kw) -> None:
"""
Input arguments:
name: (str) name of the instrument
device (str) the name of the device e.g., "dev8008"
interface (str) the name of the interface to use
('1GbE' or 'USB')
port (int) the port to connect to for the ziDataServer
(don't change)
server: (str) the host where the ziDataServer is running
"""
t0 = time.time()
# Our base class includes all the functionality needed to initialize
# the parameters of the object. Those parameters are read from
# instrument-specific JSON files stored in the zi_parameter_files
# folder.
super().__init__(
name=name,
device=device,
interface=interface,
server=server,
port=port,
awg_module=False,
**kw)
t1 = time.time()
print('Initialized PQSC', self.devname, 'in %.2fs' % (t1 - t0))
##########################################################################
# Private methods
##########################################################################
    def _check_devtype(self):
        """Raise zibase.ziDeviceError if the connected device is not a PQSC."""
        if self.devtype != 'PQSC':
            raise zibase.ziDeviceError('Device {} of type {} is not a PQSC \
                instrument!'.format(self.devname, self.devtype))
    def _check_options(self):
        """
        Checks that the correct options are installed on the instrument.

        NOTE(review): the actual check is disabled below, so this method is
        currently a no-op placeholder. The commented-out code tests for
        QA/AWG options, which look copied from another ZI driver — confirm
        which options (if any) a PQSC should be checked for.
        """
        # FIXME
        # options = self.gets('features/options').split('\n')
        # if 'QA' not in options:
        #     raise zibase.ziOptionsError('Device {} is missing the QA option!'.format(self.devname))
        # if 'AWG' not in options:
        #     raise zibase.ziOptionsError('Device {} is missing the AWG option!'.format(self.devname))
def _check_versions(self):
"""
Checks that sufficient versions of the firmware are available.
"""
if self.geti('system/fwrevision') < ZI_PQSC.MIN_FWREVISION:
raise zibase.ziVersionError(
'Insufficient firmware revision detected! Need {}, got {}!'.
format(ZI_PQSC. | MIN_FWREVISION, self.geti('system/fwrevision')))
if self.geti('system/fpgarevision') < ZI_PQSC.MIN_FPGAREVISION:
raise zibase.ziVersionError(
'Insufficient FPGA revision detected! Need {}, got {}!'.format(
ZI_PQSC.MIN_FPGAREVISION,
self.geti('system/fpgarevision')))
    def _add_extra_parameters(self) -> None:
        """Add custom parameters on top of the ones defined in the device files.

        Currently no PQSC-specific extra parameters are defined, so this
        simply delegates to the base-class implementation.

        NOTE(review): the original docstring here described UHFQA/AWG-style
        parameters (readout weight offsets, 'AWG_file', 'wait_dly', 'cases')
        that this driver does not implement — presumably copied from another
        ZI driver. Confirm before documenting any of those as PQSC features.
        """
        super()._add_extra_parameters()
    # FIXME: put in correct clock_freq
    def clock_freq(self):
        """Return the instrument clock frequency in Hz.

        NOTE(review): 300e6 is flagged above as a placeholder — confirm the
        correct value against the PQSC specification.
        """
        return 300e6
##########################################################################
# 'public' functions:
##########################################################################
    def check_errors(self, errors_to_ignore=None) -> None:
        """
        Checks the instrument for errors.

        Reads the device's JSON error log ('raw/error/json/errors') and
        compares it against the errors recorded on the previous call
        (self._errors). On the very first call, all current errors are only
        logged as warnings to establish a baseline. On subsequent calls, any
        new error code — or a code whose occurrence count increased — is
        logged and triggers an exception, unless the code appears in
        self._errors_to_ignore or in the errors_to_ignore argument.

        :param errors_to_ignore: optional list of error codes that should
            only be logged as warnings instead of raising.
        :raises zibase.ziRuntimeError: if a new, non-ignored error appeared
            since the previous call.
        """
        errors = json.loads(self.getv('raw/error/json/errors'))
        # If this is the first time we are called, log the detected errors,
        # but don't raise any exceptions
        if self._errors is None:
            raise_exceptions = False
            self._errors = {}
        else:
            raise_exceptions = True
        # Asserted in case errors were found
        found_errors = False
        # Combine errors_to_ignore with commandline
        _errors_to_ignore = copy.copy(self._errors_to_ignore)
        if errors_to_ignore is not None:
            _errors_to_ignore += errors_to_ignore
        # Go through the errors and update our structure, raise exceptions if
        # anything changed
        for m in errors['messages']:
            code = m['code']
            count = m['count']
            severity = m['severity']
            message = m['message']
            if not raise_exceptions:
                # Baseline population on first call: record and warn only.
                self._errors[code] = {
                    'count' : count,
                    'severity': severity,
                    'message' : message}
                log.warning(f'{self.devname}: Code {code}: "{message}" ({severity})')
            else:
                # Check if there are new errors
                if code not in self._errors or count > self._errors[code]['count']:
                    if code in _errors_to_ignore:
                        log.warning(f'{self.devname}: {message} ({code}/{severity})')
                    else:
                        log.error(f'{self.devname}: {message} ({code}/{severity})')
                        found_errors = True
                # Record the latest count so the same occurrence does not
                # re-trigger on the next call.
                if code in self._errors:
                    self._errors[code]['count'] = count
                else:
                    self._errors[code] = {
                        'count' : count,
                        'severity': severity,
                        'message' : message}
        if found_errors:
            raise zibase.ziRuntimeError('Errors detected during run-time!')
def set_repetitions(self, num_reps: int):
|
msterin/play | vsc-play/python/helloworld/manage.py | Python | mit | 666 | 0 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks.

    Sets DJANGO_SETTINGS_MODULE (if not already set) and hands the command
    line over to Django's management dispatcher. Re-raises a descriptive
    ImportError when Django itself cannot be imported.

    (Also repairs stray '|' extraction artifacts that had split the import
    and the final call, making the file syntactically invalid.)
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helloworld.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
imndszy/voluntary | excel.py | Python | mit | 1,036 | 0.013078 | # -*- coding:utf8 -*-
# Author: shizhenyu96@gamil.com
# github: https://github.com/imndszy
from openpyxl import Workbook
from openpyxl.compat import range
import pymysql
import time
while True:
try:
conn = pymysql.connect(host='localhost',port=3306,user='szy',passwd='123456',db='voluntary',charset='utf8')
cur = conn.cursor()
cur.execute("select `stuid`,`service_time`,`service_time_a`,`service_time_b` from users")
resu | lt = cur.fetchall()
cur.close()
conn.close()
except:
print 'access database wrong'
time.sleep(30)
continue
result = [list(x) for | x in result]
wb = Workbook()
dest_filename = '/home/ubuntu/www/voluntary/info.xlsx'
ws1 = wb.active
ws1.title = "data"
info=[u'学号',u'总志愿时长',u'A类志愿服务时长',u'B类志愿服务时长']
i = 0
ws1.append(info)
for row in range(2,len(result)+2):
ws1.append(result[i])
i += 1
wb.save(filename=dest_filename)
time.sleep(30)
|
emulbreh/vacuous | vacuous/transactions.py | Python | mit | 641 | 0.00156 | from vacuous.backends import iter_cached_backends
class commit_on_success(object):
    """Context manager / decorator for transactional backend access.

    On clean exit, commit() is called on every cached backend with the
    keyword arguments given to the constructor; if the wrapped block raises,
    every cached backend is rolled back and the exception propagates.
    """

    def __init__(self, **kwargs):
        # Forwarded verbatim to each backend's commit().
        self.kwargs = kwargs

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type:
            for backend in iter_cached_backends():
                backend.rollback()
        else:
            for backend in iter_cached_backends():
                backend.commit(**self.kwargs)
        # Implicitly returns None so any exception propagates to the caller.

    def __call__(self, func):
        # Bug fix: 'wraps' was used without ever being imported, so using
        # the class as a decorator raised NameError at decoration time.
        from functools import wraps

        @wraps(func)
        def decorated(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return decorated
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.