| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
"""add unique key to username
Revision ID: c19852e4dcda
Revises: 1478867a872a
Create Date: 2020-08-06 00:39:03.004053
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c19852e4dcda'
down_revision = '1478867a872a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index('ix_user_username')
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_username'))
batch_op.create_index('ix_user_username', ['username'], unique=False)
# ### end Alembic commands ###
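For context, a minimal sketch of applying this revision programmatically with Alembic's Python API; the alembic.ini path is an assumption, and Flask-Migrate projects would normally run `flask db upgrade` instead.
# Hedged example: apply the migration above from Python.
# The config file location is an assumption, not part of this record.
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")               # assumed config location
command.upgrade(cfg, "c19852e4dcda")      # upgrade to this revision
# command.downgrade(cfg, "1478867a872a")  # revert to the previous revision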
|
hackerspace-silesia/cebulany-manager
|
migrations/versions/c19852e4dcda_add_unique_key_to_username.py
|
Python
|
mit
| 953
| 0.001049
|
#!/bin/env python
# Copyright 2013 Zynga Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module maps requests to handler functions based on the URL and HTTP method
"""
import re
import os
import cgi
import urlrelay
from cgi import parse_qs
from diskmapper import DiskMapper
@urlrelay.url('^.*$', 'GET')
def index(environ, start_response):
"""Handles GET requests
"""
query_string = parse_qs(environ.get("QUERY_STRING"))
status = '202 Accepted'
response_headers = [('Content-type', 'text/plain')]
dm = DiskMapper(environ, start_response)
if "action" in query_string:
action = query_string["action"]
if "get_host_config" in action:
return dm.get_host_config()
elif "get_all_config" in action:
return dm.get_all_config()
elif "get_vb_mapping" in action:
key = None
if "vbucket" in query_string:
key = query_string["vbucket"][0]
return dm.get_vbuckets("vbucket", key)
elif "get_ss_mapping" in action:
key = None
if "storage_server" in query_string:
key = query_string["storage_server"][0]
return dm.get_vbuckets("storage_server", key)
return dm.forward_request()
@urlrelay.url('^.*$', 'DELETE')
def delete(environ, start_response):
"""Handles GET requests
"""
dm = DiskMapper(environ, start_response)
return dm.forward_request()
@urlrelay.url('^.*$', 'POST')
def upload(environ, start_response):
dm = DiskMapper(environ, start_response)
return dm.upload()
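A minimal client-side sketch for the URL mapper above, assuming the service is reachable at a placeholder address; only query parameters visible in the index() handler (action, vbucket) are used.
# Hedged example: query the disk-mapper WSGI service defined above.
# The base URL is a placeholder host/port, not a value from the repository.
import urllib.parse
import urllib.request

BASE_URL = "http://localhost:8080/"  # assumed address of the dm_server

def get_vb_mapping(vbucket=None):
    """Ask the service for the vbucket mapping (optionally one vbucket)."""
    params = {"action": "get_vb_mapping"}
    if vbucket is not None:
        params["vbucket"] = vbucket
    url = BASE_URL + "?" + urllib.parse.urlencode(params)
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8")

if __name__ == "__main__":
    print(get_vb_mapping(0))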
|
zbase/disk_mapper
|
dm_server/lib/urlmapper.py
|
Python
|
apache-2.0
| 2,103
| 0.000951
|
"""
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'at0zvwnp1y=4sva38l)0)ejiaiq$aqap8ehs7uld0g948yj-fy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Template files
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
HayaoSuzuki/django-tutorial
|
mysite/mysite/settings.py
|
Python
|
mit
| 2,134
| 0
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from nova import config
from nova import ipv6
from nova import paths
from nova.tests.unit import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.openstack.common.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
"""Fixture to manage global conf settings."""
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver',
'nova.virt.fake.SmallFakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('vlan_interface', 'eth0')
self.conf.set_default('auth_strategy', 'noauth')
config.parse_args([], default_config_files=[])
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('fatal_exception_format_errors', True)
self.conf.set_default('enabled', True, 'osapi_v3')
self.conf.set_default('force_dhcp_release', False)
self.conf.set_default('periodic_enable', False)
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
|
cloudbase/nova-virtualbox
|
nova/tests/unit/conf_fixture.py
|
Python
|
apache-2.0
| 3,169
| 0
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
# TODO: add tests for all machines
# TODO: add tests for new status callbacks
"""Base classes for machine types. Do not use directly."""
import binascii
import threading
import serial
from plover import _, log
from plover.machine.keymap import Keymap
from plover.misc import boolean
# i18n: Machine state.
STATE_STOPPED = _('stopped')
# i18n: Machine state.
STATE_INITIALIZING = _('initializing')
# i18n: Machine state.
STATE_RUNNING = _('connected')
# i18n: Machine state.
STATE_ERROR = _('disconnected')
class StenotypeBase:
"""The base class for all Stenotype classes."""
# Layout of physical keys.
KEYS_LAYOUT = ''
# And special actions to map to.
ACTIONS = ()
# Fallback to use as machine type for finding a compatible keymap
# if one is not already available for this machine type.
KEYMAP_MACHINE_TYPE = None
def __init__(self):
# Setup default keymap with no translation of keys.
keys = self.get_keys()
self.keymap = Keymap(keys, keys)
self.keymap.set_mappings(zip(keys, keys))
self.stroke_subscribers = []
self.state_subscribers = []
self.state = STATE_STOPPED
def set_keymap(self, keymap):
"""Setup machine keymap."""
self.keymap = keymap
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
pass
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
pass
def add_stroke_callback(self, callback):
"""Subscribe to output from the stenotype machine.
Argument:
callback -- The function to call whenever there is output from
the stenotype machine and output is being captured.
"""
self.stroke_subscribers.append(callback)
def remove_stroke_callback(self, callback):
"""Unsubscribe from output from the stenotype machine.
Argument:
callback -- A function that was previously subscribed.
"""
self.stroke_subscribers.remove(callback)
def add_state_callback(self, callback):
self.state_subscribers.append(callback)
def remove_state_callback(self, callback):
self.state_subscribers.remove(callback)
def _notify(self, steno_keys):
"""Invoke the callback of each subscriber with the given argument."""
for callback in self.stroke_subscribers:
callback(steno_keys)
def set_suppression(self, enabled):
'''Enable keyboard suppression.
This is only of use for the keyboard machine,
        to suppress the keyboard when the engine is running.
'''
pass
def suppress_last_stroke(self, send_backspaces):
'''Suppress the last stroke key events after the fact.
This is only of use for the keyboard machine,
and the engine is resumed with a command stroke.
Argument:
send_backspaces -- The function to use to send backspaces.
'''
pass
def _set_state(self, state):
self.state = state
for callback in self.state_subscribers:
callback(state)
def _stopped(self):
self._set_state(STATE_STOPPED)
def _initializing(self):
self._set_state(STATE_INITIALIZING)
def _ready(self):
self._set_state(STATE_RUNNING)
def _error(self):
self._set_state(STATE_ERROR)
@classmethod
def get_actions(cls):
"""List of supported actions to map to."""
return cls.ACTIONS
@classmethod
def get_keys(cls):
return tuple(cls.KEYS_LAYOUT.split())
@classmethod
def get_option_info(cls):
"""Get the default options for this machine."""
return {}
class ThreadedStenotypeBase(StenotypeBase, threading.Thread):
"""Base class for thread based machines.
Subclasses should override run.
"""
def __init__(self):
threading.Thread.__init__(self)
self.name += '-machine'
StenotypeBase.__init__(self)
self.finished = threading.Event()
def run(self):
"""This method should be overridden by a subclass."""
pass
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
self.finished.clear()
self._initializing()
self.start()
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
self.finished.set()
try:
self.join()
except RuntimeError:
pass
self._stopped()
class SerialStenotypeBase(ThreadedStenotypeBase):
"""For use with stenotype machines that connect via serial port.
This class implements the three methods necessary for a standard
stenotype interface: start_capture, stop_capture, and
    add_stroke_callback.
"""
# Default serial parameters.
SERIAL_PARAMS = {
'port': None,
'baudrate': 9600,
'bytesize': 8,
'parity': 'N',
'stopbits': 1,
'timeout': 2.0,
}
def __init__(self, serial_params):
"""Monitor the stenotype over a serial port.
The key-value pairs in the <serial_params> dict are the same
as the keyword arguments for a serial.Serial object.
"""
ThreadedStenotypeBase.__init__(self)
self.serial_port = None
self.serial_params = serial_params
def _close_port(self):
if self.serial_port is None:
return
self.serial_port.close()
self.serial_port = None
def start_capture(self):
self._close_port()
try:
self.serial_port = serial.Serial(**self.serial_params)
except (serial.SerialException, OSError):
log.warning('Can\'t open serial port', exc_info=True)
self._error()
return
if not self.serial_port.isOpen():
log.warning('Serial port is not open: %s', self.serial_params.get('port'))
self._error()
return
return ThreadedStenotypeBase.start_capture(self)
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
ThreadedStenotypeBase.stop_capture(self)
self._close_port()
@classmethod
def get_option_info(cls):
"""Get the default options for this machine."""
sb = lambda s: int(float(s)) if float(s).is_integer() else float(s)
converters = {
'port': str,
'baudrate': int,
'bytesize': int,
'parity': str,
'stopbits': sb,
'timeout': float,
'xonxoff': boolean,
'rtscts': boolean,
}
return {
setting: (default, converters[setting])
for setting, default in cls.SERIAL_PARAMS.items()
}
def _iter_packets(self, packet_size):
"""Yield packets of <packets_size> bytes until the machine is stopped.
N.B.: to workaround the fact that the Toshiba Bluetooth stack
on Windows does not correctly handle the read timeout setting
(returning immediately if some data is already available):
- the effective timeout is re-configured to <timeout/packet_size>
- multiple reads are done (until a packet is complete)
- an incomplete packet will only be discarded if one of
those reads return no data (but not on short read)
"""
self.serial_port.timeout = max(
self.serial_params.get('timeout', 1.0) / packet_size,
0.01,
)
packet = b''
while not self.finished.is_set():
raw = self.serial_port.read(packet_size - len(packet))
if not raw:
if packet:
log.error('discarding incomplete packet: %s',
binascii.hexlify(packet))
packet = b''
continue
packet += raw
if len(packet) != packet_size:
continue
yield packet
packet = b''
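A minimal sketch of a concrete machine built on SerialStenotypeBase, using only methods defined above; the 6-byte packet size, key layout and bit decoding are illustrative assumptions, not a real stenotype protocol.
# Hedged example subclass: everything protocol-specific here is made up.
class ExampleSerialMachine(SerialStenotypeBase):

    KEYS_LAYOUT = 'S- T- K- P- W- H-'  # assumed six-key layout

    def run(self):
        self._ready()
        for packet in self._iter_packets(6):  # assumed 6-byte packets
            keys = self._decode(packet)
            if keys:
                self._notify(keys)

    def _decode(self, packet):
        # One key per bit of the first byte, purely for illustration.
        keys = self.get_keys()
        return [keys[i] for i in range(len(keys)) if packet[0] & (1 << i)]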
|
openstenoproject/plover
|
plover/machine/base.py
|
Python
|
gpl-2.0
| 8,198
| 0.000732
|
# -*- coding: utf-8 -*-
from folium.plugins.marker_cluster import MarkerCluster
from folium.utilities import if_pandas_df_convert_to_numpy, validate_location
from jinja2 import Template
class FastMarkerCluster(MarkerCluster):
"""
Add marker clusters to a map using in-browser rendering.
    Using FastMarkerCluster it is possible to render thousands of
points far quicker than the MarkerCluster class.
Be aware that the FastMarkerCluster class passes an empty
list to the parent class' __init__ method during initialisation.
This means that the add_child method is never called, and
    no reference to any marker data is retained. Methods such
as get_bounds() are therefore not available when using it.
Parameters
----------
data: list of list with values
List of list of shape [[lat, lon], [lat, lon], etc.]
When you use a custom callback you could add more values after the
lat and lon. E.g. [[lat, lon, 'red'], [lat, lon, 'blue']]
callback: string, optional
A string representation of a valid Javascript function
that will be passed each row in data. See the
        FastMarkerCluster class for an example of a custom callback.
name : string, optional
The name of the Layer, as it will appear in LayerControls.
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
icon_create_function : string, default None
Override the default behaviour, making possible to customize
markers colors and sizes.
**kwargs
Additional arguments are passed to Leaflet.markercluster options. See
https://github.com/Leaflet/Leaflet.markercluster
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = (function(){
{{ this.callback }}
var data = {{ this.data|tojson }};
var cluster = L.markerClusterGroup({{ this.options|tojson }});
{%- if this.icon_create_function is not none %}
cluster.options.iconCreateFunction =
{{ this.icon_create_function.strip() }};
{%- endif %}
for (var i = 0; i < data.length; i++) {
var row = data[i];
var marker = callback(row);
marker.addTo(cluster);
}
cluster.addTo({{ this._parent.get_name() }});
return cluster;
})();
{% endmacro %}""")
def __init__(self, data, callback=None, options=None,
name=None, overlay=True, control=True, show=True, icon_create_function=None, **kwargs):
if options is not None:
kwargs.update(options) # options argument is legacy
super(FastMarkerCluster, self).__init__(name=name, overlay=overlay,
control=control, show=show,
icon_create_function=icon_create_function,
**kwargs)
self._name = 'FastMarkerCluster'
data = if_pandas_df_convert_to_numpy(data)
self.data = [[*validate_location(row[:2]), *row[2:]] # noqa: E999
for row in data]
if callback is None:
self.callback = """
var callback = function (row) {
var icon = L.AwesomeMarkers.icon();
var marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
return marker;
};"""
else:
self.callback = 'var callback = {};'.format(callback)
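A short usage sketch for FastMarkerCluster; the coordinates, map centre and custom callback are illustrative values only.
# Hedged example: render a few points with the default and a custom callback.
import folium
from folium.plugins import FastMarkerCluster

points = [[45.5236, -122.6750], [45.5289, -122.6712], [45.5190, -122.6822]]
m = folium.Map(location=[45.52, -122.67], zoom_start=13)

# Default callback: plain AwesomeMarkers icons.
FastMarkerCluster(points).add_to(m)

# Custom callback: any JavaScript function returning a Leaflet marker;
# extra columns after lat/lon in each row are passed through to it.
circle_cb = """
function (row) {
    return L.circleMarker(new L.LatLng(row[0], row[1]), {radius: 5});
}"""
FastMarkerCluster(points, callback=circle_cb, name='circles').add_to(m)
m.save('cluster_map.html')  # writes a standalone HTML file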
|
ocefpaf/folium
|
folium/plugins/fast_marker_cluster.py
|
Python
|
mit
| 3,954
| 0.000506
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 01 10:45:09 2014
Training models remotely in the cloud
@author: pacif_000
"""
from kafka.client import KafkaClient
from kafka.consumer import SimpleConsumer
import os
import platform
if platform.system() == 'Windows':
import win32api
else:
import signal
import thread
import traceback
kafkaHost = 'monkkafka.cloudapp.net:9092,monkkafka.cloudapp.net:9093,monkkafka.cloudapp.net:9094'
kafkaTopic = 'expr'
kafkaGroup = 'expr'
kafka = None
producer = None
consumer = None
def onexit():
global kafka, consumer, producer
if consumer:
consumer.commit()
consumer.stop()
consumer = None
if producer:
producer.stop()
producer = None
if kafka:
kafka.close()
kafka = None
    print('remote_trainer {0} is shutting down'.format(os.getpid()))
def handler(sig, hook = thread.interrupt_main):
global kafka, consumer, producer
if consumer:
consumer.commit()
consumer.stop()
consumer = None
if producer:
producer.stop()
producer = None
if kafka:
kafka.close()
kafka = None
    print('remote_trainer {0} is shutting down'.format(os.getpid()))
exit(1)
def server():
global kafka, producer, consumer
if platform.system() == 'Windows':
win32api.SetConsoleCtrlHandler(handler, 1)
else:
signal.signal(signal.SIGINT, onexit)
try:
kafka = KafkaClient(kafkaHost,timeout=None)
consumer = SimpleConsumer(kafka, kafkaGroup, kafkaTopic, partitions=[0,1,2])
for message in consumer:
print(message)
except Exception as e:
print('Exception {0}'.format(e))
print('Can not consume actions')
print(traceback.format_exc())
except KeyboardInterrupt:
onexit()
finally:
onexit()
if __name__=='__main__':
while 1:
server()
|
xumiao/pymonk
|
tests/kafka_tester.py
|
Python
|
mit
| 1,929
| 0.007258
|
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import re
from typing import List
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
_checked: List[str] = []
def reset():
global _checked
_checked = []
def check_cve_2019_5418(url: str) -> List[Result]:
global _checked
# this only applies to controllers, so skip the check unless the link ends with '/'
if not url.endswith("/") or url in _checked:
return []
results: List[Result] = []
_checked.append(url)
try:
res = network.http_get(
url, False, {"Accept": "../../../../../../../../../e*c/p*sswd{{"}
)
if network.response_body_is_text(res):
body = res.text
req = network.http_build_raw_request(res.request)
# check to see if "root" is in the string, then do the proper check
if "root:" in body:
pattern = r"root:[a-zA-Z0-9]+:0:0:.+$"
mtch = re.search(pattern, body)
if mtch:
results.append(
Result(
f"Rails CVE-2019-5418: File Content Disclosure: {url} - {mtch.group(0)}",
Vulnerabilities.SERVER_RAILS_CVE_2019_5418,
url,
[body, req],
)
)
except Exception:
output.debug_exception()
return results
|
adamcaudill/yawast
|
yawast/scanner/plugins/http/servers/rails.py
|
Python
|
mit
| 1,712
| 0.001752
|
# The houses form a circle, so split into two cases: rob the first house or skip it.
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
if len(nums) == 1:
return nums[0]
if len(nums) == 2:
return max(nums)
l = []
l1 = []
# while nums[1] > nums[0] or nums[len(nums)-1] > nums[0]:
# temp = nums[0]
# del nums[0]
# nums.append(temp)
# print(nums)
# l = [0] * len(nums)
l.append(nums[0])
l1.append(0)
l.append(max(nums[0], nums[1]))
l1.append(nums[1])
for i in range(2, len(nums)):
if i == len(nums) - 1:
l.append(l[i-1])
else:
l.append(max(l[i-2] + nums[i], l[i-1]))
if i == 2:
l1.append(max(l1[i-1], nums[i]))
else:
l1.append(max(l1[i-2] + nums[i], l1[i-1]))
return max(max(l), max(l1))
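A few sanity checks for the solution above, using the standard LeetCode 213 examples (expected answers 3, 4 and 3).
# Hedged example: quick checks with the well-known LeetCode 213 inputs.
if __name__ == '__main__':
    s = Solution()
    assert s.rob([2, 3, 2]) == 3      # cannot take both ends, so rob house 2
    assert s.rob([1, 2, 3, 1]) == 4   # rob houses 1 and 3
    assert s.rob([1, 2, 3]) == 3
    print('all checks passed')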
|
MingfeiPan/leetcode
|
dp/213.py
|
Python
|
apache-2.0
| 1,077
| 0.003865
|
import unittest
from mock import Mock
from cartodb_services.tomtom.isolines import TomTomIsolines, DEFAULT_PROFILE
from cartodb_services.tools import Coordinate
from credentials import tomtom_api_key
VALID_ORIGIN = Coordinate(-73.989, 40.733)
class TomTomIsolinesTestCase(unittest.TestCase):
def setUp(self):
self.tomtom_isolines = TomTomIsolines(apikey=tomtom_api_key(),
logger=Mock())
def test_calculate_isochrone(self):
time_ranges = [300, 900]
solution = self.tomtom_isolines.calculate_isochrone(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
time_ranges=time_ranges)
assert solution
def test_calculate_isodistance(self):
distance_range = 10000
solution = self.tomtom_isolines.calculate_isodistance(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
distance_range=distance_range)
assert solution
|
CartoDB/geocoder-api
|
server/lib/python/cartodb_services/test/test_tomtomisoline.py
|
Python
|
bsd-3-clause
| 992
| 0
|
# -*- coding: utf-8 -*-
class Ledger(object):
def __init__(self, db):
self.db = db
def balance(self, token):
cursor = self.db.cursor()
cursor.execute("""SELECT * FROM balances WHERE TOKEN = %s""", [token])
row = cursor.fetchone()
return 0 if row is None else row[2]
def deposit(self, token, amount):
cursor = self.db.cursor()
cursor.execute(
"""INSERT INTO balances (token, amount)
SELECT %s, 0
WHERE NOT EXISTS (SELECT 1 FROM balances WHERE token = %s)""",
[token, token])
cursor.execute(
"""UPDATE balances SET amount = amount + %s WHERE token = %s""",
[amount, token])
cursor.execute(
"""INSERT INTO movements (token, amount) VALUES(%s, %s)""",
[token, amount])
self.db.commit()
return True
def withdraw(self, token, amount):
"""Remove the given amount from the token's balance."""
cursor = self.db.cursor()
cursor.execute("""
UPDATE balances
SET amount = amount - %s
WHERE token = %s AND amount >= %s""",
[amount, token, amount])
success = (cursor.rowcount == 1)
if success:
cursor.execute(
"""INSERT INTO movements (token, amount) VALUES(%s, %s)""",
[token, -amount])
self.db.commit()
return success
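A minimal wiring sketch for the Ledger class above; the psycopg2 DSN and the token are placeholders, and the balances/movements tables are assumed to already exist.
# Hedged example: connect the Ledger above to PostgreSQL via psycopg2.
if __name__ == '__main__':
    import psycopg2

    db = psycopg2.connect('dbname=accounts user=accounts')  # placeholder DSN
    ledger = Ledger(db)

    token = 'example-token'
    ledger.deposit(token, 100)
    if ledger.withdraw(token, 40):
        print('new balance:', ledger.balance(token))  # expected: 60
    else:
        print('insufficient funds')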
|
Storj/accounts
|
accounts/ledger.py
|
Python
|
mit
| 1,481
| 0.00135
|
#!/usr/bin/python
#
#
from distutils.core import setup
from spacewalk.common.rhnConfig import CFG, initCFG
initCFG('web')
setup(name = "rhnclient",
version = "5.5.9",
description = CFG.PRODUCT_NAME + " Client Utilities and Libraries",
long_description = CFG.PRODUCT_NAME + """\
Client Utilities
Includes: rhn_check, action handler, and modules to allow
client packages to communicate with RHN.""",
author = 'Joel Martin',
author_email = 'jmartin@redhat.com',
url = 'http://rhn.redhat.com',
packages = ["rhn.actions", "rhn.client"],
license = "GPL",
)
|
PaulWay/spacewalk
|
client/solaris/rhnclient/setup.py
|
Python
|
gpl-2.0
| 610
| 0.029508
|
# Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import BedLevelMachineAction
from . import UMOUpgradeSelection
def getMetaData():
return {}
def register(app):
return { "machine_action": [
BedLevelMachineAction.BedLevelMachineAction(),
UMOUpgradeSelection.UMOUpgradeSelection()
]}
|
Ultimaker/Cura
|
plugins/UltimakerMachineActions/__init__.py
|
Python
|
lgpl-3.0
| 366
| 0.008197
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="scatter.line", **kwargs):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
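For context, a brief sketch of the user-facing property this validator backs: the width of a scatter trace's line in the plotly.graph_objects API (the values below are arbitrary).
# Hedged example: scatter.line.width is the property validated above
# (a number >= 0, applied as a style-type edit).
import plotly.graph_objects as go

fig = go.Figure(go.Scatter(y=[1, 3, 2], line=dict(width=4)))
fig.write_html('line_width_demo.html')  # or fig.show() in an interactive session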
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter/line/_width.py
|
Python
|
mit
| 523
| 0.001912
|
n=int(input('Enter any number: '))
if n%2!=0:
n=n+1
for i in range(n):
for j in range(n):
if (i==int(n/2)) or j==int(n/2) or ((i==0)and (j>=int(n/2))) or ((j==0)and (i<=int(n/2))) or ((j==n-1)and (i>=int(n/2))) or ((i==n-1)and (j<=int(n/2))):
print('*',end='')
else:
print(' ',end='')
print()
|
rohitjogson/pythonwork
|
assign27.09.py
|
Python
|
gpl-3.0
| 355
| 0.059155
|
from PyQt4.QtCore import QSize
from PyQt4.QtGui import QVBoxLayout
# This is really really ugly, but the QDockWidget for some reason does not notice when
# its child widget becomes smaller...
# Therefore we manually set its minimum size when our own minimum size changes
class MyVBoxLayout(QVBoxLayout):
def __init__(self, parent=None):
QVBoxLayout.__init__(self, parent)
self._last_size = QSize(0, 0)
def setGeometry(self, r):
QVBoxLayout.setGeometry(self, r)
try:
wid = self.parentWidget().parentWidget()
new_size = self.minimumSize()
if new_size == self._last_size: return
self._last_size = new_size
twid = wid.titleBarWidget()
if twid is not None:
theight = twid.sizeHint().height()
else:
theight = 0
new_size += QSize(0, theight)
wid.setMinimumSize(new_size)
except Exception:
pass
|
bitmingw/FindYourSister
|
sloth/sloth/gui/utils.py
|
Python
|
bsd-2-clause
| 994
| 0.002012
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from webob import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
LBAAS_HOSTA = 'hosta'
LBAAS_HOSTB = 'hostb'
class AgentTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
return agent.Agent.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is just for testing
class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
supported_extension_aliases = ["agent"]
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
neutron_context=None,
query_string=None):
agent_res = self._list('agents',
neutron_context=neutron_context,
query_params=query_string)
if expected_res_status:
self.assertEqual(agent_res.status_int, expected_res_status)
return agent_res
def _register_agent_states(self, lbaas_agents=False):
"""Register two L3 agents and two DHCP agents."""
l3_hosta = helpers._get_l3_agent_dict(
L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
l3_hostb = helpers._get_l3_agent_dict(
L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
helpers.register_l3_agent(host=L3_HOSTA)
helpers.register_l3_agent(host=L3_HOSTB)
helpers.register_dhcp_agent(host=DHCP_HOSTA)
helpers.register_dhcp_agent(host=DHCP_HOSTC)
res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
if lbaas_agents:
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
lbaas_hostb = copy.deepcopy(lbaas_hosta)
lbaas_hostb['host'] = LBAAS_HOSTB
callback = agents_db.AgentExtRpcCallback()
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hosta},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hostb},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
res += [lbaas_hosta, lbaas_hostb]
return res
def _register_dvr_agents(self):
dvr_snat_agent = helpers.register_l3_agent(
host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
dvr_agent = helpers.register_l3_agent(
host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR)
return [dvr_snat_agent, dvr_agent]
class AgentDBTestCase(AgentDBTestMixIn,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
fmt = 'json'
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
self.useFixture(tools.AttributeMapMemento())
ext_mgr = AgentTestExtensionManager()
super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
self.adminContext = context.get_admin_context()
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
_req.environ['neutron.context'] = context.Context(
'', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_list_agent(self):
agents = self._register_agent_states()
res = self._list('agents')
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent')
self.assertEqual(2, len(agents['agents']))
agent = self._show('agents', agents['agents'][0]['id'])
self.assertEqual('neutron-l3-agent', agent['agent']['binary'])
def test_update_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
agent = self._show('agents', com_id)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
self._update('agents', com_id, new_agent)
agent = self._show('agents', com_id)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
def test_dead_agent(self):
cfg.CONF.set_override('agent_down_time', 1)
self._register_agent_states()
time.sleep(1.5)
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertFalse(agents['agents'][0]['alive'])
|
barnsnake351/neutron
|
neutron/tests/unit/extensions/test_agent.py
|
Python
|
apache-2.0
| 7,027
| 0
|
#!/bin/env python
# \author Hans J. Johnson
#
# Prepare for the future by recommending
# use of itk::Math:: functions over
# vnl_math:: functions.
# Rather than converting vnl_math_ to vnl_math::
# this prefers to convert directly to itk::Math::
# namespace. In cases where vnl_math:: is simply
# an alias to std:: functions, itk::Math directly
# uses the std:: version of the function.
import os
import sys
from collections import OrderedDict
## slight modification from grep command
info_for_conversion="""
XXXX,vnl_math_isnan,itk::Math::isnan
XXXX,vnl_math_isinf,itk::Math::isinf
XXXX,vnl_math_isfinite,itk::Math::isfinite
XXXX,vnl_math_isnormal,itk::Math::isnormal
XXXX,vnl_math_max,std::max
XXXX,vnl_math_min,std::min
XXXX,vnl_math_cuberoot,itk::Math::cbrt
XXXX,vnl_math_hypot,itk::Math::hypot
XXXX,vnl_math_angle_0_to_2pi,itk::Math::angle_0_to_2pi
XXXX,vnl_math_angle_minuspi_to_pi,itk::Math::angle_minuspi_to_pi
XXXX,vnl_math_rnd_halfinttoeven,itk::Math::halfinttoeven
XXXX,vnl_math_rnd_halfintup,itk::Math::rnd_halfintup
XXXX,vnl_math_rnd,itk::Math::rnd
XXXX,vnl_math_floor,itk::Math::floor
XXXX,vnl_math_ceil,itk::Math::ceil
XXXX,vnl_math_abs,itk::Math::abs
XXXX,vnl_math_sqr,itk::Math::sqr
XXXX,vnl_math_cube,itk::Math::cube
XXXX,vnl_math_sgn,itk::Math::sgn
XXXX,vnl_math_sgn0,itk::Math::sgn0
XXXX,vnl_math_squared_magnitude,itk::Math::squared_magnitude
XXXX,vnl_math_remainder_truncated,itk::Math::remainder_truncated
XXXX,vnl_math_remainder_floored,itk::Math::remainder_floored
"""
ITK_replace_head_names = OrderedDict()
ITK_replace_functionnames = OrderedDict()
ITK_replace_manual = OrderedDict()
ITK_replace_manual['"vnl/vnl_math.h"']='"itkMath.h"'
ITK_replace_manual['<vnl/vnl_math.h>']='<itkMath.h>'
for line in info_for_conversion.splitlines():
linevalues = line.split(",")
if len(linevalues) != 3:
#print("SKIPPING: " + str(linevalues))
continue
fname=linevalues[0]
new_name=fname.replace("ITK_","").replace(".h","")
ITK_replace_head_names['#include "{0}"'.format(fname)]="""#if !defined( ITK_LEGACY_FUTURE_REMOVE )
# include "{0}"
#endif
#include <{1}>""".format(fname,new_name)
ITK_replace_head_names['#include <{0}>'.format(fname)]="""#if !defined( ITK_LEGACY_FUTURE_REMOVE )
# include <{0}>
#endif
#include <{1}>""".format(fname,new_name)
ITK_pat=linevalues[1]
new_pat=linevalues[2]
ITK_replace_functionnames[ITK_pat]=new_pat
    # Handle the fact that std::ios is both a base name and a prefix
if "std::ios::" in new_pat:
ITK_replace_manual[new_pat.replace("std::ios::","std::ios_")] = new_pat
#print(ITK_replace_head_names)
#print(ITK_replace_functionnames)
cfile=sys.argv[1]
file_as_string=""
with open(cfile,"r") as rfp:
original_string=rfp.read()
file_as_string=original_string
required_header="" ## For ITK, this is always empty
for searchval,replaceval in ITK_replace_head_names.items():
file_as_string_new = file_as_string.replace(searchval,required_header+replaceval)
if file_as_string_new != file_as_string:
required_header=""
file_as_string=file_as_string_new
for searchval,replaceval in ITK_replace_functionnames.items():
file_as_string = file_as_string.replace(searchval,replaceval)
for searchval,replaceval in ITK_replace_manual.items():
file_as_string = file_as_string.replace(searchval,replaceval)
if file_as_string != original_string:
print("Processing: {0}".format(cfile))
with open(cfile,"w") as wfp:
wfp.write(file_as_string)
else:
print("SKIPPING: {0}".format(cfile))
|
zachary-williamson/ITK
|
Utilities/Maintenance/VNL_ModernizeNaming.py
|
Python
|
apache-2.0
| 3,528
| 0.014172
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines custom errors and exceptions used in `astropy.samp`.
"""
import xmlrpc.client as xmlrpc
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SAMPWarning', 'SAMPHubError', 'SAMPClientError', 'SAMPProxyError']
class SAMPWarning(AstropyUserWarning):
"""
SAMP-specific Astropy warning class
"""
class SAMPHubError(Exception):
"""
SAMP Hub exception.
"""
class SAMPClientError(Exception):
"""
SAMP Client exceptions.
"""
class SAMPProxyError(xmlrpc.Fault):
"""
SAMP Proxy Hub exception
"""
|
pllim/astropy
|
astropy/samp/errors.py
|
Python
|
bsd-3-clause
| 637
| 0
|
#Author: Miguel Molero <miguel.molero@gmail.com>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class ObjectInspectorWidget(QWidget):
def __init__(self, parent = None):
super(ObjectInspectorWidget, self).__init__(parent)
layout = QVBoxLayout()
self.tab = QTabWidget()
self.properties_tree = QTreeWidget()
self.properties_tree.setHeaderLabels(["",""])
self.properties_tree.setAlternatingRowColors(True)
self.properties_tree.setColumnCount(2)
self.properties_tree.header().resizeSection(0, 200)
self.tab.addTab(self.properties_tree, "Properties")
layout.addWidget(self.tab)
self.setLayout(layout)
self.setGeometry(0,0,100, 400)
def update(self, props):
self.properties_tree.clear()
data_tree = QTreeWidgetItem(self.properties_tree)
data_tree.setText(0,"Data")
#data_tree.setFont(0,QFont(c.FONT_NAME, c.FONT_SIZE_1, QFont.Bold))
labels = props.keys()
values = props.values()
self.populateTree(data_tree, labels, values)
def populateTree(self, parent,labels,values):
for i,j in zip(labels,values):
if j is None:
continue
item = QTreeWidgetItem(parent)
item.setText(0,i)
#item.setFont(0,QFont(c.FONT_NAME, c.FONT_SIZE_2, QFont.Normal))
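            # NOTE: `c` below (and in the commented-out setFont calls) appears to
            # come from a project constants module (MARK/CROSS glyphs, fonts) whose
            # import is not shown in this excerpt.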
if isinstance(j,bool):
if j is True:
item.setText(1, c.MARK)
else:
item.setText(1, c.CROSS)
else:
item.setText(1,str(j))
#item.setFont(1,QFont(c.FONT_NAME, c.FONT_SIZE_3, QFont.Normal))
self.properties_tree.expandItem(parent)
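A standalone demo sketch for the widget above; the property names and values are made up.
# Hedged example: show the inspector with a few fabricated properties.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    inspector = ObjectInspectorWidget()
    inspector.update({'Name': 'cloud-01', 'Points': 1024, 'Color': 'RGB'})
    inspector.show()
    sys.exit(app.exec_())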
|
mmolero/pcloudpy
|
pcloudpy/gui/components/ObjectInspectorWidget.py
|
Python
|
bsd-3-clause
| 1,774
| 0.012401
|
import unittest
from collections import namedtuple
from io import BytesIO
import codecs
import sha2
import hmac
class TestSHA2(unittest.TestCase):
# test vectors from https://csrc.nist.gov/projects/cryptographic-standards-and-guidelines/example-values
TestVector = namedtuple('TestVector', ['digestcls', 'text', 'key', 'mac'])
TEST_VECTORS = (
# SHA-224 based HMACs
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F'
'10111213' '14151617' '18191A1B' '1C1D1E1F'
'20212223' '24252627' '28292A2B' '2C2D2E2F'
'30313233' '34353637' '38393A3B' '3C3D3E3F',
'hex',
),
mac=codecs.decode(
'C7405E3A' 'E058E8CD' '30B08B41' '40248581' 'ED174CB3'
'4E1224BC' 'C1EFC81B',
'hex',
),
),
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B',
'hex',
),
mac=codecs.decode(
'E3D249A8' 'CFB67EF8' 'B7A169E9' 'A0A59971' '4A2CECBA'
'65999A51' 'BEB8FBBE',
'hex',
),
),
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263',
'hex',
),
mac=codecs.decode(
'91C52509' 'E5AF8531' '601AE623' '0099D90B' 'EF88AAEF'
'B961F408' '0ABC014D',
'hex',
),
),
# SHA-256 based HMACs
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F10111213' '14151617'
'18191A1B' '1C1D1E1F' '20212223' '2425262728292A2B' '2C2D2E2F'
'30313233' '34353637' '38393A3B' '3C3D3E3F',
'hex',
),
mac=codecs.decode(
'8BB9A1DB' '9806F20DF7F77B82' '138C7914' 'D174D59E' '13DC4D01'
'69C9057B' '133E1D62',
'hex',
),
),
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '0405060708090A0B' '0C0D0E0F' '10111213' '14151617'
'18191A1B' '1C1D1E1F',
'hex',
),
mac=codecs.decode(
'A28CF431' '30EE696A98F14A37' '678B56BC' 'FCBDD9E5' 'CF69717F'
'ECF5480F' '0EBDF790',
'hex',
),
),
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263',
'hex',
),
mac=codecs.decode(
'BDCCB6C7' '2DDEADB5' '00AE7683' '86CB38CC' '41C63DBB'
'0878DDB9' 'C7A38A43' '1B78378D',
'hex',
),
),
# SHA-384 based HMACs
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F',
'hex',
),
mac=codecs.decode(
'63C5DAA5' 'E651847C' 'A897C958' '14AB830B' 'EDEDC7D2'
'5E83EEF9' '195CD458' '57A37F44' '8947858F' '5AF50CC2'
'B1B730DD' 'F29671A9',
'hex',
),
),
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'1415161718191A1B' '1C1D1E1F' '20212223' '24252627' '28292A2B'
'2C2D2E2F',
'hex',
),
mac=codecs.decode(
'6EB242BD' 'BB582CA1' '7BEBFA48' '1B1E2321' '1464D2B7'
'F8C20B9FF2201637' 'B93646AF' '5AE9AC31' '6E98DB45' 'D9CAE773'
'675EEED0',
'hex',
),
),
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F' '80818283' '84858687' '88898A8B'
'8C8D8E8F' '90919293' '94959697' '98999A9B' '9C9D9E9F'
'A0A1A2A3' 'A4A5A6A7' 'A8A9AAAB' 'ACADAEAF' 'B0B1B2B3'
'B4B5B6B7' 'B8B9BABB' 'BCBDBEBF' 'C0C1C2C3' 'C4C5C6C7',
'hex',
),
mac=codecs.decode(
'5B664436' 'DF69B0CA' '22551231' 'A3F0A3D5' 'B4F97991'
'713CFA84' 'BFF4D079' '2EFF96C2' '7DCCBBB6' 'F79B65D5'
'48B40E85' '64CEF594',
'hex',
),
),
# SHA-512 based HMACs
TestVector(
digestcls=sha2.SHA512,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F',
'hex',
),
mac=codecs.decode(
'FC25E240' '658CA785' 'B7A811A8' 'D3F7B4CA' '48CFA26A'
'8A366BF2' 'CD1F836B' '05FCB024' 'BD368530' '81811D6C'
'EA4216EB' 'AD79DA1C' 'FCB95EA4' '586B8A0C' 'E356596A'
'55FB1347',
'hex',
),
),
TestVector(
digestcls=sha2.SHA512,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F',
'hex',
),
mac=codecs.decode(
'FD44C18B' 'DA0BB0A6' 'CE0E82B0' '31BF2818' 'F6539BD5'
'6EC00BDC' '10A8A2D7' '30B3634D' 'E2545D63' '9B0F2CF7'
'10D0692C' '72A1896F' '1F211C2B' '922D1A96' 'C392E07E'
'7EA9FEDC',
'hex',
),
),
TestVector(
digestcls=sha2.SHA512,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F' '80818283' '84858687' '88898A8B'
'8C8D8E8F' '90919293' '94959697' '98999A9B' '9C9D9E9F'
'A0A1A2A3' 'A4A5A6A7' 'A8A9AAAB' 'ACADAEAF' 'B0B1B2B3'
'B4B5B6B7' 'B8B9BABB' 'BCBDBEBF' 'C0C1C2C3' 'C4C5C6C7',
'hex',
),
mac=codecs.decode(
'D93EC8D2' 'DE1AD2A9' '957CB9B8' '3F14E76A' 'D6B5E0CC'
'E285079A' '127D3B14' 'BCCB7AA7' '286D4AC0' 'D4CE6421'
'5F2BC9E6' '870B33D9' '7438BE4A' 'AA20CDA5' 'C5A912B4'
'8B8E27F3',
'hex',
),
),
)
def test_hmac(self):
iio = BytesIO()
for tv in self.__class__.TEST_VECTORS:
iio.truncate(0)
iio.seek(0)
iio.write(tv.text)
iio.seek(0)
digest = hmac.digest(iio, tv.key, digestcls=tv.digestcls)
self.assertEqual(tv.mac, digest,
"{}{}".format(tv.digestcls, tv.text))
if __name__ == '__main__':
unittest.main()
|
olbat/o1b4t
|
coding/crypto/test_hmac.py
|
Python
|
gpl-3.0
| 10,162
| 0.000098
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, clf, res=0.02):
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, res),
np.arange(y_min, y_max, res))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
class Perceptron(object):
def __init__(self, eta=0.01, epochs=50):
self.eta = eta
self.epochs = epochs
def train(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.epochs):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
# Correct perceptron outputs for this training set
y = np.array([[1],[1],[1],[1],[-1],[-1],[-1],[-1]]).reshape(8,1)
# Input data array for the perceptron
X = np.array([[0,3],[1,2],[2,2],[4,0],[-1,2],[2,0],[3,-1],[4,-1]]).reshape(8,2)
ppn = Perceptron(epochs=10, eta=0.1)
ppn.train(X, y)
plot_decision_regions(X, y, clf=ppn)
plt.title('Perceptron')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Misclassifications')
plt.show()
|
wyzekid/Python_Projects
|
Perceptron/Rosenblatt_perceptron.py
|
Python
|
gpl-3.0
| 1,928
| 0.015659
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import Client
from .....checkout.tests import BaseCheckoutAppTests
from .....delivery.tests import TestDeliveryProvider
from .....order import handler as order_handler
from .....payment import ConfirmationFormNeeded
from .....payment.tests import TestPaymentProvider
from .....pricing import handler as pricing_handler
from .....product import handler as product_handler
from .....product.tests import DeadParrot
from .....product.tests.pricing import FiveZlotyPriceHandler
from ..app import checkout_app
from .....cart.tests import TestCart
from .....order.tests import TestOrder
class TestPaymentProviderWithConfirmation(TestPaymentProvider):
def confirm(self, order, typ=None):
raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')
class App(BaseCheckoutAppTests):
checkout_app = checkout_app
urls = BaseCheckoutAppTests.MockUrls(checkout_app=checkout_app)
def setUp(self):
checkout_app.cart_model = TestCart
checkout_app.order_model = TestOrder
self.parrot = DeadParrot.objects.create(slug='parrot',
species='Hyacinth Macaw')
self.dead_parrot = self.parrot.variants.create(color='blue',
looks_alive=False)
satchless_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
self.custom_settings = {
'SATCHLESS_PRODUCT_VIEW_HANDLERS': ('satchless.cart.add_to_cart_handler',),
'TEMPLATE_DIRS': (os.path.join(satchless_dir, 'category', 'templates'),
os.path.join(satchless_dir, 'order', 'templates'),
os.path.join(satchless_dir, 'cart', 'templates'),
os.path.join(satchless_dir, 'cart', 'templates'),
os.path.join(os.path.join(os.path.dirname(__file__),
'templates')),
os.path.join(os.path.join(os.path.dirname(__file__), '..',
'templates'))),
'TEMPLATE_LOADERS': (
'django.template.loaders.filesystem.Loader',
)
}
self.original_settings = self._setup_settings(self.custom_settings)
product_handler.init_queue()
order_handler.delivery_queue = order_handler.DeliveryQueue(TestDeliveryProvider)
order_handler.payment_queue = order_handler.PaymentQueue(TestPaymentProviderWithConfirmation)
self.anon_client = Client()
self.original_pricing_handlers = settings.SATCHLESS_PRICING_HANDLERS
pricing_handler.pricing_queue = pricing_handler.PricingQueue(FiveZlotyPriceHandler)
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
product_handler.init_queue()
pricing_handler.pricing_queue = pricing_handler.PricingQueue(*self.original_pricing_handlers)
def test_checkout_view_passes_with_correct_data(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
response = self._test_status(reverse('checkout:checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': 'foo@example.com'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = 'foo@example.com'
response = self._test_status(self.checkout_app.reverse('checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
order = self.checkout_app.order_model.objects.get(pk=order.pk)
self.assertRedirects(response, reverse('checkout:confirmation',
kwargs={'order_token':
order.token}))
self.assertEqual(order.status, 'payment-pending')
def test_confirmation_view_redirects_when_order_or_payment_is_missing(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
# without payment
self._test_status(reverse('checkout:confirmation',
kwargs={'order_token': order.token}),
client_instance=self.anon_client, status_code=302)
# finish checkout view
response = self._test_status(self.checkout_app.reverse('checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': 'foo@example.com'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = 'foo@example.com'
response = self._test_status(self.checkout_app.reverse('checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
self._test_status(self.checkout_app.reverse('confirmation',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
|
fusionbox/satchless
|
satchless/contrib/checkout/singlestep/tests/__init__.py
|
Python
|
bsd-3-clause
| 7,298
| 0.00274
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import pytest
import backend_common
@pytest.fixture(scope='session')
def app():
'''Load shipit_api in test mode
'''
import shipit_api
config = backend_common.testing.get_app_config({
'SQLALCHEMY_DATABASE_URI': 'sqlite://',
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'AUTH_CLIENT_ID': 'dummy_id',
'AUTH_CLIENT_SECRET': 'dummy_secret',
'AUTH_DOMAIN': 'auth.localhost',
'AUTH_REDIRECT_URI': 'http://localhost/login',
'OIDC_USER_INFO_ENABLED': True,
'OIDC_CLIENT_SECRETS': os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
'TASKCLUSTER_CLIENT_ID': 'something',
'TASKCLUSTER_ACCESS_TOKEN': 'something',
})
app = shipit_api.create_app(config)
with app.app_context():
backend_common.testing.configure_app(app)
yield app
|
La0/mozilla-relengapi
|
src/shipit/api/tests/conftest.py
|
Python
|
mpl-2.0
| 1,089
| 0.000918
|
import sys
import argparse
import numpy as np
import pylab as pl
import netCDF4
import logging
import pymqdatastream.connectors.todl.todl_data_processing as todl_data_processing
try:
from PyQt5 import QtCore, QtGui, QtWidgets
except:
from qtpy import QtCore, QtGui, QtWidgets
#https://matplotlib.org/3.1.0/gallery/user_interfaces/embedding_in_qt_sgskip.html
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger('todl_quickview')
logger.setLevel(logging.DEBUG)
# FP07 Polynom hack
T = np.asarray([1.4, 9.01, 20.96, 27.55,34.77])
V = np.asarray([2.95, 2.221, 1.508, 1.26, 1.07])
P = np.polyfit(V,T,2)
#print('Polynom',P)
#https://stackoverflow.com/questions/18539679/embedding-the-matplotlib-toolbar-in-pyqt4-using-matplotlib-custom-widget#18563884
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class MplWidget(QtWidgets.QWidget):
def __init__(self, parent = None):
QtWidgets.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.vbl.addWidget(self.mpl_toolbar)
self.setLayout(self.vbl)
class todlquickviewMainWindow(QtWidgets.QMainWindow):
"""The main interface of the TODL-Quickview gui
"""
def __init__(self,fname):
QtWidgets.QMainWindow.__init__(self)
self.all_widgets = []
mainMenu = self.menuBar()
self.setWindowTitle("TODL Quickview")
#self.setWindowIcon(QtGui.QIcon('logo/pymqdatastream_logo_v0.2.svg.png'))
extractAction = QtWidgets.QAction("&Quit", self)
extractAction.setShortcut("Ctrl+Q")
extractAction.setStatusTip('Closing the program')
extractAction.triggered.connect(self.close_application)
fileMenu = mainMenu.addMenu('&File')
fileMenu.addAction(extractAction)
self.statusBar()
self.mainwidget = todlquickviewWidget(fname)
self.setCentralWidget(self.mainwidget)
self.width_orig = self.frameGeometry().width()
self.height_orig = self.frameGeometry().height()
self.width_main = self.width_orig
self.height_main = self.height_orig
def close_application(self):
logger.debug('Goodbye!')
self.close()
for w in self.mainwidget.plotWidgets:
w.close()
self.mainwidget.close()
class todlquickviewWidget(QtWidgets.QWidget):
"""
"""
def __init__(self,fname=None):
QtWidgets.QMainWindow.__init__(self)
layout = QtWidgets.QGridLayout()
self.plotWidgets = []
self.data = {}
self.layout = layout
self.setLayout(layout)
self.plot_button = QtWidgets.QPushButton('Plot')
self.plot_button.clicked.connect(self.plot_data)
self.var_combo = QtWidgets.QComboBox(self)
self.layout.addWidget(self.var_combo,0,0)
self.layout.addWidget(self.plot_button,0,1)
if(fname is not None):
logger.debug('Opening file:' + fname)
self.read_ncfile(fname)
self.show()
def plot_data(self):
print('Plotting')
plotvar_y = self.var_combo.currentText()
plotdata_y = self.data[plotvar_y][plotvar_y][:]
plotdata_x = self.data[plotvar_y]['x0'][:]
try:
lab_unit = '[' + self.data[plotvar_y][plotvar_y].units + ']'
except:
lab_unit = ''
ylabel = plotvar_y + lab_unit
#if('ch1' in plotvar_y):
if False:
print('Calculating temperature from polynom')
plotdata_y = np.polyval(P,plotdata_y)
plotdata_y = np.ma.masked_where((plotdata_y > T.max()) | (plotdata_y < T.min()),plotdata_y)
#print(T.max(),T.min())
# Calculate the frequency
fi = 1/(np.diff(plotdata_x).mean())
plotFrame = MplWidget()
ax = plotFrame.canvas.ax
plotFrame.canvas.ax.plot(plotdata_x,plotdata_y)
ax.set_title('Frequency:' + str(fi))
ax.set_xlabel('t [s]')
ax.set_ylabel(ylabel)
plotFrame.show()
self.plotWidgets.append(plotFrame)
def read_ncfile(self,fname):
nc = netCDF4.Dataset(fname)
# Try to read ADC data
try:
nca = nc.groups['adc']
except:
nca = None
pass
if(nca is not None):
for varname in nca.variables:
vartmp = nca.variables[varname]
print(vartmp)
print(vartmp.dimensions[0])
if(not "cnt" in varname):
self.data[vartmp.name] = {vartmp.name:vartmp,vartmp.dimensions[0]:nca.variables[vartmp.dimensions[0]]}
self.data[vartmp.name]['x0'] = self.data[vartmp.name][vartmp.dimensions[0]]
# Add to the gui
self.var_combo.addItem(varname)
#self.FLAG_CH1=True
#print('Found ch1 ADC data')
else:
print('cnt ...')
# Read in PyroScience data
print('Trying Firesting data')
try:
ncp = nc.groups['pyro']
cnt10ks_p = ncp.variables['cnt10ks_pyro'][:]
#time_p = netCDF4.num2date(ncp.variables['time'][:],units = ncp.variables['time'].units)
fp = 1/(np.diff(cnt10ks_p).mean())
# Add to the gui
self.var_combo.addItem('phi')
#phi = ncp.variables['phi'][:]
# Add to the data
self.data['phi'] = {'phi':ncp.variables['phi'],'cnt10ks_p':ncp.variables['cnt10ks_pyro']}
self.data['phi']['x0'] = self.data['phi']['cnt10ks_p']
self.FLAG_PYRO=True
print('Found Pyro data')
except Exception as e:
print('Pyro:' + str(e))
self.FLAG_PYRO=False
# Read in IMU
print('Trying IMU data')
try:
self.FLAG_IMU = True
for g in nc.groups:
print(g)
if('imu' in g):
nci = nc.groups[g]
try:
cntvar = 'cnt10ks_imu'
nci.variables[cntvar][:]
except:
cntvar = 'cnt10ks'
nci.variables[cntvar][:]
cnt10ks_imu = nci.variables[cntvar][:]
#time_imu = netCDF4.num2date(nci.variables['time'][:],units=nci.variables['time'].units)
fi = 1/(np.diff(cnt10ks_imu).mean())
for vartmp in nci.variables:
print(vartmp)
if(not "cnt" in vartmp):
varname = g + ' ' + vartmp
print('reading')
self.var_combo.addItem(varname)
self.data[varname] = {varname:nci.variables[vartmp],'cnt10ks':nci.variables[cntvar]}
self.data[varname]['x0'] = self.data[varname][cntvar]
#accx = nci.variables['accx'][:]
#accy = nci.variables['accy'][:]
#accz = nci.variables['accz'][:]
#gyrox = nci.variables['gyrox'][:]
#gyroy = nci.variables['gyroy'][:]
#gyroz = nci.variables['gyroz'][:]
#magx = nci.variables['magx'][:]
#magy = nci.variables['magy'][:]
#magz = nci.variables['magz'][:]
print('Found IMU data')
except Exception as e:
print('Hallo!')
print(e)
self.FLAG_IMU = False
# If run from the command line
def main():
print('This is todl_quickview')
app = QtWidgets.QApplication(sys.argv)
screen_resolution = app.desktop().screenGeometry()
width, height = screen_resolution.width(), screen_resolution.height()
if(len(sys.argv) > 1):
fname = sys.argv[1]
else:
print('Specify a file for quickview')
exit()
window = todlquickviewMainWindow(fname=fname)
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
    main()
|
MarineDataTools/pymqdatastream
|
pymqdatastream/connectors/todl/tools/todl_quickview.py
|
Python
|
gpl-3.0
| 9,129
| 0.011173
|
# Copyright (c) 2017-2020 Glenn McKechnie <glenn.mckechnie@gmail.com>
# Credit to Tom Keffer <tkeffer@gmail.com>, Matthew Wall and the core
# weewx team, all from whom I've borrowed heavily.
# Mistakes are mine, corrections and/or improvements welcomed
# https://github.com/glennmckechnie/weewx-wxobs
#
# rsync code based on weeutil/rsyncupload.py by
# Will Page <companyguy@gmail.com> and
#
# See the file LICENSE.txt for your full rights.
#
#
# added text
import subprocess
import time
import errno
import os
import weewx.engine
from weeutil.weeutil import to_bool
from weewx.cheetahgenerator import SearchList
wxobs_version = "0.7.4"
try:
# weewx4 logging
import weeutil.logger
import logging
log = logging.getLogger(__name__)
def logdbg(msg):
log.debug(msg)
def loginf(msg):
log.info(msg)
def logerr(msg):
log.error(msg)
except ImportError:
# old-style weewx logging
import syslog
def logmsg(level, msg):
syslog.syslog(level, 'wxobs: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
def wxrsync(rsync_user, rsync_server, rsync_options, rsync_loc_file,
rsync_loc_file2, rsync_ssh_str, rem_path, wxobs_debug,
log_success):
"""
rsync_user
rsync_server
rsync_options
rsync_loc_file
rsync_loc_file2 # maybe empty
rsync_ssh_str
rem_path
wxobs_debug
log_success
"""
t_1 = time.time()
# construct the command argument
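    # The assembled command ends up roughly as (illustrative only):
    #   rsync <rsync_options> --stats --compress <loc_file> [<loc_file2>] user@server:path/
    # where rsync_ssh_str already carries the "user@server:path/" part built by
    # the caller.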
cmd = ['rsync']
cmd.extend([rsync_options])
# cmd.extend(["-tOJrl"])
# provide some stats on the transfer
cmd.extend(["--stats"])
cmd.extend(["--compress"])
cmd.extend([rsync_loc_file])
cmd.extend([rsync_loc_file2])
cmd.extend([rsync_ssh_str])
try:
# perform the actual rsync transfer...
if wxobs_debug == 2:
loginf("rsync cmd is ... %s" % (cmd))
# rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT, close_fds=True)
rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = rsynccmd.communicate()[0]
stroutput = stdout.decode("utf-8").strip()
# rsyncpid = rsynccmd.pid
# loginf(" pre.wait rsync pid is %s" % rsyncpid)
# rsynccmd.wait()
# rsyncpid = rsynccmd.pid
# loginf(" post.wait rsync pid is %s" % rsyncpid)
# subprocess.call( ('ps', '-l') )
except OSError as e:
# print "EXCEPTION"
if e.errno == errno.ENOENT:
logerr("rsync does not appear to be installed on this system. \
(errno %d, \"%s\")" % (e.errno, e.strerror))
raise
# we have some output from rsync so generate an appropriate message
if stroutput.find('rsync error:') < 0:
# no rsync error message so parse rsync --stats results
rsyncinfo = {}
for line in iter(stroutput.splitlines()):
if line.find(':') >= 0:
(n, v) = line.split(':', 1)
rsyncinfo[n.strip()] = v.strip()
# get number of files and bytes transferred and produce an
# appropriate message
try:
if 'Number of regular files transferred' in rsyncinfo:
n_ber = rsyncinfo['Number of regular files transferred']
else:
n_ber = rsyncinfo['Number of files transferred']
n_bytes = rsyncinfo['Total file size']
n_sent = rsyncinfo['Literal data']
if n_ber is not None and n_bytes is not None:
rsync_message = ("rsync'd %s of %s files (%s) in "
"%%0.2f seconds" % (n_sent, n_ber, n_bytes))
else:
rsync_message = "rsync executed in %0.2f seconds"
# loginf("%s " % (rsync_message))
except:
rsync_message = ("rsync exception raised:"
"executed in %0.2f seconds")
loginf(" ERR %s " % (rsync_message))
else:
# suspect we have an rsync error so tidy stroutput
# and display a message
stroutput = stroutput.replace("\n", ". ")
stroutput = stroutput.replace("\r", "")
# Attempt to catch a few errors that may occur and deal with them
# see man rsync for EXIT VALUES
        rsync_message = ("rsync command failed after %0.2f secs (set"
                         " 'wxobs_debug = 2' in skin.conf)")
if "code 1)" in stroutput:
if wxobs_debug == 2:
logerr("rsync code 1 - %s" % stroutput)
rsync_message = ('syntax error in rsync command'
'- set debug = 1 - ! FIX ME !')
loginf(" ERR %s " % (rsync_message))
rsync_message = ("code 1, syntax error, failed"
" rsync executed in %0.2f seconds")
        elif "code 23" in stroutput and "Read-only file system" in stroutput:
            # read-only file system
            # sadly, won't be detected until after first successful transfer
# but it's useful then!
if wxobs_debug == 2:
logerr("rsync code 23 - %s" % stroutput)
loginf("ERR Read only file system ! FIX ME !")
rsync_message = ("code 23, read-only, rsync failed"
" executed in %0.2f seconds")
        elif "code 23" in stroutput and "link_stat" in stroutput:
# likely to be that a local path doesn't exist - possible typo?
if wxobs_debug == 2:
logdbg("rsync code 23 found %s" % stroutput)
rsync_message = ("rsync code 23 : is %s correct?"
"! FIXME !" % (rsync_loc_file))
loginf(" ERR %s " % rsync_message)
rsync_message = ("code 23, link_stat, rsync failed"
" executed in %0.2f seconds")
elif "code 11" in stroutput:
# directory structure at remote end is missing - needs creating
# on this pass. Should be Ok on next pass.
if wxobs_debug == 2:
loginf("rsync code 11 - %s" % stroutput)
rsync_message = ("rsync code 11 found Creating %s"
" as a fix?, space issue?" % (rem_path))
loginf("%s" % rsync_message)
# laborious but apparently necessary, the only way the command
# will run!? build the ssh command - n.b: spaces cause wobblies!
cmd = ['ssh']
cmd.extend(["%s@%s" % (rsync_user, rsync_server)])
mkdirstr = "mkdir -p"
cmd.extend([mkdirstr])
cmd.extend([rem_path])
if wxobs_debug == 2:
loginf("sshcmd %s" % cmd)
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
rsync_ssh_str = rem_path
rsync_message = ("code 11, rsync mkdir cmd executed"
" in % 0.2f seconds")
        elif "code 12" in stroutput and "Permission denied" in stroutput:
if wxobs_debug == 2:
logdbg("rsync code 12 - %s" % stroutput)
rsync_message = ("Permission error in rsync command, probably at"
" remote end authentication ! FIX ME !")
loginf(" ERR %s " % (rsync_message))
rsync_message = "code 12, rsync failed, executed in % 0.2f seconds"
        elif "code 12" in stroutput and "No route to host" in stroutput:
if wxobs_debug == 2:
logdbg("rsync code 12 - %s" % stroutput)
rsync_message = "No route to host error in rsync command ! FIX ME!"
loginf(" ERR %s " % (rsync_message))
rsync_message = "code 12, rsync failed, executed in % 0.2f seconds"
else:
logerr("rsync [%s] reported this error: %s" % (cmd, stroutput))
if log_success:
if wxobs_debug == 0:
t_o = ''
rsync_ssh_str = ''
else:
t_o = ' to '
t_2 = time.time()
loginf("%s" % rsync_message % (t_2-t_1) + t_o + rsync_ssh_str)
class wxobs(SearchList):
def __init__(self, generator):
SearchList.__init__(self, generator)
"""
This is a minimal SLE - all we want is to make the php configuration
easier by transferring the database values as used by weewx, plus a few
other variables.
It has since expanded to allow the transfer of the sqlite database
by using rsync, providing the [Remote] section of the config is
populated
In wxobs/skin.conf:
send_inc: An option to stop sending the include file/s. These contain
the database configuration values, timezone and oprional debugging
stanzas for the php script to operate. Needless to say you need to
send them at least once.
If you can't think of a reason why you'd need this then you don't need
to implement it.
I run a mysql database locally and export an sqlite to the remote. This
allows me to do that without too much trouble (remembering to set up
the symlinks is the biggest issue)
include_path: the directory where the php include file will be stored
this holds the database configuration as sourced from weewx.conf
If you store them locally you can change that path using this option.
If you send the files to another server you can change this path using
dest_directory (which will affect the database also.)
disp_interval: is the time between displayed records; 1800 is a
half-hour and is the default
display_type: 'single' (default) or 'average'
Whether to return the timestamped entry for the value displayed in the
periodic Stats table (single), or average it over all the database
entries between it and the previous one that was displayed. (average)
app_Temp: This is a recent addition to weewx and is not enabled by
default. The calculation is performed but there is no field in the
stock database. This variable allows for the substitution with another
value.
The default is to use windchill.
Keep it to the group_degrees (because the label is hard coded in.)
timezone: If the date or time is not being displayed correctly
we'll assume it's a php issue and pass a timezone string to the script.
This can be done by adding your time zone as the following example
indicates. Replace the string with your zone description
timezone = Melbourne/Australia
self.php_error: enable php error messages in index.php.
This maybe useful at the start of configuration, it shouldn't be
needed after everything is running smoothly
default is False. set to True to enable this to be sent via the
include file.
[[Remote]]
This is used when you want to transfer the include file and the
database to a remote machine where the web files have been sent
seperately with the weewx.conf [FTP] or [RSYNC] section.
        dest_directory: is the switch that turns this on. It transfers BOTH the
include and database files to the same directory as the tuple specifies
If using multiple databases and include files make sure they are unique
ie:- if you are transferring from multiple machine.
It will fetch the rsync user and server from the wxobs/skin.conf file
and use those values or if they are missing then it will use the values
from the [RSYNC] section of weewx.conf which is possibly configured
already.
rsync_user (user) = user_name for rsync command
rsync_server (server)= ip address of the remote machine
send_include = True #This is the default, set to False if you don't
want to send the include file repeatedly to the server. Use with
caution (ie: remember this setting when things stop working, it might
be the cure)
rsync_options: Not documented in the skin.conf Default is '-ac'. Use
with caution and no spaces allowed.
[[RainTiming]]
shift_rain: For rain accounting times other than midnight to midnight
set this to True
If no other options are given the accounting time will be the
australian rain day which starts at 9 a.m.
default is false - start at midnight 00:00:00 through to the next day.
rain_start: used to shift time (in seconds) to something other than
9a.m. The default is 32400
        start_label: the o'clock label for the rain_start above.
default is 9
show_warning: An information message will appear on the report page
(index.php) if the database is in US units (imperial) or units are
detected that don't match the native units required for the delta-T
calcs.
An information div is included in the report page when this occurs.
This is a switch (boolean) to turn it off.
wxobs_debug: Allow index.php to include debugging info if set to...
1 and above is low level, variables, some logic.
2 is for wxobs remote cmds etc.
3 only for delta-T final values (low level - if enabled)
4 only for delta-T unit conversion calcs (verbose) - if enabled
5 only for ordinalCompass conversion calcs (N, NE...CALM) (verbose)
6 is for database debugging
[[DeltaT]]
calculate_deltaT: Whether to generate deltaT for the report page.
Default is not to generate that data.
This is a switch (boolean) to turn it on.
[[PHPUnits]]
tempConvert:
speedConvert:
pressConvert:
rainConvert: These are all used to convert the database units to ones
for display by the php generated report.
Because we are bypassing weewx to generate the historical data, we
can't utilize the inbuilt weewx functions for unit conversion therefore
we need to affect them ourselves.
This is performed (if needed) by specifying the conversion to be done
from the [[PHPUnits]] section of the skin.conf file.
The default is to perform no conversion, to accept the units as they
are.
"""
self.wxobs_version = wxobs_version
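        # A minimal skin.conf sketch matching the options read below; the
        # values shown are illustrative only, not recommendations:
        #
        #   [wxobs]
        #       wxobs_debug = 0
        #       send_include = True
        #       display_interval = 1800
        #       display_type = single
        #       app_Temp = windchill
        #       [[Remote]]
        #           dest_directory = /var/www/html/weewx/wxobs
        #           rsync_user = weewx
        #           rsync_server = example.org
        #       [[RainTiming]]
        #           shift_rain = False
        #       [[DeltaT]]
        #           calculate_deltaT = False
        #       [[PHPUnits]]
        #           temperature_convert = NTC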
self.wxobs_debug = int(self.generator.skin_dict['wxobs'].get(
'wxobs_debug', '0'))
self.send_inc = to_bool(self.generator.skin_dict['wxobs'].get(
'send_include', True))
# self.inc_path = self.generator.skin_dict['wxobs'].get(
# 'include_path', '/usr/share/php')
# intervals for display of results
self.disp_interval = self.generator.skin_dict['wxobs'].get(
'display_interval', '1800')
self.arch_interval = self.generator.config_dict['StdArchive'].get(
'archive_interval')
# now decide whether intermediate reading will be averaged or ignored.
self.display_type = self.generator.skin_dict['wxobs'].get(
'display_type', 'single')
if self.display_type == 'single':
self.disp_single = to_bool(True)
# loginf(" %s single readings, %s " % (
# self.disp_single, self.display_type))
elif self.display_type == 'average':
self.disp_single = to_bool(False)
self.arch_interval = self.disp_interval
# loginf(" %s average readings, %s " % (
# self.disp_single, self.display_type))
else:
# loginf(" reverting to single readings,
# %s is not an option" % self.display_type)
self.disp_single = to_bool(True)
self.app_temp = self.generator.skin_dict['wxobs'].get(
'app_Temp', 'windchill')
self.php_zone = self.generator.skin_dict['wxobs'].get(
'timezone', '')
self.php_error = to_bool(self.generator.skin_dict['wxobs'].get(
'show_php_errors', False))
self.show_warning = to_bool(self.generator.skin_dict['wxobs']['DeltaT']
.get('show_warning', True))
self.want_delta = to_bool(self.generator.skin_dict['wxobs']['DeltaT']
.get('calculate_deltaT', False))
if not self.want_delta:
self.show_warning = to_bool(self.generator.skin_dict['wxobs']
['DeltaT'].get('show_warning', False))
        # these variables are used as function names, hence the unusual Case
        # usage and the resulting complaints from syntax checkers.
self.tempConvert = self.generator.skin_dict['wxobs']['PHPUnits'].get(
'temperature_convert', 'NTC')
self.speedConvert = self.generator.skin_dict['wxobs']['PHPUnits'].get(
'speed_convert', 'NSC')
self.pressConvert = self.generator.skin_dict['wxobs']['PHPUnits'].get(
'pressure_convert', 'NPC')
self.rainConvert = self.generator.skin_dict['wxobs']['PHPUnits'].get(
'rain_convert', 'NDC')
self.shift_rain = to_bool(self.generator.skin_dict['wxobs']
['RainTiming'].get('shift_rain', False))
# 32400 (rainday_start) == 9 hours == 9 a.m.
self.rainday_start = self.generator.skin_dict['wxobs']['RainTiming'].get(
'rain_start', '32400')
# 32400 == 9 hours == 9 (start_label) a.m.
self.start_label = self.generator.skin_dict['wxobs']['RainTiming'].get(
'start_label', '9')
# target_unit = METRICWX # Options are 'US', 'METRICWX', or 'METRIC'
self.targ_unit = self.generator.config_dict['StdConvert'].get(
'target_unit')
# used for rsync of sqlite databases and include file to remote machines
self.dest_dir = self.generator.skin_dict['wxobs']['Remote'].get(
'dest_directory', '')
if self.dest_dir:
self.rsync_user = self.generator.skin_dict['wxobs']['Remote'] \
.get('rsync_user', '')
if not self.rsync_user:
try:
self.rsync_user = self.generator.config_dict['StdReport'] \
['RSYNC'].get('user', '')
except:
if self.wxobs_debug >= 1:
logdbg("No rsync _user supplied?")
self.rsync_server = self.generator.skin_dict['wxobs']['Remote'] \
.get('rsync_server', '')
if not self.rsync_server:
try:
self.rsync_server = self.generator.config_dict['StdReport'] \
['RSYNC'].get('server', '')
except:
if self.wxobs_debug >= 1:
logdbg("No rsync server supplied?")
# did we get anything that we can use?
if not self.rsync_user or not self.rsync_server:
self.dest_dir = ''
else:
# we did so we need these...
self.rsync_options = self.generator.skin_dict['wxobs'] \
['Remote'].get('rsync_options', '-ac')
self.log_success = to_bool(self.generator.skin_dict['wxobs']
['Remote'].get('log_success', True))
pass
# prepare the database details and write the include file
def_dbase = self.generator.config_dict['DataBindings'] \
['wx_binding'].get('database')
if self.wxobs_debug == 5:
logdbg("database is %s" % def_dbase)
#########################
# BEGIN TESTING ONLY:
# For use when testing sqlite transfer when a mysql database is the default
# archive
# Our normal mode of operation is False - ie: don't change a bloody thing!
# It won't be mentioned in the skin.conf description. You'll need to have seen
# this to know the switch exists!
test_sqlite = to_bool(self.generator.skin_dict['wxobs']['Remote'].get(
'test_withmysql', False))
if test_sqlite:
def_dbase = 'archive_sqlite'
# END TESTING ONLY:
#########################
if def_dbase == 'archive_mysql':
self.dbase = 'mysql'
self.mysql_base = self.generator.config_dict['Databases'] \
[def_dbase].get('database_name')
id_match = self.mysql_base
self.mysql_host = self.generator.config_dict['DatabaseTypes'] \
['MySQL'].get('host')
self.mysql_user = self.generator.config_dict['DatabaseTypes'] \
['MySQL'].get('user')
self.mysql_pass = self.generator.config_dict['DatabaseTypes'] \
['MySQL'].get('password')
v_al = ["<?php\n $php_dbase = '%s';\n $php_mysql_base = '%s';\n"
" $php_mysql_host = '%s';\n $php_mysql_user = '%s';\n"
" $php_mysql_pass = '%s';\n" %
(self.dbase, self.mysql_base, self.mysql_host,
self.mysql_user, self.mysql_pass)]
if self.wxobs_debug == 6:
loginf("mysql database is %s, %s, %s, %s" % (
self.mysql_base, self.mysql_host,
self.mysql_user, self.mysql_pass))
elif def_dbase == 'archive_sqlite':
self.dbase = 'sqlite'
self.sq_dbase = self.generator.config_dict['Databases'] \
[def_dbase].get('database_name')
id_match = self.sq_dbase[:-4]
self.sq_root = self.generator.config_dict['DatabaseTypes'] \
['SQLite'].get('SQLITE_ROOT')
self.sqlite_db = ("%s/%s" % (self.sq_root, self.sq_dbase))
v_al = ["<?php\n $php_dbase = 'sqlite';\n $php_sqlite_db = '%s';\n" %
self.sqlite_db]
if self.wxobs_debug == 6:
loginf("sqlite database is %s, %s, %s" % (
self.sq_dbase, self.sq_root, self.sqlite_db))
# phpinfo.php shows include_path as .:/usr/share/php, we'll put it
# in there and hopefully that will work for most users.
# I use/prefer /tmp/wxobs_inc.inc
inc_file = ("wxobs_%s.inc" % id_match)
if self.dest_dir != '':
# create an empty index.html to obscure directory listing
self.zero_html = self.dest_dir+"/index.html"
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir, mode=0o0755)
if not os.path.isfile(self.zero_html):
                with open(self.zero_html, 'a'):  # create the file if it does not exist
pass # and auto close it
# we are rsyncing remotely
# And going to change all the remote paths, the include_path has
# lost its precedence.
self.inc_path = self.dest_dir
self.include_file = ("%s/%s" % (self.inc_path, inc_file))
# pre-empt inevitable warning/exception when using
# test_sqlite = False
self.sq_dbase = self.generator.config_dict['Databases'] \
[def_dbase].get('database_name')
new_location = (self.dest_dir+"/"+self.sq_dbase)
v_al = ["<?php\n $php_dbase = 'sqlite';\n $php_sqlite_db = '%s/%s';" %
(self.dest_dir, self.sq_dbase)]
# symlink database to new location here, which will be mirrored on the
            # remote server. This allows local usage of wxobs as well as remote
org_location = (self.sq_root+"/"+self.sq_dbase)
if not os.path.isfile(new_location):
if self.wxobs_debug == 2:
loginf("database, attempting to \'symlink %s %s\'"
% (org_location, new_location))
try:
os.symlink(org_location, new_location)
except OSError as e:
logerr("error creating database symlink %s" % e)
try:
if not os.access(self.include_file, os.W_OK):
os.makedirs(self.inc_path)
except OSError as e:
                if e.errno == errno.EEXIST:
pass
else:
# All other cases, local or remote...
# we are going to retain the defaults values, maybe a slight tweak.
# use the skin.conf include_path, either default or the override.
self.inc_path = self.generator.skin_dict['wxobs'].get(
'include_path', '/usr/share/php')
# phpinfo.php include_path is referenced but missing in some
# cases - php7.3? Possibly installed with php-pear ?
# FIXME: a quick and harmless fix is to create it.
if not os.path.exists(self.inc_path):
os.makedirs(self.inc_path, mode=0o0755)
loginf("Created %s" % self.inc_path)
self.include_file = ("%s/%s" % (self.inc_path, inc_file))
# if self.send_inc and self.dest_dir != '':
if self.send_inc:
php_inc = open(self.include_file, 'w')
php_inc.writelines(v_al)
if self.php_zone != '':
t_z = ("\n ini_set(\"date.timezone\", \"%s\");" % self.php_zone)
if self.wxobs_debug == 2:
loginf("timezone is set to %s" % t_z)
php_inc.write(t_z)
if self.php_error:
php_err = ('\n ini_set(\'display_errors\', 1);'
'\n ini_set(\'display_startup_errors\', 1);'
'\n error_reporting(E_ALL);')
if self.wxobs_debug == 2:
loginf("php error reporting is set: %s" % php_err)
php_inc.writelines(php_err)
php_inc.close()
# use rsync to transfer database remotely, but ONLY if requested
if def_dbase == 'archive_sqlite' and self.dest_dir != '':
# honor request to move destination directories (same for both)
# create and redefine as appropriate
if self.dest_dir:
self.sq_root = self.dest_dir
# database transfer
db_loc_file = "%s" % (self.sqlite_db)
db_ssh_str = "%s@%s:%s/" % (self.rsync_user, self.rsync_server,
self.sq_root)
wxrsync(self.rsync_user, self.rsync_server, self.rsync_options,
db_loc_file, self.zero_html, db_ssh_str, self.sq_root,
self.wxobs_debug, self.log_success)
if self.send_inc:
# perform include file transfer if wanted, zero_html just
# fills a slot here.
inc_loc_file = "%s" % (self.include_file)
inc_ssh_str = "%s@%s:%s/" % (self.rsync_user, self.rsync_server,
self.inc_path)
wxrsync(self.rsync_user, self.rsync_server, self.rsync_options,
inc_loc_file, self.zero_html, inc_ssh_str,
self.inc_path, self.wxobs_debug, self.log_success)
|
glennmckechnie/weewx-wxobs
|
bin/user/wxobs.py
|
Python
|
gpl-3.0
| 27,446
| 0.000911
|
# © 2017 Sergio Teruel <sergio.teruel@tecnativa.com>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from .hooks import pre_init_hook
from . import models
from . import report
|
OCA/margin-analysis
|
account_invoice_margin/__init__.py
|
Python
|
agpl-3.0
| 194
| 0
|
# Copyright (c) 2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from paramz.transformations import *
from paramz.transformations import __fixed__
|
befelix/GPy
|
GPy/core/parameterization/transformations.py
|
Python
|
bsd-3-clause
| 194
| 0
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import AbinsModules
class CalculateS(object):
"""
Class producer for generating required S calculator
Currently available S calculators:
* SPowderSemiEmpiricalCalculator
"""
@staticmethod
def init(filename=None, temperature=None, sample_form=None, abins_data=None, instrument=None,
quantum_order_num=None, bin_width=1.0):
"""
:param filename: name of input DFT file (CASTEP: foo.phonon)
:param temperature: temperature in K for which calculation of S should be done
:param sample_form: form in which experimental sample is: Powder or SingleCrystal (str)
:param abins_data: object of type AbinsData with data from phonon file
:param instrument: object of type Instrument for which simulation should be performed
:param quantum_order_num: number of quantum order events taken into account during the simulation
:param bin_width: width of bins in wavenumber
"""
if sample_form in AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS:
if sample_form == "Powder":
return AbinsModules.SPowderSemiEmpiricalCalculator(filename=filename, temperature=temperature,
abins_data=abins_data, instrument=instrument,
quantum_order_num=quantum_order_num,
bin_width=bin_width)
# TODO: implement numerical powder averaging
# elif sample == "SingleCrystal": #TODO implement single crystal scenario
else:
raise ValueError("Only implementation for sample in the form of powder is available.")
else:
raise ValueError("Invalid sample form %s" % sample_form)
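# Minimal usage sketch -- argument values below are placeholders, not real data:
#   s_calculator = CalculateS.init(filename="benzene.phonon", temperature=10,
#                                  sample_form="Powder", abins_data=abins_data,
#                                  instrument=instrument, quantum_order_num=1)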
|
mganeva/mantid
|
scripts/AbinsModules/CalculateS.py
|
Python
|
gpl-3.0
| 2,205
| 0.004989
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import math
import random
import time
import uuid
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import ExtractorError
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
_VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
'md5': '2cb594dc2781e6c941a110d8f358118b',
'info_dict': {
'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
'title': '美国德州空中惊现奇异云团 酷似UFO',
'ext': 'f4v',
}
}, {
'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb',
'title': '名侦探柯南第752集',
},
'playlist': [{
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}],
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
'only_matching': True,
}, {
'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
'only_matching': True,
}, {
'url': 'http://yule.iqiyi.com/pcb.html',
'only_matching': True,
}]
_FORMATS_MAP = [
('1', 'h6'),
('2', 'h5'),
('3', 'h4'),
('4', 'h3'),
('5', 'h2'),
('10', 'h1'),
]
@staticmethod
def md5_text(text):
return hashlib.md5(text.encode('utf-8')).hexdigest()
def construct_video_urls(self, data, video_id, _uuid):
def do_xor(x, y):
a = y % 3
if a == 1:
return x ^ 121
if a == 2:
return x ^ 72
return x ^ 103
def get_encode_code(l):
a = 0
b = l.split('-')
c = len(b)
s = ''
for i in range(c - 1, -1, -1):
a = do_xor(int(b[c - i - 1], 16), i)
s += chr(a)
return s[::-1]
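        # get_encode_code above deobfuscates a '-'-separated hex string: each
        # token is XOR-decoded with a position-dependent key (do_xor) and the
        # decoded characters are reversed to recover the original path string.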
def get_path_key(x, format_id, segment_index):
mg = ')(*&^flash@#$%a'
tm = self._download_json(
'http://data.video.qiyi.com/t?tn=' + str(random.random()), video_id,
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
return self.md5_text(t + mg + x)
video_urls_dict = {}
for format_item in data['vp']['tkl'][0]['vs']:
if 0 < int(format_item['bid']) <= 10:
format_id = self.get_format(format_item['bid'])
else:
continue
video_urls = []
video_urls_info = format_item['fs']
if not format_item['fs'][0]['l'].startswith('/'):
t = get_encode_code(format_item['fs'][0]['l'])
if t.endswith('mp4'):
video_urls_info = format_item['flvs']
for segment_index, segment in enumerate(video_urls_info):
vl = segment['l']
if not vl.startswith('/'):
vl = get_encode_code(vl)
key = get_path_key(
vl.split('/')[-1].split('.')[0], format_id, segment_index)
filesize = segment['b']
base_url = data['vp']['du'].split('/')
base_url.insert(-1, key)
base_url = '/'.join(base_url)
param = {
'su': _uuid,
'qyid': uuid.uuid4().hex,
'client': '',
'z': '',
'bt': '',
'ct': '',
'tn': str(int(time.time()))
}
api_video_url = base_url + vl + '?' + \
compat_urllib_parse.urlencode(param)
js = self._download_json(
api_video_url, video_id,
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
video_url = js['l']
video_urls.append(
(video_url, filesize))
video_urls_dict[format_id] = video_urls
return video_urls_dict
def get_format(self, bid):
matched_format_ids = [_format_id for _bid, _format_id in self._FORMATS_MAP if _bid == str(bid)]
return matched_format_ids[0] if len(matched_format_ids) else None
def get_bid(self, format_id):
matched_bids = [_bid for _bid, _format_id in self._FORMATS_MAP if _format_id == format_id]
return matched_bids[0] if len(matched_bids) else None
def get_raw_data(self, tvid, video_id, enc_key, _uuid):
tm = str(int(time.time()))
tail = tm + tvid
param = {
'key': 'fvip',
'src': self.md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
'enc': self.md5_text(enc_key + tail),
'qyid': _uuid,
'tn': random.random(),
'um': 0,
'authkey': self.md5_text(self.md5_text('') + tail),
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
compat_urllib_parse.urlencode(param)
raw_data = self._download_json(api_url, video_id)
return raw_data
def get_enc_key(self, swf_url, video_id):
# TODO: automatic key extraction
# last update at 2015-12-18 for Zombie::bite
enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1]
return enc_key
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
tvid = self._search_regex(
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
video_id = self._search_regex(
r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
swf_url = self._search_regex(
r'(http://[^\'"]+MainPlayer[^.]+\.swf)', webpage, 'swf player URL')
_uuid = uuid.uuid4().hex
enc_key = self.get_enc_key(swf_url, video_id)
raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
if raw_data['code'] != 'A000000':
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
if not raw_data['data']['vp']['tkl']:
            raise ExtractorError('iQiyi VIP videos are not supported')
data = raw_data['data']
title = data['vi']['vn']
# generate video_urls_dict
video_urls_dict = self.construct_video_urls(
data, video_id, _uuid)
# construct info
entries = []
for format_id in video_urls_dict:
video_urls = video_urls_dict[format_id]
for i, video_url_info in enumerate(video_urls):
if len(entries) < i + 1:
entries.append({'formats': []})
entries[i]['formats'].append(
{
'url': video_url_info[0],
'filesize': video_url_info[-1],
'format_id': format_id,
'preference': int(self.get_bid(format_id))
}
)
for i in range(len(entries)):
self._sort_formats(entries[i]['formats'])
entries[i].update(
{
'id': '%s_part%d' % (video_id, i + 1),
'title': title,
}
)
if len(entries) > 1:
info = {
'_type': 'multi_video',
'id': video_id,
'title': title,
'entries': entries,
}
else:
info = entries[0]
info['id'] = video_id
info['title'] = title
return info
|
atomic83/youtube-dl
|
youtube_dl/extractor/iqiyi.py
|
Python
|
unlicense
| 9,558
| 0.000745
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-15 16:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0019_auto_20160427_1520'),
]
operations = [
migrations.AddField(
model_name='aarelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='aarelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='aarelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_first',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_last',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_suffix',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='type_qualifier',
field=models.CharField(blank=True, choices=[(b'BGN', b'Began'), (b'END', b'Ended'), (b'OCR', b'Occurred')], max_length=3, null=True),
),
migrations.AddField(
model_name='authority',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='authority',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='authority',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='data_display_order',
field=models.FloatField(default=1.0, help_text=b'Position at which the citation should be displayed in the citation detail view. Whole numbers or decimals can be used.'),
),
migrations.AddField(
model_name='ccrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='additional_titles',
field=models.TextField(blank=True, help_text=b'Additional titles (not delimited, free text).', null=True),
),
migrations.AddField(
model_name='citation',
name='book_series',
field=models.CharField(blank=True, help_text=b'Used for books, and potentially other works in a series.', max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_first',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_last',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_suffix',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='type_qualifier',
field=models.CharField(blank=True, choices=[(b'BGN', b'Began'), (b'END', b'Ended'), (b'OCR', b'Occurred')], max_length=3, null=True),
),
migrations.AddField(
model_name='historicalauthority',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalauthority',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalauthority',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalccrelation',
name='data_display_order',
field=models.FloatField(default=1.0, help_text=b'Position at which the citation should be displayed in the citation detail view. Whole numbers or decimals can be used.'),
),
migrations.AddField(
model_name='historicalccrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalccrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalccrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalcitation',
name='additional_titles',
field=models.TextField(blank=True, help_text=b'Additional titles (not delimited, free text).', null=True),
),
migrations.AddField(
model_name='historicalcitation',
name='book_series',
field=models.CharField(blank=True, help_text=b'Used for books, and potentially other works in a series.', max_length=255, null=True),
),
migrations.AddField(
model_name='historicalcitation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalcitation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalcitation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='access_status',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='access_status_date_verified',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='resource_name',
field=models.CharField(blank=True, help_text=b'Name of the resource that the URN links to.', max_length=255, null=True),
),
migrations.AddField(
model_name='historicallinkeddata',
name='url',
field=models.CharField(blank=True, help_text=b'If the resource has a DOI, use the DOI instead and do not include URL. Do include the http:// prefix. If used must also provide URLDateAccessed.', max_length=255, null=True),
),
migrations.AddField(
model_name='historicalperson',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalperson',
name='personal_name_preferred',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='historicalperson',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalperson',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicaltracking',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicaltracking',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicaltracking',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='access_status',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='access_status_date_verified',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='resource_name',
field=models.CharField(blank=True, help_text=b'Name of the resource that the URN links to.', max_length=255, null=True),
),
migrations.AddField(
model_name='linkeddata',
name='url',
field=models.CharField(blank=True, help_text=b'If the resource has a DOI, use the DOI instead and do not include URL. Do include the http:// prefix. If used must also provide URLDateAccessed.', max_length=255, null=True),
),
migrations.AddField(
model_name='partdetails',
name='extent',
field=models.PositiveIntegerField(blank=True, help_text=b'Provides the size of the work in pages, words, or other counters.', null=True),
),
migrations.AddField(
model_name='partdetails',
name='extent_note',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='person',
name='personal_name_preferred',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='tracking',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='tracking',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='tracking',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
upconsulting/IsisCB
|
isiscb/isisdata/migrations/0020_auto_20160615_1630.py
|
Python
|
mit
| 15,648
| 0.000831
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-14 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('time', models.DateTimeField(auto_now_add=True)),
('meta_tag', models.CharField(max_length=150)),
('view_count', models.IntegerField(default=0, editable=False)),
('public_doc', models.BooleanField()),
('update_time', models.DateTimeField(auto_now=True)),
],
),
]
|
kyunooh/JellyBlog
|
lifeblog/migrations/0001_initial.py
|
Python
|
apache-2.0
| 900
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UploadCertificateResponse(Model):
"""The upload registration certificate response.
All required parameters must be populated in order to send to Azure.
:param auth_type: Specifies authentication type. Possible values include:
'Invalid', 'AzureActiveDirectory'
:type auth_type: str or ~azure.mgmt.edgegateway.models.AuthenticationType
:param resource_id: Required. The resource ID of the Data Box Edge/Gateway
device.
:type resource_id: str
:param aad_authority: Required. Azure Active Directory tenant authority.
:type aad_authority: str
:param aad_tenant_id: Required. Azure Active Directory tenant ID.
:type aad_tenant_id: str
:param service_principal_client_id: Required. Azure Active Directory
service principal client ID.
:type service_principal_client_id: str
:param service_principal_object_id: Required. Azure Active Directory
service principal object ID.
:type service_principal_object_id: str
:param azure_management_endpoint_audience: Required. The azure management
endpoint audience.
:type azure_management_endpoint_audience: str
"""
_validation = {
'resource_id': {'required': True},
'aad_authority': {'required': True},
'aad_tenant_id': {'required': True},
'service_principal_client_id': {'required': True},
'service_principal_object_id': {'required': True},
'azure_management_endpoint_audience': {'required': True},
}
_attribute_map = {
'auth_type': {'key': 'authType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'aad_authority': {'key': 'aadAuthority', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'service_principal_client_id': {'key': 'servicePrincipalClientId', 'type': 'str'},
'service_principal_object_id': {'key': 'servicePrincipalObjectId', 'type': 'str'},
'azure_management_endpoint_audience': {'key': 'azureManagementEndpointAudience', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UploadCertificateResponse, self).__init__(**kwargs)
self.auth_type = kwargs.get('auth_type', None)
self.resource_id = kwargs.get('resource_id', None)
self.aad_authority = kwargs.get('aad_authority', None)
self.aad_tenant_id = kwargs.get('aad_tenant_id', None)
self.service_principal_client_id = kwargs.get('service_principal_client_id', None)
self.service_principal_object_id = kwargs.get('service_principal_object_id', None)
self.azure_management_endpoint_audience = kwargs.get('azure_management_endpoint_audience', None)
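# Construction sketch -- all values below are placeholders, not real identifiers:
#   response = UploadCertificateResponse(
#       resource_id="<device resource id>",
#       aad_authority="<aad authority url>",
#       aad_tenant_id="<tenant id>",
#       service_principal_client_id="<client id>",
#       service_principal_object_id="<object id>",
#       azure_management_endpoint_audience="<audience>")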
|
Azure/azure-sdk-for-python
|
sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/upload_certificate_response.py
|
Python
|
mit
| 3,198
| 0.001876
|
'''Todo:
* Add multiple thread support for async_process functions
* Potentially thread each handler function? idk
'''
import sys
import socket
import re
import threading
import logging
import time
if sys.hexversion < 0x03000000:
#Python 2
import Queue as queue
BlockingIOError = socket.error
else:
import queue
from .ircclient import IRCClient
logger = logging.getLogger(__name__)
#Somewhat complex regex that accurately matches nick!username@host, with named groups for easy parsing and usage
user_re = re.compile(r'(?P<nick>[\w\d<-\[\]\^\{\}\~]+)!(?P<user>[\w\d<-\[\]\^\{\}\~]+)@(?P<host>.+)')
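# Illustrative match (hypothetical input): 'Alice!alice@irc.example.net' yields
# nick='Alice', user='alice', host='irc.example.net'.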
class IRCBot(IRCClient):
'''See `IRCClient` for basic client usage, here is usage for the bot system
Handler notation:
on_join(self, nick, host, channel)
on_topic(self, nick, host, channel, topic)
on_part(self, nick, host, channel, message)
on_msg(self, nick, host, channel, message)
on_privmsg(self, nick, host, message)
on_chanmsg(self, nick, host, channel, message)
on_notice(self, nick, host, channel, message)
on_nick(self, nick, new_nick, host)
'''
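    # Registration sketch (illustrative only; constructor arguments depend on
    # the underlying IRCClient):
    #
    #   bot = IRCBot(...)
    #
    #   @bot.on_chanmsg
    #   def greet(bot, nick, host, channel, message):
    #       print("%s said %r in %s" % (nick, message, channel))
    #
    # Handlers receive the bot instance as their first argument, matching the
    # notation documented above.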
_handlers = {
'join': [],
'part': [],
'kick': [],
'topic': [],
'msg': [],
'privmsg': [],
'chanmsg': [],
'notice': [],
'nick': []
}
_process_thread = None
def _async_process(self):
while not self._stop_event.is_set():
time.sleep(0.01)
try:
args = self._in_queue.get_nowait()
#These "msg"s will be raw irc received lines, which have several forms
# basically, we should be looking for
# :User!Name@host COMMAND <ARGS>
userhost = user_re.search(args[0][1:])
if userhost:
                    nick, user, host = userhost.groups()
command = args[1]
if command == 'JOIN':
channel = args[2][1:] #JOIN Channels are : prefixed
for handler in self._handlers['join']:
handler(self, nick, host, channel)
elif command == 'TOPIC':
channel = args[2]
topic = ' '.join(args[3:])
for handler in self._handlers['topic']:
handler(self, nick, host, channel, topic)
elif command == 'PART':
channel = args[2]
message = ' '.join(args[3:])
for handler in self._handlers['part']:
handler(self, nick, host, channel, message)
elif command == 'PRIVMSG':
channel = args[2]
message = ' '.join(args[3:])[1:]
for handler in self._handlers['msg']:
handler(self, nick, host, channel, message)
if channel[0] == '#':
#this is a channel
for handler in self._handlers['chanmsg']:
handler(self, nick, host, channel, message)
else:
#private message
for handler in self._handlers['privmsg']:
handler(self, nick, host, message)
elif command == 'KICK':
channel = args[2]
kicked_nick = args[3]
reason = ' '.join(args[4:])[1:]
for handler in self._handlers['kick']:
handler(self, nick, host, channel, kicked_nick, reason)
elif command == 'NICK':
new_nick = args[2][1:]
for handler in self._handlers['nick']:
handler(self, nick, new_nick, host)
elif command == 'NOTICE':
#:nick!user@host NOTICE <userchan> :message
channel = args[2]
message = ' '.join(args[3:])
for handler in self._handlers['notice']:
handler(self, nick, host, channel, message)
else:
logger.warning("Unhandled command %s" % command)
self._in_queue.task_done()
except queue.Empty as e: pass
except Exception as e:
logger.exception("Error while handling message " + str(args))
def start(self):
IRCClient.start(self)
self._process_thread = threading.Thread(target=self._async_process)
self._process_thread.start()
def on(self, type):
'''Decorator function'''
        def decorator(func):
'''decorated functions should be written as class methods
@on('join')
            def on_join(self, nick, host, channel):
                print("%s joined channel %s" % (nick, channel))
'''
self._handlers[type].append(func)
return func
return decorator
def on_join(self, func):
self._handlers['join'].append(func)
return func
def on_part(self, func):
self._handlers['part'].append(func)
return func
def on_kick(self, func):
self._handlers['kick'].append(func)
return func
def on_msg(self, func):
self._handlers['msg'].append(func)
return func
def on_privmsg(self, func):
self._handlers['privmsg'].append(func)
return func
def on_chanmsg(self, func):
self._handlers['chanmsg'].append(func)
return func
def on_notice(self, func):
self._handlers['notice'].append(func)
return func
def on_nick(self, func):
self._handlers['nick'].append(func)
return func
__all__ = ['IRCBot']
|
codetalkio/TelegramIRCImageProxy
|
asyncirc/ircbot.py
|
Python
|
mit
| 5,983
| 0.00234
|
from django.http import HttpRequest
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from allauth.account import app_settings as allauth_settings
from allauth.utils import (email_address_exists,
get_username_max_length)
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
from rest_framework import serializers
from requests.exceptions import HTTPError
# Import is needed only if we are using social login, in which
# case the allauth.socialaccount will be declared
if 'allauth.socialaccount' in settings.INSTALLED_APPS:
from allauth.socialaccount.helpers import complete_social_login
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
        :param response: Provider's response for OAuth1. Not used in the OAuth2 flow.
        :returns: A populated instance of
            `allauth.socialaccount.models.SocialLogin`
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
_("View is not defined, pass it as a context variable")
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError(_("Define adapter_class in view"))
adapter = adapter_class(request)
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# Case 1: We received the access_token
if attrs.get('access_token'):
access_token = attrs.get('access_token')
# Case 2: We received the authorization code
elif attrs.get('code'):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
_("Define callback_url in view")
)
if not self.client_class:
raise serializers.ValidationError(
_("Define client_class in view")
)
code = attrs.get('code')
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
else:
raise serializers.ValidationError(
_("Incorrect input. access_token or code is required."))
social_token = adapter.parse_token({'access_token': access_token})
social_token.app = app
try:
login = self.get_social_login(adapter, app, social_token, access_token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError(_('Incorrect value'))
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
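# Illustrative wiring sketch (not part of the original file): validate() above
# expects the calling view to expose adapter_class, and additionally client_class
# and callback_url when the OAuth2 `code` flow is used. With allauth's Facebook
# provider installed, a view would typically look like this (import paths are the
# standard allauth/rest_auth ones; the callback URL is a placeholder):
#
#     from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
#     from allauth.socialaccount.providers.oauth2.client import OAuth2Client
#     from rest_auth.registration.views import SocialLoginView
#
#     class FacebookLogin(SocialLoginView):
#         adapter_class = FacebookOAuth2Adapter
#         client_class = OAuth2Client          # only needed for the `code` flow
#         callback_url = 'https://example.com/accounts/facebook/callback/'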
class RegisterSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=get_username_max_length(),
min_length=allauth_settings.USERNAME_MIN_LENGTH,
required=allauth_settings.USERNAME_REQUIRED
)
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
password1 = serializers.CharField(write_only=True)
password2 = serializers.CharField(write_only=True)
def validate_username(self, username):
username = get_adapter().clean_username(username)
return username
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_("A user is already registered with this e-mail address."))
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(_("The two password fields didn't match."))
return data
def custom_signup(self, request, user):
pass
def get_cleaned_data(self):
return {
'username': self.validated_data.get('username', ''),
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', '')
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
self.custom_signup(request, user)
setup_user_email(request, user, [])
return user
class VerifyEmailSerializer(serializers.Serializer):
key = serializers.CharField()
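# Illustrative payload (not part of the original file): RegisterSerializer above
# expects data of the shape
#     {"username": "alice", "email": "alice@example.com",
#      "password1": "s3cret-pass", "password2": "s3cret-pass"}
# validate() only cross-checks password1/password2; username, e-mail and password
# cleaning (and e-mail uniqueness when allauth's UNIQUE_EMAIL setting is on) are
# delegated to the allauth adapter in the field-level validators.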
|
saurabhVisie/appserver
|
rest_auth/registration/serializers.py
|
Python
|
mit
| 6,316
| 0.000792
|
#!/Users/shreyashirday/Personal/openmdao-0.13.0/bin/python
# EASY-INSTALL-SCRIPT: 'docutils==0.10','rst2odt_prepstyles.py'
__requires__ = 'docutils==0.10'
__import__('pkg_resources').run_script('docutils==0.10', 'rst2odt_prepstyles.py')
|
HyperloopTeam/FullOpenMDAO
|
bin/rst2odt_prepstyles.py
|
Python
|
gpl-2.0
| 237
| 0.004219
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all exception's which search services may raise."""
from search.common import utils
class Error(Exception):
"""Generic error."""
def ToString(self, error_prefix):
"""Builds error message string escaping it for HTML.
Args:
error_prefix: an error prefix.
Returns:
HTML escaped error message.
"""
if error_prefix:
return utils.HtmlEscape(
"{0}: {1}".format(error_prefix, str("\n".join(self.args))))
else:
return utils.HtmlEscape("Error: {0}".format(str("\n".join(self.args))))
def __str__(self):
return self.ToString("Error")
class BadQueryException(Error):
"""BadQueryException error."""
def __str__(self):
return self.ToString("BadQueryException")
# Places search service pool exception.
class PoolConnectionException(Error):
"""PoolConnectionException error."""
def __str__(self):
return self.ToString("PoolConnectionException")
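# A minimal, illustrative helper (not in the upstream module) showing how the
# classes above render themselves.
def _format_bad_query_example():
    """Demonstrates the formatting path: str() goes through ToString(), which
    prefixes the class-specific label and HTML-escapes the joined args."""
    try:
        raise BadQueryException("term contains <invalid> markup")
    except BadQueryException as error:
        # e.g. "BadQueryException: term contains &lt;invalid&gt; markup"
        return str(error)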
def main():
pass
if __name__ == "__main__":
main()
|
tst-mswartz/earthenterprise
|
earth_enterprise/src/server/wsgi/search/common/exceptions.py
|
Python
|
apache-2.0
| 1,602
| 0.006866
|
#! /usr/bin/env python
# coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
import os
#config
#change the GPIO Port number
gpioport=24
sdate = time.strftime("%Y-%m-%d")
stime = time.strftime("%H:%M:%S")
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpioport, GPIO.IN)
def sysshutdown(channel):
msg="System shutdown GPIO.Low state"
logpath="/var/log/shutdown.log"
print("System shutdown")
f = open(logpath, "a")
f.write(str(sdate)+";"+str(stime)+";"+str(msg)+";")
f.close()
os.system("shutdown -h now")
while True:
    if GPIO.input(gpioport):
sysshutdown("1")
break
time.sleep(2)
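# Illustrative alternative (not in the original script): sysshutdown() already has
# the (channel) signature that RPi.GPIO callbacks use, so the polling loop above
# could be replaced with an edge-triggered callback, e.g.:
#
#     GPIO.add_event_detect(gpioport, GPIO.RISING, callback=sysshutdown, bouncetime=300)
#     while True:
#         time.sleep(60)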
|
BaileySN/Raspberry-Pi-Shutdown-Button
|
shutdown_script.py
|
Python
|
gpl-3.0
| 586
| 0.030717
|
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from collections import deque
from itertools import islice
#from subprocess import call
import subprocess
from optparse import OptionParser
from tempfile import mkstemp
import glob
import os
import random
import re
import shlex
import shutil
import sys
import tempfile
import time
import resource
import locale
#import file
locale.setlocale(locale.LC_ALL, "C")
FNULL = open('/dev/null', 'w')
base_path = os.path.dirname(sys.argv[0])[:-len('src/')]
dry_run = False
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def call(call_arr, stdout=sys.stdout, stderr=sys.stderr):
if not dry_run:
subprocess.call(call_arr, stdout=stdout, stderr=stderr)
def sort_reads_command(options,reads_filename):
"""
    Sort the incoming FASTQ file.
"""
SORT_CMD = "fastq-sort " + reads_filename
call_arr = SORT_CMD.split()
output_fp = open(options.output_dir + '/sorted/' + os.path.basename(reads_filename), 'w')
out_cmd(output_fp.name, FNULL.name, call_arr)
call(call_arr, stdout=output_fp, stderr=FNULL)
return output_fp.name
def sort_reads(options):
"""
Sort the FASTQ reads and update the options accordingly.
"""
ensure_dir(options.output_dir + '/sorted/')
if options.unpaired_reads_filenames:
new_filenames = []
for reads_filenames in options.unpaired_reads_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.unpaired_reads_filenames = ','.join(new_filenames)
if options.first_mate_filenames:
new_filenames = []
for reads_filenames in options.first_mate_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.first_mate_filenames = ','.join(new_filenames)
if options.second_mate_filenames:
new_filenames = []
for reads_filenames in options.second_mate_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.second_mate_filenames = ','.join(new_filenames)
def compress(options):
"""
Compress the reads using all methods.
"""
ensure_dir(options.output_dir + '/original/')
ensure_dir(options.output_dir + '/goodbad/')
ensure_dir(options.output_dir + '/maxqual/')
ensure_dir(options.output_dir + '/minqual/')
std_err_file = open('compress.log', 'w')
# Basic command line scripts to run the individual compression schemes.
GB_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -c 2 -b 0 -i [COMPRESSED_FILE]"
MAX_VALUE_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -g [MAX_QV] -b 40 -c 2 -i [COMPRESSED_FILE]"
MIN_VALUE_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -g 0 -b 0 -c 2 -i [COMPRESSED_FILE]"
POLY_REGRESSION_CMD = "Rscript src/poly_regression_parallel.R [READ] [OUTPUT] [DEGREE] [COMPRESSED_FILE] [NUM_THREADS] [MAX_QV]"
PROFILE_COMPRESSION_CMD = "Rscript src/profile_parallel.R [READ] [OUTPUT] [TRAINING_SIZE] [NUM_PROFILES] [COMPRESSED_FILE] [NUM_THREADS]"
QUALCOMP_COMPRESS_CMD = "./runCompress.sh -i [READ] -c [CLUSTERS] -r [RATE]"
QUALCOMP_DECOMPRESS_CMD = "./runDecompress.sh -p [DIR] -c [CLUSTERS] -r [RATE]"
RQS_COMPRESS_CMD = "./src/run_rqs.sh [READ] [OUTPUT]"
#qvz -c 3 -r .10 -v test_results4/original/frag_1.fastq.quals tmp/test_1_c3_r.10
QVZ_COMPRESS_CMD = "[QVZ]/qvz -c [CLUSTERS] -r [RATE] -v [READ] [OUTPUT]"
QVZ_DECOMPRESS_CMD = "[QVZ]/qvz -x -v [INPUT] [OUTPUT]"
# Store which compression directories we created.
options.compressed_dirs = []
options.compressed_dirs.append('original')
options.compressed_dirs.append('goodbad')
options.compressed_dirs.append('maxqual')
options.compressed_dirs.append('minqual')
for reads_filename in options.reads_filenames:
# Copy the original sequences over.
out_cmd("", std_err_file.name, ["cp", reads_filename, options.output_dir + '/original/' + os.path.basename(reads_filename)])
shutil.copyfile(reads_filename, options.output_dir + '/original/' + os.path.basename(reads_filename))
# Good/bad binary compression.
call_arr = GB_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/goodbad/' + os.path.basename(reads_filename) + '.comp').split()
output_fp = open(options.output_dir + '/goodbad/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/goodbad/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
# Max/min quality value compression. We can use good_bad.py script to do this.
call_arr = MAX_VALUE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/maxqual/' + os.path.basename(reads_filename) + '.comp')\
.replace('[MAX_QV]', options.max_quality).split()
output_fp = open(options.output_dir + '/maxqual/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/maxqual/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
call_arr = MIN_VALUE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/minqual/' + os.path.basename(reads_filename) + '.comp').split()
output_fp = open(options.output_dir + '/minqual/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/minqual/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
#continue
# Polynomial regression.
if options.poly_degrees:
for degree in options.poly_degrees.split(','):
ensure_dir(options.output_dir + '/degree_' + degree + '/')
if 'degree_' + degree not in options.compressed_dirs:
options.compressed_dirs.append('degree_' + degree)
#continue
call_arr = POLY_REGRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[OUTPUT]', options.output_dir + '/degree_' + degree + '/' + os.path.basename(reads_filename))\
.replace('[DEGREE]', degree)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/degree_' + degree +'/' + os.path.basename(reads_filename) + '.comp')\
.replace('[NUM_THREADS]', options.threads)\
.replace('[MAX_QV]', options.max_quality).split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Profile compression using k-means.
if options.profile_sizes:
for profiles in options.profile_sizes.split(','):
ensure_dir(options.output_dir + '/profile_' + profiles + '/')
if 'profile_' + profiles not in options.compressed_dirs:
options.compressed_dirs.append('profile_' + profiles)
#continue
call_arr = PROFILE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[OUTPUT]', options.output_dir + '/profile_' + profiles + '/' + os.path.basename(reads_filename))\
.replace('[NUM_PROFILES]', profiles)\
.replace('[TRAINING_SIZE]', options.training_size)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/profile_' + profiles +'/' + os.path.basename(reads_filename) + '.comp')\
.replace('[NUM_THREADS]', options.threads).split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Compress using QualComp.
if options.rates:
for rate in options.rates.split(','):
#continue
ensure_dir(options.output_dir + '/qualcomp_r' + rate + '/')
if 'qualcomp_r' + rate not in options.compressed_dirs:
options.compressed_dirs.append('qualcomp_r' + rate)
#continue
"""
QUALCOMP_COMPRESS_CMD = "$QUALCOMP/runCompressMod.sh -i [READ] -c [CLUSTERS] -r [RATE]"
QUALCOMP_DECOMPRESS_CMD = "$QUALCOMP/runDecompress.sh -p [DIR] -c [CLUSTERS] -r [RATE]"
"""
reads_abs_path = os.path.abspath(reads_filename)
prev_dir = os.getcwd()
os.chdir(os.environ["QUALCOMP"])
call_arr = QUALCOMP_COMPRESS_CMD.replace('[READ]', reads_abs_path)\
.replace('[CLUSTERS]', options.clusters)\
.replace('[RATE]', rate).split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
# Also decompress using QualComp special function.
qualcomp_prefix = reads_abs_path.split('.')[0]
call_arr = QUALCOMP_DECOMPRESS_CMD.replace('[DIR]', qualcomp_prefix)\
.replace('[CLUSTERS]', options.clusters)\
.replace('[RATE]', rate).split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
os.chdir(prev_dir)
# QualComp writes the files into the original directory,
# so move the fastq files into the QualComp directory.
mv_cmd = "mv " + qualcomp_prefix + "_" + options.clusters + "_" + rate + ".fastq " + options.output_dir + '/qualcomp_r' + rate + '/' + os.path.basename(reads_filename)
call_arr = mv_cmd.split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
filename_list = glob.glob(qualcomp_prefix + "_" + options.clusters + "_*")
mv_cmd = "mv " + ' '.join(filename_list) + ' ' + options.output_dir + '/qualcomp_r' + rate + '/'
call_arr = mv_cmd.split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Concatenate all the binary files to create a single 'compressed' file.
filename_list = glob.glob(options.output_dir + '/qualcomp_r' + rate + '/' + os.path.basename(reads_filename).split(".")[0] + "*bin")
cat_cmd = "cat " + ' '.join(filename_list)
call_arr = cat_cmd.split()
bin_file = open(options.output_dir + '/qualcomp_r' + rate + '/' + os.path.basename(reads_filename) + '.comp', 'w')
out_cmd(bin_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=bin_file, stderr=std_err_file)
        # Compress using QVZ.
if options.qvz_rates:
for rate in options.qvz_rates.split(','):
ensure_dir(options.output_dir + '/qvz_r' + rate + '/')
if 'qvz_r' + rate not in options.compressed_dirs:
options.compressed_dirs.append('qvz_r' + rate)
"""
QVZ_COMPRESS_CMD = "[QVZ]/qvz -c [CLUSTERS] -r [RATE] -v [READ] [OUTPUT]"
QVZ_DECOMPRESS_CMD = "[QVZ]/qvz -x -v [INPUT] [OUTPUT]"
"""
# We first need to get the quality values only. TODO(cmhill): Very hacky.
with open(reads_filename) as fin, open(options.output_dir + '/qvz_r' + rate + '/orig_' + os.path.basename(reads_filename) + '.quals', 'w') as fout:
fout.writelines(islice(fin, 3, None, 4))
call_arr = QVZ_COMPRESS_CMD.replace('[READ]', options.output_dir + '/qvz_r' + rate + '/orig_' + os.path.basename(reads_filename) + '.quals')\
.replace('[QVZ]', os.environ["QVZ"])\
.replace('[CLUSTERS]', options.clusters)\
.replace('[RATE]', rate)\
.replace('[OUTPUT]', options.output_dir + '/qvz_r' + rate + '/' + os.path.basename(reads_filename) + '.comp').split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
# Also decompress using QVZ special function.
#qualcomp_prefix = reads_abs_path.split('.')[0]
call_arr = QVZ_DECOMPRESS_CMD.replace('[INPUT]', options.output_dir + '/qvz_r' + rate + '/' + os.path.basename(reads_filename) + '.comp')\
.replace('[QVZ]', os.environ["QVZ"])\
.replace('[OUTPUT]', options.output_dir + '/qvz_r' + rate + '/' + os.path.basename(reads_filename) + '.quals').split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
# Rebuild the FASTQ file from the unconstructed quals.
call_arr_str = "python ./src/merge_qual_into_fastq.py " + reads_filename + " " + options.output_dir + '/qvz_r' + rate + '/' + os.path.basename(reads_filename) + '.quals'
call_arr = call_arr_str.split()
decompressed_file = open(options.output_dir + '/qvz_r' + rate + '/' + os.path.basename(reads_filename), 'w')
out_cmd(decompressed_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=decompressed_file, stderr=std_err_file)
# Compress with RQS.
ensure_dir(options.output_dir + '/rqs/')
if 'rqs' not in options.compressed_dirs:
options.compressed_dirs.append('rqs')
call_arr = RQS_COMPRESS_CMD.replace('[READ]', reads_filename)\
.replace('[OUTPUT]', options.output_dir + '/rqs/').split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# # Profile compression using k-means.
# for profiles in options.profile_sizes.split(','):
# ensure_dir(options.output_dir + '/profile_' + profiles + '/')
#
# if 'profile_' + profiles not in options.compressed_dirs:
# options.compressed_dirs.append('profile_' + profiles)
#
# #continue
#
# call_arr = PROFILE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
# .replace('[OUTPUT]', options.output_dir + '/profile_' + profiles + '/' + os.path.basename(reads_filename))\
# .replace('[NUM_PROFILES]', profiles)\
# .replace('[TRAINING_SIZE]', options.training_size)\
# .replace('[COMPRESSED_FILE]', options.output_dir + '/profile_' + profiles +'/' + os.path.basename(reads_filename) + '.comp')\
# .replace('[NUM_THREADS]', options.threads).split()
#
# out_cmd("", std_err_file.name, call_arr)
# call(call_arr, stderr=std_err_file)
# After we compress/decompress everything, write out the quality values to a separate file and then run bzip on them.
for compression_method in options.compressed_dirs:
for reads_filename in options.reads_filenames:
decompressed_file = options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename)
# Check if we already made a quals file, like in the case of QVZ.
if not os.path.isfile(decompressed_file + '.quals'):
with open(decompressed_file) as fin, open(decompressed_file + '.quals', 'w') as fout:
fout.writelines(islice(fin, 3, None, 4))
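            # islice(fin, 3, None, 4) keeps every 4th line starting at index 3,
            # i.e. the quality line of each 4-line FASTQ record, which is what the
            # awk one-liner below does on the command line.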
# Even though we do it in python, output the awk command in case someone runs it independently.
cmd = 'awk \'{if (NR % 4 == 0) print $0}\' ' + options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename)
out_cmd(decompressed_file + '.quals', std_err_file.name, 'awk \'{if (NR % 4 == 0) print $0}\''.split())
# Bzip2 the quality values.
cmd = "bzip2 -k " + options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.quals'
out_cmd("", std_err_file.name, cmd.split())
call(cmd.split(), stderr=std_err_file)
# Bzip2 the compressed quality values.
if os.path.isfile(options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.comp'):
cmd = "bzip2 -k " + options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.comp'
out_cmd("", std_err_file.name, cmd.split())
call(cmd.split(), stderr=std_err_file)
# Calculate the information lost from compression.
calc_mean_squared_error(options)
std_err_file.close()
def calc_mean_squared_error(options):
"""
Calculate mean squared error between the original and decompressed reads.
"""
std_err_file = open('mse.log', 'w')
for compression_method in options.compressed_dirs:
for reads_filename in options.reads_filenames:
MSE_CMD = "python src/evaluate_loss.py " + options.output_dir + '/original/' + os.path.basename(reads_filename) + '.quals' + '\t' + \
options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.quals'
mse_std_out = open(options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.mse', 'w')
out_cmd(mse_std_out.name, std_err_file.name, MSE_CMD.split())
call(MSE_CMD.split(), stdout=mse_std_out, stderr=std_err_file)
def quality_preprocessing(options):
"""
Examine the effects of lossy compression on quality preprocessing tools.
"""
std_err_file = open('preprocessing.log', 'w')
SICKLE_CMD = "sickle se -f [READ] -t sanger -o [OUTPUT]"
for compression_method in options.compressed_dirs:
for reads_filename in options.reads_filenames:
output_filename = options.output_dir + '/preprocessing/' + compression_method + '/' + os.path.basename(reads_filename)
ensure_dir(output_filename)
stats_file = open(options.output_dir + '/preprocessing/' + compression_method + '/' + os.path.basename(reads_filename) + '.stats', 'w')
call_arr = SICKLE_CMD.replace('[READ]', options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename)).replace('[OUTPUT]', output_filename).split()
out_cmd("", stats_file.name, call_arr)
call(call_arr, stdout=stats_file)
# Process the stats file to get the relevant information.
"""Output stats file is in the format of:
SE input file: tmp/goodbad/frag_small_2.fastq
Total FastQ records: 200
FastQ records kept: 138
FastQ records discarded: 62
"""
# Parse the above to get how many records were kept.
for line in open(stats_file.name, 'r').readlines():
if line.startswith('FastQ records kept:'):
records_kept = line.strip().split()[3]
open(stats_file.name + '.records_kept', 'w').write(records_kept + '\n')
            # Find out how many bases were kept and write the headers out to a separate file.
line_number = 1
bases = 0
headers = []
for line in open(output_filename, 'r'):
if (line_number % 4) == 1:
headers.append(line.strip())
if (line_number % 4) == 2:
bases += len(line.strip())
line_number += 1
open(stats_file.name + '.bases', 'w').write(str(bases) + '\n')
headers.sort(cmp=locale.strcoll)
open(options.output_dir + '/preprocessing/' + compression_method + '/' + os.path.basename(reads_filename) + '.headers', 'w').write('\n'.join(headers) + '\n')
def assemble(options):
"""
Test assemblies using ALLPATHS-LG.
"""
std_err_file = open('assemble.log', 'a')
    # The first step is to create the in_groups.csv and in_libs.csv.
IN_GROUPS_CSV = """group_name,\tlibrary_name,\tfile_name
frag,\tIllumina_01,\t[FULL_PATH]/[COMPRESSION]/frag_*.fastq
shortjump,\tIllumina_02,\t[FULL_PATH]/[COMPRESSION]/shortjump_*.fastq"""
IN_LIBS_CSV = """library_name,\tproject_name,\torganism_name,\ttype,\tpaired,\tfrag_size,\tfrag_stddev,\tinsert_size,\tinsert_stddev,\tread_orientation,\tgenomic_start,\tgenomic_end
Illumina_01,\tassembly,\tunknown,\tfragment,\t1,\t180,\t10,\t,\t,\tinward,\t,\t
Illumina_02,\tassembly,\tunknown,\tjumping,\t1,\t,\t,\t3000,\t500,\toutward,\t,\t"""
#print(IN_GROUPS_CSV.replace('[FULL_PATH]', os.path.abspath(options.output_dir)).replace('[COMPRESSION]', 'goodbad'))
for compression_method in options.compressed_dirs:
ensure_dir(options.output_dir + '/assemble/' + compression_method + '/')
open(options.output_dir + '/assemble/' + compression_method + '/in_groups.csv', 'w').write(IN_GROUPS_CSV.replace('[FULL_PATH]', \
os.path.abspath(options.output_dir)).replace('[COMPRESSION]', compression_method))
open(options.output_dir + '/assemble/' + compression_method + '/in_libs.csv', 'w').write(IN_LIBS_CSV)
# Prepare the input for AllpathsLG.
PREPARE_CMD = 'PrepareAllPathsInputs.pl DATA_DIR=' + os.path.abspath(options.output_dir) + '/assemble/' + compression_method + \
' IN_GROUPS_CSV=' + os.path.abspath(options.output_dir) + '/assemble/' + compression_method + '/in_groups.csv' + \
' IN_LIBS_CSV=' + os.path.abspath(options.output_dir) + '/assemble/' + compression_method + '/in_libs.csv'
call_arr = PREPARE_CMD.split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# For AllpathsLG to run successfully, need to have a PLOIDY file present.
PLOIDY_CMD = "echo 1"
call_arr = PLOIDY_CMD.split()
ploidy_file = open(options.output_dir + '/assemble/' + compression_method + '/ploidy', 'w')
out_cmd(ploidy_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=ploidy_file, stderr=std_err_file)
# Run AllpathsLG
ALLPATHS_CMD = "RunAllPathsLG PRE=" + os.path.abspath(options.output_dir) + '/assemble/' + compression_method + " DATA_SUBDIR=. RUN=allpaths SUBDIR=run THREADS=" + options.threads + " OVERWRITE=True REFERENCE_NAME=."
call_arr = ALLPATHS_CMD.split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Calculate the assembly likelihood.
# /cbcb/project-scratch/cmhill/metalap/calc_prob.py -1 original/frag_1.fastq -2 original/frag_2.fastq -q -a decomp_0/allpaths/ASSEMBLIES/run/final.assembly.fasta -I 0 -X 500 -m 180 -t 18 -p 32
reads_filenames = ','.join(options.reads_filenames)
assembly_filename = os.path.abspath(options.output_dir + '/assemble/' + compression_method + '/allpaths/ASSEMBLIES/run/final.assembly.fasta')
CALC_PROB_CMD = os.environ["CALCPROB"] + "/calc_prob.py -q -i " + reads_filenames + ' -a ' + assembly_filename + ' -p ' + options.threads
# tmp_rhodo/assemble/original/allpaths/ASSEMBLIES/run/final.assembly.fasta
probs_file = open(options.output_dir + '/assemble/' + compression_method + '/output.probs', 'w')
call_arr = CALC_PROB_CMD.split()
out_cmd(probs_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=probs_file, stderr=std_err_file)
# Sum the read probabilities.
SUM_PROB_CMD = os.environ["CALCPROB"] + '/sum_prob.py -t 1e-80 -i ' + probs_file.name
sum_file = open(options.output_dir + '/assemble/' + compression_method + '/output.sum', 'w')
call_arr = SUM_PROB_CMD.split()
out_cmd(sum_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=sum_file, stderr=std_err_file)
        # Run getCorrectnessStats.sh:
# sh getCorrectnessStats.sh [reference] [assembly] [assembly]
#reads_abs_path = os.path.abspath(reads_filename)
output_abs_dir = os.path.abspath(options.output_dir + '/assemble/' + compression_method + '/')
reference_abs_path = os.path.abspath(options.reference_fasta)
prev_dir = os.getcwd()
os.chdir(os.environ["MUMMER"])
GET_CORRECTNESS_STATS_CMD = "sh ./getCorrectnessStats.sh " + reference_abs_path + ' ' + assembly_filename + ' ' + assembly_filename
correctness_stats_file = open(output_abs_dir + '/assembly.correctness', 'w')
call_arr = GET_CORRECTNESS_STATS_CMD.split()
out_cmd(correctness_stats_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=correctness_stats_file, stderr=std_err_file)
os.chdir(prev_dir)
def align_reads(options):
"""
Evaluate how all decompressed reads align with Bowtie2.
"""
std_err_file = open('alignment.log', 'a')
# Construct the Bowtie2 index.
ensure_dir(options.output_dir + "/align/")
BOWTIE2_INDEX_CMD = "bowtie2-build " + options.reference_fasta + " " + options.output_dir + "/align/reference"
call_arr = BOWTIE2_INDEX_CMD.split()
out_cmd("", "", call_arr)
call(call_arr, stdout=FNULL, stderr=std_err_file)
# Align the reads.
BOWTIE2_CMD = "bowtie2 -x " + options.output_dir + "/align/reference -p " + options.threads + " --reorder -U [READ] --al [ALIGNED] -S [SAM]"
# Have to do max/min alignment
for compression_method in options.compressed_dirs:
for reads_filename in options.reads_filenames:
alignment_filename = options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.alignment_summary'
ensure_dir(alignment_filename)
alignment_file = open(alignment_filename, 'w')
call_arr = BOWTIE2_CMD.replace('[READ]', options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename))\
.replace('[ALIGNED]', options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.aligned')\
.replace('[SAM]', options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.sam').split()
out_cmd(FNULL.name, alignment_filename, call_arr)
call(call_arr, stdout=FNULL, stderr=alignment_file)
# Print out the headers of the aligned sequences.
with open(options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.aligned', 'r') as aligned_file, open(options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.headers', 'w') as headers:
headers.writelines(islice(aligned_file, 0, None, 4))
def call_snps(options):
"""
Call SNPs using decompressed reads.
"""
std_err_file = open('snp.log', 'a')
ensure_dir(options.output_dir + "/snp/")
SAMTOOLS_SORT_CMD = "samtools sort -T ./ -o [SORTED_SAM] -O sam [SAM]"
PILEUP_CMD = "samtools mpileup -ugf [REFERENCE] [SAM]"
SNP_CMD = "bcftools call -vV indels -mO v -o [OUTPUT_VCF] [PILEUP]"
for compression_method in options.compressed_dirs:
for reads_filename in options.reads_filenames:
pileup_filename = options.output_dir + '/snp/' + compression_method + '/' + os.path.basename(reads_filename) + '.pileup'
ensure_dir(pileup_filename)
call_arr = SAMTOOLS_SORT_CMD.replace('[SAM]', options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.sam')\
.replace('[SORTED_SAM]', options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.sorted.sam').split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
pileup_file = open(pileup_filename, 'w')
call_arr = PILEUP_CMD.replace('[REFERENCE]', options.reference_fasta)\
.replace('[SAM]', options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.sorted.sam').split()
out_cmd(pileup_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=pileup_file, stderr=std_err_file)
call_arr = SNP_CMD.replace('[OUTPUT_VCF]', options.output_dir + '/snp/' + compression_method + '/' + os.path.basename(reads_filename) + '.vcf')\
.replace('[PILEUP]', options.output_dir + '/snp/' + compression_method + '/' + os.path.basename(reads_filename) + '.pileup').split()
out_cmd(std_err_file.name, std_err_file.name, call_arr)
call(call_arr, stdout=std_err_file, stderr=std_err_file)
# Print out the locations of the SNP.
snps_filename = options.output_dir + '/snp/' + compression_method + '/' + os.path.basename(reads_filename) + '.snps'
with open(snps_filename, 'w') as snps_file, open(options.output_dir + '/snp/' + compression_method + '/' + os.path.basename(reads_filename) + '.vcf', 'r') as vcf_file:
line = vcf_file.readline()
while line.startswith('#'):
line = vcf_file.readline()
snps = []
while line:
tuple = line.split('\t')
if len(tuple)>1:
snps.append(str(tuple[0] + '\t' + tuple[1] + '\n'))
#snps_file.write(str(tuple[0] + '\t' + tuple[1] + '\n'))
line = vcf_file.readline()
snps.sort()
for snp in snps:
snps_file.write(snp)
def post_process_results(options):
"""
Consolidate the results.
"""
process_compression_stats(options)
if options.preprocessing:
process_preprocessing_stats(options)
if options.assemble:
process_assembly_stats(options)
if options.alignment or options.snp:
process_alignment_stats(options)
if options.snp:
process_snp_stats(options)
def process_compression_stats(options):
"""
    Output compression results for each FASTQ file, one row per compression method:
        compression bases orig_size orig_bzip2 comp_size comp_bzip2 mse L1 lorentzian bits_bp
"""
ensure_dir(options.output_dir + "/results/")
for reads_filename in options.reads_filenames:
# Store the amount of bases we see.
bases = 0
for line in open(options.output_dir + '/original/' + os.path.basename(reads_filename) + '.quals', 'r'):
bases += len(line.strip())
results_file = open(options.output_dir + "/results/" + os.path.basename(reads_filename) + '.compression', 'w')
results_file.write("compression\tbases\torig_size\torig_bzip2\tcomp_size\tcomp_bzip2\tmse\tL1\tlorentzian\tbits_bp\n")
for compression_method in options.compressed_dirs:
filename = options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename)
results = compression_method + '\t'
results += str(bases) + '\t'
# Get the original size.
results += str(os.path.getsize(filename + '.quals')) + '\t'
# Get the bzip2 size.
results += str(os.path.getsize(filename + '.quals.bz2')) + '\t'
compressed_size = os.path.getsize(filename + '.quals.bz2')
# Get the compressed size.
if os.path.isfile(filename + '.comp'):
results += str(os.path.getsize(filename + '.comp')) + '\t'
results += str(os.path.getsize(filename + '.comp.bz2')) + '\t'
if os.path.getsize(filename + '.comp.bz2') < compressed_size:
compressed_size = os.path.getsize(filename + '.comp.bz2')
else:
results += "NA\tNA\t"
# Get the MSE.
distortions = grab_list_of_values_from_file(filename + '.mse')
results += distortions[0] + '\t' + distortions[2] + '\t' + distortions[4] + '\t'
# Print the bits/bp.
results += str((compressed_size * 8) / float(bases)) + '\n'
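            # Worked example (illustrative): 2,500,000 compressed bytes covering
            # 10,000,000 quality values -> (2500000 * 8) / 10000000 = 2.0 bits/bp.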
results_file.write(results)
results_file.close()
def process_preprocessing_stats(options):
"""
Output the preprocessing results.
"""
for reads_filename in options.reads_filenames:
sequence_count = num_lines(reads_filename) / 4
results_file = open(options.output_dir + "/results/" + os.path.basename(reads_filename) + '.preprocessing', 'w')
results_file.write("compression\tuniq_orig\tuniq_comp\tcommon_kept\tcommon_discard\tbases_kept\n")
for compression_method in options.compressed_dirs:
results = compression_method + '\t'
# Get the number of sequences kept unique to original.
unique_to_orig = run_comm_and_return_line_count(options, "23", options.output_dir + '/preprocessing/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/preprocessing/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += unique_to_orig + '\t'
# Get the number of sequences kept unique to the compression method.
unique_to_compress = run_comm_and_return_line_count(options, "13", options.output_dir + '/preprocessing/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/preprocessing/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += unique_to_compress + '\t'
# Get the number of sequences kept by both methods.
common_count = run_comm_and_return_line_count(options, "12", options.output_dir + '/preprocessing/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/preprocessing/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += common_count + '\t'
# Get the number of sequences filtered by both methods.
results += str(sequence_count - int(unique_to_orig) - int(unique_to_compress) - int(common_count)) + '\t'
# Get the bases kept in the file.
results += str(grab_value_from_file(options.output_dir + '/preprocessing/' + compression_method + '/' + os.path.basename(reads_filename) + '.stats.bases'))
results += '\n'
results_file.write(results)
results_file.close()
def process_assembly_stats(options):
"""
Output the assembly stats.
"""
results_file = open(options.output_dir + "/results/assembly", 'w')
results_file.write("compression\tref_bases\tasm_bases\tN50\tmissing_ref_bases\tavg_idy\tsnps\tindels_gt5bp\tinversions\treloc\ttransloc\tcorr_N50\tLAP\n")
for compression_method in options.compressed_dirs:
results = compression_method + '\t'
results += "\t".join(parse_correctness_stats(options.output_dir + '/assemble/' + compression_method + '/assembly.correctness'))
results += '\t' + str(grab_value_from_file(options.output_dir + '/assemble/' + compression_method + '/output.sum'))
results += '\n'
results_file.write(results)
results_file.close()
def parse_correctness_stats(filename):
"""
    Parse the results returned from getCorrectnessStats.sh.
"""
results = []
stats_file = open(filename, 'r')
line = stats_file.readline()
while not line.startswith('Reference:'):
line = stats_file.readline()
# Add Reference bases.
results.append(line.strip().split()[1])
# Add the genome bases.
line = stats_file.readline()
results.append(line.strip().split()[1])
line = stats_file.readline()
while not line.startswith('N50:'):
line = stats_file.readline()
# Add N50.
results.append(line.split()[1])
line = stats_file.readline()
while not line.startswith('Missing Reference'):
line = stats_file.readline()
# Missing reference bases.
results.append(line.strip().split(' ')[3].split('(')[0])
line = stats_file.readline()
while not line.startswith('Avg Idy:'):
line = stats_file.readline()
# Add Avg Idy.
results.append(line.strip().split(' ')[2])
# Add SNPs.
line = stats_file.readline()
results.append(line.strip().split(' ')[1])
# Add Indels > 5bp.
line = stats_file.readline()
line = stats_file.readline()
results.append(line.strip().split(' ')[3])
# Add Inversions.
line = stats_file.readline()
results.append(line.strip().split(' ')[1])
# Add Relocations.
line = stats_file.readline()
results.append(line.strip().split(' ')[1])
# Add translocations.
line = stats_file.readline()
results.append(line.strip().split(' ')[1])
line = stats_file.readline()
while not line.startswith('N50:'):
line = stats_file.readline()
# Add Corrected N50.
results.append(line.strip().split()[1])
stats_file.close()
return results
def process_alignment_stats(options):
"""
Process the bowtie2 alignment results.
"""
# options.output_dir + '/' + compression_method + '/' + os.path.basename(reads_filename) + '.aligned'
for reads_filename in options.reads_filenames:
sequence_count = num_lines(reads_filename) / 4
results_file = open(options.output_dir + "/results/" + os.path.basename(reads_filename) + '.alignment', 'w')
results_file.write("compression\tmapped\tshared\tuniq_orig\tuniq_comp\tunmapped_shared\n")
for compression_method in options.compressed_dirs:
results = compression_method + '\t'
# Get aligned reads.
results += str(num_lines(options.output_dir + '/align/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')) + '\t'
# Get the number of sequences kept by both methods.
common_count = run_comm_and_return_line_count(options, "12", options.output_dir + '/align/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/align/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += common_count + '\t'
# Get the number of sequences kept unique to original.
unique_to_orig = run_comm_and_return_line_count(options, "23", options.output_dir + '/align/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/align/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += unique_to_orig + '\t'
# Get the number of sequences kept unique to the compression method.
unique_to_compress = run_comm_and_return_line_count(options, "13", options.output_dir + '/align/original/'\
+ os.path.basename(reads_filename) + '.headers',\
options.output_dir + '/align/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.headers')
results += unique_to_compress + '\t'
# Get the number of sequences filtered by both methods.
results += str(sequence_count - int(unique_to_orig) - int(unique_to_compress) - int(common_count))
# Get the bases kept in the file.
#results += str(grab_value_from_file(options.output_dir + '/align/' + compression_method + '/' + os.path.basename(reads_filename) + '.stats.bases'))
results += '\n'
results_file.write(results)
results_file.close()
def process_snp_stats(options):
"""
Process the SNP results.
"""
for reads_filename in options.reads_filenames:
sequence_count = num_lines(reads_filename) / 4
results_file = open(options.output_dir + "/results/" + os.path.basename(reads_filename) + '.snps', 'w')
results_file.write("compression\tshared\tuniq_orig\tuniq_comp")
if options.gold_standard_snps:
results_file.write("\tshared_gold\tuniq_gold\tgold_uniq_comp")
results_file.write("\n")
for compression_method in options.compressed_dirs:
results = compression_method + '\t'
# Get the number of SNPs kept by both methods.
common_count = run_comm_and_return_line_count(options, "12", options.output_dir + '/snp/original/'\
+ os.path.basename(reads_filename) + '.snps',\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += common_count + '\t'
# Get the number of SNPs unique to original.
unique_to_orig = run_comm_and_return_line_count(options, "23", options.output_dir + '/snp/original/'\
+ os.path.basename(reads_filename) + '.snps',\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += unique_to_orig + '\t'
# Get the number of SNPs unique to the compression method.
unique_to_compress = run_comm_and_return_line_count(options, "13", options.output_dir + '/snp/original/'\
+ os.path.basename(reads_filename) + '.snps',\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += unique_to_compress
# Get the number of SNPS unique to the compression method that are shared with the gold standard set.
if options.gold_standard_snps:
# Get the number of SNPs kept by both methods.
                common_count = run_comm_and_return_line_count(options, "12", options.gold_standard_snps,\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += common_count + '\t'
# Get the number of SNPs unique to original.
                unique_to_orig = run_comm_and_return_line_count(options, "23", options.gold_standard_snps,\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += unique_to_orig + '\t'
# Get the number of SNPs unique to the compression method.
                unique_to_compress = run_comm_and_return_line_count(options, "13", options.gold_standard_snps,\
options.output_dir + '/snp/' + compression_method + '/'\
+ os.path.basename(reads_filename) + '.snps')
results += unique_to_compress
results += '\n'
results_file.write(results)
results_file.close()
def run_comm_and_return_line_count(options, suppress, file1, file2):
"""
    Run: comm -[suppress] [file1] [file2] | wc -l
"""
std_err_file = open('preprocessing.log', 'a')
cmd = "comm -" + suppress + " " + file1 + " " + file2
tmp_file = tempfile.NamedTemporaryFile('w', dir=options.output_dir)
call_arr = cmd.split()
out_cmd(tmp_file.name, "", call_arr)
call(call_arr, stdout=tmp_file, stderr=std_err_file)
cmd = "wc -l " + tmp_file.name
tmp_wc_file = tempfile.NamedTemporaryFile('w', dir=options.output_dir)
call_arr = cmd.split()
out_cmd(tmp_wc_file.name, "", call_arr)
call(call_arr, stdout=tmp_wc_file, stderr=std_err_file)
count = grab_value_from_file(tmp_wc_file.name)
tmp_file.close()
tmp_wc_file.close()
return count
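# Worked example for the comm-based comparison above (illustrative, not in the
# original file): with sorted inputs file1 = {A, B, C} and file2 = {B, C, D},
#   comm -12 file1 file2  -> B, C   (only lines common to both files)
#   comm -23 file1 file2  -> A      (lines unique to file1, i.e. the original)
#   comm -13 file1 file2  -> D      (lines unique to file2, the compressed run)
# so run_comm_and_return_line_count(options, "12", file1, file2) returns "2" here.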
"""
I/O Helpers
"""
def setup_shell_file():
if shell_file_fp:
shell_file_fp.write("#!/bin/bash\n")
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
assert os.path.exists(d)
def out_cmd(std_out = "", std_err = "", *objs):
#line(75)
if shell_file_fp:
if std_out != "":
std_out_sht = " 1>%s " % (std_out)
else:
std_out_sht = ""
if std_err != "":
std_err_sht = " 2>%s " % (std_err)
else:
std_err_sht = ""
shell_file_fp.write(' '.join(*objs) + std_out_sht + std_err_sht + "\n")
shell_file_fp.flush()
print(bcolors.OKBLUE + "COMMAND:\t" + bcolors.ENDC, ' '.join(*objs) + std_out_sht, file=sys.stderr)
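# Illustrative note (not in the original file): a call such as
#   out_cmd("frag_1.fastq.quals.bz2.log", "compress.log", ["bzip2", "-k", "frag_1.fastq.quals"])
# appends the replayable shell line
#   bzip2 -k frag_1.fastq.quals 1>frag_1.fastq.quals.bz2.log  2>compress.log
# to <output_dir>/commands.sh and echoes the command to stderr.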
def grab_value_from_file(filename):
"""
Return the value from the first line of a file.
"""
return open(filename, 'r').readline().strip().split()[0]
def grab_list_of_values_from_file(filename):
"""
    Return the whitespace-separated values from the first line of a file as a list.
"""
return open(filename, 'r').readline().strip().split()
def num_lines(filename):
"""
http://stackoverflow.com/questions/845058/how-to-get-line-count-cheaply-in-python
"""
return sum(1 for line in open(filename, 'r'))
"""
I/O Helpers
"""
def get_options():
parser = OptionParser()
# Input parameters.
parser.add_option("-r", "--reads", dest="unpaired_reads_filenames", help="Unpaired FASTQ filenames.")
parser.add_option("-1", "--first", dest="first_mate_filenames", help="First mate FASTQ filenames.")
parser.add_option("-2", "--second", dest="second_mate_filenames", help="Second mate FASTQ filenames.")
parser.add_option("-f", "--reference", dest="reference_fasta", help="Reference FASTA filename.")
# Output parameters.
parser.add_option("-o", "--output_dir", dest="output_dir", help="Output directory.")
# Pipeline options.
parser.add_option("-a", "--assemble", dest="assemble", help="Run assembly evaluation.", action='store_true')
parser.add_option("-p", "--preprocessing", dest="preprocessing", help="Run preprocessing tools evaluation.", action='store_true')
parser.add_option("-b", "--alignment", dest="alignment", help="Run alignment evaluation (using Bowtie2).", action='store_true')
parser.add_option("-n", "--snp", dest="snp", help="Run SNP analysis (will include alignment option if not already selected).", action='store_true')
parser.add_option("-s", "--sort", dest="sort_reads", help="Sort FASTQ reads before the pipeline begins (requires fastq-sort).", action='store_true')
# Polynomial regression specific options.
parser.add_option("--poly-degrees", dest="poly_degrees", help="Comma-separated list of polynomial degrees to use for regression.")
# Profile-specific compression options.
parser.add_option("--training-size", dest="training_size", help="Training size used for clustering.", default = "10000")
parser.add_option("--profile-sizes", dest="profile_sizes", help="Comma-separated list of number of profiles to use.", default="256")
# QualComp-specific compression options.
parser.add_option("--rates", dest="rates", help="QualComp parameter for setting the bits/reads.", default=None)#"30")
parser.add_option("--clusters", dest="clusters", help="QualComp parameter for setting number of clusters.", default="3")
# RQS-specific compression options.
#parser.add_option("--rqs", dest="rates", help="QualComp parameter for setting the bits/reads.", default="30")
#parser.add_option("--clusters", dest="clusters", help="QualComp parameter for setting number of clusters.", default="3")
# QVZ-specific compression options.
parser.add_option("--qvz-rates", dest="qvz_rates", help="QVZ parameter for setting the bits/reads.", default=None)#".30")
parser.add_option("--qvz-clusters", dest="qvz_clusters", help="QVZ parameter for setting number of clusters.", default="3")
# Max, min quality value compression options.
parser.add_option("--max-qv", dest="max_quality", help="Use this value for max quality value compression.", default="40")
parser.add_option("--min-qv", dest="min_quality", help="Use this value for min quality value compression.", default="10")
# SNP-specific options.
parser.add_option("--gold-standard-snps", dest="gold_standard_snps", help="Gold standard SNPs to use for comparison.")
# Additional options.
parser.add_option("-t", "--threads", dest="threads", help="Number of threads (default 32).", default="32")
parser.add_option("-d", "--dry-run", dest="dry_run", help="Don't run any commands.", action='store_true')
(options, args) = parser.parse_args()
return (options,args)
def main():
(options, args) = get_options()
shell_file = options.output_dir + "/commands.sh"
ensure_dir(shell_file)
global shell_file_fp
shell_file_fp = open(shell_file, 'w')
setup_shell_file()
global dry_run
dry_run = options.dry_run
# Sort the fastq files first if necessary.
if options.sort_reads:
sort_reads(options)
# Gather all the filenames.
reads_filenames = []
if options.unpaired_reads_filenames:
reads_filenames.extend(options.unpaired_reads_filenames.split(','))
if options.first_mate_filenames:
reads_filenames.extend(options.first_mate_filenames.split(','))
if options.second_mate_filenames:
reads_filenames.extend(options.second_mate_filenames.split(','))
options.reads_filenames = reads_filenames
# Compress and then decompress the reads.
compress(options)
# Assemble the sequences with ALLPATHS-LG.
if options.assemble:
assemble(options)
# Carry out preprocessing evaluation with SICKLE.
if options.preprocessing:
quality_preprocessing(options)
# Align the reads using Bowtie2.
if options.alignment or options.snp:
align_reads(options)
if options.snp:
call_snps(options)
# Output the compression results.
post_process_results(options)
if __name__ == '__main__':
main()
|
cmhill/q-compression
|
src/compress.py
|
Python
|
mit
| 50,746
| 0.005656
|
"""Prepare rendering of popular smart grid actions widget"""
from apps.widgets.smartgrid import smartgrid
def supply(request, page_name):
"""Supply view_objects content, which are the popular actions from the smart grid game."""
_ = request
num_results = 5 if page_name != "status" else None
    # Construct a dictionary containing the most popular tasks.
    # The keys are the task types and the values are lists of tasks.
popular_tasks = {
"Activity": smartgrid.get_popular_actions("activity", "approved", num_results),
"Commitment": smartgrid.get_popular_actions("commitment", "approved", num_results),
"Event": smartgrid.get_popular_actions("event", "pending", num_results),
"Excursion": smartgrid.get_popular_actions("excursion", "pending", num_results),
}
count = len(popular_tasks)
return {
"popular_tasks": popular_tasks,
"no_carousel": page_name == "status",
"range": count,
}
|
yongwen/makahiki
|
makahiki/apps/widgets/popular_tasks/views.py
|
Python
|
mit
| 997
| 0.008024
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-02-29 16:58
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sa_api_v2', '0004_django_19_updates'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='height',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='attachment',
name='width',
field=models.IntegerField(blank=True, null=True),
),
]
|
openplans/shareabouts-api
|
src/sa_api_v2/migrations/0005_add_dimensions_to_attachments.py
|
Python
|
gpl-3.0
| 702
| 0
|
"""Authentication and authorization."""
from haas.errors import AuthorizationError
from haas import model
from abc import ABCMeta, abstractmethod
import sys
_auth_backend = None
class AuthBackend(object):
"""An authentication/authorization backend.
Extensions which implement authentication/authorization backends should
inherit from this class, and invoke ``set_auth_backend()`` on an instance
    of the subclass.
Subclasses of AuthBackend must override `authenticate`, `_have_admin`,
and `_have_project_access`, and nothing else. Users of the AuthBackend must
not invoke `_have_admin` and `_have_project_access`, preferring
`have_admin` and `have_project_access`.
"""
__metaclass__ = ABCMeta
@abstractmethod
def authenticate(self):
"""Authenticate the api call, and prepare for later authorization checks.
This method will be invoked inside of a flask request context,
with ``haas.rest.local.db`` initialized to a valid database session.
It is responsible for authenticating the request, and storing any
data it will need later to determine whether the requested operation
is authorized.
The attribute ``haas.rest.local.auth`` is reserved for use by auth
backends; A backend may store any information it needs as that
attribute.
This method must return a boolean indicating whether or not
authentication was successful -- True if so, False if not.
"""
@abstractmethod
def _have_admin(self):
"""Check if the request is authorized to act as an administrator.
Return True if so, False if not. This will be called sometime after
``authenticate()``.
"""
@abstractmethod
def _have_project_access(self, project):
"""Check if the request is authorized to act as the given project.
Each backend must implement this method. The backend does not need
to deal with the case where the authenticated user is an admin here;
the `have_*` and `require_*` wrappers handle this.
"""
def have_admin(self):
"""Check if the request is authorized to act as an administrator.
        Return True if so, False if not. This will be called sometime after
``authenticate()``.
"""
return self._have_admin()
def have_project_access(self, project):
"""Check if the request is authorized to act as the given project.
        Return True if so, False if not. This will be called sometime after
``authenticate()``.
``project`` will be a ``Project`` object, *not* the name of the
project.
        Note that have_admin implies have_project_access.
"""
assert isinstance(project, model.Project)
return self._have_admin() or self._have_project_access(project)
def require_admin(self):
"""Ensure the request is authorized to act as an administrator.
Raises an ``AuthorizationError`` on failure, instead of returning
        False. This is a convenience wrapper around ``have_admin``,
        and should not be overridden by subclasses.
"""
if not self.have_admin():
raise AuthorizationError("This operation is administrator-only.")
def require_project_access(self, project):
"""Like ``require_admin()``, but wraps ``have_project_access()``."""
if not self.have_project_access(project):
raise AuthorizationError(
"You do not have access to the required project.")
def set_auth_backend(backend):
"""Set the authentication backend to ``backend``.
This should be called exactly once, on startup, with an instance of
    ``AuthBackend`` as its argument.
"""
global _auth_backend
if _auth_backend is not None:
sys.exit("Fatal Error: set_auth_backed() called twice. Make sure "
"you don't have conflicting extensions loaded.")
_auth_backend = backend
def get_auth_backend():
"""Return the current auth backend."""
return _auth_backend
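# Minimal illustrative backend (not part of the upstream module): a permissive
# policy that authenticates every request and treats it as an administrator.
# A real backend would inspect the request (e.g. via haas.rest.local) instead
# of returning constants.
class _AllowAllBackend(AuthBackend):

    def authenticate(self):
        # Nothing to verify; report success so authorization checks can run.
        return True

    def _have_admin(self):
        # Every authenticated request may act as an administrator.
        return True

    def _have_project_access(self, project):
        # Never consulted in practice (have_project_access short-circuits on
        # _have_admin), but the abstract interface requires an implementation.
        return True

# Wiring it up would be a single call at extension-load time:
#     set_auth_backend(_AllowAllBackend())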
|
meng-sun/hil
|
haas/auth.py
|
Python
|
apache-2.0
| 4,101
| 0.000244
|
"""
Author: Seyed Hamidreza Mohammadi
This file is part of the shamidreza/uniselection software.
Please refer to the LICENSE provided alongside the software (which is GPL v2,
http://www.gnu.org/licenses/gpl-2.0.html).
This file includes the code for putting all the pieces together.
"""
from utils import *
from extract_unit_info import *
from search import *
from generate_speech import *
if __name__ == "__main__":
if 0: # test pit2gci
pit_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.lf0'
target_gci = pit2gci(pit_file)
if 1: # test read_dur,pit,for methods
dur_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.dur'
for_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.for'
pit_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.lf0'
#a=read_hts_for(for_file)
#b=read_hts_pit(pit_file)
#c=read_hts_dur(dur_file)
fname = 'arctic_a0001'
lab_name=corpus_path+'/lab/'+fname+'.lab'
wav_name=corpus_path+'/wav/'+fname+'.wav'
pm_name=corpus_path+'/pm/'+fname+'.pm'
##target_units = load_input(lab_name)
#times, labs = read_lab(lab_name)
##tmp_units=extract_info(lab_name, wav_name, 0,0)
times, pits, vox_times, vox_vals = read_hts_pit(pit_file)
frm_time, frm_val = read_hts_for(for_file)
gcis=pit2gci(times, pits, vox_times, vox_vals)
tmp_units, times=read_input_lab(dur_file, pit_file)
#tmp_units = tmp_units[128:140]##
target_units = np.zeros(len(tmp_units), 'object')
for j in xrange(len(tmp_units)):
target_units[j] = tmp_units[j]
if 0:
units, fnames=load_units()
units = units[:int(units.shape[0]*(100.0/100.0))]
best_units_indice=search(target_units, units,limit=20)
best_units = units[best_units_indice]
f=open('tmp2.pkl','w+')
import pickle
pickle.dump(best_units,f)
pickle.dump(fnames,f)
f.flush()
f.close()
else:
f=open('tmp2.pkl','r')
import pickle
best_units=pickle.load(f)
fnames=pickle.load(f)
#best_units = best_units[128:140]##
f.close()
for i in xrange(target_units.shape[0]):
print target_units[i].phone, best_units[i].phone, best_units[i].unit_id
#wavs=concatenate_units_overlap(best_units, fnames)
#gcis = gcis[(gcis>times[128]) * (gcis<times[140])]
#gcis -= times[128]
##$frm_time, frm_val = units2for(best_units, fnames, times, frm_time, frm_val)
frm_time *= 16000.0
gcis=units2gci(best_units, fnames)##$
gcis = np.array(gcis)
##$gcis *= 16000
gcis = gcis.astype(np.uint32)
old_times = np.array(times).copy()
old_times *= 16000.0
times=units2dur(best_units, fnames)##$
times = np.array(times)
##$times *= 16000
times = times.astype(np.uint32)
#times = times[128:141]##
#aa=times[0]##
#for i in range(len(times)):##
#times[i] -= aa##
#frm_time *= 16000
wavs=concatenate_units_psola_har_overlap(best_units, fnames, old_times, times, gcis, frm_time, frm_val, overlap=0.5)
#wavs=concatenate_units_nooverlap(best_units, fnames)
ftime, fval = get_formant(wavs, 16000)
from scipy.io.wavfile import write as wwrite
wwrite('out.wav', 16000, wavs)
print 'successfully saved out.wav'
|
shamidreza/unitselection
|
experiment.py
|
Python
|
gpl-2.0
| 3,464
| 0.018764
|
import os
import os.path
from raiden.constants import RAIDEN_DB_VERSION
def database_from_privatekey(base_dir, app_number):
""" Format a database path based on the private key and app number. """
dbpath = os.path.join(base_dir, f"app{app_number}", f"v{RAIDEN_DB_VERSION}_log.db")
os.makedirs(os.path.dirname(dbpath))
return dbpath
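# Illustrative usage (not in the original file): with base_dir="/tmp/raiden-tests"
# and app_number=0 this returns "/tmp/raiden-tests/app0/v{RAIDEN_DB_VERSION}_log.db"
# and creates the "app0" directory as a side effect (os.makedirs raises if the
# directory already exists).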
|
hackaugusto/raiden
|
raiden/tests/utils/app.py
|
Python
|
mit
| 351
| 0.002849
|
# coding=utf-8
from datetime import datetime, date, time
from decimal import Decimal
import json
import django
from django.forms import IntegerField
from django.test import TransactionTestCase, Client
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy
import pytz
from formapi.api import DjangoJSONEncoder
from formapi.compat import smart_u, get_user_model
from formapi.models import APIKey
from formapi.utils import get_sign
TOTAL_TESTS = 19
class SignedRequestTest(TransactionTestCase):
def setUp(self):
self.api_key = APIKey.objects.create(email="test@example.com")
self.api_key_revoked = APIKey.objects.create(email="test3@example.com", revoked=True)
self.client = Client()
self.user = get_user_model().objects.create(email="user@example.com", username="räksmörgås")
self.user.set_password("rosebud")
self.user.save()
self.authenticate_url = '/api/v1.0.0/user/authenticate/'
self.language_url = '/api/v1.0.0/comp/lang/'
def send_request(self, url, data, key=None, secret=None, req_method="POST"):
if not key:
key = self.api_key.key
if not secret:
secret = self.api_key.secret
sign = get_sign(secret, **data)
data['key'] = key
data['sign'] = sign
if req_method == 'POST':
return self.client.post(url, data)
elif req_method == 'GET':
return self.client.get(url, data)
def test_api_key(self):
smart_u(self.api_key)
def test_valid_auth(self):
response = self.send_request(self.authenticate_url, {'username': self.user.username, 'password': 'rosebud'})
self.assertEqual(response.status_code, 200)
response_data = json.loads(smart_u(response.content))
self.assertEqual(response_data['errors'], {})
self.assertTrue(response_data['success'])
self.assertIsNotNone(response_data['data'])
def test_invalid_call(self):
response = self.send_request('/api/v1.0.0/math/subtract/', {'username': self.user.username, 'password': 'rosebud'})
self.assertEqual(response.status_code, 404)
def test_unsigned_auth(self):
data = {'username': self.user.username, 'password': 'rosebud'}
response = self.client.post(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_invalid_sign(self):
data = {'username': self.user.username, 'password': 'rosebud'}
sign = get_sign(self.api_key.secret, **data)
data['key'] = self.api_key.key
data['sign'] = sign + "bug"
response = self.client.post(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_invalid_password(self):
data = {'username': self.user.username, 'password': '1337hax/x'}
response = self.send_request(self.authenticate_url, data)
self.assertEqual(response.status_code, 400)
response_data = json.loads(smart_u(response.content))
self.assertGreater(len(response_data['errors']), 0)
self.assertFalse(response_data['success'])
self.assertFalse(response_data['data'])
def test_invalid_parameters(self):
data = {'email': self.user.email, 'password': 'rosebud'}
response = self.send_request(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_revoked_api_key(self):
data = {'username': self.user.username, 'password': 'rosebud'}
response = self.send_request(self.authenticate_url, data, self.api_key_revoked.key, self.api_key_revoked.secret)
self.assertEqual(response.status_code, 401)
def test_get_call(self):
data = {'username': self.user.username, 'password': '1337haxx'}
response = self.send_request(self.authenticate_url, data, req_method='GET')
self.assertEqual(response.status_code, 200)
def test_multiple_values(self):
data = {'languages': ['python', 'java']}
response = self.send_request(self.language_url, data, req_method='GET')
self.assertEqual(response.status_code, 200)
class HMACTest(TransactionTestCase):
def setUp(self):
self.api_key = APIKey.objects.create(email="test@example.com")
def test_parameter_sign(self):
# test unicode
url_params = u'first_name=mårten&last_name=superkebab'
dict_params = {'first_name': u'mårten', 'last_name': u'superkebab'}
self.assert_equal_signs(url_params, dict_params)
# test string
url_params = 'first_name=mårten&last_name=superkebab'
dict_params = {'first_name': 'mårten', 'last_name': 'superkebab'}
self.assert_equal_signs(url_params, dict_params)
# test integer
url_params = u'dividend=4&divisor=2'
dict_params = {'dividend': 4, 'divisor': 2}
self.assert_equal_signs(url_params, dict_params)
# test boolean
url_params = u'secure=True'
dict_params = {'secure': True}
self.assert_equal_signs(url_params, dict_params)
def assert_equal_signs(self, url_params, dict_params):
sign1 = get_sign(self.api_key.secret, querystring=url_params)
sign2 = get_sign(self.api_key.secret, **dict_params)
self.assertEqual(sign1, sign2)
class UnsignedRequestTest(TransactionTestCase):
def setUp(self):
self.client = Client()
self.divide_url = '/api/v1.0.0/math/divide/'
def test_ok_call(self):
data = {'dividend': 7, 'divisor': 2}
response = self.client.post(self.divide_url, data)
self.assertEqual(response.status_code, 200)
response_data = json.loads(smart_u(response.content))
self.assertEqual(response_data['data'], 3.5)
def test_invalid_call(self):
data = {'dividend': "a", 'divisor': 2}
response = self.client.post(self.divide_url, data)
self.assertEqual(response.status_code, 400)
response_data = json.loads(smart_u(response.content))
dividend_error = response_data['errors']['dividend']
self.assertEqual(dividend_error[0], smart_u(IntegerField().error_messages['invalid']))
self.assertGreater(len(response_data['errors']), 0)
self.assertFalse(response_data['success'])
self.assertFalse(response_data['data'])
def test_error_call(self):
data = {'dividend': "42", 'divisor': 0}
response = self.client.post(self.divide_url, data)
response_data = json.loads(smart_u(response.content))
self.assertFalse(response_data['success'])
class JSONEncoderTest(TransactionTestCase):
def setUp(self):
self.dumps = curry(json.dumps, cls=DjangoJSONEncoder)
def test_datetime_encode(self):
naive_micro_datetime = {'datetime': datetime.now(), 'int': 1}
self.dumps(naive_micro_datetime)
naive_second_datetime = {'datetime': datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
self.dumps(naive_second_datetime)
tz_utc_datetime = {'datetime': datetime.now().replace(tzinfo=pytz.UTC)}
self.dumps(tz_utc_datetime)
datetime_date = {'datetime': date.today()}
self.dumps(datetime_date)
naive_datetime_time = {'datetime': time()}
self.dumps(naive_datetime_time)
naive_datetime_micro_time = {'datetime': time(microsecond=100)}
self.dumps(naive_datetime_micro_time)
def test_decimal_encode(self):
decimal_data = {'decimal': Decimal("1.504")}
self.dumps(decimal_data)
def test_queryset(self):
user_manager = get_user_model().objects
user_manager.create(username="test", email="test@example.com")
queryset = {'queryset': user_manager.all()}
self.dumps(queryset)
self.dumps(user_manager.all())
def test_values_list(self):
if django.VERSION < (1, 9):
user_manager = get_user_model().objects
user_manager.create(username="test", email="test@example.com")
values = user_manager.values('id', 'email')
self.dumps(values)
values_list = user_manager.values_list('id', flat=True)
self.dumps(values_list)
def test_gettext(self):
gettext_data = {'gettext': ugettext_lazy(u'tränslate me please')}
self.dumps(gettext_data)
| 5monkeys/django-formapi | formapi/tests.py | Python | mit | 8,355 | 0.001078 |
'''
20140213
Import CSV Data - Dict
Save as JSON?
Basic Stats
Save to file
Find Key Words
Generate Reports...
Generate Plots
'''
import csv
import numpy as np
import matplotlib as mpl
from scipy.stats import nanmean
filename = '20140211_ING.csv'
###____________ Helper ___________###
def number_fields(data):
'''gets numeric fields from loaded csv data'''
names = data.dtype.names
dtypes = data.dtype
NUM_FIELDS = []
for i in range(len(names)):
if ('float' in str(dtypes[i])) or ('int' in str(dtypes[i])):
NUM_FIELDS.append(str(names[i]))
return NUM_FIELDS
def string_fields(data):
'''gets text fields from loaded csv data'''
names = data.dtype.names
dtypes = data.dtype
STRING_FIELDS = []
for i in range(len(names)):
if 'S' in str(dtypes[i]):
STRING_FIELDS.append(str(names[i]))
return STRING_FIELDS
def count_vals(array):
vals = len(array)
for i in array:
if np.isnan(i):
vals = vals - 1
return vals
def number_summary(data, num_fields):
    '''Take the loaded data and numeric fields and build a dict of basic stats (mean, count, max, min) per field.'''
sum_dict = {}
for i in num_fields:
sum_dict[i] = {}
sum_dict[i]['Mean'] = nanmean(data[i])
sum_dict[i]['#Values'] = count_vals(data[i])
sum_dict[i]['Max'] = np.nanmax(data[i])
sum_dict[i]['Min'] = np.nanmin(data[i])
return sum_dict
###________ reports _________###
def basic_report(filename):
    '''Prints a summary report (shape, column names and dtypes) for the CSV file.'''
data = np.recfromcsv(filename)
NUM_COL = len(data.dtype.names)
NUM_ROW = len(data)
NAMES = data.dtype.names
DTYPES = data.dtype
print('--------------------')
print('---- CSV REPORT ----')
print('--------------------')
print('')
print('Filename: \t %s' % filename)
print('')
print('# records: \t %s' % NUM_ROW)
print('# columns: \t %s' % NUM_COL)
print('')
print('--------------------')
print('- name - data type ')
for i in range(len(NAMES)):
print('-- %s \t %s --' % (NAMES[i], DTYPES[i]))
print('--------------------')
def numeric_report(filename):
data = np.recfromcsv(filename)
fields = number_fields(data)
d = number_summary(data, fields)
print('------------------------')
print('---- NUMERIC REPORT ----')
print('------------------------')
print('')
print('Filename: \t %s' % filename)
print('')
print('--------------------')
for i in fields:
print('FIELD: \t\t %s' % i)
print('#Values: \t %s' % d[i]['#Values'])
print('Max: \t\t %s' % d[i]['Max'])
print('Min: \t\t %s' % d[i]['Min'])
print('Mean: \t\t %s' % round(d[i]['Mean'], 2))
print('--------------------')
print('')
###________ main _________###
def main(filename):
basic_report(filename)
print("")
numeric_report(filename)
main(filename)
| Jim-Rod/csv_summary | csv_summary.py | Python | mit | 2,979 | 0.006378 |
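The script above relies on np.recfromcsv to infer column dtypes, which is what number_fields/string_fields later inspect. The sketch below only prepares a tiny input file and shows that inference; the file name demo.csv is made up, and recfromcsv needs an older NumPy (it was removed in NumPy 2.0), the same vintage the script itself targets.

import numpy as np

with open('demo.csv', 'w') as f:
    f.write("name,score,count\n")
    f.write("alpha,1.5,3\n")
    f.write("beta,2.5,5\n")

data = np.recfromcsv('demo.csv')
print(data.dtype.names)      # ('name', 'score', 'count')
print(data['score'].mean())  # 2.0 -> 'score' inferred as float, 'name' as string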
# -*- coding: utf-8 -*-
# Created By: Virgil Dupras
# Created On: 2009-09-19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from PyQt4.QtCore import SIGNAL, Qt
from PyQt4.QtGui import QWidget, QHeaderView
from fs_model import FSModel, IGNORE_BOX_NAME
from ui.ignore_box_ui import Ui_IgnoreBox
class IgnoreBoxModel(FSModel):
def __init__(self, app):
FSModel.__init__(self, app, app.board.ignore_box, IGNORE_BOX_NAME)
self.connect(self.app, SIGNAL('ignoreBoxChanged()'), self.ignoreBoxChanged)
#--- Events
def ignoreBoxChanged(self):
self.reset()
class IgnoreBox(QWidget, Ui_IgnoreBox):
def __init__(self, app):
QWidget.__init__(self, None)
self.app = app
self.boxModel = IgnoreBoxModel(app)
self._setupUi()
self.connect(self.browserView.selectionModel(), SIGNAL('selectionChanged(QItemSelection,QItemSelection)'), self.browserSelectionChanged)
def _setupUi(self):
self.setupUi(self)
self.setWindowFlags(Qt.Tool)
self.browserView.setModel(self.boxModel)
h = self.browserView.header()
h.setResizeMode(QHeaderView.Fixed)
h.resizeSection(1, 120)
h.setResizeMode(0, QHeaderView.Stretch)
#--- Events
def browserSelectionChanged(self, selected, deselected):
selectedIndexes = self.browserView.selectionModel().selectedRows()
nodes = [index.internalPointer() for index in selectedIndexes]
items = [node.ref for node in nodes]
self.app.selectBoardItems(items)
| hsoft/musicguru | qt/ignore_box.py | Python | bsd-3-clause | 1,816 | 0.008811 |
"""The WaveBlocks Project
Compute the transformation to the eigen basis for wavefunction.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocksND import BlockFactory
from WaveBlocksND import WaveFunction
from WaveBlocksND import BasisTransformationWF
def transform_wavefunction_to_eigen(iomin, iomout, blockidin=0, blockidout=0):
"""Compute the transformation to the eigenbasis for a wavefunction.
Save the result back to a file.
    :param iomin: An :py:class:`IOManager` instance providing the simulation data.
    :param iomout: An :py:class:`IOManager` instance for saving the transformed data.
:param blockidin: The data block from which the values are read. Default is `0`.
:param blockidout: The data block to which the values are written. Default is `0`.
"""
parameters = iomin.load_parameters()
# Number of time steps we saved
timesteps = iomin.load_wavefunction_timegrid(blockid=blockidin)
nrtimesteps = timesteps.shape[0]
iomout.add_wavefunction(parameters, timeslots=nrtimesteps, blockid=blockidout)
# The grid on the domain
grid = BlockFactory().create_grid(parameters)
# The potential used
Potential = BlockFactory().create_potential(parameters)
# Basis transformator
BT = BasisTransformationWF(Potential)
BT.set_grid(grid)
# And two empty wavefunctions
WF = WaveFunction(parameters)
WF.set_grid(grid)
# Iterate over all timesteps
for i, step in enumerate(timesteps):
print(" Compute eigentransform at timestep %d" % step)
# Retrieve simulation data
values = iomin.load_wavefunction(timestep=step, blockid=blockidin)
values = [values[j, ...] for j in range(parameters["ncomponents"])]
WF.set_values(values)
# Project wavefunction values to eigenbasis
BT.transform_to_eigen(WF)
# Save the transformed values
iomout.save_wavefunction(WF.get_values(), timestep=step, blockid=blockidout)
| WaveBlocks/WaveBlocksND | WaveBlocksND/Interface/EigentransformWavefunction.py | Python | bsd-3-clause | 2,041 | 0.00294 |
try:
import ossaudiodev
except:
print "ossaudiodev not installed"
ossaudiodev = None
try:
import FFT
except:
print "FFT not installed"
ossaudiodev = None
try:
import Numeric
except:
print "Numeric not installed"
ossaudiodev = None
import struct, math, time, threading, copy
def add(s1, s2):
return minmax([(v1 + v2) for (v1, v2) in zip(s1, s2)])
def minmax(vector):
return [min(max(v,0),255) for v in vector]
def scale(sample, value):
return minmax([((s - 128) * value) + 128 for s in sample])
def sine(freqs, seconds, volume = 1.0, sample_rate = 8000.0):
sample = [128] * int(sample_rate * seconds)
if type(freqs) == type(0):
freqs = [freqs]
for freq in freqs:
for n in range(len(sample)):
sample[n] += int(127 * math.sin(n * 2 * math.pi * freq/sample_rate) * volume)
return minmax(sample)
class SoundThread(threading.Thread):
def __init__(self, parent, name = "sound thread"):
threading.Thread.__init__(self, name = name)
self.parent = parent
self.event = threading.Event()
self.start()
def run(self):
while not self.event.isSet():
self.parent.lock.acquire()
buffer = copy.copy(self.parent.buffer)
self.parent.buffer = None
self.parent.lock.release()
if buffer != None:
self.parent.dev.write("".join(map(chr,buffer)))
self.parent.dev.flush()
self.event.wait(.001)
def join(self, timeout=None):
self.event.set()
threading.Thread.join(self, timeout)
class SoundDevice:
def __init__(self, device, async = 0, cache = 1):
self.device = device
self.async = async
self.cache = cache
self.cacheDict = {}
self.status = "closed"
self.number_of_channels= 1
self.sample_rate= 8000
self.sample_width= 1
self.minFreq = 20
self.maxFreq = 3500
self.debug = 0
self.buffer = None
if ossaudiodev != None:
self.format = ossaudiodev.AFMT_U8
if self.debug:
self.setFile("770.txt")
if self.async:
self.lock = threading.Lock()
self.thread = SoundThread(self)
def initialize(self, mode):
if ossaudiodev == None: return
self.dev = ossaudiodev.open("/dev/dsp", mode)
self.dev.setparameters(self.format,
self.number_of_channels,
self.sample_rate)
self.status = mode
def play(self, sample):
"""
"""
if ossaudiodev == None: return
if self.status != "w":
self.initialize("w")
if self.async:
self.lock.acquire()
self.buffer = sample
self.lock.release()
else:
self.dev.write("".join(map(chr,sample)))
self.dev.flush()
def playTone(self, freqs, seconds, volume = 1.0):
"""
        freq example: playTone([550,400], .1, volume=.5) # 550 Hz + 400 Hz two-tone chord for .1 seconds, half volume
"""
if ossaudiodev == None: return
if type(freqs) == type(0):
freqs = [freqs]
if self.status != "w":
self.initialize("w")
sample = [128] * int(self.sample_rate * seconds)
for freq in freqs:
if self.cache and (freq,seconds) in self.cacheDict:
sample = self.cacheDict[(freq,seconds)]
else:
for n in range(len(sample)):
sample[n] = min(max(sample[n] + int(127 * math.sin(n * 2 * math.pi * freq/self.sample_rate) * volume), 0),255)
self.cacheDict[(freq,seconds)] = sample
if self.async:
self.lock.acquire()
self.buffer = sample
self.lock.release()
else:
self.dev.write("".join(map(chr,sample)))
self.dev.flush()
def read(self, seconds):
if ossaudiodev == None: return
if self.status != "r":
self.initialize("r")
buffer = self.dev.read(int(self.sample_rate * seconds))
size = len(buffer)
return struct.unpack(str(size) + "B", buffer)
def setFile(self, filename):
if ossaudiodev == None: return
self.filename = filename
self.fp = open(self.filename, "r")
def readFile(self, seconds):
if ossaudiodev == None: return
data = None
try:
data = eval(self.fp.readline())
except:
self.fp = open(self.filename, "r")
try:
data = eval(self.fp.readline())
except:
print "Failed reading file '%s'" % self.filename
time.sleep(seconds)
return data[:int(seconds * self.sample_rate)]
def getFreq(self, seconds):
# change to read from the buffer, rather than block
if ossaudiodev == None: return
if self.debug:
data = self.readFile(1)
else:
data = self.read(seconds)
transform = FFT.real_fft(data).real
minFreqPos = self.minFreq
maxFreqPos = self.maxFreq
freq = Numeric.argmax(transform[1+minFreqPos:maxFreqPos])
value = transform[1+minFreqPos:maxFreqPos][freq]
domFreq = (freq + self.minFreq) / seconds
if self.debug and abs(value) > 8000 and self.minFreq < domFreq < self.maxFreq:
print "Frequence:", domFreq, "Value:", value, "Volume:", transform[0]
return (domFreq, value, transform[0])
def close(self):
if ossaudiodev == None: return
if self.status != "closed":
self.dev.close()
self.status = "closed"
if __name__ == "__main__":
sd = SoundDevice("/dev/dsp", async = 1)
sd.playTone(500, 1)
## DTMF Tones
## 1209 Hz 1336 Hz 1477 Hz 1633 Hz
## ABC DEF
## 697 Hz 1 2 3 A
## GHI JKL MNO
## 770 Hz 4 5 6 B
## PRS TUV WXY
## 852 Hz 7 8 9 C
## oper
## 941 Hz * 0 # D
| emilydolson/forestcat | pyrobot/tools/sound.py | Python | agpl-3.0 | 6,306 | 0.014589 |
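Following the DTMF table in the comments above, a digit is the sum of one row tone and one column tone, and playTone already accepts a list of frequencies. A minimal sketch for dialing the digit 5 (770 Hz row, 1336 Hz column), assuming SoundDevice from the module above is in scope and the same /dev/dsp OSS environment it targets is available:

sd = SoundDevice("/dev/dsp")
sd.playTone([770, 1336], 0.25)   # 250 ms dual tone = DTMF digit "5"
sd.close()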
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# (c) 2014, James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import tempfile
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible import errors
from ansible.parsing.vault import VaultLib
from ansible.parsing.vault import VaultEditor
from ansible.module_utils._text import to_bytes, to_text
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
v10_data = """$ANSIBLE_VAULT;1.0;AES
53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
83c62ffb04c2512995e815de4b4d29ed"""
v11_data = """$ANSIBLE_VAULT;1.1;AES256
62303130653266653331306264616235333735323636616539316433666463323964623162386137
3961616263373033353631316333623566303532663065310a393036623466376263393961326530
64336561613965383835646464623865663966323464653236343638373165343863623638316664
3631633031323837340a396530313963373030343933616133393566366137363761373930663833
3739"""
class TestVaultEditor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_methods_exist(self):
v = VaultEditor(None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
@patch.object(VaultEditor, '_editor_shell_command')
def test_create_file(self, mock_editor_shell_command):
def sc_side_effect(filename):
return ['touch', filename]
mock_editor_shell_command.side_effect = sc_side_effect
tmp_file = tempfile.NamedTemporaryFile()
os.unlink(tmp_file.name)
ve = VaultEditor("ansible")
ve.create_file(tmp_file.name)
self.assertTrue(os.path.exists(tmp_file.name))
def test_decrypt_1_0(self):
# Skip testing decrypting 1.0 files if we don't have access to AES, KDF or Counter.
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v10_file.name)
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = to_text(f.read())
f.close()
os.unlink(v10_file.name)
assert error_hit is False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v11_file = tempfile.NamedTemporaryFile(delete=False)
with v11_file as f:
f.write(to_bytes(v11_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v11_file.name)
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v11_file.name, "rb")
fdata = to_text(f.read())
f.close()
os.unlink(v11_file.name)
        assert error_hit is False, "error decrypting 1.1 file"
        assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
# Skip testing rekeying files if we don't have access to AES, KDF or Counter.
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file(v10_file.name, 'ansible2')
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = f.read()
f.close()
assert error_hit is False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError:
error_hit = True
os.unlink(v10_file.name)
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit is False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == b"foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
| kaarolch/ansible | test/units/parsing/vault/test_vault_editor.py | Python | gpl-3.0 | 6,376 | 0.000941 |
def main():
a=raw_input()
print a.lstrip()
print "Hello world"
main()
| kumarisneha/practice_repo | techgig_rstrip.py | Python | mit | 83 | 0.024096 |
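The file name says rstrip but the script calls lstrip; the difference matters for the exercise. A quick standalone illustration (runs under both Python 2 and 3):

s = "  hello  "
print(repr(s.lstrip()))  # 'hello  '  - leading whitespace removed
print(repr(s.rstrip()))  # '  hello'  - trailing whitespace removed
print(repr(s.strip()))   # 'hello'    - both sides removed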
from __future__ import absolute_import
from jinja2 import Markup
from rstblog.programs import RSTProgram
import typogrify
class TypogrifyRSTProgram(RSTProgram):
def get_fragments(self):
if self._fragment_cache is not None:
return self._fragment_cache
with self.context.open_source_file() as f:
self.get_header(f)
rv = self.context.render_rst(f.read().decode('utf-8'))
rv['fragment'] = Markup(typogrify.typogrify(rv['fragment']))
self._fragment_cache = rv
return rv
def setup(builder):
builder.programs['rst'] = TypogrifyRSTProgram
| ericam/sidesaddle | modules/typogrify.py | Python | mit | 620 | 0.001613 |
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory
import numpy as np
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
codename = 'lammps_md@boston'
############################
# Define input parameters #
############################
a = 5.404
cell = [[a, 0, 0],
[0, a, 0],
[0, 0, a]]
symbols=['Si'] * 8
scaled_positions = [(0.875, 0.875, 0.875),
(0.875, 0.375, 0.375),
(0.375, 0.875, 0.375),
(0.375, 0.375, 0.875),
(0.125, 0.125, 0.125),
(0.125, 0.625, 0.625),
(0.625, 0.125, 0.625),
(0.625, 0.625, 0.125)]
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
structure.store()
# Silicon(C) Tersoff
tersoff_si = {'Si Si Si ': '3.0 1.0 1.7322 1.0039e5 16.218 -0.59826 0.78734 1.0999e-6 1.7322 471.18 2.85 0.15 2.4799 1830.8'}
potential ={'pair_style': 'tersoff',
'data': tersoff_si}
lammps_machine = {
'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16}
parameters_md = {'timestep': 0.001,
'temperature' : 300,
'thermostat_variable': 0.5,
'equilibrium_steps': 100,
'total_steps': 2000,
'dump_rate': 1}
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=lammps_machine)
calc.label = "test lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_potential(ParameterData(dict=potential))
calc.use_parameters(ParameterData(dict=parameters_md))
test_only = False
if test_only: # It will not be submitted
import os
subfolder, script_filename = calc.submit_test()
print "Test_submit for calculation (uuid='{}')".format(calc.uuid)
print "Submit file in {}".format(os.path.join(
os.path.relpath(subfolder.abspath),
script_filename))
else:
calc.store_all()
print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc.uuid, calc.dbnode.pk)
calc.submit()
print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc.uuid, calc.dbnode.pk)
| abelcarreras/aiida_extensions | plugins/launcher/launch_lammps_md_si.py | Python | mit | 2,634 | 0.002278 |
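The launcher above places atoms by converting fractional (scaled) coordinates to cartesian ones with np.dot(scaled_position, cell). A worked one-liner of that conversion for the cubic a = 5.404 cell, independent of AiiDA:

import numpy as np

a = 5.404
cell = [[a, 0, 0], [0, a, 0], [0, 0, a]]
# (0.25, 0.25, 0.25) in fractional coordinates -> (1.351, 1.351, 1.351) Angstrom
print(np.dot((0.25, 0.25, 0.25), cell))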
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'riak_repl', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
def parse_pyproject_array(name):
import os
import re
from ast import literal_eval
pattern = r'^{} = (\[.*?\])$'.format(name)
with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
# Windows \r\n prevents match
contents = '\n'.join(line.rstrip() for line in f.readlines())
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
return literal_eval(array)
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
name='datadog-riak_repl',
version=ABOUT['__version__'],
description='The Riak_repl check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent riak_repl check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='Britt Treece',
author_email='britt.treece@gmail.com',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks', 'datadog_checks.riak_repl'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': parse_pyproject_array('deps')},
# Extra files to ship with the wheel package
include_package_data=True,
)
| DataDog/integrations-extras | riak_repl/setup.py | Python | bsd-3-clause | 2,385 | 0.000839 |
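parse_pyproject_array above pulls a TOML array out of pyproject.toml with a regex and ast.literal_eval rather than a TOML parser. The snippet below reproduces just that extraction on a made-up pyproject fragment (the dependency string is illustrative, not the check's real pin):

import re
from ast import literal_eval

contents = 'dependencies = [\n    "datadog-checks-base>=11.0.0",\n]'
pattern = r'^{} = (\[.*?\])$'.format('dependencies')
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
print(literal_eval(array))   # ['datadog-checks-base>=11.0.0']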
import os,sys,re
# EXTRACTING ALL FILENAMES AND THEIR CLIENTS
# ---------------------------------------------------
# read in the log
# ---------------------------------------------------
f=open(sys.argv[1],'rb')
data=f.readlines()
f.close()
n=0
t=len(data)
clients = []
filename = None
for l in data :
n = n + 1
parts = l.split()
# new file to ingest
if parts[6] == 'Read' :
# all products will have its first client as "allproducts"
if filename != None :
if len(clients) == 0 :
clients.append('allproducts')
else :
clients.sort()
clients.insert(0,'allproducts')
print("%s %s" % (filename,','.join(clients)) )
filepath = parts[-1]
filename = filepath.split('/')[-1]
fparts = filename.split(':')
# :20070409000009 trailing get rid of it
if fparts[-1][:2] == '20' and len(fparts[-1]) == 14 :
fparts = fparts[:-1]
# '::' trailing get rid of it
if fparts[-1] == '' :
fparts = fparts[:-1]
filename = ':'.join(fparts)
clients = []
if parts[6] == 'Written' :
filepath = parts[-1]
client = 'conversion_' +filepath.split('/')[1]
if client == 'conversion_ppmtogif' : client = 'cvt_togif'
if client == 'conversion_rawtodfx' : continue
clients.append(client)
if parts[6] == 'create_link:' :
filepath = parts[-1]
client = filepath.split('/')[4]
clients.append(client)
if len(clients) == 0 :
clients.append('allproducts')
else :
clients.sort()
clients.insert(0,'allproducts')
print("%s %s" % (filename,','.join(clients)) )
| khosrow/metpx | sundew/doc/pds_conversion/routing_step1.py | Python | gpl-2.0 | 1,705 | 0.039883 |
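A worked example of the filename clean-up performed in the loop above: a trailing ":YYYYMMDDhhmmss" field and an empty field left by "::" are both dropped before the product name is printed. The sample name below is invented; only the trailing-token handling mirrors the script.

filename = 'SACN31:CWAO:ALLPRODUCTS::20070409000009'   # made-up example name
fparts = filename.split(':')
if fparts[-1][:2] == '20' and len(fparts[-1]) == 14:    # drop trailing timestamp
    fparts = fparts[:-1]
if fparts[-1] == '':                                    # drop empty field from '::'
    fparts = fparts[:-1]
print(':'.join(fparts))   # SACN31:CWAO:ALLPRODUCTS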
# -*- coding: utf-8 -*-
import webapp2
from boilerplate import models
from boilerplate import forms
from boilerplate.handlers import BaseHandler
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from google.appengine.api import users as googleusers
from collections import OrderedDict, Counter
from wtforms import fields
class Logout(BaseHandler):
def get(self):
self.redirect(googleusers.create_logout_url(dest_url=self.uri_for('home')))
class Geochart(BaseHandler):
def get(self):
users = models.User.query().fetch(projection=['country'])
users_by_country = Counter()
for user in users:
if user.country:
users_by_country[user.country] += 1
params = {
"data": users_by_country.items()
}
return self.render_template('admin/geochart.html', **params)
class EditProfileForm(forms.EditProfileForm):
activated = fields.BooleanField('Activated')
class List(BaseHandler):
def get(self):
p = self.request.get('p')
q = self.request.get('q')
c = self.request.get('c')
forward = True if p not in ['prev'] else False
cursor = Cursor(urlsafe=c)
if q:
qry = models.User.query(ndb.OR(models.User.last_name == q,
models.User.email == q,
models.User.username == q))
else:
qry = models.User.query()
PAGE_SIZE = 5
if forward:
users, next_cursor, more = qry.order(models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
if next_cursor and more:
self.view.next_cursor = next_cursor
if c:
self.view.prev_cursor = cursor.reversed()
else:
users, next_cursor, more = qry.order(-models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
users = list(reversed(users))
if next_cursor and more:
self.view.prev_cursor = next_cursor
self.view.next_cursor = cursor.reversed()
def pager_url(p, cursor):
params = OrderedDict()
if q:
params['q'] = q
if p in ['prev']:
params['p'] = p
if cursor:
params['c'] = cursor.urlsafe()
return self.uri_for('user-list', **params)
self.view.pager_url = pager_url
self.view.q = q
params = {
"list_columns": [('username', 'Username'),
('last_name', 'Last Name'),
('email', 'E-Mail'),
('country', 'Country')],
"users" : users,
"count" : qry.count()
}
# FIXME: admin_user should probably go into BaseHandler
params['admin_user'] = googleusers.is_current_user_admin()
return self.render_template('admin/users.html', **params)
class Edit(BaseHandler):
def get_or_404(self, user_id):
try:
user = models.User.get_by_id(long(user_id))
if user:
return user
except ValueError:
pass
self.abort(404)
def edit(self, user_id):
if self.request.POST:
user = self.get_or_404(user_id)
if self.form.validate():
self.form.populate_obj(user)
user.put()
self.add_message("Changes saved!", 'success')
return self.redirect_to("user-edit", user_id=user_id)
else:
self.add_message("Could not save changes!", 'error')
else:
user = self.get_or_404(user_id)
self.form.process(obj=user)
params = {
'user' : user
}
return self.render_template('admin/edituser.html', **params)
@webapp2.cached_property
def form(self):
return EditProfileForm(self)
| nortd/bomfu | admin/users.py | Python | lgpl-3.0 | 4,033 | 0.002727 |
import asyncio
from unittest import mock
from aiorpcx import RPCError
from server.env import Env
from server.controller import Controller
loop = asyncio.get_event_loop()
def set_env():
env = mock.create_autospec(Env)
env.coin = mock.Mock()
env.loop_policy = None
env.max_sessions = 0
env.max_subs = 0
env.max_send = 0
env.bandwidth_limit = 0
env.identities = ''
env.tor_proxy_host = env.tor_proxy_port = None
env.peer_discovery = env.PD_SELF = False
env.daemon_url = 'http://localhost:8000/'
return env
async def coro(res):
return res
def raise_exception(msg):
raise RPCError(1, msg)
def ensure_text_exception(test, exception):
res = err = None
try:
res = loop.run_until_complete(test)
except Exception as e:
err = e
assert isinstance(err, exception), (res, err)
def test_transaction_get():
async def test_verbose_ignore_by_backend():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro('11'*32)
res = await sut.transaction_get('ff'*32, True)
assert res == '11'*32
async def test_verbose_ok():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = {
"hex": "00"*32,
"blockhash": "ff"*32
}
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
response = {
"hex": "00"*32,
"blockhash": None
}
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
async def test_no_verbose():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = 'cafebabe'*64
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32)
assert res == response
async def test_verbose_failure():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro(
raise_exception('some unhandled error'))
await sut.transaction_get('ff' * 32, True)
async def test_wrong_txhash():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
await sut.transaction_get('cafe')
sut.daemon_request.assert_not_called()
loop.run_until_complete(asyncio.gather(
*[
test_verbose_ignore_by_backend(),
test_verbose_ok(),
test_no_verbose()
]
))
for error_test in [test_verbose_failure, test_wrong_txhash]:
ensure_text_exception(error_test(), RPCError)
| erasmospunk/electrumx | tests/server/test_api.py | Python | mit | 2,877 | 0 |
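The tests above stub the async daemon_request with a plain Mock whose return value is a coroutine, so the awaiting code never notices the difference. The same pattern in miniature, standalone and independent of the electrumx classes:

import asyncio
from unittest import mock

async def coro(res):
    return res

stub = mock.Mock()
stub.return_value = coro(42)   # awaiting stub(...) now yields 42

async def caller():
    return await stub('ignored argument')

print(asyncio.run(caller()))   # 42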
"""
================================================================================
Logscaled Histogram
================================================================================
| Calculates a logarithmically spaced histogram for a data map.
| Written By: Matthew Stadelman
| Date Written: 2016/03/07
| Last Modifed: 2016/10/20
"""
import scipy as sp
from .histogram import Histogram
class HistogramLogscale(Histogram):
r"""
Performs a histogram where the bin limits are logarithmically spaced
based on the supplied scale factor. If there are negative values then
the first bin contains everything below 0, the next bin will contain
everything between 0 and 1.
kwargs include:
scale_fact - numeric value to generate axis scale for bins. A
scale fact of 10 creates bins: 0-1, 1-10, 10-100, etc.
"""
def __init__(self, field, **kwargs):
super().__init__(field)
self.args.update(kwargs)
self.output_key = 'hist_logscale'
self.action = 'histogram_logscale'
@classmethod
def _add_subparser(cls, subparsers, parent):
r"""
Adds a specific action based sub-parser to the supplied arg_parser
instance.
"""
parser = subparsers.add_parser(cls.__name__,
aliases=['histlog'],
parents=[parent],
help=cls.__doc__)
#
parser.add_argument('scale_fact', type=float, nargs='?', default=10.0,
help='base to generate logscale from')
parser.set_defaults(func=cls)
def define_bins(self, **kwargs):
r"""
This defines the bins for a logscaled histogram
"""
self.data_vector.sort()
sf = self.args['scale_fact']
num_bins = int(sp.logn(sf, self.data_vector[-1]) + 1)
#
# generating initial bins from 1 - sf**num_bins
low = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[:-1]
high = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[1:]
#
# Adding "catch all" bins for anything between 0 - 1 and less than 0
if self.data_vector[0] < 1.0:
low.insert(0, 0.0)
high.insert(0, 1.0)
if self.data_vector[0] < 0.0:
low.insert(0, self.data_vector[0])
high.insert(0, 0.0)
#
self.bins = [bin_ for bin_ in zip(low, high)]
| stadelmanma/netl-ap-map-flow | apmapflow/data_processing/histogram_logscale.py | Python | gpl-3.0 | 2,486 | 0 |
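A worked example of the bin edges define_bins above produces for scale_fact = 10 when the data run from -3 to 4200. The sketch uses math.log and np.logspace directly instead of the scipy helpers in the class, so it is standalone.

import math
import numpy as np

scale_fact, vmax = 10.0, 4200.0
num_bins = int(math.log(vmax, scale_fact) + 1)                        # 4
low = list(np.logspace(0, num_bins, num_bins + 1, base=scale_fact))[:-1]
high = list(np.logspace(0, num_bins, num_bins + 1, base=scale_fact))[1:]
low = [-3.0, 0.0] + low     # catch-all bins for negatives and for 0..1,
high = [0.0, 1.0] + high    # exactly as the class prepends them
print(list(zip(low, high)))
# -> (-3, 0), (0, 1), (1, 10), (10, 100), (100, 1000), (1000, 10000)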
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import sys
import glob
# add python module logger to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'logger'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'device_files'))
from logger import Logger
from dfg.device import Device
from dfg.merger import DeviceMerger
from dfg.avr.avr_reader import AVRDeviceReader
from dfg.avr.avr_writer import AVRDeviceWriter
if __name__ == "__main__":
"""
Some test code
"""
level = 'info'
logger = Logger(level)
devices = []
for arg in sys.argv[1:]:
if arg in ['error', 'warn', 'info', 'debug', 'disabled']:
level = arg
logger.setLogLevel(level)
continue
xml_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'AVR_devices', (arg + '*'))
files = glob.glob(xml_path)
for file in files:
# deal with this here, rather than rewrite half the name merging
if os.path.basename(file) != "ATtiny28.xml":
part = AVRDeviceReader(file, logger)
device = Device(part, logger)
devices.append(device)
merger = DeviceMerger(devices, logger)
merger.mergedByPlatform('avr')
folder = os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'xpcc', 'architecture', 'platform', 'devices', 'avr')
for dev in merger.mergedDevices:
writer = AVRDeviceWriter(dev, logger)
writer.write(folder)
| dergraaf/xpcc | tools/device_file_generator/avr_generator.py | Python | bsd-3-clause | 1,677 | 0.019678 |
from rest_framework import relations, serializers
import amo
import mkt.carriers
import mkt.regions
from addons.models import Category
from mkt.api.fields import SplitField, TranslationSerializerField
from mkt.api.serializers import URLSerializerMixin
from mkt.collections.serializers import (CollectionSerializer, SlugChoiceField,
SlugModelChoiceField)
from mkt.submit.serializers import PreviewSerializer
from mkt.webapps.api import AppSerializer
from .models import FeedApp, FeedItem
class FeedAppSerializer(URLSerializerMixin, serializers.ModelSerializer):
app = SplitField(relations.PrimaryKeyRelatedField(required=True),
AppSerializer())
description = TranslationSerializerField(required=False)
preview = SplitField(relations.PrimaryKeyRelatedField(required=False),
PreviewSerializer())
pullquote_attribution = TranslationSerializerField(required=False)
pullquote_rating = serializers.IntegerField(required=False)
pullquote_text = TranslationSerializerField(required=False)
class Meta:
fields = ('app', 'description', 'id', 'preview',
'pullquote_attribution', 'pullquote_rating', 'pullquote_text',
'url')
model = FeedApp
url_basename = 'feedapp'
class FeedItemSerializer(URLSerializerMixin, serializers.ModelSerializer):
carrier = SlugChoiceField(required=False,
choices_dict=mkt.carriers.CARRIER_MAP)
region = SlugChoiceField(required=False,
choices_dict=mkt.regions.REGION_LOOKUP)
category = SlugModelChoiceField(required=False,
queryset=Category.objects.filter(type=amo.ADDON_WEBAPP))
item_type = serializers.SerializerMethodField('get_item_type')
# Types of objects that are allowed to be a feed item.
collection = SplitField(relations.PrimaryKeyRelatedField(required=False),
CollectionSerializer())
class Meta:
fields = ('carrier', 'category', 'collection', 'id', 'item_type',
'region', 'url')
item_types = ('collection',)
model = FeedItem
url_basename = 'feeditem'
def validate(self, attrs):
"""
        Ensure that exactly one feed item object type is specified.
"""
item_changed = any(k for k in self.Meta.item_types if k in attrs.keys())
num_defined = sum(1 for item in self.Meta.item_types if attrs.get(item))
if item_changed and num_defined != 1:
message = ('A valid value for exactly one of the following '
'parameters must be defined: %s' % ','.join(
self.Meta.item_types))
raise serializers.ValidationError(message)
return attrs
def get_item_type(self, obj):
for item_type in self.Meta.item_types:
if getattr(obj, item_type):
return item_type
return
| wagnerand/zamboni | mkt/feed/serializers.py | Python | bsd-3-clause | 2,948 | 0.002035 |
#!/usr/bin/env python
import os
import glob
import unittest
import pysmile
import json
__author__ = 'Jonathan Hosmer'
class PySmileTestDecode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.json_dir, 'json-org-sample3.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample4(self):
s = os.path.join(self.smile_dir, 'json-org-sample4.smile')
j = os.path.join(self.json_dir, 'json-org-sample4.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample5(self):
s = os.path.join(self.smile_dir, 'json-org-sample5.smile')
j = os.path.join(self.json_dir, 'json-org-sample5.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_4k(self):
s = os.path.join(self.smile_dir, 'numbers-int-4k.smile')
j = os.path.join(self.json_dir, 'numbers-int-4k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_64k(self):
s = os.path.join(self.smile_dir, 'numbers-int-64k.smile')
j = os.path.join(self.json_dir, 'numbers-int-64k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test1(self):
s = os.path.join(self.smile_dir, 'test1.smile')
j = os.path.join(self.json_dir, 'test1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test2(self):
s = os.path.join(self.smile_dir, 'test2.smile')
j = os.path.join(self.json_dir, 'test2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
class PySmileTestEncode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.json_dir, 'json-org-sample3.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample4(self):
s = os.path.join(self.smile_dir, 'json-org-sample4.smile')
j = os.path.join(self.json_dir, 'json-org-sample4.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample5(self):
s = os.path.join(self.smile_dir, 'json-org-sample5.smile')
j = os.path.join(self.json_dir, 'json-org-sample5.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_numbers_int_4k(self):
s = os.path.join(self.smile_dir, 'numbers-int-4k.smile')
j = os.path.join(self.json_dir, 'numbers-int-4k.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_numbers_int_64k(self):
s = os.path.join(self.smile_dir, 'numbers-int-64k.smile')
j = os.path.join(self.json_dir, 'numbers-int-64k.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_test1(self):
s = os.path.join(self.smile_dir, 'test1.smile')
j = os.path.join(self.json_dir, 'test1.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_test2(self):
s = os.path.join(self.smile_dir, 'test2.smile')
j = os.path.join(self.json_dir, 'test2.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
class PySmileTestMisc(unittest.TestCase):
def test_1(self):
a = [1]
b = pysmile.decode(':)\n\x03\xf8\xc2\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_2(self):
a = [1, 2]
b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_3(self):
a = [1, 2, {'c': 3}]
b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xfa\x80c\xc6\xfb\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_4(self):
a = {'a': 1}
b = pysmile.decode(':)\n\x03\xfa\x80a\xc2\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_5(self):
a = {'a': '1', 'b': 2, 'c': [3], 'd': -1, 'e': 4.20}
b = pysmile.decode(
':)\n\x03\xfa\x80a@1\x80c\xf8\xc6\xf9\x80b\xc4\x80e(fL\x19\x04\x04\x80d\xc1\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_6(self):
a = {'a': {'b': {'c': {'d': ['e']}}}}
b = pysmile.decode(
':)\n\x03\xfa\x80a\xfa\x80b\xfa\x80c\xfa\x80d\xf8@e\xf9\xfb\xfb\xfb\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
| jhosmer/PySmile | tests/pysmile_tests.py | Python | apache-2.0 | 11,679 | 0.004196 |
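A round-trip sketch with the pysmile API exercised above, for the kind of simple int/list/dict payloads the test data uses; it assumes the same Python 2 environment the test module targets.

import pysmile

obj = {'a': 1, 'b': [2, 3]}
blob = pysmile.encode(obj)        # SMILE bytes, starting with the ':)\n' header
assert pysmile.decode(blob) == obj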
import nmrglue as ng
import matplotlib.pyplot as plt
# read in data
dic, data = ng.pipe.read("test.ft2")
# find PPM limits along each axis
uc_15n = ng.pipe.make_uc(dic, data, 0)
uc_13c = ng.pipe.make_uc(dic, data, 1)
x0, x1 = uc_13c.ppm_limits()
y0, y1 = uc_15n.ppm_limits()
# plot the spectrum
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
cl = [8.5e4 * 1.30 ** x for x in range(20)]
ax.contour(data, cl, colors='blue', extent=(x0, x1, y0, y1), linewidths=0.5)
# add 1D slices
x = uc_13c.ppm_scale()
s1 = data[uc_15n("105.52ppm"), :]
s2 = data[uc_15n("115.85ppm"), :]
s3 = data[uc_15n("130.07ppm"), :]
ax.plot(x, -s1 / 8e4 + 105.52, 'k-')
ax.plot(x, -s2 / 8e4 + 115.85, 'k-')
ax.plot(x, -s3 / 8e4 + 130.07, 'k-')
# label the axis and save
ax.set_xlabel("13C ppm", size=20)
ax.set_xlim(183.5, 167.5)
ax.set_ylabel("15N ppm", size=20)
ax.set_ylim(139.5, 95.5)
fig.savefig("spectrum_2d.png")
| atomman/nmrglue | examples/jbnmr_examples/s4_2d_plotting/plot_2d_pipe_spectrum.py | Python | bsd-3-clause | 929 | 0 |
#!/usr/bin/env python3
#
# Copyright (c) 2012 Timo Savola
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
import argparse
import struct
class Arena(object):
class Node(object):
def __init__(self, arena, address):
self.arena = arena
self.address = address
def __eq__(self, other):
return self.address == other.address
def __lt__(self, other):
return self.address < other.address
def _uint32(self, offset):
return self.arena._uint32(self.address + offset)
def _data(self, offset, size):
return self.arena._data(self.address + offset, size)
@property
def end(self):
return self.address + self.size
class Allocated(Node):
def __init__(self, arena, address, size):
super(Arena.Allocated, self).__init__(arena, address)
self.size = size
def __str__(self):
return "Allocated space at %u: %r" % (self.address, self.data)
@property
def data(self):
return self._data(0, self.size)
class Free(Node):
def __str__(self):
return "Free space from %u to %u" % (self.address, self.address + self.size)
@property
def size(self):
return self._uint32(0)
@property
def next_node(self):
address = self.next_address
return self.arena.free_nodes[address] if address else None
@property
def next_address(self):
return self._uint32(4)
_initial_address = 8
def __init__(self, data):
self.data = data
def init(self):
self.allocations = {}
self.free_nodes = {}
if self.size < self._initial_address:
return
last_node = None
next_addr = self._uint32(0)
while next_addr:
self.__init_allocated(last_node, next_addr)
node = self.Free(self, next_addr)
self.free_nodes[node.address] = node
last_node = node
next_addr = node.next_address
assert not next_addr or last_node.address < next_addr
self.__init_allocated(last_node, self.size)
def __init_allocated(self, prev_node, end):
address = prev_node.end if prev_node else self._initial_address
self.allocations[address] = self.Allocated(self, address, end - address)
@property
def size(self):
return len(self.data)
@property
def nodes(self):
nodes = {}
nodes.update(self.allocations)
nodes.update(self.free_nodes)
return nodes
def dump(self):
for node in sorted(self.nodes.values()):
print(node)
def _uint32(self, address):
return struct.unpack("<I", self._data(address, 4))[0]
def _data(self, address, size):
if address + size > len(self.data):
raise Exception("address %u size %u out of arena (size %u)" % (address, size, len(self.data)))
return self.data[address:address+size]
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
arena_parser = subparsers.add_parser("arena")
arena_parser.set_defaults(func=arena_command)
arena_parser.add_argument("filename", type=str, metavar="FILE")
arena_parser.add_argument("--dump", action="store_true")
args = parser.parse_args()
args.func(args)
def arena_command(args):
error = None
with open(args.filename, "rb") as file:
arena = Arena(file.read())
try:
arena.init()
except Exception as e:
error = e
if args.dump:
arena.dump()
if error:
raise error
if __name__ == "__main__":
main()
| tsavola/concrete | python/concrete/tools.py | Python | lgpl-2.1 | 3,421 | 0.030108 |
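A minimal arena blob for the Arena parser above, built by hand: the first uint32 is the head of the free list, and 0 means there are no free nodes, so everything after the 8-byte header is reported as a single allocated block. This assumes the Arena class from the module above is in scope.

import struct

blob = struct.pack("<I", 0) + b"\x00" * 4 + b"payload!"   # 16-byte arena
arena = Arena(blob)
arena.init()
arena.dump()   # -> Allocated space at 8: b'payload!'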
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometry module.
"""
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_less)
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import NDData, StdDevUncertainty
from astropy.table import Table
import astropy.units as u
from astropy.wcs import WCS
from ..photometry import aperture_photometry
from ..circle import (CircularAperture, CircularAnnulus, SkyCircularAperture,
SkyCircularAnnulus)
from ..ellipse import (EllipticalAperture, EllipticalAnnulus,
SkyEllipticalAperture, SkyEllipticalAnnulus)
from ..rectangle import (RectangularAperture, RectangularAnnulus,
SkyRectangularAperture, SkyRectangularAnnulus)
from ...datasets import get_path, make_4gaussians_image, make_wcs, make_gwcs
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB # noqa
APERTURE_CL = [CircularAperture,
CircularAnnulus,
EllipticalAperture,
EllipticalAnnulus,
RectangularAperture,
RectangularAnnulus]
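# Positional constructor arguments paired with APERTURE_CL above (assuming the
# current positional signatures): (r), (r_in, r_out), (a, b, theta),
# (a_in, a_out, b_out, b_in, theta), (w, h, theta) and
# (w_in, w_out, h_out, h_in, theta).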
TEST_APERTURES = list(zip(APERTURE_CL, ((3.,),
(3., 5.),
(3., 5., 1.),
(3., 5., 4., 12./5., 1.),
(5, 8, np.pi / 4),
(8, 12, 8, 16./3., np.pi / 8))))
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_outside_array(aperture_class, params):
data = np.ones((10, 10), dtype=float)
aperture = aperture_class((-60, 60), *params)
fluxtable = aperture_photometry(data, aperture)
# aperture is fully outside array:
assert np.isnan(fluxtable['aperture_sum'])
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_inside_array_simple(aperture_class, params):
data = np.ones((40, 40), dtype=float)
aperture = aperture_class((20., 20.), *params)
table1 = aperture_photometry(data, aperture, method='center',
subpixels=10)
table2 = aperture_photometry(data, aperture, method='subpixel',
subpixels=10)
table3 = aperture_photometry(data, aperture, method='exact', subpixels=10)
true_flux = aperture.area
assert table1['aperture_sum'] < table3['aperture_sum']
if not isinstance(aperture, (RectangularAperture, RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_aperture_plots(aperture_class, params):
# This test should run without any errors, and there is no return
# value.
# TODO: check the content of the plot
aperture = aperture_class((20., 20.), *params)
aperture.plot()
def test_aperture_pixel_positions():
pos1 = (10, 20)
pos2 = [(10, 20)]
pos3 = u.Quantity((10, 20), unit=u.pixel)
pos4 = u.Quantity([(10, 20)], unit=u.pixel)
r = 3
ap1 = CircularAperture(pos1, r)
ap2 = CircularAperture(pos2, r)
ap3 = CircularAperture(pos3, r)
ap4 = CircularAperture(pos4, r)
assert not np.array_equal(ap1.positions, ap2.positions)
assert_allclose(ap1.positions, ap3.positions)
assert_allclose(ap2.positions, ap4.positions)
class BaseTestAperturePhotometry:
def test_array_error(self):
# Array error
error = np.ones(self.data.shape, dtype=float)
if not hasattr(self, 'mask'):
mask = None
true_error = np.sqrt(self.area)
else:
mask = self.mask
# 1 masked pixel
true_error = np.sqrt(self.area - 1)
table1 = aperture_photometry(self.data,
self.aperture, method='center',
mask=mask, error=error)
table2 = aperture_photometry(self.data,
self.aperture,
method='subpixel', subpixels=12,
mask=mask, error=error)
table3 = aperture_photometry(self.data,
self.aperture, method='exact',
mask=mask, error=error)
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], self.true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
assert np.all(table1['aperture_sum'] < table3['aperture_sum'])
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum_err'], true_error)
assert_allclose(table2['aperture_sum_err'],
table3['aperture_sum_err'], atol=0.1)
assert np.all(table1['aperture_sum_err'] < table3['aperture_sum_err'])
class TestCircular(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.true_flux = self.area
class TestCircularArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestCircularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.true_flux = self.area
class TestCircularAnnulusArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestElliptical(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a = 10.
b = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAperture(position, a, b, theta=theta)
self.area = np.pi * a * b
self.true_flux = self.area
class TestEllipticalAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a_in = 5.
a_out = 8.
b_out = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAnnulus(position, a_in, a_out, b_out,
theta=theta)
self.area = (np.pi * (a_out * b_out) -
np.pi * (a_in * b_out * a_in / a_out))
self.true_flux = self.area
class TestRectangularAperture(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
h = 5.
w = 8.
theta = np.pi / 4.
self.aperture = RectangularAperture(position, w, h, theta=theta)
self.area = h * w
self.true_flux = self.area
class TestRectangularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
h_out = 8.
w_in = 8.
w_out = 12.
h_in = w_in * h_out / w_out
theta = np.pi / 8.
self.aperture = RectangularAnnulus(position, w_in, w_out, h_out,
theta=theta)
self.area = h_out * w_out - h_in * w_in
self.true_flux = self.area
class TestMaskedSkipCircular(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
self.mask = np.zeros((40, 40), dtype=bool)
self.mask[20, 20] = True
position = (20., 20.)
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.true_flux = self.area - 1
class BaseTestDifferentData:
def test_basic_circular_aperture_photometry(self):
aperture = CircularAperture(self.position, self.radius)
table = aperture_photometry(self.data, aperture,
method='exact')
assert_allclose(table['aperture_sum'].value, self.true_flux)
        assert table['aperture_sum'].unit == self.fluxunit
assert np.all(table['xcenter'].value ==
np.transpose(self.position)[0])
assert np.all(table['ycenter'].value ==
np.transpose(self.position)[1])
class TestInputNDData(BaseTestDifferentData):
def setup_class(self):
data = np.ones((40, 40), dtype=float)
self.data = NDData(data, unit=u.adu)
self.radius = 3
self.position = [(20, 20), (30, 30)]
self.true_flux = np.pi * self.radius * self.radius
self.fluxunit = u.adu
@pytest.mark.remote_data
def test_wcs_based_photometry_to_catalogue():
pathcat = get_path('spitzer_example_catalog.xml', location='remote')
pathhdu = get_path('spitzer_example_image.fits', location='remote')
hdu = fits.open(pathhdu)
data = u.Quantity(hdu[0].data, unit=hdu[0].header['BUNIT'])
wcs = WCS(hdu[0].header)
scale = hdu[0].header['PIXSCAL1']
catalog = Table.read(pathcat)
pos_skycoord = SkyCoord(catalog['l'], catalog['b'], frame='galactic')
photometry_skycoord = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 4 * u.arcsec), wcs=wcs)
photometry_skycoord_pix = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 4. / scale * u.pixel),
wcs=wcs)
assert_allclose(photometry_skycoord['aperture_sum'],
photometry_skycoord_pix['aperture_sum'])
# Photometric unit conversion is needed to match the catalogue
factor = (1.2 * u.arcsec) ** 2 / u.pixel
converted_aperture_sum = (photometry_skycoord['aperture_sum'] *
factor).to(u.mJy / u.pixel)
fluxes_catalog = catalog['f4_5'].filled()
    # There shouldn't be large outliers, but some difference is OK, as
    # fluxes_catalog is based on PSF photometry, etc.
assert_allclose(fluxes_catalog, converted_aperture_sum.value, rtol=1e0)
assert(np.mean(np.fabs(((fluxes_catalog - converted_aperture_sum.value) /
fluxes_catalog))) < 0.1)
# close the file
hdu.close()
def test_wcs_based_photometry():
data = make_4gaussians_image()
wcs = make_wcs(data.shape)
    # hard-wired positions in make_4gaussians_image
pos_orig_pixel = u.Quantity(([160., 25., 150., 90.],
[70., 40., 25., 60.]), unit=u.pixel)
pos_skycoord = wcs.pixel_to_world(pos_orig_pixel[0], pos_orig_pixel[1])
pos_skycoord_s = pos_skycoord[2]
photometry_skycoord_circ = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 3 * u.arcsec), wcs=wcs)
photometry_skycoord_circ_2 = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 2 * u.arcsec), wcs=wcs)
photometry_skycoord_circ_s = aperture_photometry(
data, SkyCircularAperture(pos_skycoord_s, 3 * u.arcsec), wcs=wcs)
assert_allclose(photometry_skycoord_circ['aperture_sum'][2],
photometry_skycoord_circ_s['aperture_sum'])
photometry_skycoord_circ_ann = aperture_photometry(
data, SkyCircularAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec),
wcs=wcs)
photometry_skycoord_circ_ann_s = aperture_photometry(
data, SkyCircularAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec),
wcs=wcs)
assert_allclose(photometry_skycoord_circ_ann['aperture_sum'][2],
photometry_skycoord_circ_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_circ_ann['aperture_sum'],
photometry_skycoord_circ['aperture_sum'] -
photometry_skycoord_circ_2['aperture_sum'])
photometry_skycoord_ell = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_2 = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord, 2 * u.arcsec,
2.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_s = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord_s, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_ann = aperture_photometry(
data, SkyEllipticalAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_ann_s = aperture_photometry(
data, SkyEllipticalAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
assert_allclose(photometry_skycoord_ell['aperture_sum'][2],
photometry_skycoord_ell_s['aperture_sum'])
assert_allclose(photometry_skycoord_ell_ann['aperture_sum'][2],
photometry_skycoord_ell_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_ell['aperture_sum'],
photometry_skycoord_circ['aperture_sum'], rtol=5e-3)
assert_allclose(photometry_skycoord_ell_ann['aperture_sum'],
photometry_skycoord_ell['aperture_sum'] -
photometry_skycoord_ell_2['aperture_sum'], rtol=1e-4)
photometry_skycoord_rec = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord,
6 * u.arcsec, 6 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_4 = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord,
4 * u.arcsec, 4 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_s = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord_s,
6 * u.arcsec, 6 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_ann = aperture_photometry(
data, SkyRectangularAnnulus(pos_skycoord, 4 * u.arcsec, 6 * u.arcsec,
6 * u.arcsec, theta=0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_ann_s = aperture_photometry(
data, SkyRectangularAnnulus(pos_skycoord_s, 4 * u.arcsec,
6 * u.arcsec, 6 * u.arcsec,
theta=0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
assert_allclose(photometry_skycoord_rec['aperture_sum'][2],
photometry_skycoord_rec_s['aperture_sum'])
assert np.all(photometry_skycoord_rec['aperture_sum'] >
photometry_skycoord_circ['aperture_sum'])
assert_allclose(photometry_skycoord_rec_ann['aperture_sum'][2],
photometry_skycoord_rec_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_rec_ann['aperture_sum'],
photometry_skycoord_rec['aperture_sum'] -
photometry_skycoord_rec_4['aperture_sum'], rtol=1e-4)
def test_basic_circular_aperture_photometry_unit():
radius = 3
true_flux = np.pi * radius * radius
aper = CircularAperture((12, 12), radius)
data1 = np.ones((25, 25), dtype=float)
table1 = aperture_photometry(data1, aper)
assert_allclose(table1['aperture_sum'], true_flux)
unit = u.adu
data2 = u.Quantity(data1 * unit)
table2 = aperture_photometry(data2, aper)
assert_allclose(table2['aperture_sum'].value, true_flux)
assert table2['aperture_sum'].unit == data2.unit == unit
error1 = np.ones((25, 25))
with pytest.raises(ValueError):
# data has unit, but error does not
aperture_photometry(data2, aper, error=error1)
error2 = u.Quantity(error1 * u.Jy)
with pytest.raises(ValueError):
# data and error have different units
aperture_photometry(data2, aper, error=error2)
def test_aperture_photometry_with_error_units():
"""Test aperture_photometry when error has units (see #176)."""
data1 = np.ones((40, 40), dtype=float)
data2 = u.Quantity(data1, unit=u.adu)
error = u.Quantity(data1, unit=u.adu)
radius = 3
true_flux = np.pi * radius * radius
unit = u.adu
position = (20, 20)
table1 = aperture_photometry(data2, CircularAperture(position, radius),
error=error)
assert_allclose(table1['aperture_sum'].value, true_flux)
assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux))
assert table1['aperture_sum'].unit == unit
assert table1['aperture_sum_err'].unit == unit
def test_aperture_photometry_inputs_with_mask():
"""
Test that aperture_photometry does not modify the input
data or error array when a mask is input.
"""
data = np.ones((5, 5))
aperture = CircularAperture((2, 2), 2.)
mask = np.zeros_like(data, dtype=bool)
data[2, 2] = 100. # bad pixel
mask[2, 2] = True
error = np.sqrt(data)
data_in = data.copy()
error_in = error.copy()
t1 = aperture_photometry(data, aperture, error=error, mask=mask)
assert_array_equal(data, data_in)
assert_array_equal(error, error_in)
assert_allclose(t1['aperture_sum'][0], 11.5663706144)
t2 = aperture_photometry(data, aperture)
assert_allclose(t2['aperture_sum'][0], 111.566370614)
TEST_ELLIPSE_EXACT_APERTURES = [(3.469906, 3.923861394, 3.),
(0.3834415188257778, 0.3834415188257778, 0.3)]
@pytest.mark.parametrize('x,y,r', TEST_ELLIPSE_EXACT_APERTURES)
def test_ellipse_exact_grid(x, y, r):
"""
Test elliptical exact aperture photometry on a grid of pixel positions.
This is a regression test for the bug discovered in this issue:
https://github.com/astropy/photutils/issues/198
"""
data = np.ones((10, 10))
aperture = EllipticalAperture((x, y), r, r, 0.)
t = aperture_photometry(data, aperture, method='exact')
actual = t['aperture_sum'][0] / (np.pi * r ** 2)
assert_allclose(actual, 1)
@pytest.mark.parametrize('value', [np.nan, np.inf])
def test_nan_inf_mask(value):
"""Test that nans and infs are properly masked [267]."""
data = np.ones((9, 9))
mask = np.zeros_like(data, dtype=bool)
data[4, 4] = value
mask[4, 4] = True
radius = 2.
aper = CircularAperture((4, 4), radius)
tbl = aperture_photometry(data, aper, mask=mask)
desired = (np.pi * radius**2) - 1
assert_allclose(tbl['aperture_sum'], desired)
def test_aperture_partial_overlap():
data = np.ones((20, 20))
error = np.ones((20, 20))
xypos = [(10, 10), (0, 0), (0, 19), (19, 0), (19, 19)]
r = 5.
aper = CircularAperture(xypos, r=r)
tbl = aperture_photometry(data, aper, error=error)
assert_allclose(tbl['aperture_sum'][0], np.pi * r ** 2)
assert_array_less(tbl['aperture_sum'][1:], np.pi * r ** 2)
unit = u.MJy / u.sr
tbl = aperture_photometry(data * unit, aper, error=error * unit)
assert_allclose(tbl['aperture_sum'][0].value, np.pi * r ** 2)
assert_array_less(tbl['aperture_sum'][1:].value, np.pi * r ** 2)
assert_array_less(tbl['aperture_sum_err'][1:].value, np.pi * r ** 2)
assert tbl['aperture_sum'].unit == unit
assert tbl['aperture_sum_err'].unit == unit
def test_pixel_aperture_repr():
aper = CircularAperture((10, 20), r=3.0)
assert '<CircularAperture(' in repr(aper)
assert 'Aperture: CircularAperture' in str(aper)
aper = CircularAnnulus((10, 20), r_in=3.0, r_out=5.0)
assert '<CircularAnnulus(' in repr(aper)
assert 'Aperture: CircularAnnulus' in str(aper)
aper = EllipticalAperture((10, 20), a=5.0, b=3.0, theta=15.0)
assert '<EllipticalAperture(' in repr(aper)
assert 'Aperture: EllipticalAperture' in str(aper)
aper = EllipticalAnnulus((10, 20), a_in=4.0, a_out=8.0, b_out=4.0,
theta=15.0)
assert '<EllipticalAnnulus(' in repr(aper)
assert 'Aperture: EllipticalAnnulus' in str(aper)
aper = RectangularAperture((10, 20), w=5.0, h=3.0, theta=15.0)
assert '<RectangularAperture(' in repr(aper)
assert 'Aperture: RectangularAperture' in str(aper)
aper = RectangularAnnulus((10, 20), w_in=4.0, w_out=8.0, h_out=4.0,
theta=15.0)
assert '<RectangularAnnulus(' in repr(aper)
assert 'Aperture: RectangularAnnulus' in str(aper)
def test_sky_aperture_repr():
s = SkyCoord([1, 2], [3, 4], unit='deg')
aper = SkyCircularAperture(s, r=3*u.pix)
a_repr = ('<SkyCircularAperture(<SkyCoord (ICRS): (ra, dec) in deg\n'
' [(1., 3.), (2., 4.)]>, r=3.0 pix)>')
a_str = ('Aperture: SkyCircularAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'r: 3.0 pix')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyCircularAnnulus(s, r_in=3.*u.pix, r_out=5*u.pix)
a_repr = ('<SkyCircularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg\n'
' [(1., 3.), (2., 4.)]>, r_in=3.0 pix, r_out=5.0 pix)>')
a_str = ('Aperture: SkyCircularAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'r_in: 3.0 pix\nr_out: 5.0 pix')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyEllipticalAperture(s, a=3*u.pix, b=5*u.pix, theta=15*u.deg)
a_repr = ('<SkyEllipticalAperture(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, a=3.0 pix, b=5.0 pix,'
' theta=15.0 deg)>')
a_str = ('Aperture: SkyEllipticalAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'a: 3.0 pix\nb: 5.0 pix\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyEllipticalAnnulus(s, a_in=3*u.pix, a_out=5*u.pix, b_out=3*u.pix,
theta=15*u.deg)
a_repr = ('<SkyEllipticalAnnulus(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, a_in=3.0 pix, '
'a_out=5.0 pix, b_in=1.8 pix, b_out=3.0 pix, '
'theta=15.0 deg)>')
a_str = ('Aperture: SkyEllipticalAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'a_in: 3.0 pix\na_out: 5.0 pix\nb_in: 1.8 pix\n'
'b_out: 3.0 pix\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyRectangularAperture(s, w=3*u.pix, h=5*u.pix, theta=15*u.deg)
a_repr = ('<SkyRectangularAperture(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, w=3.0 pix, h=5.0 pix'
', theta=15.0 deg)>')
a_str = ('Aperture: SkyRectangularAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'w: 3.0 pix\nh: 5.0 pix\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyRectangularAnnulus(s, w_in=5*u.pix, w_out=10*u.pix,
h_out=6*u.pix, theta=15*u.deg)
a_repr = ('<SkyRectangularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg'
'\n [(1., 3.), (2., 4.)]>, w_in=5.0 pix, '
'w_out=10.0 pix, h_in=3.0 pix, h_out=6.0 pix, '
'theta=15.0 deg)>')
a_str = ('Aperture: SkyRectangularAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'w_in: 5.0 pix\nw_out: 10.0 pix\nh_in: 3.0 pix\n'
'h_out: 6.0 pix\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
def test_rectangular_bbox():
# odd sizes
width = 7
height = 3
a = RectangularAperture((50, 50), w=width, h=height, theta=0)
assert a.bbox.shape == (height, width)
a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0)
assert a.bbox.shape == (height + 1, width + 1)
a = RectangularAperture((50, 50), w=width, h=height, theta=90.*np.pi/180.)
assert a.bbox.shape == (width, height)
# even sizes
width = 8
height = 4
a = RectangularAperture((50, 50), w=width, h=height, theta=0)
assert a.bbox.shape == (height + 1, width + 1)
a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0)
assert a.bbox.shape == (height, width)
a = RectangularAperture((50.5, 50.5), w=width, h=height,
theta=90.*np.pi/180.)
assert a.bbox.shape == (width, height)
def test_elliptical_bbox():
# integer axes
a = 7
b = 3
ap = EllipticalAperture((50, 50), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b + 1, 2*a + 1)
ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b, 2*a)
ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.)
assert ap.bbox.shape == (2*a + 1, 2*b + 1)
# fractional axes
a = 7.5
b = 4.5
ap = EllipticalAperture((50, 50), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b, 2*a)
ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b + 1, 2*a + 1)
ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.)
assert ap.bbox.shape == (2*a, 2*b)
@pytest.mark.skipif('not HAS_GWCS')
@pytest.mark.parametrize('wcs_type', ('wcs', 'gwcs'))
def test_to_sky_pixel(wcs_type):
data = make_4gaussians_image()
if wcs_type == 'wcs':
wcs = make_wcs(data.shape)
elif wcs_type == 'gwcs':
wcs = make_gwcs(data.shape)
ap = CircularAperture(((12.3, 15.7), (48.19, 98.14)), r=3.14)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.r, ap2.r)
ap = CircularAnnulus(((12.3, 15.7), (48.19, 98.14)), r_in=3.14,
r_out=5.32)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.r_in, ap2.r_in)
assert_allclose(ap.r_out, ap2.r_out)
ap = EllipticalAperture(((12.3, 15.7), (48.19, 98.14)), a=3.14, b=5.32,
theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.a, ap2.a)
assert_allclose(ap.b, ap2.b)
assert_allclose(ap.theta, ap2.theta)
ap = EllipticalAnnulus(((12.3, 15.7), (48.19, 98.14)), a_in=3.14,
a_out=15.32, b_out=4.89, theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.a_in, ap2.a_in)
assert_allclose(ap.a_out, ap2.a_out)
assert_allclose(ap.b_out, ap2.b_out)
assert_allclose(ap.theta, ap2.theta)
ap = RectangularAperture(((12.3, 15.7), (48.19, 98.14)), w=3.14, h=5.32,
theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.w, ap2.w)
assert_allclose(ap.h, ap2.h)
assert_allclose(ap.theta, ap2.theta)
ap = RectangularAnnulus(((12.3, 15.7), (48.19, 98.14)), w_in=3.14,
w_out=15.32, h_out=4.89, theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.w_in, ap2.w_in)
assert_allclose(ap.w_out, ap2.w_out)
assert_allclose(ap.h_out, ap2.h_out)
assert_allclose(ap.theta, ap2.theta)
def test_position_units():
"""Regression test for unit check."""
pos = (10, 10) * u.pix
pos = np.sqrt(pos**2)
ap = CircularAperture(pos, r=3.)
assert_allclose(ap.positions, np.array([10, 10]))
def test_radius_units():
"""Regression test for unit check."""
pos = SkyCoord(10, 10, unit='deg')
r = 3.*u.pix
r = np.sqrt(r**2)
ap = SkyCircularAperture(pos, r=r)
assert ap.r.value == 3.0
assert ap.r.unit == u.pix
def test_scalar_aperture():
"""
    Regression test to check that a length-1 aperture list appends a "_0"
    to the column names, to be consistent with list inputs.
"""
data = np.ones((20, 20), dtype=float)
ap = CircularAperture((10, 10), r=3.)
colnames1 = aperture_photometry(data, ap, error=data).colnames
assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum',
'aperture_sum_err'])
colnames2 = aperture_photometry(data, [ap], error=data).colnames
assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',
'aperture_sum_err_0'])
colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames
assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',
'aperture_sum_err_0', 'aperture_sum_1',
'aperture_sum_err_1'])
def test_nan_in_bbox():
"""
Regression test that non-finite data values outside of the aperture
mask but within the bounding box do not affect the photometry.
"""
data1 = np.ones((101, 101))
data2 = data1.copy()
data1[33, 33] = np.nan
data1[67, 67] = np.inf
data1[33, 67] = -np.inf
data1[22, 22] = np.nan
data1[22, 23] = np.inf
error = data1.copy()
aper1 = CircularAperture((50, 50), r=20.)
aper2 = CircularAperture((5, 5), r=20.)
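    # Each aperture's bounding box contains some of the non-finite pixels, but
    # none of them fall inside the circular masks, so the sums should match
    # the clean data.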
tbl1 = aperture_photometry(data1, aper1, error=error)
tbl2 = aperture_photometry(data2, aper1, error=error)
assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])
assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])
tbl3 = aperture_photometry(data1, aper2, error=error)
tbl4 = aperture_photometry(data2, aper2, error=error)
assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum'])
assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err'])
def test_scalar_skycoord():
"""
Regression test to check that scalar SkyCoords are added to the table
as a length-1 SkyCoord array.
"""
data = make_4gaussians_image()
wcs = make_wcs(data.shape)
skycoord = wcs.pixel_to_world(90, 60)
aper = SkyCircularAperture(skycoord, r=0.1*u.arcsec)
tbl = aperture_photometry(data, aper, wcs=wcs)
assert isinstance(tbl['sky_center'], SkyCoord)
def test_nddata_input():
data = np.arange(400).reshape((20, 20))
error = np.sqrt(data)
mask = np.zeros((20, 20), dtype=bool)
mask[8:13, 8:13] = True
unit = 'adu'
wcs = make_wcs(data.shape)
skycoord = wcs.pixel_to_world(10, 10)
aper = SkyCircularAperture(skycoord, r=0.7*u.arcsec)
tbl1 = aperture_photometry(data*u.adu, aper, error=error*u.adu, mask=mask,
wcs=wcs)
uncertainty = StdDevUncertainty(error)
nddata = NDData(data, uncertainty=uncertainty, mask=mask, wcs=wcs,
unit=unit)
tbl2 = aperture_photometry(nddata, aper)
for column in tbl1.columns:
if column == 'sky_center': # cannot test SkyCoord equality
continue
assert_allclose(tbl1[column], tbl2[column])
|
astropy/photutils
|
photutils/aperture/tests/test_photometry.py
|
Python
|
bsd-3-clause
| 32,618
| 0
|
# -*- coding: utf-8 -*-
import time
from django.conf import settings
from django.template import Context
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page, create_title
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.cache.placeholder import (
_get_placeholder_cache_version_key,
_get_placeholder_cache_version,
_set_placeholder_cache_version,
_get_placeholder_cache_key,
set_placeholder_cache,
get_placeholder_cache,
clear_placeholder_cache,
)
from cms.exceptions import PluginAlreadyRegistered
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import (
DateTimeCacheExpirationPlugin,
LegacyCachePlugin,
NoCachePlugin,
SekizaiPlugin,
TimeDeltaCacheExpirationPlugin,
TTLCacheExpirationPlugin,
VaryCacheOnPlugin,
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting
from cms.utils.helpers import get_timezone_name
class CacheTestCase(CMSTestCase):
def tearDown(self):
from django.core.cache import cache
super(CacheTestCase, self).tearDown()
cache.clear()
def setUp(self):
from django.core.cache import cache
super(CacheTestCase, self).setUp()
cache.clear()
def test_cache_placeholder(self):
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
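        # The first render populates the placeholder cache; the same template
        # is rendered again below and should need only a single query.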
with self.assertNumQueries(FuzzyInt(5, 9)):
self.render_template_obj(template, {}, request)
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = False
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(1):
self.render_template_obj(template, {}, request)
# toolbar
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.show_toolbar = True
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(3):
self.render_template_obj(template, {}, request)
page1.publish('en')
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict(
CMS_PAGE_CACHE=False
)
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(13, 25)):
self.client.get('/en/')
with self.assertNumQueries(FuzzyInt(5, 11)):
self.client.get('/en/')
overrides['CMS_PLACEHOLDER_CACHE'] = False
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(7, 15)):
self.client.get('/en/')
def test_no_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot='body')[0]
placeholder2 = page1.placeholders.filter(slot='right-column')[0]
try:
plugin_pool.register_plugin(NoCachePlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, 'TextPlugin', 'en', body="English")
add_plugin(placeholder2, 'TextPlugin', 'en', body="Deutsch")
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
# Request the page without the 'no-cache' plugin
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(18, 25)):
response1 = self.client.get('/en/')
content1 = response1.content
# Fetch it again, it is cached.
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
response2 = self.client.get('/en/')
content2 = response2.content
self.assertEqual(content1, content2)
# Once again with PAGE_CACHE=False, to prove the cache can
# be disabled
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(5, 24)):
response3 = self.client.get('/en/')
content3 = response3.content
self.assertEqual(content1, content3)
# Add the 'no-cache' plugin
add_plugin(placeholder1, "NoCachePlugin", 'en')
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(4, 6)):
output = self.render_template_obj(template, {}, request)
with self.assertNumQueries(FuzzyInt(14, 24)):
response = self.client.get('/en/')
self.assertTrue("no-cache" in response['Cache-Control'])
resp1 = response.content.decode('utf8').split("$$$")[1]
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(4):
output2 = self.render_template_obj(template, {}, request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(8, 14)):
response = self.client.get('/en/')
resp2 = response.content.decode('utf8').split("$$$")[1]
self.assertNotEqual(output, output2)
self.assertNotEqual(resp1, resp2)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_timedelta_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TimeDeltaCacheExpirationPlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add the TimeDeltaCacheExpirationPlugin, which expires in 45s.
add_plugin(placeholder1, "TimeDeltaCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get('/en/')
self.assertTrue('max-age=45' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(TimeDeltaCacheExpirationPlugin)
def test_datetime_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
try:
plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add the DateTimeCacheExpirationPlugin, which expires in 40s.
add_plugin(placeholder1, "DateTimeCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get('/en/')
self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
    def test_ttl_cache_expiration_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TTLCacheExpirationPlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add the TTLCacheExpirationPlugin, which expires in 50s.
add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get('/en/')
self.assertTrue('max-age=50' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
def test_expiration_cache_plugins(self):
"""
Tests that when used in combination, the page is cached to the
shortest TTL.
"""
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TTLCacheExpirationPlugin)
try:
plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
except PluginAlreadyRegistered:
pass
try:
plugin_pool.register_plugin(NoCachePlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
# Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
# The page should expire in the least of these, or 40s.
add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
add_plugin(placeholder2, "DateTimeCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 26)):
response = self.client.get('/en/')
resp1 = response.content.decode('utf8').split("$$$")[1]
self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control']) # noqa
cache_control1 = response['Cache-Control']
expires1 = response['Expires']
time.sleep(1) # This ensures that the cache has aged measurably
# Request it again, this time, it comes from the cache
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
response = self.client.get('/en/')
resp2 = response.content.decode('utf8').split("$$$")[1]
# Content will be the same
self.assertEqual(resp2, resp1)
# Cache-Control will be different because the cache has aged
self.assertNotEqual(response['Cache-Control'], cache_control1)
# However, the Expires timestamp will be the same
self.assertEqual(response['Expires'], expires1)
plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_dual_legacy_cache_plugins(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(LegacyCachePlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Adds a legacy no-cache plugin. In older versions of the CMS this
        # would prevent the page from being cached, but because the plugin
        # also defines get_cache_expiration(), the legacy no-cache flag is
        # ignored.
add_plugin(placeholder1, "LegacyCachePlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)):
response = self.client.get('/en/')
self.assertTrue('no-cache' not in response['Cache-Control'])
plugin_pool.unregister_plugin(LegacyCachePlugin)
def test_cache_page(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1.get_path(), 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated())
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that the cache is invalidated on unpublishing the page
#
old_version = _get_cache_version()
page1.unpublish('en')
self.assertGreater(_get_cache_version(), old_version)
#
# Test that this means the page is actually not cached.
#
page1.publish('en')
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that the above behavior is different when CMS_PAGE_CACHE is
# set to False (disabled)
#
with self.settings(CMS_PAGE_CACHE=False):
# Test that the page is initially un-cached
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
                # Test that subsequent requests of the same page still
                # require DB access.
#
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
def test_no_page_cache_on_toolbar_edit(self):
with self.settings(CMS_PAGE_CACHE=True):
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en')
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Publish
page1.publish('en')
# Set edit mode
session = self.client.session
session['cms_edit'] = True
session.save()
# Make an initial ?edit request
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
# Disable edit mode
session = self.client.session
session['cms_edit'] = False
session.save()
# Set the cache
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
# Assert cached content was used
with self.assertNumQueries(0):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
# Set edit mode once more
session = self.client.session
session['cms_edit'] = True
session.save()
# Assert no cached content was used
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/?edit')
self.assertEqual(response.status_code, 200)
def test_invalidate_restart(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1.get_path(), 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated())
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
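            # Simulate a process restart by clearing and re-discovering the
            # plugin pool; the previously cached page should no longer be
            # served from cache (note the extra queries below).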
old_plugins = plugin_pool.plugins
plugin_pool.clear()
plugin_pool.discover_plugins()
plugin_pool.plugins = old_plugins
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
def test_sekizai_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(SekizaiPlugin)
add_plugin(placeholder1, "SekizaiPlugin", 'en')
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'alert(')
response = self.client.get('/en/')
self.assertContains(response, 'alert(')
def test_cache_invalidation(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder = page1.placeholders.get(slot="body")
add_plugin(placeholder, "TextPlugin", 'en', body="First content")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'First content')
response = self.client.get('/en/')
self.assertContains(response, 'First content')
add_plugin(placeholder, "TextPlugin", 'en', body="Second content")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'Second content')
def test_render_placeholder_cache(self):
"""
Regression test for #4223
Assert that placeholder cache is cleared correctly when a plugin is saved
"""
invalidate_cms_page_cache()
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder
###
# add the test plugin
##
test_plugin = add_plugin(ph1, u"TextPlugin", u"en", body="Some text")
test_plugin.save()
request = self.get_request()
content_renderer = self.get_content_renderer(request)
# asserting initial text
context = SekizaiContext()
context['cms_content_renderer'] = content_renderer
context['request'] = self.get_request()
text = content_renderer.render_placeholder(ph1, context)
self.assertEqual(text, "Some text")
# deleting local plugin cache
del ph1._plugins_cache
test_plugin.body = 'Other text'
test_plugin.save()
        # plugin text has changed, so the placeholder must render the new content
text = content_renderer.render_placeholder(ph1, context)
self.assertEqual(text, "Other text")
class PlaceholderCacheTestCase(CMSTestCase):
def setUp(self):
from django.core.cache import cache
super(PlaceholderCacheTestCase, self).setUp()
cache.clear()
self.page = create_page(
'en test page', 'nav_playground.html', 'en', published=True)
# Now create and publish as 'de' title
create_title('de', "de test page", self.page)
self.page.publish('de')
self.placeholder = self.page.placeholders.filter(slot="body")[0]
plugin_pool.register_plugin(VaryCacheOnPlugin)
add_plugin(self.placeholder, 'TextPlugin', 'en', body='English')
add_plugin(self.placeholder, 'TextPlugin', 'de', body='Deutsch')
add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'en')
add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'de')
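        # VaryCacheOnPlugin causes the placeholder cache to vary on the
        # request's country-code header (see the country-code component in the
        # cache keys tested below).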
self.en_request = self.get_request('/en/')
self.en_request.current_page = Page.objects.get(pk=self.page.pk)
self.en_us_request = self.get_request('/en/')
self.en_us_request.META['HTTP_COUNTRY_CODE'] = 'US'
self.en_uk_request = self.get_request('/en/')
self.en_uk_request.META['HTTP_COUNTRY_CODE'] = 'UK'
self.de_request = self.get_request('/de/')
self.de_request.current_page = Page.objects.get(pk=self.page.pk)
def tearDown(self):
from django.core.cache import cache
super(PlaceholderCacheTestCase, self).tearDown()
plugin_pool.unregister_plugin(VaryCacheOnPlugin)
cache.clear()
def test_get_placeholder_cache_version_key(self):
cache_version_key = '{prefix}|placeholder_cache_version|id:{id}|lang:{lang}|site:{site}'.format(
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
)
self.assertEqual(
_get_placeholder_cache_version_key(self.placeholder, 'en', 1),
cache_version_key
)
def test_set_clear_get_placeholder_cache_version(self):
initial, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
clear_placeholder_cache(self.placeholder, 'en', 1)
version, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
self.assertGreater(version, initial)
def test_get_placeholder_cache_key(self):
version, vary_on_list = _get_placeholder_cache_version(self.placeholder, 'en', 1)
desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format( # noqa
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
tz=get_timezone_name(),
version=version,
cc='_',
)
_set_placeholder_cache_version(self.placeholder, 'en', 1, version, vary_on_list=vary_on_list, duration=1)
actual_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
self.assertEqual(actual_key, desired_key)
en_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
de_key = _get_placeholder_cache_key(self.placeholder, 'de', 1, self.de_request)
self.assertNotEqual(en_key, de_key)
en_us_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_us_request)
self.assertNotEqual(en_key, en_us_key)
desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format( # noqa
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
tz=get_timezone_name(),
version=version,
cc='US',
)
self.assertEqual(en_us_key, desired_key)
def test_set_get_placeholder_cache(self):
        # Render the placeholder for several request variants and verify that
        # each variant gets its own cache entry.
en_context = Context({
'request': self.en_request,
'cms_content_renderer': self.get_content_renderer(self.en_request)
})
en_us_context = Context({
'request': self.en_us_request,
'cms_content_renderer': self.get_content_renderer(self.en_us_request)
})
en_uk_context = Context({
'request': self.en_uk_request,
'cms_content_renderer': self.get_content_renderer(self.en_uk_request)
})
en_content = self.placeholder.render(en_context, 350, lang='en')
en_us_content = self.placeholder.render(en_us_context, 350, lang='en')
en_uk_content = self.placeholder.render(en_uk_context, 350, lang='en')
del self.placeholder._plugins_cache
de_context = Context({
'request': self.de_request,
'cms_content_renderer': self.get_content_renderer(self.de_request)
})
de_content = self.placeholder.render(de_context, 350, lang='de')
self.assertNotEqual(en_content, de_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_content, self.en_request)
cached_en_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_request)
self.assertEqual(cached_en_content, en_content)
set_placeholder_cache(self.placeholder, 'de', 1, de_content, self.de_request)
cached_de_content = get_placeholder_cache(self.placeholder, 'de', 1, self.de_request)
self.assertNotEqual(cached_en_content, cached_de_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_us_content, self.en_us_request)
cached_en_us_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_us_request)
self.assertNotEqual(cached_en_content, cached_en_us_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_uk_content, self.en_uk_request)
cached_en_uk_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_uk_request)
self.assertNotEqual(cached_en_us_content, cached_en_uk_content)
def test_set_get_placeholder_cache_with_long_prefix(self):
"""
This is for testing that everything continues to work even when the
cache-keys are hashed.
"""
# Use an absurdly long cache prefix to get us in the right neighborhood...
with self.settings(CMS_CACHE_PREFIX="super_lengthy_prefix" * 9): # 180 chars
en_crazy_request = self.get_request('/en/')
            # Use a ridiculously long "country code" (80 chars); combined with
            # the prefix, the un-hashed key is already past 260 chars.
en_crazy_request.META['HTTP_COUNTRY_CODE'] = 'US' * 40 # 80 chars
en_crazy_context = Context({'request': en_crazy_request})
en_crazy_content = self.placeholder.render(en_crazy_context, 350, lang='en')
set_placeholder_cache(self.placeholder, 'en', 1, en_crazy_content, en_crazy_request)
# Prove that it is hashed...
crazy_cache_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, en_crazy_request)
key_length = len(crazy_cache_key)
# 221 = 180 (prefix length) + 1 (separator) + 40 (sha1 hash)
self.assertTrue('render_placeholder' not in crazy_cache_key and key_length == 221)
# Prove it still works as expected
cached_en_crazy_content = get_placeholder_cache(self.placeholder, 'en', 1, en_crazy_request)
self.assertEqual(en_crazy_content, cached_en_crazy_content)
|
FinalAngel/django-cms
|
cms/tests/test_cache.py
|
Python
|
bsd-3-clause
| 37,055
| 0.001538
|
#!/usr/bin/python
import cgi
from redis import Connection
from socket import gethostname
from navi import *
fields = cgi.FieldStorage()
title = "Message Box"
msg_prefix = 'custom.message.'
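# Each message is stored under its own Redis key:
#   custom.message.<client-ip>--<timestamp>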
def insert_msg(cust, tm, msg):
conn = Connection(host=gethostname(),port=6379)
conn.send_command('set', msg_prefix+cust+'--'+tm, msg)
conn.disconnect()
def read_msg():
ret = ''
conn = Connection(host=gethostname(),port=6379)
conn.send_command('keys', msg_prefix+'*')
keys = conn.read_response()
vals = []
if len(keys) != 0:
conn.send_command('mget', *keys)
vals = conn.read_response()
ret += "<h2>" + "Message log" + "</h2>"
for k, v in zip(keys, vals):
ret += "<span>" + k.replace(msg_prefix, '').replace('--', ' ') + "</span>"
ret += "<pre readonly=\"true\">" + v + "</pre>"
conn.disconnect()
ret += "<br>"
return ret
def reply():
import time, os
ret = ""
ret += "Content-Type: text/html\n\n"
ret += "<!DOCTYPE html>"
ret += "<html>"
ret += default_head(title)
ret += default_navigator()
ret += "<body>"
ret += "<div class=\"content\">"
ret += "<h2>Welcome, " + os.environ["REMOTE_ADDR"] + "!</h2>"
ret += "<span>" + os.environ["HTTP_USER_AGENT"] + "</span><br><br>"
    if 'msgbox' in fields:
        insert_msg(os.environ["REMOTE_ADDR"], time.asctime(), fields['msgbox'].value)
ret += read_msg()
ret += "</div>"
ret += "</body>"
ret += "</html>"
print ret
reply()
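# Illustrative only, never called by this CGI script: the raw
# Connection.send_command()/read_response() calls above correspond roughly to
# the higher-level redis-py client shown below. The key name is invented for
# the demo; msg_prefix and gethostname come from the code above.
def _example_with_redis_client():
    import redis
    r = redis.StrictRedis(host=gethostname(), port=6379)
    r.set(msg_prefix + 'demo--now', 'hello')
    keys = r.keys(msg_prefix + '*')
    return zip(keys, r.mget(keys)) if keys else []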
|
Zex/Starter
|
cgi-bin/leave_message.py
|
Python
|
mit
| 1,575
| 0.010159
|
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from . import controllers
|
OCA/social
|
website_mass_mailing_name/__init__.py
|
Python
|
agpl-3.0
| 91
| 0
|
import struct
import unittest
from zoonado.protocol import response, primitives
class ResponseTests(unittest.TestCase):
def test_deserialize(self):
class FakeResponse(response.Response):
opcode = 99
parts = (
("first", primitives.Int),
("second", primitives.UString),
)
        # note that the xid and opcode are omitted; they're part of a preamble
        # that a connection would use to determine which Response class to use
        # for deserializing
raw = struct.pack("!ii6s", 3, 6, b"foobar")
result = FakeResponse.deserialize(raw)
self.assertEqual(result.first, 3)
self.assertEqual(result.second, u"foobar")
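# Illustrative sketch of the preamble handling mentioned in the comment above.
# The exact wire layout here (two big-endian ints) is an assumption for the
# demo, not taken from the zoonado connection code: a connection would peel
# off the xid/opcode pair and hand the rest to the matching Response class.
def _example_split_preamble(data):
    xid, opcode = struct.unpack("!ii", data[:8])
    return xid, opcode, data[8:]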
|
wglass/zoonado
|
tests/protocol/test_response.py
|
Python
|
apache-2.0
| 731
| 0
|
import os
MOZ_OBJDIR = 'obj-firefox'
config = {
'default_actions': [
'clobber',
'clone-tools',
'checkout-sources',
#'setup-mock',
'build',
#'upload-files',
#'sendchange',
'check-test',
'valgrind-test',
#'generate-build-stats',
#'update',
],
'stage_platform': 'linux64-valgrind',
'publish_nightly_en_US_routes': False,
'build_type': 'valgrind',
'tooltool_manifest_src': "browser/config/tooltool-manifests/linux64/\
releng.manifest",
'platform_supports_post_upload_to_latest': False,
'enable_signing': False,
'enable_talos_sendchange': False,
'perfherder_extra_options': ['valgrind'],
#### 64 bit build specific #####
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'DISPLAY': ':2',
'HG_SHARE_BASE_DIR': '/builds/hg-shared',
'MOZ_OBJDIR': 'obj-firefox',
'TINDERBOX_OUTPUT': '1',
'TOOLTOOL_CACHE': '/builds/tooltool_cache',
'TOOLTOOL_HOME': '/builds',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'CCACHE_DIR': '/builds/ccache',
'CCACHE_COMPRESS': '1',
'CCACHE_UMASK': '002',
'LC_ALL': 'C',
## 64 bit specific
'PATH': '/tools/buildbot/bin:/usr/local/bin:/usr/lib64/ccache:/bin:\
/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/tools/git/bin:/tools/python27/bin:\
/tools/python27-mercurial/bin:/home/cltbld/bin',
},
'src_mozconfig': 'browser/config/mozconfigs/linux64/valgrind',
#######################
}
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py
|
Python
|
mpl-2.0
| 1,603
| 0.004367
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0006_auto_20160627_1620'),
]
operations = [
migrations.RemoveField(
model_name='trainesscourserecord',
name='approvedby',
),
migrations.RemoveField(
model_name='trainesscourserecord',
name='createdby',
),
migrations.RemoveField(
model_name='trainesscourserecord',
name='createtimestamp',
),
]
|
akademikbilisim/ab-kurs-kayit
|
abkayit/training/migrations/0007_auto_20160628_1243.py
|
Python
|
gpl-3.0
| 617
| 0
|
"""
Proctored Exams Transformer
"""
from django.conf import settings
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.models import ProctoredExamStudentAttemptStatus
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
class ProctoredExamTransformer(FilteringTransformerMixin, BlockStructureTransformer):
"""
Exclude proctored exams unless the user is not a verified student or has
declined taking the exam.
"""
VERSION = 1
BLOCK_HAS_PROCTORED_EXAM = 'has_proctored_exam'
@classmethod
def name(cls):
return "proctored_exam"
@classmethod
def collect(cls, block_structure):
"""
Computes any information for each XBlock that's necessary to execute
this transformer's transform method.
Arguments:
block_structure (BlockStructureCollectedData)
"""
block_structure.request_xblock_fields('is_proctored_enabled')
block_structure.request_xblock_fields('is_practice_exam')
def transform_block_filters(self, usage_info, block_structure):
if not settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False):
return [block_structure.create_universal_filter()]
def is_proctored_exam_for_user(block_key):
"""
Test whether the block is a proctored exam for the user in
question.
"""
if (
block_key.block_type == 'sequential' and (
block_structure.get_xblock_field(block_key, 'is_proctored_enabled') or
block_structure.get_xblock_field(block_key, 'is_practice_exam')
)
):
# This section is an exam. It should be excluded unless the
# user is not a verified student or has declined taking the exam.
user_exam_summary = get_attempt_status_summary(
usage_info.user.id,
unicode(block_key.course_key),
unicode(block_key),
)
return user_exam_summary and user_exam_summary['status'] != ProctoredExamStudentAttemptStatus.declined
return [block_structure.create_removal_filter(is_proctored_exam_for_user)]
|
shabab12/edx-platform
|
lms/djangoapps/course_api/blocks/transformers/proctored_exam.py
|
Python
|
agpl-3.0
| 2,327
| 0.003008
|
from panda3d.core import LPoint3
# EDIT GAMEMODE AT THE BOTTOM (CHESS VARIANTS)
# COLORS (for the squares)
BLACK = (0, 0, 0, 1)
WHITE = (1, 1, 1, 1)
HIGHLIGHT = (0, 1, 1, 1)
HIGHLIGHT_MOVE = (0, 1, 0, 1)
HIGHLIGHT_ATTACK = (1, 0, 0, 1)
# SCALE (for the 3D representation)
SCALE = 0.5
PIECE_SCALE = 0.3
BOARD_HEIGHT = 1.5
# MODELS
MODEL_PAWN = "models/pawn.obj"
MODEL_ROOK = "models/rook.obj"
MODEL_KNIGHT = "models/knight.obj"
MODEL_BISHOP = "models/bishop.obj"
MODEL_QUEEN = "models/queen.obj"
MODEL_KING = "models/king.obj"
MODEL_UNICORN = "models/unicorn.obj"
# MODEL TEXTURES
TEXTURE_WHITE = "models/light_wood.jpg"
TEXTURE_BLACK = "models/dark_wood.jpg"
# HELPER FUNCTIONS
def square_position(x, y, z, board_size):
# Gives the 3d position of a square based on x, y, z
xx, yy, zz = board_size
x = (x - (3.5/8)*xx) * SCALE
y = (y - (3.5/8)*yy) * SCALE
z = z*BOARD_HEIGHT * SCALE
return LPoint3(x, y, z)
def square_color(x, y, z):
# Checks whether a square should be black or white
if (x+y+z) % 2 == 0:
return BLACK
else:
return WHITE
# BOARDS
# 1 = Pawn
# 2 = Rook
# 3 = Knight
# 4 = Bishop
# 5 = Queen
# 6 = King
# 7 = Unicorn
# + = white
# - = black
# First array = lowest level
# Highest part of the array = front (white pieces)
PIECES = {
0: 'empty space',
-1: 'black pawn',
-2: 'black rook',
-3: 'black knight',
-4: 'black bishop',
-5: 'black queen',
-6: 'black king',
-7: 'black unicorn',
1: 'white pawn',
2: 'white rook',
3: 'white knight',
4: 'white bishop',
5: 'white queen',
6: 'white king',
7: 'white unicorn',
}
RAUMSCHACH_PAWN_2STEP = False
RAUMSCHACH_BOARD = [
[
[ 2, 3, 6, 3, 2],
[ 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 4, 7, 5, 7, 4],
[ 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1],
[-4,-7,-5,-7,-4],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1],
[-2,-3,-6,-3,-2],
],
]
SMALL_RAUMSCHACH_PAWN_2STEP = False
SMALL_RAUMSCHACH_BOARD = [
[
[ 2, 4, 6, 4, 2],
[ 3, 1, 1, 1, 3],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-3,-1,-1,-1,-3],
[-2,-4,-6,-4,-2],
],
]
CARD_PAWN_2STEP = True
CARD_BOARD = [
[
[ 2, 5, 6, 2],
[ 1, 1, 1, 1],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
],
[
[ 4, 3, 3, 4],
[ 1, 1, 1, 1],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-1,-1,-1,-1],
[-4,-3,-3,-4],
],
[
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-1,-1,-1,-1],
[-2,-5,-6,-2],
],
]
CLASSIC_PAWN_2STEP = True
CLASSIC_BOARD = [
[
[ 2, 3, 4, 5, 6, 4, 3, 2],
[ 1, 1, 1, 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1,-1,-1,-1],
[-2,-3,-4,-5,-6,-4,-3,-2],
],
]
# NOTE: PAWN_2STEP is whether the pawn can take 2 steps if it's on the second line (bool)
RAUMSCHACH = (RAUMSCHACH_BOARD, RAUMSCHACH_PAWN_2STEP)
SMALL_RAUMSCHACH = (SMALL_RAUMSCHACH_BOARD, SMALL_RAUMSCHACH_PAWN_2STEP)
CARD = (CARD_BOARD, CARD_PAWN_2STEP)
CLASSIC = (CLASSIC_BOARD, CLASSIC_PAWN_2STEP)
TEST_PAWN_2STEP = True
TEST_BOARD = [
[
[ 0, 1, 6, 0],
[ 0, 0, 0, 0],
[ 0, 0,-2,-2],
[ 0, 0, 0, 0],
],
[
[ 0, 1, 6, 0],
[ 0, 0, 0, 0],
[ 0, 0,-2,-2],
[ 0, 0, 0, 0],
],
]
TEST = (TEST_BOARD, TEST_PAWN_2STEP)
# Edit gamemode here
GAMEMODE = SMALL_RAUMSCHACH
# Edit players here
HUMANS = (1, )
AIS = (-1, )
BOARD, PAWN_2STEP = GAMEMODE
BOARD_SIZE = (len(BOARD[0][0]), len(BOARD[0]), len(BOARD))
TEST = True
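# Illustrative helper, not used by the game code: it ties together the board
# encoding documented above (PIECES), the square helpers and BOARD_SIZE. The
# BOARD[z][y][x] axis order is an assumption based on the "first array =
# lowest level" comment, not taken from the engine itself.
def _example_describe_square(x, y, z):
    piece = PIECES[BOARD[z][y][x]]
    position = square_position(x, y, z, BOARD_SIZE)
    colour = square_color(x, y, z)
    return piece, position, colour
# e.g. _example_describe_square(2, 0, 0) on SMALL_RAUMSCHACH reports the white king.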
|
guille0/space-chess
|
config.py
|
Python
|
mit
| 5,079
| 0.024808
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import sqlite3
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pysqlite.googlecode.com/ and http://packages.ubuntu.com/quantal/python-sqlite
User guide: http://docs.python.org/release/2.5/lib/module-sqlite3.html
API: http://docs.python.org/library/sqlite3.html
Debian package: python-sqlite (SQLite 2), python-pysqlite3 (SQLite 3)
License: MIT
Possible connectors: http://wiki.python.org/moin/SQLite
"""
def __init__(self):
GenericConnector.__init__(self)
self.__sqlite = sqlite3
def connect(self):
self.initConnection()
self.checkFileDb()
try:
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
cursor = self.connector.cursor()
cursor.execute("SELECT * FROM sqlite_master")
cursor.close()
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
warnMsg = "unable to connect using SQLite 3 library, trying with SQLite 2"
logger.warn(warnMsg)
try:
try:
import sqlite
except ImportError:
errMsg = "sqlmap requires 'python-sqlite' third-party library "
errMsg += "in order to directly connect to the database '%s'" % self.db
raise SqlmapMissingDependence(errMsg)
self.__sqlite = sqlite
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
raise SqlmapConnectionException(msg[0])
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
return None
def execute(self, query):
try:
self.cursor.execute(utf8encode(query))
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
except self.__sqlite.DatabaseError, msg:
raise SqlmapConnectionException(msg[0])
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
V11/volcano
|
server/sqlmap/plugins/dbms/sqlite/connector.py
|
Python
|
mit
| 3,003
| 0.00333
|
""" SQLAlchemy support. """
from __future__ import absolute_import
import datetime
from types import GeneratorType
import decimal
from sqlalchemy import func
# from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.sql.type_api import TypeDecorator
try:
from sqlalchemy.orm.relationships import RelationshipProperty
except ImportError:
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.types import (
BIGINT, BOOLEAN, BigInteger, Boolean, CHAR, DATE, DATETIME, DECIMAL, Date,
DateTime, FLOAT, Float, INT, INTEGER, Integer, NCHAR, NVARCHAR, NUMERIC,
Numeric, SMALLINT, SmallInteger, String, TEXT, TIME, Text, Time, Unicode,
UnicodeText, VARCHAR, Enum)
from .. import mix_types as t
from ..main import (
SKIP_VALUE, LOGGER, TypeMixer as BaseTypeMixer, GenFactory as BaseFactory,
Mixer as BaseMixer, partial, faker)
class GenFactory(BaseFactory):
""" Map a sqlalchemy classes to simple types. """
types = {
(String, VARCHAR, Unicode, NVARCHAR, NCHAR, CHAR): str,
(Text, UnicodeText, TEXT): t.Text,
(Boolean, BOOLEAN): bool,
(Date, DATE): datetime.date,
(DateTime, DATETIME): datetime.datetime,
(Time, TIME): datetime.time,
(DECIMAL, Numeric, NUMERIC): decimal.Decimal,
(Float, FLOAT): float,
(Integer, INTEGER, INT): int,
(BigInteger, BIGINT): t.BigInteger,
(SmallInteger, SMALLINT): t.SmallInteger,
}
class TypeMixer(BaseTypeMixer):
""" TypeMixer for SQLAlchemy. """
factory = GenFactory
def __init__(self, cls, **params):
""" Init TypeMixer and save the mapper. """
super(TypeMixer, self).__init__(cls, **params)
self.mapper = self.__scheme._sa_class_manager.mapper
def postprocess(self, target, postprocess_values):
""" Fill postprocess values. """
mixed = []
for name, deffered in postprocess_values:
value = deffered.value
if isinstance(value, GeneratorType):
value = next(value)
if isinstance(value, t.Mix):
mixed.append((name, value))
continue
if isinstance(getattr(target, name), InstrumentedList) and not isinstance(value, list):
value = [value]
setattr(target, name, value)
for name, mix in mixed:
setattr(target, name, mix & target)
if self.__mixer:
target = self.__mixer.postprocess(target)
return target
@staticmethod
def get_default(field):
""" Get default value from field.
        :return value: A default value or SKIP_VALUE
"""
column = field.scheme
if isinstance(column, RelationshipProperty):
column = column.local_remote_pairs[0][0]
if not column.default:
return SKIP_VALUE
if column.default.is_callable:
return column.default.arg(None)
return getattr(column.default, 'arg', SKIP_VALUE)
def gen_select(self, field_name, select):
""" Select exists value from database.
:param field_name: Name of field for generation.
:return : None or (name, value) for later use
"""
if not self.__mixer or not self.__mixer.params.get('session'):
return field_name, SKIP_VALUE
relation = self.mapper.get_property(field_name)
session = self.__mixer.params.get('session')
value = session.query(
relation.mapper.class_
).filter(*select.choices).order_by(func.random()).first()
return self.get_value(field_name, value)
@staticmethod
def is_unique(field):
""" Return True is field's value should be a unique.
:return bool:
"""
scheme = field.scheme
if isinstance(scheme, RelationshipProperty):
scheme = scheme.local_remote_pairs[0][0]
return scheme.unique
@staticmethod
def is_required(field):
""" Return True is field's value should be defined.
:return bool:
"""
column = field.scheme
if isinstance(column, RelationshipProperty):
column = column.local_remote_pairs[0][0]
if field.params:
return True
# According to the SQLAlchemy docs, autoincrement "only has an effect for columns which are
# Integer derived (i.e. INT, SMALLINT, BIGINT) [and] Part of the primary key [...]".
return not column.nullable and not (column.autoincrement and column.primary_key and
isinstance(column.type, Integer))
def get_value(self, field_name, field_value):
""" Get `value` as `field_name`.
:return : None or (name, value) for later use
"""
field = self.__fields.get(field_name)
if field and isinstance(field.scheme, RelationshipProperty):
return field_name, t._Deffered(field_value, field.scheme)
return super(TypeMixer, self).get_value(field_name, field_value)
def make_fabric(self, column, field_name=None, fake=False, kwargs=None): # noqa
""" Make values fabric for column.
:param column: SqlAlchemy column
:param field_name: Field name
:param fake: Force fake data
:return function:
"""
kwargs = {} if kwargs is None else kwargs
if isinstance(column, RelationshipProperty):
return partial(type(self)(
column.mapper.class_, mixer=self.__mixer, fake=self.__fake, factory=self.__factory
).blend, **kwargs)
ftype = type(column.type)
# augmented types created with TypeDecorator
# don't directly inherit from the base types
if TypeDecorator in ftype.__bases__:
ftype = ftype.impl
stype = self.__factory.cls_to_simple(ftype)
if stype is str:
fab = super(TypeMixer, self).make_fabric(
stype, field_name=field_name, fake=fake, kwargs=kwargs)
return lambda: fab()[:column.type.length]
if ftype is Enum:
return partial(faker.random_element, column.type.enums)
return super(TypeMixer, self).make_fabric(
stype, field_name=field_name, fake=fake, kwargs=kwargs)
def guard(self, *args, **kwargs):
""" Look objects in database.
:returns: A finded object or False
"""
try:
session = self.__mixer.params.get('session')
assert session
except (AttributeError, AssertionError):
raise ValueError('Cannot make request to DB.')
qs = session.query(self.mapper).filter(*args, **kwargs)
count = qs.count()
if count == 1:
return qs.first()
if count:
return qs.all()
return False
def reload(self, obj):
""" Reload object from database. """
try:
session = self.__mixer.params.get('session')
session.expire(obj)
session.refresh(obj)
return obj
except (AttributeError, AssertionError):
raise ValueError('Cannot make request to DB.')
def __load_fields(self):
""" Prepare SQLALchemyTypeMixer.
Select columns and relations for data generation.
"""
mapper = self.__scheme._sa_class_manager.mapper
relations = set()
if hasattr(mapper, 'relationships'):
for rel in mapper.relationships:
relations |= rel.local_columns
yield rel.key, t.Field(rel, rel.key)
for key, column in mapper.columns.items():
if column not in relations:
yield key, t.Field(column, key)
class Mixer(BaseMixer):
""" Integration with SQLAlchemy. """
type_mixer_cls = TypeMixer
def __init__(self, session=None, commit=True, **params):
"""Initialize the SQLAlchemy Mixer.
:param fake: (True) Generate fake data instead of random data.
:param session: SQLAlchemy session. Using for commits.
:param commit: (True) Commit instance to session after creation.
"""
super(Mixer, self).__init__(**params)
self.params['session'] = session
self.params['commit'] = bool(session) and commit
def postprocess(self, target):
""" Save objects in db.
:return value: A generated value
"""
if self.params.get('commit'):
session = self.params.get('session')
if not session:
LOGGER.warn("'commit' set true but session not initialized.")
else:
session.add(target)
session.commit()
return target
# Default mixer
mixer = Mixer()
# pylama:ignore=E1120,E0611
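# Illustrative, self-contained usage sketch: DemoUser and the in-memory SQLite
# engine below are invented for the demo and are not part of the mixer
# package. It assumes the standard Mixer.blend() entry point inherited from
# the base class imported above.
if __name__ == "__main__":
    from sqlalchemy import Column, create_engine
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()
    class DemoUser(Base):
        __tablename__ = 'demo_user'
        id = Column(Integer, primary_key=True)
        name = Column(String(30), nullable=False)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    demo_mixer = Mixer(session=session, commit=True)
    user = demo_mixer.blend(DemoUser)  # columns filled in by TypeMixer above
    print(user.name)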
|
Nebucatnetzer/tamagotchi
|
pygame/lib/python3.4/site-packages/mixer/backend/sqlalchemy.py
|
Python
|
gpl-2.0
| 8,887
| 0.000675
|
# Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.version_info[0] >= 3:
basestring = str
unicode = str
def encode_string( value ):
return value.encode('utf-8') if isinstance(value, unicode) else value
def decode_string(value):
return value if isinstance(value, basestring) else value.decode('utf-8')
# hmac.compare_digest were introduced in python 2.7.7
if sys.version_info >= ( 2, 7, 7 ):
from hmac import compare_digest as SecureStringsEqual
else:
# This is the compare_digest function from python 3.4, adapted for 2.6:
# http://hg.python.org/cpython/file/460407f35aa9/Lib/hmac.py#l16
#
# Stolen from https://github.com/Valloric/ycmd
def SecureStringsEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
if not ( isinstance( a, str ) and isinstance( b, str ) ):
raise TypeError( "inputs must be str instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= ord( x ) ^ ord( y )
return result == 0
def compare_digest( a, b ):
return SecureStringsEqual( a, b )
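# Illustrative usage sketch, not part of JediHTTP itself: the reason for the
# constant-time comparison above is validating request signatures, e.g. an
# HMAC check. The secret/body/digest names are made up for the demo.
def _example_hmac_is_valid(secret, body, received_hexdigest):
    import hashlib
    import hmac
    expected = hmac.new(secret, body, hashlib.sha256).hexdigest()
    return SecureStringsEqual(expected, received_hexdigest)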
|
NcLang/vimrc
|
sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/jedihttp/compatibility.py
|
Python
|
mit
| 2,108
| 0.019924
|
import unittest
import sys
import numpy as np
from opm.util import EModel
try:
from tests.utils import test_path
except ImportError:
from utils import test_path
class TestEModel(unittest.TestCase):
def test_open_model(self):
refArrList = ["PORV", "CELLVOL", "DEPTH", "DX", "DY", "DZ", "PORO", "PERMX", "PERMY", "PERMZ", "NTG", "TRANX",
"TRANY", "TRANZ", "ACTNUM", "ENDNUM", "EQLNUM", "FIPNUM", "FLUXNUM", "IMBNUM", "PVTNUM",
"SATNUM", "SWL", "SWCR", "SGL", "SGU", "ISWL", "ISWCR", "ISGL", "ISGU", "PPCW", "PRESSURE",
"RS", "RV", "SGAS", "SWAT", "SOMAX", "SGMAX"]
self.assertRaises(RuntimeError, EModel, "/file/that/does_not_exists")
self.assertRaises(ValueError, EModel, test_path("data/9_EDITNNC.EGRID"))
self.assertRaises(ValueError, EModel, test_path("data/9_EDITNNC.UNRST"))
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
arrayList = mod1.get_list_of_arrays()
for n, element in enumerate(arrayList):
self.assertEqual(element[0], refArrList[n])
celvol1 = mod1.get("CELLVOL")
self.assertEqual(len(celvol1), 2794)
def test_add_filter(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
celvol1 = mod1.get("CELLVOL")
depth1 = mod1.get("DEPTH")
self.assertTrue(isinstance(celvol1, np.ndarray))
self.assertEqual(celvol1.dtype, "float32")
refVol1 = 2.79083e8
self.assertTrue( abs((sum(celvol1) - refVol1)/refVol1) < 1.0e-5)
mod1.add_filter("EQLNUM","eq", 1);
mod1.add_filter("DEPTH","lt", 2645.21);
refVol2 = 1.08876e8
refPorvVol2 = 2.29061e7
porv2 = mod1.get("PORV")
celvol2 = mod1.get("CELLVOL")
self.assertTrue( abs((sum(celvol2) - refVol2)/refVol2) < 1.0e-5)
self.assertTrue( abs((sum(porv2) - refPorvVol2)/refPorvVol2) < 1.0e-5)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 2);
mod1.add_filter("DEPTH","in", 2584.20, 2685.21);
refPorvVol3 = 3.34803e7
porv3 = mod1.get("PORV")
self.assertTrue( abs((sum(porv3) - refPorvVol3)/refPorvVol3) < 1.0e-5)
mod1.reset_filter()
mod1.add_filter("I","lt", 10);
mod1.add_filter("J","between", 3, 15);
mod1.add_filter("K","between", 2, 9);
poro = mod1.get("PORO")
self.assertEqual(len(poro), 495)
def test_paramers(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
self.assertFalse("XXX" in mod1)
self.assertTrue("PORV" in mod1)
self.assertTrue("PRESSURE" in mod1)
self.assertTrue("RS" in mod1)
self.assertTrue("RV" in mod1)
self.assertEqual(mod1.active_report_step(), 0)
rsteps = mod1.get_report_steps()
self.assertEqual(rsteps, [0, 4, 7, 10, 15, 20, 27, 32, 36, 39])
mod1.set_report_step(7)
        # parameters RS and RV are missing in report step number 7
self.assertFalse("RS" in mod1)
self.assertFalse("RV" in mod1)
mod1.set_report_step(15)
self.assertTrue("RS" in mod1)
self.assertTrue("RV" in mod1)
arrayList = mod1.get_list_of_arrays()
def test_rsteps_steps(self):
pres_ref_4_1_10 = [272.608, 244.461, 228.503, 214.118, 201.147, 194.563, 178.02, 181.839, 163.465, 148.677]
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
mod1.add_filter("I","eq", 4);
mod1.add_filter("J","eq", 1);
mod1.add_filter("K","eq", 10);
self.assertTrue(mod1.has_report_step(4))
self.assertFalse(mod1.has_report_step(2))
rsteps = mod1.get_report_steps()
for n, step in enumerate(rsteps):
mod1.set_report_step(step)
pres = mod1.get("PRESSURE")
self.assertTrue(abs(pres[0] - pres_ref_4_1_10[n])/pres_ref_4_1_10[n] < 1.0e-5)
def test_grid_props(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
nI,nJ,nK = mod1.grid_dims()
self.assertEqual((nI,nJ,nK), (13, 22, 11))
nAct = mod1.active_cells()
self.assertEqual(nAct, 2794)
def test_hc_filter(self):
nAct_hc_eqln1 = 1090
nAct_hc_eqln2 = 1694
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
porv = mod1.get("PORV")
mod1.set_depth_fwl([2645.21, 2685.21])
mod1.add_hc_filter()
porv = mod1.get("PORV")
self.assertEqual(len(porv), nAct_hc_eqln1 + nAct_hc_eqln2)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 1);
mod1.add_filter("DEPTH","lt", 2645.21);
porv1 = mod1.get("PORV")
self.assertEqual(len(porv1), nAct_hc_eqln1)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 2);
mod1.add_filter("DEPTH","lt", 2685.21);
porv2 = mod1.get("PORV")
self.assertEqual(len(porv2), nAct_hc_eqln2)
ivect = mod1.get("I")
if __name__ == "__main__":
unittest.main()
|
blattms/opm-common
|
python/tests/test_emodel.py
|
Python
|
gpl-3.0
| 5,037
| 0.010125
|
# (c) 2014, Nandor Sivok <dominis@haxor.hu>
# (c) 2016, Redhat Inc
#
# ansible-console is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ansible-console is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
# ansible-console is an interactive REPL shell for ansible
# with built-in tab completion for all the documented modules
#
# Available commands:
# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
# list - list available hosts in the current path
# forks - change fork
# become - become
# ! - forces shell module instead of the ansible module (!yum update -y)
import atexit
import cmd
import getpass
import readline
import os
import sys
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from ansible.utils import module_docs
from ansible.utils.color import stringc
from ansible.utils.unicode import to_unicode, to_str
from ansible.plugins import module_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ConsoleCLI(CLI, cmd.Cmd):
modules = []
def __init__(self, args):
super(ConsoleCLI, self).__init__(args)
self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
self.groups = []
self.hosts = []
self.pattern = None
self.variable_manager = None
self.loader = None
self.passwords = dict()
self.modules = None
cmd.Cmd.__init__(self)
def parse(self):
self.parser = CLI.base_parser(
usage='%prog <host-pattern> [options]',
runas_opts=True,
inventory_opts=True,
connect_opts=True,
check_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
)
# options unique to shell
self.parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
self.parser.set_defaults(cwd='*')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
return True
def get_names(self):
return dir(self)
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
self.do_exit(self)
def set_prompt(self):
login_user = self.options.remote_user or getpass.getuser()
self.selected = self.inventory.list_hosts(self.options.cwd)
prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks)
if self.options.become and self.options.become_user in [None, 'root']:
prompt += "# "
color = C.COLOR_ERROR
else:
prompt += "$ "
color = C.COLOR_HIGHLIGHT
self.prompt = stringc(prompt, color)
def list_modules(self):
modules = set()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
module_paths = module_loader._get_paths()
for path in module_paths:
if path is not None:
modules.update(self._find_modules_in_path(path))
return modules
def _find_modules_in_path(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self._find_modules_in_path(module)
elif module.startswith('__'):
continue
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = module.replace('_', '', 1)
module = os.path.splitext(module)[0] # removes the extension
yield module
def default(self, arg, forceshell=False):
""" actually runs modules """
if arg.startswith("#"):
return False
if not self.options.cwd:
display.error("No host found")
return False
if arg.split()[0] in self.modules:
module = arg.split()[0]
module_args = ' '.join(arg.split()[1:])
else:
module = 'shell'
module_args = arg
if forceshell is True:
module = 'shell'
module_args = arg
self.options.module_name = module
result = None
try:
check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
play_ds = dict(
name = "Ansible Shell",
hosts = self.options.cwd,
gather_facts = 'no',
tasks = [ dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))]
)
play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
except Exception as e:
display.error(u"Unable to build command: %s" % to_unicode(e))
return False
try:
cb = 'minimal' #FIXME: make callbacks configurable
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
options=self.options,
passwords=self.passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
)
result = self._tqm.run(play)
finally:
if self._tqm:
self._tqm.cleanup()
if self.loader:
self.loader.cleanup_all_tmp_files()
if result is None:
display.error("No hosts found")
return False
except KeyboardInterrupt:
display.error('User interrupted execution')
return False
except Exception as e:
display.error(to_unicode(e))
#FIXME: add traceback in very very verbose mode
return False
def emptyline(self):
return
def do_shell(self, arg):
"""
You can run shell commands through the shell module.
eg.:
shell ps uax | grep java | wc -l
shell killall python
shell halt -n
You can use the ! to force the shell module. eg.:
!ps aux | grep java | wc -l
"""
self.default(arg, True)
def do_forks(self, arg):
"""Set the number of forks"""
if not arg:
display.display('Usage: forks <number>')
return
self.options.forks = int(arg)
self.set_prompt()
do_serial = do_forks
def do_verbosity(self, arg):
"""Set verbosity level"""
if not arg:
display.display('Usage: verbosity <number>')
else:
display.verbosity = int(arg)
display.v('verbosity level set to %s' % arg)
def do_cd(self, arg):
"""
        Change active host/group. You can use host patterns as well eg.:
cd webservers
cd webservers:dbservers
cd webservers:!phoenix
cd webservers:&staging
cd webservers:dbservers:&staging:!phoenix
"""
if not arg:
self.options.cwd = '*'
elif arg == '..':
try:
self.options.cwd = self.inventory.groups_for_host(self.options.cwd)[1].name
except Exception:
self.options.cwd = ''
elif arg in '/*':
self.options.cwd = 'all'
elif self.inventory.get_hosts(arg):
self.options.cwd = arg
else:
display.display("no host matched")
self.set_prompt()
def do_list(self, arg):
"""List the hosts in the current group"""
if arg == 'groups':
for group in self.groups:
display.display(group)
else:
for host in self.selected:
display.display(host.name)
def do_become(self, arg):
"""Toggle whether plays run with become"""
if arg:
self.options.become = C.mk_boolean(arg)
display.v("become changed to %s" % self.options.become)
self.set_prompt()
else:
display.display("Please specify become value, e.g. `become yes`")
def do_remote_user(self, arg):
"""Given a username, set the remote user plays are run by"""
if arg:
self.options.remote_user = arg
self.set_prompt()
else:
display.display("Please specify a remote user, e.g. `remote_user root`")
def do_become_user(self, arg):
"""Given a username, set the user that plays are run by when using become"""
if arg:
self.options.become_user = arg
else:
display.display("Please specify a user, e.g. `become_user jenkins`")
display.v("Current user is %s" % self.options.become_user)
self.set_prompt()
def do_become_method(self, arg):
"""Given a become_method, set the privilege escalation method when using become"""
if arg:
self.options.become_method = arg
display.v("become_method changed to %s" % self.options.become_method)
else:
display.display("Please specify a become_method, e.g. `become_method su`")
def do_check(self, arg):
"""Toggle whether plays run with check mode"""
if arg:
self.options.check = C.mk_boolean(arg)
display.v("check mode changed to %s" % self.options.check)
else:
display.display("Please specify check mode value, e.g. `check yes`")
def do_diff(self, arg):
"""Toggle whether plays run with diff"""
if arg:
self.options.diff = C.mk_boolean(arg)
display.v("diff mode changed to %s" % self.options.diff)
else:
display.display("Please specify a diff value , e.g. `diff yes`")
def do_exit(self, args):
"""Exits from the console"""
sys.stdout.write('\n')
return -1
do_EOF = do_exit
def helpdefault(self, module_name):
if module_name in self.modules:
in_path = module_loader.find_plugin(module_name)
if in_path:
oc, a, _ = module_docs.get_docstring(in_path)
if oc:
display.display(oc['short_description'])
display.display('Parameters:')
for opt in oc['options'].keys():
display.display(' ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0])
else:
display.error('No documentation found for %s.' % module_name)
else:
display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
def complete_cd(self, text, line, begidx, endidx):
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
if self.options.cwd in ('all','*','\\'):
completions = self.hosts + self.groups
else:
completions = [x.name for x in self.inventory.list_hosts(self.options.cwd)]
return [to_str(s)[offs:] for s in completions if to_str(s).startswith(to_str(mline))]
def completedefault(self, text, line, begidx, endidx):
if line.split()[0] in self.modules:
mline = line.split(' ')[-1]
offs = len(mline) - len(text)
completions = self.module_args(line.split()[0])
return [s[offs:] + '=' for s in completions if s.startswith(mline)]
def module_args(self, module_name):
in_path = module_loader.find_plugin(module_name)
oc, a, _ = module_docs.get_docstring(in_path)
return oc['options'].keys()
def run(self):
super(ConsoleCLI, self).run()
sshpass = None
becomepass = None
vault_pass = None
# hosts
if len(self.args) != 1:
self.pattern = 'all'
else:
self.pattern = self.args[0]
self.options.cwd = self.pattern
# dynamically add modules as commands
self.modules = self.list_modules()
for module in self.modules:
setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
self.passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
self.loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
self.loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords()[0]
self.loader.set_vault_password(vault_pass)
self.variable_manager = VariableManager()
self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory)
self.variable_manager.set_inventory(self.inventory)
no_hosts = False
if len(self.inventory.list_hosts()) == 0:
# Empty inventory
no_hosts = True
display.warning("provided hosts list is empty, only localhost is available")
self.inventory.subset(self.options.subset)
hosts = self.inventory.list_hosts(self.pattern)
if len(hosts) == 0 and not no_hosts:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
self.groups = self.inventory.list_groups()
self.hosts = [x.name for x in hosts]
# This hack is to work around readline issues on a mac:
# http://stackoverflow.com/a/7116997/541202
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
self.set_prompt()
self.cmdloop()
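# Illustrative note, not used by ansible itself: the `module=module` default
# argument in the setattr() loop inside run() is what freezes each module name
# at definition time. Without it, every generated do_* command would close
# over the same loop variable and dispatch to whichever module came last:
def _example_late_vs_early_binding():
    late = [lambda: name for name in ('ping', 'copy', 'setup')]
    early = [lambda name=name: name for name in ('ping', 'copy', 'setup')]
    # late  -> ['setup', 'setup', 'setup']; early -> ['ping', 'copy', 'setup']
    return [f() for f in late], [f() for f in early]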
|
filipenf/ansible
|
lib/ansible/cli/console.py
|
Python
|
gpl-3.0
| 16,263
| 0.003136
|
from abc import ABCMeta, abstractmethod
class ProgressMessage(object):
def __init__(self, path, bytes_per_second, bytes_read, bytes_expected):
self._path = path
self._bytes_per_second = bytes_per_second
self._bytes_read = bytes_read
self._bytes_expected = bytes_expected
@property
def path(self):
return self._path
@property
def bytes_per_second(self):
return self._bytes_per_second
@property
def bytes_read(self):
return self._bytes_read
@property
def bytes_expected(self):
return self._bytes_expected
class BucketFile(object):
"""
    This class defines the contract for a file that is used across
    all buckets.
"""
def __init__(self, path, name, folder, contentType=None):
self._path = path
self._name = name
self._folder = folder
self._contentType = contentType
self._hash = None
self._dateModified = None
def get_hash(self):
return self._hash
def set_hash(self, value):
self._hash = value
def get_dateModified(self):
return self._dateModified
def set_dateModified(self, value):
self._dateModified = value
def get_content_type(self):
return self._contentType
def set_content_type(self, value):
self._contentType = value
@property
def path(self):
return self._path
@property
def name(self):
return self._name
@property
def isFolder(self):
return self._folder
contentType = property(get_content_type, set_content_type)
hash = property(get_hash, set_hash)
dateModified = property(get_dateModified, set_dateModified)
class AbstractProvider:
"""
This class defines a contract for all our different storage sources
    e.g. Amazon S3, local files, OpenStack Swift, etc.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_object(self, path):
return NotImplemented
@abstractmethod
def list_dir(self, path):
return NotImplemented
@abstractmethod
def authenticate(self):
"""
        Return True if it works, False if it fails
"""
return False
@abstractmethod
def download_object(self, sourcePath, targetPath):
"""
Download source to target
"""
return NotImplemented
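# Illustrative sketch, not part of digital-panda: a minimal local-filesystem
# provider showing how the contract above is meant to be filled in. Every
# behavioural detail below is an assumption made for demonstration only.
import os
import shutil
class _ExampleLocalProvider(AbstractProvider):
    def __init__(self, root):
        self._root = root
    def authenticate(self):
        # "Authentication" against local storage is just checking the root exists.
        return os.path.isdir(self._root)
    def list_dir(self, path):
        full = os.path.join(self._root, path)
        return [BucketFile(os.path.join(path, name), name,
                           os.path.isdir(os.path.join(full, name)))
                for name in os.listdir(full)]
    def delete_object(self, path):
        os.remove(os.path.join(self._root, path))
    def download_object(self, sourcePath, targetPath):
        shutil.copyfile(os.path.join(self._root, sourcePath), targetPath)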
|
Sybrand/digital-panda
|
digitalpanda/bucket/abstract.py
|
Python
|
mit
| 2,411
| 0
|
from direct.directnotify import DirectNotifyGlobal
from BaseActivityFSM import BaseActivityFSM
from activityFSMMixins import IdleMixin
from activityFSMMixins import RulesMixin
from activityFSMMixins import ActiveMixin
from activityFSMMixins import DisabledMixin
from activityFSMMixins import ConclusionMixin
from activityFSMMixins import WaitForEnoughMixin
from activityFSMMixins import WaitToStartMixin
from activityFSMMixins import WaitClientsReadyMixin
from activityFSMMixins import WaitForServerMixin
class FireworksActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('FireworksActivityFSM')
def __init__(self, activity):
FireworksActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Active', 'Disabled'],
'Active': ['Disabled'],
'Disabled': []}
class CatchActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('CatchActivityFSM')
def __init__(self, activity):
CatchActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Active', 'Conclusion'],
'Active': ['Conclusion'],
'Conclusion': ['Idle']}
class TrampolineActivityFSM(BaseActivityFSM, IdleMixin, RulesMixin, ActiveMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TrampolineActivityFSM')
def __init__(self, activity):
TrampolineActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Rules', 'Active'],
'Rules': ['Active', 'Idle'],
'Active': ['Idle']}
class DanceActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('DanceActivityFSM')
def __init__(self, activity):
DanceActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Active': ['Disabled'],
'Disabled': ['Active']}
class TeamActivityAIFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, WaitClientsReadyMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TeamActivityAIFSM')
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
self.notify.debug('__init__')
self.defaultTransitions = {'WaitForEnough': ['WaitToStart'],
'WaitToStart': ['WaitForEnough', 'WaitClientsReady'],
'WaitClientsReady': ['WaitForEnough', 'Active'],
'Active': ['WaitForEnough', 'Conclusion'],
'Conclusion': ['WaitForEnough']}
class TeamActivityFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, RulesMixin, WaitForServerMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TeamActivityFSM')
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'WaitForEnough': ['WaitToStart'],
'WaitToStart': ['WaitForEnough', 'Rules'],
'Rules': ['WaitForServer', 'Active', 'WaitForEnough'],
'WaitForServer': ['Active', 'WaitForEnough'],
'Active': ['Conclusion', 'WaitForEnough'],
'Conclusion': ['WaitForEnough']}
|
ksmit799/Toontown-Source
|
toontown/parties/activityFSMs.py
|
Python
|
mit
| 3,442
| 0.004067
|
# -*- coding: utf-8 -*-
"""
tomorrow night blue
---------------------
Port of the Tomorrow Night Blue colour scheme https://github.com/chriskempson/tomorrow-theme
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#002451"
CURRENT_LINE = "#00346e"
SELECTION = "#003f8e"
FOREGROUND = "#ffffff"
COMMENT = "#7285b7"
RED = "#ff9da4"
ORANGE = "#ffc58f"
YELLOW = "#ffeead"
GREEN = "#d1f1a9"
AQUA = "#99ffff"
BLUE = "#bbdaff"
PURPLE = "#ebbbff"
class TomorrownightblueStyle(Style):
"""
Port of the Tomorrow Night Blue colour scheme https://github.com/chriskempson/tomorrow-theme
"""
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
Other: "", # class 'x'
Comment: COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: FOREGROUND, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: RED, # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
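# Illustrative usage, not part of the theme itself: any pygments formatter can
# be pointed at this style class directly, e.g. to render inline-styled HTML.
def _example_highlight(source_code):
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    formatter = HtmlFormatter(style=TomorrownightblueStyle, noclasses=True)
    return highlight(source_code, PythonLexer(), formatter)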
|
thergames/thergames.github.io
|
lib/tomorrow-pygments/styles/tomorrownightblue.py
|
Python
|
mit
| 5,509
| 0.000363
|
import unittest
from PyFoam.Applications.ConvertToCSV import ConvertToCSV
theSuite=unittest.TestSuite()
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
unittests/Applications/test_ConvertToCSV.py
|
Python
|
gpl-2.0
| 106
| 0.009434
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration mean tests"""
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, axis, keepdims, input_zp, input_sc, output_zp, output_sc, dtype):
a = relay.var("a", shape=shape, dtype=dtype)
casted = relay.op.cast(a, "int32")
mean = relay.mean(casted, axis, keepdims)
model = relay.qnn.op.requantize(
mean,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
def test_mean():
trials = [(1, 7, 7, 2048), (1, 8, 8)]
np.random.seed(0)
for shape in trials:
inputs = {
"a": tvm.nd.array(np.random.randint(0, high=255, size=shape, dtype="uint8")),
}
outputs = []
for npu in [False, True]:
model = _get_model(shape, [1, 2], True, 128, 0.0784314, 128, 0.0784314, "uint8")
mod = tei.make_module(model, [])
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, "uint8", 1)
|
dmlc/tvm
|
tests/python/contrib/test_ethosn/test_mean.py
|
Python
|
apache-2.0
| 2,066
| 0.001452
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: Tables that exist here should not be used anywhere; they only exist
# here for migration support with alembic. If any of these tables end up
# being used they should be moved outside of warehouse.legacy. The goal
# is that once the legacy PyPI code base is gone, that these tables
# can just be deleted and a migration made to drop them from the
# database.
from citext import CIText
from sqlalchemy import (
CheckConstraint, Column, ForeignKey, ForeignKeyConstraint, Index, Table,
UniqueConstraint,
Boolean, Date, DateTime, Integer, LargeBinary, String, Text,
)
from warehouse import db
accounts_gpgkey = Table(
"accounts_gpgkey",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column(
"user_id",
Integer(),
ForeignKey(
"accounts_user.id",
deferrable=True,
initially="DEFERRED",
),
nullable=False,
),
Column("key_id", CIText(), nullable=False),
Column("verified", Boolean(), nullable=False),
UniqueConstraint("key_id", name="accounts_gpgkey_key_id_key"),
CheckConstraint(
"key_id ~* '^[A-F0-9]{8}$'::citext",
name="accounts_gpgkey_valid_key_id",
),
)
Index("accounts_gpgkey_user_id", accounts_gpgkey.c.user_id)
browse_tally = Table(
"browse_tally",
db.metadata,
Column("trove_id", Integer(), primary_key=True, nullable=False),
Column("tally", Integer()),
)
cheesecake_main_indices = Table(
"cheesecake_main_indices",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("absolute", Integer(), nullable=False),
Column("relative", Integer(), nullable=False),
)
cheesecake_subindices = Table(
"cheesecake_subindices",
db.metadata,
Column(
"main_index_id",
Integer(),
ForeignKey("cheesecake_main_indices.id"),
primary_key=True,
nullable=False,
),
Column("name", Text(), primary_key=True, nullable=False),
Column("value", Integer(), nullable=False),
Column("details", Text(), nullable=False),
)
comments = Table(
"comments",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column(
"rating",
Integer(),
ForeignKey("ratings.id", ondelete="CASCADE"),
),
Column(
"user_name",
CIText(),
ForeignKey("accounts_user.username", ondelete="CASCADE"),
),
Column("date", DateTime(timezone=False)),
Column("message", Text()),
Column(
"in_reply_to",
Integer(),
ForeignKey("comments.id", ondelete="CASCADE"),
),
)
comments_journal = Table(
"comments_journal",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("id", Integer()),
Column(
"submitted_by",
CIText(),
ForeignKey("accounts_user.username", ondelete="CASCADE"),
),
Column("date", DateTime(timezone=False)),
Column("action", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
ondelete="CASCADE",
),
)
cookies = Table(
"cookies",
db.metadata,
Column("cookie", Text(), primary_key=True, nullable=False),
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
Column("last_seen", DateTime(timezone=False)),
)
Index("cookies_last_seen", cookies.c.last_seen)
csrf_tokens = Table(
"csrf_tokens",
db.metadata,
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
primary_key=True,
nullable=False,
),
Column("token", Text()),
Column("end_date", DateTime(timezone=False)),
)
description_urls = Table(
"description_urls",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("name", Text()),
Column("version", Text()),
Column("url", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
Index("description_urls_name_idx", description_urls.c.name)
Index(
"description_urls_name_version_idx",
description_urls.c.name,
description_urls.c.version,
)
dual = Table(
"dual",
db.metadata,
Column("dummy", Integer()),
)
mirrors = Table(
"mirrors",
db.metadata,
Column("ip", Text(), primary_key=True, nullable=False),
Column("user_name", CIText(), ForeignKey("accounts_user.username")),
Column("index_url", Text()),
Column("last_modified_url", Text()),
Column("local_stats_url", Text()),
Column("stats_url", Text()),
Column("mirrors_url", Text()),
)
oauth_access_tokens = Table(
"oauth_access_tokens",
db.metadata,
Column("token", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("consumer", String(32), nullable=False),
Column("date_created", Date(), nullable=False),
Column("last_modified", Date(), nullable=False),
Column(
"user_name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
)
oauth_consumers = Table(
"oauth_consumers",
db.metadata,
Column("consumer", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("date_created", Date(), nullable=False),
Column(
"created_by",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
),
),
Column("last_modified", Date(), nullable=False),
Column("description", String(255), nullable=False),
)
oauth_nonce = Table(
"oauth_nonce",
db.metadata,
Column("timestamp", Integer(), nullable=False),
Column("consumer", String(32), nullable=False),
Column("nonce", String(32), nullable=False),
Column("token", String(32)),
)
oauth_request_tokens = Table(
"oauth_request_tokens",
db.metadata,
Column("token", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("consumer", String(32), nullable=False),
Column("callback", Text()),
Column("date_created", Date(), nullable=False),
Column(
"user_name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
)
oid_associations = Table(
"oid_associations",
db.metadata,
Column("server_url", String(2047), primary_key=True, nullable=False),
Column("handle", String(255), primary_key=True, nullable=False),
Column("secret", LargeBinary(128), nullable=False),
Column("issued", Integer(), nullable=False),
Column("lifetime", Integer(), nullable=False),
Column("assoc_type", String(64), nullable=False),
CheckConstraint(
"length(secret) <= 128",
name="secret_length_constraint",
),
)
oid_nonces = Table(
"oid_nonces",
db.metadata,
Column("server_url", String(2047), primary_key=True, nullable=False),
Column("timestamp", Integer(), primary_key=True, nullable=False),
Column("salt", String(40), primary_key=True, nullable=False),
)
openid_discovered = Table(
"openid_discovered",
db.metadata,
Column("url", Text(), primary_key=True, nullable=False),
Column("created", DateTime(timezone=False)),
Column("services", LargeBinary()),
Column("op_endpoint", Text()),
Column("op_local", Text()),
)
openid_nonces = Table(
"openid_nonces",
db.metadata,
Column("created", DateTime(timezone=False)),
Column("nonce", Text()),
)
Index("openid_nonces_created", openid_nonces.c.created)
Index("openid_nonces_nonce", openid_nonces.c.nonce)
openid_sessions = Table(
"openid_sessions",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("url", Text()),
Column("assoc_handle", Text()),
Column("expires", DateTime(timezone=False)),
Column("mac_key", Text()),
)
openid_whitelist = Table(
"openid_whitelist",
db.metadata,
Column("name", Text(), primary_key=True, nullable=False),
Column("trust_root", Text(), primary_key=True, nullable=False),
Column("created", DateTime(timezone=False)),
)
openids = Table(
"openids",
db.metadata,
Column("id", Text(), primary_key=True, nullable=False),
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
)
ratings = Table(
"ratings",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("name", Text(), nullable=False),
Column("version", Text(), nullable=False),
Column(
"user_name",
CIText(),
ForeignKey(
"accounts_user.username",
ondelete="CASCADE",
),
nullable=False
),
Column("date", DateTime(timezone=False)),
Column("rating", Integer()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
ondelete="CASCADE",
),
UniqueConstraint("name", "version", "user_name", name="ratings_name_key"),
)
Index("rating_name_version", ratings.c.name, ratings.c.version)
rego_otk = Table(
"rego_otk",
db.metadata,
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
ondelete="CASCADE",
),
),
Column("otk", Text()),
Column("date", DateTime(timezone=False)),
UniqueConstraint("otk", name="rego_otk_unique"),
)
Index("rego_otk_name_idx", rego_otk.c.name)
Index("rego_otk_otk_idx", rego_otk.c.otk)
release_requires_python = Table(
"release_requires_python",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("specifier", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
Index("rel_req_python_name_idx", release_requires_python.c.name)
Index(
"rel_req_python_name_version_idx",
release_requires_python.c.name,
release_requires_python.c.version,
)
Index("rel_req_python_version_id_idx", release_requires_python.c.version)
release_urls = Table(
"release_urls",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("url", Text()),
Column("packagetype", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
Index("release_urls_name_idx", release_urls.c.name)
Index("release_urls_packagetype_idx", release_urls.c.packagetype)
Index("release_urls_version_idx", release_urls.c.version)
sshkeys = Table(
"sshkeys",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
Column("key", Text()),
)
Index("sshkeys_name", sshkeys.c.name)
timestamps = Table(
"timestamps",
db.metadata,
Column("name", Text(), primary_key=True, nullable=False),
Column("value", DateTime(timezone=False)),
)
| HonzaKral/warehouse | warehouse/legacy/tables.py | Python | apache-2.0 | 12,359 | 0 |
#!/usr/bin/env python
import os
import os.path
path = "source"
import doctest
for f in os.listdir(path):
if f.endswith(".txt"):
print f
doctest.testfile(os.path.join(path, f), module_relative=False)
| tectronics/mpmath | doc/run_doctest.py | Python | bsd-3-clause | 222 | 0.004505 |
"""
Virtualization installation functions.
Copyright 2007-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
module for creating fullvirt guests via KVM/kqemu/qemu
requires python-virtinst-0.200.
"""
import os, sys, time, stat
import tempfile
import random
from optparse import OptionParser
import exceptions
import errno
import re
import tempfile
import shutil
import virtinst
import app as koan
import sub_process as subprocess
import utils
def random_mac():
"""
from xend/server/netif.py
Generate a random MAC address.
Uses OUI 00-16-3E, allocated to
Xensource, Inc. Last 3 fields are random.
return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
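# Illustrative sketch, not part of the original koan module: random_mac()
# returns a colon-separated string such as "00:16:3e:4f:a2:7c" -- the fixed
# Xensource OUI (00:16:3e) followed by three random octets.  The helper below
# is hypothetical and only for demonstration; it has no side effects at import
# time and simply sanity-checks the format when called.
def _demo_random_mac():
    mac = random_mac()
    assert mac.startswith("00:16:3e:") and len(mac.split(":")) == 6
    return mac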
def start_install(name=None,
ram=None,
disks=None,
mac=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None,
arch=None,
no_gfx=False,
fullvirt=True,
bridge=None,
virt_type=None,
virt_auto_boot=False):
vtype = "qemu"
if virtinst.util.is_kvm_capable():
vtype = "kvm"
arch = None # let virtinst.FullVirtGuest() default to the host arch
elif virtinst.util.is_kqemu_capable():
vtype = "kqemu"
print "- using qemu hypervisor, type=%s" % vtype
if arch is not None and arch.lower() in ["x86","i386"]:
arch = "i686"
guest = virtinst.FullVirtGuest(hypervisorURI="qemu:///system",type=vtype, arch=arch)
if not profile_data.has_key("file"):
# images don't need to source this
if not profile_data.has_key("install_tree"):
raise koan.InfoException("Cannot find install source in kickstart file, aborting.")
if not profile_data["install_tree"].endswith("/"):
profile_data["install_tree"] = profile_data["install_tree"] + "/"
# virt manager doesn't like nfs:// and just wants nfs:
# (which cobbler should fix anyway)
profile_data["install_tree"] = profile_data["install_tree"].replace("nfs://","nfs:")
if profile_data.has_key("file"):
# this is an image based installation
input_path = profile_data["file"]
print "- using image location %s" % input_path
if input_path.find(":") == -1:
# this is not an NFS path
guest.cdrom = input_path
else:
(tempdir, filename) = utils.nfsmount(input_path)
guest.cdrom = os.path.join(tempdir, filename)
kickstart = profile_data.get("kickstart","")
if kickstart != "":
# we have a (windows?) answer file we have to provide
# to the ISO.
print "I want to make a floppy for %s" % kickstart
floppy_path = utils.make_floppy(kickstart)
guest.disks.append(virtinst.VirtualDisk(device=virtinst.VirtualDisk.DEVICE_FLOPPY, path=floppy_path))
else:
guest.location = profile_data["install_tree"]
        extra = extra.replace("&","&amp;")
guest.extraargs = extra
if profile_data.has_key("breed"):
breed = profile_data["breed"]
if breed != "other" and breed != "":
if breed in [ "debian", "suse", "redhat" ]:
guest.set_os_type("linux")
elif breed in [ "windows" ]:
guest.set_os_type("windows")
else:
guest.set_os_type("unix")
if profile_data.has_key("os_version"):
# FIXME: when os_version is not defined and it's linux, do we use generic24/generic26 ?
version = profile_data["os_version"]
if version != "other" and version != "":
try:
guest.set_os_variant(version)
except:
print "- virtinst library does not understand variant %s, treating as generic" % version
pass
guest.set_name(name)
guest.set_memory(ram)
guest.set_vcpus(vcpus)
# for KVM, we actually can't disable this, since it's the only
# console it has other than SDL
guest.set_graphics("vnc")
if uuid is not None:
guest.set_uuid(uuid)
for d in disks:
print "- adding disk: %s of size %s" % (d[0], d[1])
if d[1] != 0 or d[0].startswith("/dev"):
guest.disks.append(virtinst.VirtualDisk(d[0], size=d[1]))
else:
raise koan.InfoException("this virtualization type does not work without a disk image, set virt-size in Cobbler to non-zero")
if profile_data.has_key("interfaces"):
counter = 0
interfaces = profile_data["interfaces"].keys()
interfaces.sort()
vlanpattern = re.compile("[a-zA-Z0-9]+\.[0-9]+")
for iname in interfaces:
intf = profile_data["interfaces"][iname]
if intf["bonding"] == "master" or vlanpattern.match(iname) or iname.find(":") != -1:
continue
mac = intf["mac_address"]
if mac == "":
mac = random_mac()
if bridge is None:
profile_bridge = profile_data["virt_bridge"]
intf_bridge = intf["virt_bridge"]
if intf_bridge == "":
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
intf_bridge = profile_bridge
else:
if bridge.find(",") == -1:
intf_bridge = bridge
else:
bridges = bridge.split(",")
intf_bridge = bridges[counter]
nic_obj = virtinst.VirtualNetworkInterface(macaddr=mac, bridge=intf_bridge)
guest.nics.append(nic_obj)
counter = counter + 1
else:
if bridge is not None:
profile_bridge = bridge
else:
profile_bridge = profile_data["virt_bridge"]
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
nic_obj = virtinst.VirtualNetworkInterface(macaddr=random_mac(), bridge=profile_bridge)
guest.nics.append(nic_obj)
guest.start_install()
return "use virt-manager and connect to qemu to manage guest: %s" % name
| ssalevan/cobbler | koan/qcreate.py | Python | gpl-2.0 | 7,315 | 0.008612 |
'''
Created on Nov 21, 2013
@author: ezulkosk
'''
from FeatureSplitConfig import ers_optional_names, bdb_optional_names, \
webportal_optional_names, eshop_optional_names, ers_config_split_names, \
webportal_config_split_names, eshop_config_split_names, bdb_config_split_names
from consts import METRICS_MAXIMIZE, METRICS_MINIMIZE
from npGIAforZ3 import GuidedImprovementAlgorithm, \
GuidedImprovementAlgorithmOptions
from src.FeatureSplitConfig import ers_better_config_names, \
eshop_better_config_names, webportal_better_config_names
from z3 import *
import argparse
import csv
import importlib
import itertools
import math
import multiprocessing
import operator
import os
import sys
import time
#from Z3ModelEmergencyResponseUpdateAllMin import *
#from Z3ModelWebPortal import *
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, totalTime,CurrentNotDomConstraints_queuelist, index, outputFileParentName, num_consumers, s, extraConstraint):
multiprocessing.Process.__init__(self)
s.add(extraConstraint)
self.task_queue = task_queue
self.result_queue = result_queue
self.CurrentNotDomConstraints_queuelist = CurrentNotDomConstraints_queuelist
self.totalTime = totalTime
self.index = index
self.outputFileParentName = outputFileParentName
self.num_consumers = num_consumers
# each group has an individual model and has two member consumers running on the model
self.groupid = self.index / 2
self.memberid= self.index % 2
# split the objective space in terms of num_groups = num_consumers / 2
        # maximum 30 cores -> minimum 3 degrees, so each group covers the half-open range [groupid*degree, (groupid+1)*degree)
num_groups = self.num_consumers / 2
degree = 90.0 / num_groups
# radian = degree * math.pi / 180.0
self.GIAOptions = GuidedImprovementAlgorithmOptions(verbosity=0, \
incrementallyWriteLog=False, \
writeTotalTimeFilename="timefile.csv", \
writeRandomSeedsFilename="randomseed.csv", useCallLogs=False)
self.GIAAlgorithm = GuidedImprovementAlgorithm(s, metrics_variables, \
metrics_objective_direction, FeatureVariable, options=self.GIAOptions)
self.count_sat_calls = 0
self.count_unsat_calls = 0
self.count_paretoPoints = 0
self.startTime = time.time()
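    # Illustrative note, not in the original script: with num_consumers = 8 the
    # constructor above yields num_groups = 4 and degree = 22.5, i.e. group i
    # (i = 0..3) is intended to cover the half-open angular slice
    # [i*22.5, (i+1)*22.5) degrees of the projected objective space, as the
    # commented-out split rules at the bottom of this file suggest; each group
    # then runs two member consumers (memberid 0 and 1) on its own model copy.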
def run(self):
while True:
if self.task_queue[self.groupid].empty() == True:
break
else:
next_task = self.task_queue[self.groupid].get(False)
if next_task is None:
self.task_queue[self.groupid].task_done()
self.totalTime.put(str(time.time()-self.startTime))
outputFileChild = open(str(str(self.outputFileParentName)+'C'+str(self.index)+'.csv'), 'a')
try:
outputFileChild.writelines(str(self.index)+','+
str(self.count_paretoPoints) + ',' +
str(self.count_sat_calls) + ',' +
str(self.count_unsat_calls) + ',' +
str(time.time()-self.startTime) +',' +
'\n')
finally:
outputFileChild.close()
break
# execute a task, i.e., find a Pareto point
# 1) update CurrentNotDomConstraints
while self.CurrentNotDomConstraints_queuelist[self.index].empty() != True:
strconstraintlist = self.CurrentNotDomConstraints_queuelist[self.index].get()
ConvertedZ3ConstraintList = list()
for constraint in strconstraintlist:
constraintSplitList = []
if constraint.find('>') != -1:
constraintSplitList = constraint.split('>')
#print constraintSplitList
if constraintSplitList[1].find('/') != -1:
ConvertedZ3ConstraintList.append( Real(constraintSplitList[0].strip()) > RealVal(constraintSplitList[1].strip()))
else:
ConvertedZ3ConstraintList.append( Int(constraintSplitList[0].strip()) > IntVal(constraintSplitList[1].strip()))
#print ConvertedZ3ConstraintList
else:
constraintSplitList = constraint.split('<')
#print constraintSplitList
if constraintSplitList[1].find('/') != -1:
ConvertedZ3ConstraintList.append( Real(constraintSplitList[0].strip()) < RealVal(constraintSplitList[1].strip()))
else:
ConvertedZ3ConstraintList.append( Int(constraintSplitList[0].strip()) < IntVal(constraintSplitList[1].strip()))
#print ConvertedZ3ConstraintList
#print Or(ConvertedZ3ConstraintList)
tmpNotDominatedByNextParetoPoint = Or(ConvertedZ3ConstraintList)
#print tmpNotDominatedByNextParetoPoint
self.GIAAlgorithm.s.add(tmpNotDominatedByNextParetoPoint)
# 2) if find all Pareto points, add a poison pill; otherwise find a Pareto point
start_time = time.time()
if self.GIAAlgorithm.s.check() != sat:
self.count_unsat_calls += 1
self.task_queue[self.groupid].put(None)
else:
self.count_sat_calls += 1
self.task_queue[self.groupid].put("Task")
prev_solution = self.GIAAlgorithm.s.model()
self.GIAAlgorithm.s.push()
NextParetoPoint, local_count_sat_calls, local_count_unsat_calls = self.GIAAlgorithm.ranToParetoFront(prev_solution)
end_time = time.time()
self.count_sat_calls += local_count_sat_calls
self.count_unsat_calls += local_count_unsat_calls
self.count_paretoPoints += 1
# RecordPoint
strNextParetoPoint = list((d.name(), str(NextParetoPoint[d])) for d in NextParetoPoint.decls())
if RECORDPOINT:
strNextParetoPoint = list((d.name(), str(NextParetoPoint[d])) for d in NextParetoPoint.decls())
outputFileChild = open(str(str(self.outputFileParentName)+'C'+str(self.index)+'.csv'), 'a')
try:
outputFileChild.writelines(str(self.index)+','+
str(self.count_paretoPoints) + ',' +
str(self.count_sat_calls) + ',' +
str(end_time-start_time) +',' +
str(strNextParetoPoint) +',' +
'\n')
finally:
outputFileChild.close()
self.GIAAlgorithm.s.pop()
tmpNotDominatedByNextParetoPoint = self.GIAAlgorithm.ConstraintNotDominatedByX(NextParetoPoint)
self.GIAAlgorithm.s.add(tmpNotDominatedByNextParetoPoint)
# picklize and store Pareto point and constraints
self.result_queue.put(strNextParetoPoint)
constraintlist = self.GIAAlgorithm.EtractConstraintListNotDominatedByX(NextParetoPoint)
strconstraintlist = list(str(item) for item in constraintlist)
# broadcast the constraints to the other queue in the same group
brother_index = self.groupid * 2 + (1-self.memberid)
self.CurrentNotDomConstraints_queuelist[brother_index].put(strconstraintlist)
self.task_queue[self.groupid].task_done()
return 0
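    # Clarifying note, not in the original script: run() exchanges the
    # "not dominated by X" constraints between sibling consumers as plain
    # strings and re-parses them into z3 Int/Real comparisons above,
    # presumably because z3 AST objects are tied to a per-process context and
    # cannot be pickled across the multiprocessing queues; the None task acts
    # as the usual poison pill that tells both members of a group to stop.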
def generateConsumerConstraints(features):
list_of_list_of_perms = [itertools.combinations(features, i) for i in range(len(features)+1)]
conds = []
for list_of_perms in list_of_list_of_perms:
for perm in list_of_perms:
str_perm = [str(i) for i in perm]
cond = []
for feature in features:
if str(feature) in str_perm:
cond.append(feature)
else:
cond.append(Not(feature))
conds.append(And(*cond))
return conds
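# Illustrative note, not in the original script: generateConsumerConstraints
# enumerates every truth assignment over the given features, so for two z3
# Booleans f0, f1 it returns the four conjunctions
#   And(Not(f0), Not(f1)), And(f0, Not(f1)), And(Not(f0), f1), And(f0, f1)
# (2**len(features) constraints in total); in the __main__ block below each of
# these is handed to one group, i.e. one pair of consumers.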
def getWeightRanges(weights):
Max = {}
Min = {}
for i in weights:
(objective, weight, feature) = i
if Max.get(str(objective)):
currMin = Min.get(str(objective))
currMax = Max.get(str(objective))
Min[str(objective)] = currMin if weight > currMin else weight
Max[str(objective)] = currMax if weight < currMax else weight
else:
Min[str(objective)] = weight
Max[str(objective)] = weight
#print Max
#print Min
return (Max, Min)
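# Illustrative note, not in the original script: getWeightRanges returns a pair
# of dicts keyed by objective name, e.g. for
#   weights = [("cost", 10.0, "A"), ("cost", 4.0, "B")]
# it yields (Max, Min) == ({"cost": 10.0}, {"cost": 4.0}).  Note that the
# `Max.get(...)` truthiness test above treats a stored value of 0.0 as
# "not seen yet", so an objective whose running maximum is exactly 0.0 is
# re-initialised on the next row.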
def replicateSolver(solver, num_consumers):
solvers = []
for i in range(num_consumers):
newSolver =Solver()
for j in solver.assertions():
newSolver.add(j)
solvers.append(newSolver)
return solvers
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
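# Illustrative note, not in the original script: num & (num - 1) clears the
# lowest set bit, so the expression is zero exactly for powers of two, e.g.
#   is_power2(8) -> True   (0b1000 & 0b0111 == 0)
#   is_power2(6) -> False  (0b0110 & 0b0101 == 0b0100)
#   is_power2(0) -> False  (guarded by the `num != 0` check)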
'''
def extractWeights(c, weights = [], curr_objective=None):
k = c.decl().kind()
if k == Z3_OP_MUL:
#print(c.children()[0])
try:
print str(c.children()[0])
weights.append((curr_objective,
int(str(c.children()[0])),
c.children()[1].children()[0]))
except:
print("Not handling multiplicative")
#return weights
pass
if k == Z3_OP_EQ and (str(c.children()[0].sort()) == "Int" or str(c.children()[0].sort()) == "Real"):
curr_objective = c.children()[0]
for child in c.children():
weights = extractWeights(child, weights, curr_objective)
return weights
'''
#(objective, weight, feature)
def extractWeights(csvfile):
weights = []
ifile = open(csvfile, "rb")
reader = csv.reader(ifile)
for row in reader:
if row[2]== 'false':
row[2] = 0
elif row[2] == 'true':
row[2] = 1
weights.append((row[1], float(row[2]), row[0]))
return weights
def getBestForGreatestTotalWeight(num_consumers):
'''
weights is a list of triples: (objective, weight, feature)
Sort features according to weights,
where the weight of a feature is defined as:
weight(f:feature) = abs(Sum("f's weights in maximize objectives")
- Sum("f's weights in minimize objectives"))
...might be better to look at std dev.
'''
features = {}
features_str = {}
metrics_variables_string = [str(i) for i in metrics_variables]
for i in weights:
(objective, weight, feature) = i
if features.get(str(feature)):
currWeight = features[str(feature)]
else:
currWeight = 0
features_str[str(feature)] = feature
polarity = metrics_objective_direction[metrics_variables_string.index(str(objective))]
if(polarity == METRICS_MINIMIZE):
polarity = -1
currWeight = currWeight + polarity * weight
features[str(feature)] = currWeight
sorted_features = sorted(features.iteritems(), key=operator.itemgetter(1))
sorted_features = [(features_str[f],abs(w)) for (f, w) in sorted_features]
#sorted_features.reverse()
return sorted_features
def getBestForAbsoluteNormalized(weights, ranges, num_consumers):
'''
weights is a list of triples: (objective, weight, feature)
Sort features according to weights,
where the weight of a feature is defined as:
weight(f:feature) = abs(Sum("f's weights in maximize objectives")
- Sum("f's weights in minimize objectives"))
...might be better to look at std dev.
'''
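    # Clarifying note, not in the original script: unlike the docstring above
    # (copied from the un-normalised heuristic), this variant min-max
    # normalises each weight per objective before summing, adding
    #   (w - min) / (max - min)   for objectives being maximised, and
    #   (max - w) / (max - min)   for objectives being minimised,
    # and falling back to 1 when max == min for that objective.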
features = {}
features_str = {}
#print weights
(maxes, mins) = ranges
metrics_variables_string = [str(i) for i in metrics_variables]
#print weights
for i in weights:
(objective, weight, feature) = i
if features.get(str(feature)):
currWeight = features[str(feature)]
else:
currWeight = 0
features_str[str(feature)] = feature
#print objective
polarity = metrics_objective_direction[metrics_variables_string.index(str(objective))]
#print objective
if maxes[str(objective)] - mins[str(objective)] == 0:
currWeight = currWeight + 1
elif(polarity == METRICS_MAXIMIZE):
currWeight = currWeight + (float(weight) - mins[str(objective)]) / (maxes[str(objective)] - mins[str(objective)])
else:
currWeight = currWeight + (maxes[str(objective)] - float(weight)) / (maxes[str(objective)] - mins[str(objective)])
features[str(feature)] = currWeight
sorted_features = sorted(features.iteritems(), key=operator.itemgetter(1))
sorted_features = [(features_str[f],abs(w)) for (f, w) in sorted_features]
#print sorted_features
#sorted_features.reverse()
return sorted_features
def getBestMinusWorst(weights, ranges, num_consumers):
features = {}
features_str = {}
#print weights
(maxes, mins) = ranges
metrics_variables_string = [str(i) for i in metrics_variables]
#print weights
for i in weights:
(objective, weight, feature) = i
if features.get(str(feature)):
currWeight = features[str(feature)]
else:
currWeight = (1, 0)
features_str[str(feature)] = feature
#print objective
(currMin, currMax) = currWeight
polarity = metrics_objective_direction[metrics_variables_string.index("total_" + str(objective))]
#print objective
if maxes[str(objective)] - mins[str(objective)] == 0:
denom = 1
else:
denom = (maxes[str(objective)] - mins[str(objective)])
if(polarity == METRICS_MAXIMIZE):
newWeight = (float(weight) - mins[str(objective)]) /denom
else:
newWeight = (maxes[str(objective)] - float(weight)) / denom
#print features
#print(newWeight if newWeight < currMin else currMin)
#print newWeight
features[str(feature)] = (newWeight if newWeight < currMin else currMin, newWeight if newWeight > currMax else currMax)
for i in features.keys():
(l, r) = features.get(i)
features[i] = r - l
sorted_features = sorted(features.iteritems(), key=operator.itemgetter(1))
sorted_features = [(features_str[f],abs(w)) for (f, w) in sorted_features]
#print sorted_features
#sorted_features.reverse()
return sorted_features
def getBestByName(num_consumers, weights, names):
#need to clean
features=[]
for i in weights:
(name, weight) = i
if str(name) in names:
#print name
features.append((name,weight))
'''
while names:
for i in weights:
(_, _, feature) = i
if str(feature) == names[0]:
print names[0]
features.append((feature,1))
names.pop(0)
break
'''
#print features
return features
def getBestFeatures(heuristic, weights, ranges, num_consumers, names):
if heuristic == GREATEST_TOTAL_WEIGHT:
print("Obsolete, do not use. ")
#sys.exit()
return getBestForGreatestTotalWeight(num_consumers)
elif heuristic == ABSOLUTE_NORMALIZED:
return getBestForAbsoluteNormalized(weights, ranges, num_consumers)
elif heuristic == BY_NAME:
#initial_list = getBestForAbsoluteNormalized(weights, ranges, num_consumers)
#initial_list.reverse()
initial_list = getBestMinusWorst(weights, ranges, num_consumers)
return getBestByName(num_consumers, initial_list, names)
def getZ3Feature(feature, expr):
if(str(expr) == feature):
return expr
for child in expr.children():
result = getZ3Feature(feature, child)
if result:
return result
return []
ABSOLUTE_NORMALIZED = 1
GREATEST_TOTAL_WEIGHT = 2
BY_NAME = 3
CONFIG=False
RECORDPOINT = False
if __name__ == '__main__':
print("Running: " + str(sys.argv))
if len(sys.argv) < 6:
RECORDPOINT= False
elif sys.argv[5] == "1":
RECORDPOINT = True
if sys.argv[4] == "1":
CONFIG = True
else:
CONFIG = False
#print CONFIG
if sys.argv[4] == "2":
BETTER_CONFIG = True
else:
BETTER_CONFIG = False
if sys.argv[1] == "BDB":
from Z3ModelBerkeleyDB import *
csvfile = './bdb_attributes.csv'
if CONFIG:
names = bdb_config_split_names
elif BETTER_CONFIG:
sys.exit("bdb not set up for better config.")
else:
names = bdb_optional_names
elif sys.argv[1] == "ERS":
csvfile = './ers_attributes.csv'
from Z3ModelEmergencyResponseOriginal import *
if CONFIG:
names = ers_config_split_names
elif BETTER_CONFIG:
names = ers_better_config_names
else:
names = ers_optional_names
elif sys.argv[1] == "ESH":
RECORDPOINT=True
from Z3ModelEShopOriginal import *
csvfile = './eshop_attributes.csv'
if CONFIG:
names = eshop_config_split_names
elif BETTER_CONFIG:
names = eshop_better_config_names
else:
names = eshop_optional_names
elif sys.argv[1] == "WPT":
csvfile = './wpt_attributes.csv'
from Z3ModelWebPortalUpdate import *
#names=["ban_flash", "keyword", "popups", "text"]
if CONFIG:
names = webportal_config_split_names
elif BETTER_CONFIG:
names = webportal_better_config_names
else:
names = webportal_optional_names
else:
print("passed")
sys.exit()
outputFileParentName = sys.argv[2]
num_consumers = int(sys.argv[3])
num_groups = num_consumers / 2
if not is_power2(num_consumers):
sys.exit("Number of consumers must be a power of 2.")
weights = extractWeights(csvfile)
#print weights
ranges = getWeightRanges(weights)
sorted_features = getBestFeatures(BY_NAME, weights, ranges, num_consumers, names)
num_desired_features = int(math.log(num_consumers, 2))-1
#i didnt reverse, but also try middle of the pack
sorted_features.reverse()
print sorted_features
#random.shuffle(sorted_features)
desired_features = [i for (i, _) in sorted_features][:num_desired_features]
#desired_features = [i for (i, _) in sorted_features][(len(sorted_features)-num_desired_features)/2:
# (len(sorted_features)-num_desired_features)/2 + num_desired_features]
#print desired_features
new_desired_features= []
for i in desired_features:
for j in s.assertions():
result = getZ3Feature(i, j)
if result:
new_desired_features.append(result)
break
desired_features = new_desired_features
print desired_features
consumerConstraints = generateConsumerConstraints(desired_features)
consumerConstraints = [[i,i] for i in consumerConstraints]
consumerConstraints = [item for sublist in consumerConstraints for item in sublist]
print consumerConstraints
#print sorted_features
#print desired_features
solvers = replicateSolver(s, num_consumers)
mgr = multiprocessing.Manager()
taskQueue = []
for i in xrange(num_groups):
taskQueue.append(mgr.Queue())
ParetoFront = mgr.Queue()
totalTime = mgr.Queue()
CurrentNotDomConstraintsQueueList = []
# each consumer has a communication queue to communicate with the other consumer in the same group
for i in xrange(num_consumers):
CurrentNotDomConstraintsQueueList.append(mgr.Queue())
# Enqueue initial tasks
# each group has two consumers in our setting
for i in xrange(num_groups):
taskQueue[i].put("Task")
taskQueue[i].put("Task")
# Start consumers
#print 'Creating %d consumers' % num_consumers
consumersList = [ Consumer(taskQueue, ParetoFront, totalTime,CurrentNotDomConstraintsQueueList, i, outputFileParentName, num_consumers, j, k)
for i,j,k in zip(xrange(num_consumers), solvers, consumerConstraints)]
for w in consumersList:
w.start()
for w in consumersList:
w.join()
TotalOverlappingParetoFront = ParetoFront.qsize()
ParetoPointsList=[]
while ParetoFront.qsize() > 0:
paretoPoint = ParetoFront.get()
if paretoPoint in ParetoPointsList:
pass
else:
ParetoPointsList.append(paretoPoint)
TotalUniqueParetoFront = len(ParetoPointsList)
runningtime = 0.0
while totalTime.qsize() > 0:
time = totalTime.get()
if (float(time) > runningtime):
runningtime = float(time)
outputFileParent = open(str(outputFileParentName+'.csv'), 'a')
try:
outputFileParent.writelines(str(num_consumers) + ',' + str(TotalOverlappingParetoFront) +',' + str(TotalUniqueParetoFront) + ',' + str(runningtime) + ',' + '\n')
finally:
outputFileParent.close()
'''
splitRuleList = []
if sys.argv[1] == "ERS":
if (self.groupid == 0):
# from the reference point with a larger angle -> a bigger range
# radian_higher = (degree + 1) * math.pi / 180.0
radian_higher = (degree) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# print str(self.groupid) + ">=" + str(gradient_higher)
# squarization
# choosing "the two best" dimensions of the projective plane could be an interesting problem
# try to use Shannon Diversity Index, but seems not working; i think it only works when we normalize all values into [0, 1]
# so, still use the two dimensions with the maximum value range
# the challenge is how to know the scattering of configurations in the objective space, given the quality attributes of each feature?
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 >= IntVal(gradient_higher) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_responsetime - 2070) * 629 >= IntVal(gradient_higher) * (total_cost - 3145) * 414 )
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
elif (self.groupid == num_groups - 1):
# from the reference point with a smaller angle -> a bigger range
# radian_lower = (degree * self.groupid - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# print str(self.groupid) + "<" + str(gradient_lower)
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 < IntVal(gradient_lower) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_responsetime - 2070) * 629 < IntVal(gradient_lower) * (total_cost - 3145) * 414 )
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
else:
# radian_lower = (degree * self.groupid - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 < IntVal(gradient_lower) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_responsetime - 2070) * 629 < IntVal(gradient_lower) * (total_cost - 3145) * 414 )
# radian_higher = (degree * (self.groupid+1) + 1) * math.pi / 180.0
radian_higher = (degree * (self.groupid + 1)) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 >= IntVal(gradient_higher) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_responsetime - 2070) * 629 >= IntVal(gradient_higher) * (total_cost - 3145) * 414 )
# print str(self.groupid) + ">=" + str(gradient_higher) + "<" + str(gradient_lower)
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
elif sys.argv[1] == "ESH":
if (self.groupid == 0):
# from the reference point with a larger angle -> a bigger range
# radian_higher = (degree + 1) * math.pi / 180.0
radian_higher = (degree) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# print str(self.groupid) + ">=" + str(gradient_higher)
# squarization
# choosing "the two best" dimensions of the projective plane could be an interesting problem
# try to use Shannon Diversity Index, but seems not working; i think it only works when we normalize all values into [0, 1]
# so, still use the two dimensions with the maximum value range
# the challenge is how to know the scattering of configurations in the objective space, given the quality attributes of each feature?
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 >= IntVal(gradient_higher) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_Cost - 2887) * 708 >= IntVal(gradient_higher) * (total_Defects - 708) * 2887 )
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
elif (self.groupid == num_groups - 1):
# from the reference point with a smaller angle -> a bigger range
# radian_lower = (degree * self.groupid - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# print str(self.groupid) + "<" + str(gradient_lower)
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 < IntVal(gradient_lower) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_Cost - 2887) * 708 < IntVal(gradient_lower) * (total_Defects - 708) * 2887 )
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
else:
# radian_lower = (degree * self.groupid - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 < IntVal(gradient_lower) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_Cost - 2887) * 708 < IntVal(gradient_lower) * (total_Defects - 708) * 2887 )
# radian_higher = (degree * (self.groupid+1) + 1) * math.pi / 180.0
radian_higher = (degree * (self.groupid + 1)) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 130) * 121 >= IntVal(gradient_higher) * (total_batteryusage - 121) * 130 )
splitRuleList.append( 1000 * (total_Cost - 2887) * 708 >= IntVal(gradient_higher) * (total_Defects - 708) * 2887 )
# print str(self.groupid) + ">=" + str(gradient_higher) + "<" + str(gradient_lower)
tmpsplitRuleList = And(splitRuleList)
# print tmpsplitRuleList
s.add(tmpsplitRuleList)
elif sys.argv[1] == "WPT":
if (self.groupid == 0):
# from the reference point with a larger angle -> a bigger range
# radian_higher = (degree + 1) * math.pi / 180.0
radian_higher = (degree) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# print str(self.groupid) + ">=" + str(gradient_higher)
# squarization
# choosing "the two best" dimensions of the projective plane could be an interesting problem
# try to use Shannon Diversity Index, but seems not working; i think it only works when we normalize all values into [0, 1]
# so, still use the two dimensions with the maximum value range
# the challenge is how to know the scattering of configurations in the objective space, given the quality attributes of each feature?
# splitRuleList.append( 1000 * (total_rampuptime - 13) * 10 >= IntVal(gradient_higher) * (total_batteryusage - 10) * 13 )
splitRuleList.append( 1000 * (total_Cost - 422) * 145 >= IntVal(gradient_higher) * (total_Defects - 145) * 422 )
tmpsplitRuleList = And(splitRuleList)
s.add(tmpsplitRuleList)
elif (self.groupid == num_groups - 1):
# from the reference point with a smaller angle -> a bigger range
# radian_lower = (degree * self.index - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# print str(self.groupid) + "<" + str(gradient_lower)
# splitRuleList.append( 1000 * (total_rampuptime - 13) * 10 < IntVal(gradient_lower) * (total_batteryusage - 10) * 13 )
splitRuleList.append( 1000 * (total_Cost - 422) * 145 < IntVal(gradient_lower) * (total_Defects - 145) * 422 )
tmpsplitRuleList = And(splitRuleList)
s.add(tmpsplitRuleList)
else:
# radian_lower = (degree * self.index - 1) * math.pi / 180.0
radian_lower = (degree * self.groupid) * math.pi / 180.0
gradient_lower = int(1000*round(math.tan(radian_lower), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 13) * 10 < IntVal(gradient_lower) * (total_batteryusage - 10) * 13 )
splitRuleList.append( 1000 * (total_Cost - 422) * 145 < IntVal(gradient_lower) * (total_Defects - 145) * 422 )
# radian_higher = (degree * (self.index+1) + 1) * math.pi / 180.0
radian_higher = (degree * (self.groupid+1)) * math.pi / 180.0
gradient_higher = int(1000*round(math.tan(radian_higher), 3))
# splitRuleList.append( 1000 * (total_rampuptime - 13) * 10 >= IntVal(gradient_higher) * (total_batteryusage - 10) * 13 )
splitRuleList.append( 1000 * (total_Cost - 422) * 145 >= IntVal(gradient_higher) * (total_Defects - 145) * 422 )
# print str(self.groupid) + ">=" + str(gradient_higher) + "<" + str(gradient_lower)
tmpsplitRuleList = And(splitRuleList)
s.add(tmpsplitRuleList)
elif sys.argv[1] == "BDB":
pass
else:
print "messed up"
sys.exit()
'''
| ai-se/parGALE | epoal_src/parallelfeaturesplitGIA.py | Python | unlicense | 33,034 | 0.009293 |
import load_data as ld
import sys
import os
f_list = os.listdir(sys.argv[1])
data = ld.loadIntoPandas(ld.processAllDocuments(sys.argv[1], f_list))
data.to_pickle(sys.argv[2])
| lbybee/vc_network_learning_project | code/gen_load_data.py | Python | gpl-2.0 | 177 | 0 |
from fractions import gcd
def greatest_common_divisor(*args):
args = list(args)
a, b = args.pop(), args.pop()
gcd_local = gcd(a, b)
while len(args):
gcd_local = gcd(gcd_local, args.pop())
return gcd_local
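# Illustrative note, not part of the original solution: the explicit loop above
# is equivalent to folding gcd over the arguments (Python 2, using the gcd
# imported from fractions above):
#   reduce(gcd, (6, 10, 15))  # -> 1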
def test_function():
assert greatest_common_divisor(6, 10, 15) == 1, "12"
assert greatest_common_divisor(6, 4) == 2, "Simple"
assert greatest_common_divisor(2, 4, 8) == 2, "Three arguments"
assert greatest_common_divisor(2, 3, 5, 7, 11) == 1, "Prime numbers"
assert greatest_common_divisor(3, 9, 3, 9) == 3, "Repeating arguments"
if __name__ == '__main__':
test_function()
| denisbalyko/checkio-solution | gcd.py | Python | mit | 635 | 0.001575 |
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'coin_value'
_order = 'coin_value'
@api.one
@api.depends('coin_value', 'number')
def _sub_total(self):
""" Calculates Sub total"""
self.subtotal = self.coin_value * self.number
coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
cashbox_id = fields.Many2one('account.bank.statement.cashbox', string="Cashbox")
class AccountBankStmtCashWizard(models.Model):
"""
Account Bank Statement popup that allows entering cash details.
"""
_name = 'account.bank.statement.cashbox'
_description = 'Account Bank Statement Cashbox Details'
cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('bank_statement_id', False) or self.env.context.get('active_id', False)
bnk_stmt = self.env['account.bank.statement'].browse(bnk_stmt_id)
total = 0.0
for lines in self.cashbox_lines_ids:
total += lines.subtotal
if self.env.context.get('balance', False) == 'start':
#starting balance
bnk_stmt.write({'balance_start': total, 'cashbox_start_id': self.id})
else:
#closing balance
bnk_stmt.write({'balance_end_real': total, 'cashbox_end_id': self.id})
return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
"""
Account Bank Statement wizard that check that closing balance is correct.
"""
_name = 'account.bank.statement.closebalance'
_description = 'Account Bank Statement closing balance'
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('active_id', False)
if bnk_stmt_id:
self.env['account.bank.statement'].browse(bnk_stmt_id).button_confirm_bank()
return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
@api.one
@api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
def _end_balance(self):
self.total_entry_encoding = sum([line.amount for line in self.line_ids])
self.balance_end = self.balance_start + self.total_entry_encoding
self.difference = self.balance_end_real - self.balance_end
@api.multi
def _is_difference_zero(self):
for bank_stmt in self:
bank_stmt.is_difference_zero = float_is_zero(bank_stmt.difference, precision_digits=bank_stmt.currency_id.decimal_places)
@api.one
@api.depends('journal_id')
def _compute_currency(self):
self.currency_id = self.journal_id.currency_id or self.company_id.currency_id
@api.one
@api.depends('line_ids.journal_entry_ids')
def _check_lines_reconciled(self):
self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])
@api.model
def _default_journal(self):
journal_type = self.env.context.get('journal_type', False)
company_id = self.env['res.company']._company_default_get('account.bank.statement').id
if journal_type:
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
return journals[0]
return False
@api.multi
def _get_opening_balance(self, journal_id):
last_bnk_stmt = self.search([('journal_id', '=', journal_id)], limit=1)
if last_bnk_stmt:
return last_bnk_stmt.balance_end
return 0
@api.multi
def _set_opening_balance(self, journal_id):
self.balance_start = self._get_opening_balance(journal_id)
@api.model
def _default_opening_balance(self):
#Search last bank statement and set current opening balance as closing balance of previous one
journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
if journal_id:
return self._get_opening_balance(journal_id)
return 0
_name = "account.bank.statement"
_description = "Bank Statement"
_order = "date desc, id desc"
_inherit = ['mail.thread']
name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False, default=fields.Date.context_today)
date_done = fields.Datetime(string="Closed On")
balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency', string="Currency")
journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
cashbox_start_id = fields.Many2one('account.bank.statement.cashbox', string="Starting Cashbox")
cashbox_end_id = fields.Many2one('account.bank.statement.cashbox', string="Ending Cashbox")
is_difference_zero = fields.Boolean(compute='_is_difference_zero', string='Is zero', help="Check if difference is zero.")
@api.onchange('journal_id')
def onchange_journal_id(self):
self._set_opening_balance(self.journal_id.id)
@api.multi
def _balance_check(self):
for stmt in self:
if not stmt.currency_id.is_zero(stmt.difference):
if stmt.journal_type == 'cash':
if stmt.difference < 0.0:
account = stmt.journal_id.loss_account_id
name = _('Loss')
else:
# statement.difference > 0.0
account = stmt.journal_id.profit_account_id
name = _('Profit')
if not account:
raise UserError(_('There is no account defined on the journal %s for %s involved in a cash difference.') % (stmt.journal_id.name, name))
values = {
'statement_id': stmt.id,
'account_id': account.id,
'amount': stmt.difference,
'name': _("Cash difference observed during the counting (%s)") % name,
}
self.env['account.bank.statement.line'].create(values)
else:
balance_end_real = formatLang(self.env, stmt.balance_end_real, currency_obj=stmt.currency_id)
balance_end = formatLang(self.env, stmt.balance_end, currency_obj=stmt.currency_id)
raise UserError(_('The ending balance is incorrect !\nThe expected balance (%s) is different from the computed one. (%s)')
% (balance_end_real, balance_end))
return True
@api.model
def create(self, vals):
if not vals.get('name'):
journal_id = vals.get('journal_id', self._context.get('default_journal_id', False))
journal = self.env['account.journal'].browse(journal_id)
vals['name'] = journal.sequence_id.with_context(ir_sequence_date=vals.get('date')).next_by_id()
return super(AccountBankStatement, self).create(vals)
@api.multi
def unlink(self):
for statement in self:
if statement.state != 'open':
raise UserError(_('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
# Explicitly unlink bank statement lines so it will check that the related journal entries have been deleted first
statement.line_ids.unlink()
return super(AccountBankStatement, self).unlink()
@api.multi
def open_cashbox_id(self):
context = dict(self.env.context or {})
if context.get('cashbox_id'):
context['active_id'] = self.id
return {
'name': _('Cash Control'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.bank.statement.cashbox',
'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
'type': 'ir.actions.act_window',
'res_id': self.env.context.get('cashbox_id'),
'context': context,
'target': 'new'
}
@api.multi
def button_cancel(self):
for statement in self:
if any(line.journal_entry_ids.ids for line in statement.line_ids):
raise UserError(_('A statement cannot be canceled when its lines are reconciled.'))
self.state = 'open'
@api.multi
def check_confirm_bank(self):
if self.journal_type == 'cash' and not self.currency_id.is_zero(self.difference):
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_view_account_bnk_stmt_check')
if action_rec:
action = action_rec.read([])[0]
return action
return self.button_confirm_bank()
@api.multi
def button_confirm_bank(self):
self._balance_check()
statements = self.filtered(lambda r: r.state == 'open')
for statement in statements:
moves = self.env['account.move']
for st_line in statement.line_ids:
if st_line.account_id and not st_line.journal_entry_ids.ids:
st_line.fast_counterpart_creation()
elif not st_line.journal_entry_ids.ids:
                    raise UserError(_('All the account entry lines must be processed in order to close the statement.'))
moves = (moves | st_line.journal_entry_ids)
if moves:
moves.post()
statement.message_post(body=_('Statement %s confirmed, journal items were created.') % (statement.name,))
statements.link_bank_to_partner()
statements.write({'state': 'confirm', 'date_done': time.strftime("%Y-%m-%d %H:%M:%S")})
@api.multi
def button_journal_entries(self):
context = dict(self._context or {})
context['journal_id'] = self.journal_id.id
return {
'name': _('Journal Items'),
'view_type': 'form',
'view_mode': 'tree',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'domain': [('statement_id', 'in', self.ids)],
'context': context,
}
@api.multi
def button_open(self):
""" Changes statement state to Running."""
for statement in self:
if not statement.name:
                context = {'ir_sequence_date': statement.date}
if statement.journal_id.sequence_id:
st_number = statement.journal_id.sequence_id.with_context(context).next_by_id()
else:
SequenceObj = self.env['ir.sequence']
st_number = SequenceObj.with_context(context).next_by_code('account.bank.statement')
statement.name = st_number
statement.state = 'open'
@api.multi
def reconciliation_widget_preprocess(self):
""" Get statement lines of the specified statements or all unreconciled statement lines and try to automatically reconcile them / find them a partner.
Return ids of statement lines left to reconcile and other data for the reconciliation widget.
"""
statements = self
bsl_obj = self.env['account.bank.statement.line']
# NB : The field account_id can be used at the statement line creation/import to avoid the reconciliation process on it later on,
# this is why we filter out statements lines where account_id is set
st_lines_filter = [('journal_entry_ids', '=', False), ('account_id', '=', False)]
if statements:
st_lines_filter += [('statement_id', 'in', statements.ids)]
# Try to automatically reconcile statement lines
automatic_reconciliation_entries = []
st_lines_left = self.env['account.bank.statement.line']
for st_line in bsl_obj.search(st_lines_filter):
res = st_line.auto_reconcile()
if not res:
st_lines_left = (st_lines_left | st_line)
else:
automatic_reconciliation_entries.append(res.ids)
# Try to set statement line's partner
for st_line in st_lines_left:
if st_line.name and not st_line.partner_id:
additional_domain = [('ref', '=', st_line.name)]
match_recs = st_line.get_move_lines_for_reconciliation(limit=1, additional_domain=additional_domain, overlook_partner=True)
if match_recs and match_recs[0].partner_id:
st_line.write({'partner_id': match_recs[0].partner_id.id})
# Collect various informations for the reconciliation widget
notifications = []
num_auto_reconciled = len(automatic_reconciliation_entries)
if num_auto_reconciled > 0:
auto_reconciled_message = num_auto_reconciled > 1 \
and _("%d transactions were automatically reconciled.") % num_auto_reconciled \
or _("1 transaction was automatically reconciled.")
notifications += [{
'type': 'info',
'message': auto_reconciled_message,
'details': {
'name': _("Automatically reconciled items"),
'model': 'account.move',
'ids': automatic_reconciliation_entries
}
}]
lines = []
for el in statements:
lines.extend(el.line_ids.ids)
lines = list(set(lines))
return {
'st_lines_ids': st_lines_left.ids,
'notifications': notifications,
'statement_name': len(statements) == 1 and statements[0].name or False,
'num_already_reconciled_lines': statements and bsl_obj.search_count([('journal_entry_ids', '!=', False), ('id', 'in', lines)]) or 0,
}
@api.multi
def link_bank_to_partner(self):
for statement in self:
for st_line in statement.line_ids:
if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id != st_line.partner_id:
st_line.bank_account_id.partner_id = st_line.partner_id
class AccountBankStatementLine(models.Model):
_name = "account.bank.statement.line"
_description = "Bank Statement Line"
_order = "statement_id desc, sequence"
_inherit = ['ir.needaction_mixin']
name = fields.Char(string='Memo', required=True)
date = fields.Date(required=True, default=lambda self: self._context.get('date', fields.Date.context_today(self)))
amount = fields.Monetary(digits=0, currency_field='journal_currency_id')
journal_currency_id = fields.Many2one('res.currency', related='statement_id.currency_id',
help='Utility field to express amount currency', readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner')
bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account')
account_id = fields.Many2one('account.account', string='Counterpart Account', domain=[('deprecated', '=', False)],
help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation"
" process on it later on. The statement line will simply create a counterpart on this account")
statement_id = fields.Many2one('account.bank.statement', string='Statement', index=True, required=True, ondelete='cascade')
journal_id = fields.Many2one('account.journal', related='statement_id.journal_id', string='Journal', store=True, readonly=True)
partner_name = fields.Char(help="This field is used to record the third party name when importing bank statement in electronic format,"
" when the partner doesn't exist yet in the database (or cannot be found).")
ref = fields.Char(string='Reference')
note = fields.Text(string='Notes')
sequence = fields.Integer(index=True, help="Gives the sequence order when displaying a list of bank statement lines.", default=1)
company_id = fields.Many2one('res.company', related='statement_id.company_id', string='Company', store=True, readonly=True)
journal_entry_ids = fields.One2many('account.move', 'statement_line_id', 'Journal Entries', copy=False, readonly=True)
amount_currency = fields.Monetary(help="The amount expressed in an optional other currency if it is a multi-currency entry.")
currency_id = fields.Many2one('res.currency', string='Currency', help="The optional other currency if it is a multi-currency entry.")
@api.one
@api.constrains('amount')
def _check_amount(self):
        # This constraint could possibly underline flaws in bank statement import (e.g. the inability to
        # support hacks such as using dummy transactions to give additional information)
if self.amount == 0:
raise ValidationError(_('A transaction can\'t have a 0 amount.'))
@api.one
@api.constrains('amount', 'amount_currency')
def _check_amount_currency(self):
if self.amount_currency != 0 and self.amount == 0:
raise ValidationError(_('If "Amount Currency" is specified, then "Amount" must be as well.'))
@api.multi
def unlink(self):
for line in self:
if line.journal_entry_ids.ids:
raise UserError(_('In order to delete a bank statement line, you must first cancel it to delete related journal items.'))
return super(AccountBankStatementLine, self).unlink()
@api.model
def _needaction_domain_get(self):
return [('journal_entry_ids', '=', False), ('account_id', '=', False)]
@api.multi
def button_cancel_reconciliation(self):
        # TO CHECK: might not behave as expected in case of reconciliations (match statement line with already
# registered payment) or partial reconciliations : it will completely remove the existing payment.
move_recs = self.env['account.move']
for st_line in self:
move_recs = (move_recs | st_line.journal_entry_ids)
if move_recs:
for move in move_recs:
move.line_ids.remove_move_reconcile()
move_recs.write({'statement_line_id': False})
move_recs.button_cancel()
move_recs.unlink()
####################################################
# Reconciliation interface methods
####################################################
@api.multi
def get_data_for_reconciliation_widget(self, excluded_ids=None):
""" Returns the data required to display a reconciliation widget, for each statement line in self """
excluded_ids = excluded_ids or []
ret = []
for st_line in self:
aml_recs = st_line.get_reconciliation_proposition(excluded_ids=excluded_ids)
target_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.journal_id.company_id.currency_id
rp = aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=st_line.date)
excluded_ids += [move_line['id'] for move_line in rp]
ret.append({
'st_line': st_line.get_statement_line_for_reconciliation_widget(),
'reconciliation_proposition': rp
})
return ret
def get_statement_line_for_reconciliation_widget(self):
""" Returns the data required by the bank statement reconciliation widget to display a statement line """
statement_currency = self.journal_id.currency_id or self.journal_id.company_id.currency_id
if self.amount_currency and self.currency_id:
amount = self.amount_currency
amount_currency = self.amount
amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
amount_currency_str = formatLang(self.env, amount_currency_str, currency_obj=statement_currency)
else:
amount = self.amount
amount_currency_str = ""
amount_str = formatLang(self.env, abs(amount), currency_obj=self.currency_id or statement_currency)
data = {
'id': self.id,
'ref': self.ref,
'note': self.note or "",
'name': self.name,
'date': self.date,
'amount': amount,
'amount_str': amount_str, # Amount in the statement line currency
'currency_id': self.currency_id.id or statement_currency.id,
'partner_id': self.partner_id.id,
'journal_id': self.journal_id.id,
'statement_id': self.statement_id.id,
'account_code': self.journal_id.default_debit_account_id.code,
'account_name': self.journal_id.default_debit_account_id.name,
'partner_name': self.partner_id.name,
'communication_partner_name': self.partner_name,
'amount_currency_str': amount_currency_str, # Amount in the statement currency
'has_no_partner': not self.partner_id.id,
}
if self.partner_id:
if amount > 0:
data['open_balance_account_id'] = self.partner_id.property_account_receivable_id.id
else:
data['open_balance_account_id'] = self.partner_id.property_account_payable_id.id
return data
@api.multi
def get_move_lines_for_reconciliation_widget(self, excluded_ids=None, str=False, offset=0, limit=None):
""" Returns move lines for the bank statement reconciliation widget, formatted as a list of dicts
"""
aml_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str, offset=offset, limit=limit)
target_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
return aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=self.date)
####################################################
# Reconciliation methods
####################################################
def get_move_lines_for_reconciliation(self, excluded_ids=None, str=False, offset=0, limit=None, additional_domain=None, overlook_partner=False):
""" Return account.move.line records which can be used for bank statement reconciliation.
:param excluded_ids:
:param str:
:param offset:
:param limit:
:param additional_domain:
:param overlook_partner:
"""
# Domain to fetch registered payments (use case where you encode the payment before you get the bank statement)
reconciliation_aml_accounts = [self.journal_id.default_credit_account_id.id, self.journal_id.default_debit_account_id.id]
domain_reconciliation = ['&', ('statement_id', '=', False), ('account_id', 'in', reconciliation_aml_accounts)]
# Domain to fetch unreconciled payables/receivables (use case where you close invoices/refunds by reconciling your bank statements)
domain_matching = [('reconciled', '=', False)]
if self.partner_id.id or overlook_partner:
domain_matching = expression.AND([domain_matching, [('account_id.internal_type', 'in', ['payable', 'receivable'])]])
else:
# TODO : find out what use case this permits (match a check payment, registered on a journal whose account type is other instead of liquidity)
domain_matching = expression.AND([domain_matching, [('account_id.reconcile', '=', True)]])
# Let's add what applies to both
domain = expression.OR([domain_reconciliation, domain_matching])
if self.partner_id.id and not overlook_partner:
domain = expression.AND([domain, [('partner_id', '=', self.partner_id.id)]])
# Domain factorized for all reconciliation use cases
ctx = dict(self._context or {})
ctx['bank_statement_line'] = self
generic_domain = self.env['account.move.line'].with_context(ctx).domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
domain = expression.AND([domain, generic_domain])
# Domain from caller
if additional_domain is None:
additional_domain = []
else:
additional_domain = expression.normalize_domain(additional_domain)
domain = expression.AND([domain, additional_domain])
return self.env['account.move.line'].search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
def _get_domain_maker_move_line_amount(self):
""" Returns a function that can create the appropriate domain to search on move.line amount based on statement.line currency/amount """
company_currency = self.journal_id.company_id.currency_id
st_line_currency = self.currency_id or self.journal_id.currency_id
currency = (st_line_currency and st_line_currency != company_currency) and st_line_currency.id or False
field = currency and 'amount_residual_currency' or 'amount_residual'
precision = st_line_currency and st_line_currency.decimal_places or company_currency.decimal_places
def ret(comparator, amount, p=precision, f=field, c=currency):
if comparator == '<':
if amount < 0:
domain = [(f, '<', 0), (f, '>', amount)]
else:
domain = [(f, '>', 0), (f, '<', amount)]
elif comparator == '=':
domain = [(f, '=', float_round(amount, precision_digits=p))]
else:
raise UserError(_("Programming error: domain_maker_move_line_amount requires comparator '=' or '<'"))
domain += [('currency_id', '=', c)]
return domain
return ret
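# Illustrative note (editor's addition, not part of the original module): the closure
# returned above captures the residual field, the currency and the rounding precision
# once, so callers such as get_reconciliation_proposition() below can build several
# amount domains cheaply. Assuming a company-currency statement line with 2 decimal
# places, the produced domains would look like:
#
#     amount_domain_maker = st_line._get_domain_maker_move_line_amount()
#     amount_domain_maker('=', 100.0)
#     # -> [('amount_residual', '=', 100.0), ('currency_id', '=', False)]
#     amount_domain_maker('<', -42.5)
#     # -> [('amount_residual', '<', 0), ('amount_residual', '>', -42.5),
#     #     ('currency_id', '=', False)]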
def get_reconciliation_proposition(self, excluded_ids=None):
""" Returns move lines that constitute the best guess to reconcile a statement line
Note: it only looks for move lines in the same currency as the statement line.
"""
# Look for structured communication match
if self.name:
overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
domain = [('ref', '=', self.name)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
elif len(match_recs) == 0:
move = self.env['account.move'].search([('name', '=', self.name)], limit=1)
if move:
domain = [('move_id', '=', move.id)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
# How to compare statement line amount and move lines amount
amount_domain_maker = self._get_domain_maker_move_line_amount()
amount = self.amount_currency or self.amount
# Look for a single move line with the same amount
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=1, additional_domain=amount_domain_maker('=', amount))
if match_recs:
return match_recs
if not self.partner_id:
return self.env['account.move.line']
# Select move lines until their total amount is greater than the statement line amount
domain = [('reconciled', '=', False)]
domain += [('account_id.user_type_id.type', '=', amount > 0 and 'receivable' or 'payable')] # Make sure we can't mix receivable and payable
domain += amount_domain_maker('<', amount) # Will also enforce > 0
mv_lines = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=5, additional_domain=domain)
st_line_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
ret = self.env['account.move.line']
total = 0
for line in mv_lines:
total += line.currency_id and line.amount_residual_currency or line.amount_residual
if float_compare(total, abs(amount), precision_digits=st_line_currency.rounding) != -1:
break
ret = (ret | line)
return ret
def _get_move_lines_for_auto_reconcile(self):
""" Returns the move lines that the method auto_reconcile can use to try to reconcile the statement line """
pass
@api.multi
def auto_reconcile(self):
""" Try to automatically reconcile the statement.line ; return the counterpart journal entry/ies if the automatic reconciliation succeeded, False otherwise.
TODO : this method could be greatly improved and made extensible
"""
self.ensure_one()
match_recs = self.env['account.move.line']
# How to compare statement line amount and move lines amount
amount_domain_maker = self._get_domain_maker_move_line_amount()
equal_amount_domain = amount_domain_maker('=', self.amount_currency or self.amount)
# Look for structured communication match
if self.name:
overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
domain = equal_amount_domain + [('ref', '=', self.name)]
match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) != 1:
return False
# Look for a single move line with the same partner, the same amount
if not match_recs:
if self.partner_id:
match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=equal_amount_domain)
if match_recs and len(match_recs) != 1:
return False
if not match_recs:
return False
# Now reconcile
counterpart_aml_dicts = []
payment_aml_rec = self.env['account.move.line']
for aml in match_recs:
if aml.account_id.internal_type == 'liquidity':
payment_aml_rec = (payment_aml_rec | aml)
else:
amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
counterpart_aml_dicts.append({
'name': aml.name if aml.name != '/' else aml.move_id.name,
'debit': amount < 0 and -amount or 0,
'credit': amount > 0 and amount or 0,
'move_line': aml
})
try:
with self._cr.savepoint():
counterpart = self.process_reconciliation(counterpart_aml_dicts=counterpart_aml_dicts, payment_aml_rec=payment_aml_rec)
return counterpart
except UserError:
# A configuration / business logic error that makes it impossible to auto-reconcile should not be raised
# since automatic reconciliation is just an amenity and the user will get the same exception when manually
# reconciling. Other types of exception are (hopefully) programming errors and should cause a stacktrace.
self.invalidate_cache()
self.env['account.move'].invalidate_cache()
self.env['account.move.line'].invalidate_cache()
return False
def _prepare_reconciliation_move(self, move_name):
""" Prepare the dict of values to create the move from a statement line. This method may be overridden to adapt domain logic
through model inheritance (make sure to call super() to establish a clean extension chain).
:param char move_name: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'statement_line_id': self.id,
'journal_id': self.statement_id.journal_id.id,
'date': self.date,
'name': move_name,
'ref': self.ref,
}
def _prepare_reconciliation_move_line(self, move, amount):
""" Prepare the dict of values to create the move line from a statement line.
:param recordset move: the account.move to link the move line
:param float amount: the amount of transaction that wasn't already reconciled
"""
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
amount_currency = False
if statement_currency != company_currency or st_line_currency != company_currency:
# First get the ratio total amount / amount not already reconciled
if statement_currency == company_currency:
total_amount = self.amount
elif st_line_currency == company_currency:
total_amount = self.amount_currency
else:
total_amount = statement_currency.with_context({'date': self.date}).compute(self.amount, company_currency)
ratio = total_amount / amount
# Then use it to adjust the statement.line field that corresponds to the move.line amount_currency
if statement_currency != company_currency:
amount_currency = self.amount * ratio
elif st_line_currency != company_currency:
amount_currency = self.amount_currency * ratio
return {
'name': self.name,
'date': self.date,
'ref': self.ref,
'move_id': move.id,
'partner_id': self.partner_id and self.partner_id.id or False,
'account_id': amount >= 0 \
and self.statement_id.journal_id.default_credit_account_id.id \
or self.statement_id.journal_id.default_debit_account_id.id,
'credit': amount < 0 and -amount or 0.0,
'debit': amount > 0 and amount or 0.0,
'statement_id': self.statement_id.id,
'journal_id': self.statement_id.journal_id.id,
'currency_id': statement_currency != company_currency and statement_currency.id or (st_line_currency != company_currency and st_line_currency.id or False),
'amount_currency': amount_currency,
}
@api.v7
def process_reconciliations(self, cr, uid, ids, data, context=None):
""" Handles data sent from the bank statement reconciliation widget (and can otherwise serve as an old-API bridge)
:param list of dicts data: must contain the keys 'counterpart_aml_dicts', 'payment_aml_ids' and 'new_aml_dicts',
whose value is the same as described in process_reconciliation except that ids are used instead of recordsets.
"""
aml_obj = self.pool['account.move.line']
for id, datum in zip(ids, data):
st_line = self.browse(cr, uid, id, context)
payment_aml_rec = aml_obj.browse(cr, uid, datum.get('payment_aml_ids', []), context)
for aml_dict in datum.get('counterpart_aml_dicts', []):
aml_dict['move_line'] = aml_obj.browse(cr, uid, aml_dict['counterpart_aml_id'], context)
del aml_dict['counterpart_aml_id']
st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))
def fast_counterpart_creation(self):
for st_line in self:
# Technical functionality to automatically reconcile by creating a new move line
vals = {
'name': st_line.name,
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
}
st_line.process_reconciliation(new_aml_dicts=[vals])
def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):
""" Match statement lines with existing payments (eg. checks) and/or payables/receivables (eg. invoices and refunds) and/or new move lines (eg. write-offs).
If any new journal item needs to be created (via new_aml_dicts or counterpart_aml_dicts), a new journal entry will be created and will contain those
items, as well as a journal item for the bank statement line.
Finally, mark the statement line as reconciled by putting the matched moves ids in the column journal_entry_ids.
:param (list of dicts) counterpart_aml_dicts: move lines to create to reconcile with existing payables/receivables.
The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'move_line'
# The move line to reconcile (partially if specified debit/credit is lower than move line's credit/debit)
:param (list of recordsets) payment_aml_rec: recordset move lines representing existing payments (which are already fully reconciled)
:param (list of dicts) new_aml_dicts: move lines to create. The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'account_id'
- (optional) 'tax_ids'
- (optional) Other account.move.line fields like analytic_account_id or analytics_id
:returns: The journal entries with which the transaction was matched. If there was at least one entry in counterpart_aml_dicts or new_aml_dicts, this list contains
the move created by the reconciliation, containing entries for the statement.line (1), the counterpart move lines (0..*) and the new move lines (0..*).
"""
counterpart_aml_dicts = counterpart_aml_dicts or []
payment_aml_rec = payment_aml_rec or self.env['account.move.line']
new_aml_dicts = new_aml_dicts or []
aml_obj = self.env['account.move.line']
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
counterpart_moves = self.env['account.move']
# Check and prepare received data
if self.journal_entry_ids.ids:
raise UserError(_('The bank statement line was already reconciled.'))
if any(rec.statement_id for rec in payment_aml_rec):
raise UserError(_('A selected move line was already reconciled.'))
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].reconciled:
raise UserError(_('A selected move line was already reconciled.'))
if isinstance(aml_dict['move_line'], (int, long)):
aml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])
for aml_dict in (counterpart_aml_dicts + new_aml_dicts):
if aml_dict.get('tax_ids') and aml_dict['tax_ids'] and isinstance(aml_dict['tax_ids'][0], (int, long)):
# Transform the value in the format required for One2many and Many2many fields
aml_dict['tax_ids'] = map(lambda id: (4, id, None), aml_dict['tax_ids'])
# Fully reconciled moves are just linked to the bank statement
for aml_rec in payment_aml_rec:
aml_rec.write({'statement_id': self.statement_id.id})
aml_rec.move_id.write({'statement_line_id': self.id})
counterpart_moves = (counterpart_moves | aml_rec.move_id)
# Create move line(s). Either matching an existing journal entry (eg. invoice), in which
# case we reconcile the existing and the new move lines together, or being a write-off.
if counterpart_aml_dicts or new_aml_dicts:
st_line_currency = self.currency_id or statement_currency
st_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False
# Create the move
move_name = (self.statement_id.name or self.name) + "/" + str(self.sequence)
move_vals = self._prepare_reconciliation_move(move_name)
move = self.env['account.move'].create(move_vals)
counterpart_moves = (counterpart_moves | move)
# Complete dicts to create both counterpart move lines and write-offs
to_create = (counterpart_aml_dicts + new_aml_dicts)
ctx = dict(self._context, date=self.date)
for aml_dict in to_create:
aml_dict['move_id'] = move.id
aml_dict['partner_id'] = self.partner_id.id
aml_dict['statement_id'] = self.statement_id.id
if st_line_currency.id != company_currency.id:
aml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']
aml_dict['currency_id'] = st_line_currency.id
if self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
# Statement is in company currency but the transaction is in foreign currency
aml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)
aml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)
elif self.currency_id and st_line_currency_rate:
# Statement is in foreign currency and the transaction is in another one
aml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)
aml_dict['credit'] = statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)
else:
# Statement is in foreign currency and no extra currency is given for the transaction
aml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)
aml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)
elif statement_currency.id != company_currency.id:
# Statement is in foreign currency but the transaction is in company currency
prorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency
aml_dict['amount_currency'] = prorata_factor * self.amount
aml_dict['currency_id'] = statement_currency.id
# Create write-offs
for aml_dict in new_aml_dicts:
aml_obj.with_context(check_move_validity=False).create(aml_dict)
# Create counterpart move lines and reconcile them
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].partner_id.id:
aml_dict['partner_id'] = aml_dict['move_line'].partner_id.id
aml_dict['account_id'] = aml_dict['move_line'].account_id.id
counterpart_move_line = aml_dict.pop('move_line')
if counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):
aml_dict['currency_id'] = counterpart_move_line.currency_id.id
aml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)
new_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)
(new_aml | counterpart_move_line).reconcile()
# Create the move line for the statement line using the bank statement line as the remaining amount
# This leaves out the amount already reconciled and avoids rounding errors from currency conversion
st_line_amount = -sum([x.balance for x in move.line_ids])
aml_obj.with_context(check_move_validity=False).create(self._prepare_reconciliation_move_line(move, st_line_amount))
move.post()
counterpart_moves.assert_balanced()
return counterpart_moves
|
angelapper/odoo
|
addons/account/models/account_bank_statement.py
|
Python
|
agpl-3.0
| 47,237
| 0.004573
|
class APIError(Exception):
"""Represents an error returned in a response to a fleet API call
This exception will be raised any time a response code >= 400 is returned
Attributes:
code (int): The response code
message(str): The message included with the error response
http_error(googleapiclient.errors.HttpError): The underlying exception that caused this exception to be raised
If you need access to the raw response, this is where you'll find
it.
"""
def __init__(self, code, message, http_error):
"""Construct an exception representing an error returned by fleet
Args:
code (int): The response code
message(str): The message included with the error response
http_error(googleapiclient.errors.HttpError): The underlying exception that caused this exception
to be raised.
"""
self.code = code
self.message = message
self.http_error = http_error
def __str__(self):
# Return a string like 'Some bad thing happened (400)'
return '{1} ({0})'.format(
self.code,
self.message
)
def __repr__(self):
# Return a string like '<Fleetv1Error; Code: 400; Message: Some bad thing happened>'
return '<{0}; Code: {1}; Message: {2}>'.format(
self.__class__.__name__,
self.code,
self.message
)
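# Illustrative usage sketch (editor's addition, not part of the fleet API): APIError
# carries the HTTP status code, the server message and the underlying googleapiclient
# HttpError, so callers typically catch it around any fleet call and branch on `code`.
# The call below ("client.list_units()") and the logger are hypothetical placeholders,
# not real names from this library.
#
#     try:
#         units = client.list_units()
#     except APIError as exc:
#         if exc.code == 404:
#             units = []                                     # treat "not found" as empty
#         else:
#             log.error('fleet call failed: %s', exc)        # uses __str__: 'message (code)'
#             raise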
|
cnelson/python-fleet
|
fleet/v1/errors.py
|
Python
|
apache-2.0
| 1,594
| 0.002509
|
# Copyright (C) 2018, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb import *
from nicotb.utils import Scoreboard, BusGetter
from nicotb.protocol import Ahb
import operator as op
import numpy as np
from os import getenv
def main():
N = 10
scb = Scoreboard()
test = scb.GetTest("ahb", ne=op.ne, max_err=10)
bg = BusGetter(callbacks=[test.Get])
ms = Ahb.Master(hsel, haddr, hwrite, htrans, hsize, hburst, hready, hresp, rd, wd, ck_ev)
yield rs_ev
for i in range(10):
yield ck_ev
def rng(magic):
while True:
magic = (magic*199 + 12345) & 65535
yield magic
r = rng(25251)
MAGIC = next(r)
ADR = 0
print(
"Test Single R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
test.Expect(MAGIC)
yield from ms.Write(ADR, MAGIC)
read_v = yield from ms.Read(ADR)
test.Get(read_v)
yield ck_ev
MAGIC = next(r)
ADR = 100
print(
"Test Pipelined R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
wcmd = [(True, ADR+i*4, MAGIC+i) for i in range(N)]
rcmd = [(False, ADR+i*4) for i in range(N)]
test.Expect([MAGIC+i for i in range(N)])
read_v = yield from ms.IssueCommands(wcmd + rcmd)
test.Get(read_v)
yield ck_ev
MAGIC = next(r)
ADR = 200
print(
"Test Pipelined Interleaved R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
wcmd = [(True, ADR+i*4, MAGIC+i) for i in range(N)]
rcmd = [(False, ADR+i*4) for i in range(N)]
cmd = [v for p in zip(wcmd, rcmd) for v in p]
test.Expect([MAGIC+i for i in range(N)])
read_v = yield from ms.IssueCommands(cmd)
test.Get(read_v)
for i in range(10):
yield ck_ev
wd, rd = CreateBuses([("wd",), ("rd",),])
hsel, haddr, hwrite, htrans, hsize, hburst, hready, hresp = CreateBuses([
(("u_dut", "HSEL"),),
(("u_dut", "HADDR"),),
(("u_dut", "HWRITE"),),
(("u_dut", "HTRANS"),),
(("u_dut", "HSIZE"),),
(("u_dut", "HBURST"),),
(("u_dut", "HREADY"),),
(("u_dut", "HRESP"),),
])
ck_ev, rs_ev = CreateEvents(["ck_ev", "rst_out",])
RegisterCoroutines([
main(),
])
|
johnjohnlin/nicotb
|
sim/ahb/Ahb_test.py
|
Python
|
gpl-3.0
| 2,569
| 0.024912
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from horizon.utils import html
class Breadcrumb(html.HTMLElement):
def __init__(self, request, template, root,
subfolder_path, url, attr=None):
super(Breadcrumb, self).__init__()
self.template = template
self.request = request
self.root = root
self.subfolder_path = subfolder_path
self.url = url
self._subfolders = []
def get_subfolders(self):
if self.subfolder_path and not self._subfolders:
(parent, slash, folder) = self.subfolder_path.strip('/') \
.rpartition('/')
while folder:
path = "%s%s%s/" % (parent, slash, folder)
self._subfolders.insert(0, (folder, path))
(parent, slash, folder) = parent.rpartition('/')
return self._subfolders
def render(self):
"""Renders the table using the template from the table options."""
breadcrumb_template = template.loader.get_template(self.template)
extra_context = {"breadcrumb": self}
context = template.RequestContext(self.request, extra_context)
return breadcrumb_template.render(context)
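# Illustrative sketch (editor's addition): given a subfolder path such as
# "media/photos/2014/", get_subfolders() walks the path right-to-left and returns one
# (folder, path) pair per level, in root-to-leaf order. The request, template name,
# root and url below are placeholders supplied by whatever browser builds the crumb.
#
#     crumb = Breadcrumb(request, "horizon/common/_breadcrumb.html",
#                        root, "media/photos/2014/", url)
#     crumb.get_subfolders()
#     # -> [('media', 'media/'),
#     #     ('photos', 'media/photos/'),
#     #     ('2014', 'media/photos/2014/')]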
|
ChinaMassClouds/copenstack-server
|
openstack/src/horizon-2014.2/horizon/browsers/breadcrumb.py
|
Python
|
gpl-2.0
| 1,803
| 0
|
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains the new image code, which includes provisions for
# size-based caching and constructing images from operations (like
# cropping and scaling).
import renpy.display
import math
import zipfile
import cStringIO
import threading
import time
# This is an entry in the image cache.
class CacheEntry(object):
def __init__(self, what, surf):
# The object that is being cached (which needs to be
# hashable and comparable).
self.what = what
# The pygame surface corresponding to the cached object.
self.surf = surf
# The size of this image.
w, h = surf.get_size()
self.size = w * h
# The time when this cache entry was last used.
self.time = 0
# This is the singleton image cache.
class Cache(object):
def __init__(self):
# The current arbitrary time. (Increments by one for each
# interaction.)
self.time = 0
# A map from Image object to CacheEntry.
self.cache = { }
# A list of Image objects that we want to preload.
self.preloads = [ ]
# False if this is not the first preload in this tick.
self.first_preload_in_tick = True
# The total size of the current generation of images.
self.size_of_current_generation = 0
# The total size of everything in the cache.
self.total_cache_size = 0
# A lock that must be held when updating the above.
self.lock = threading.Condition()
# Is the preload_thread alive?
self.keep_preloading = True
# A map from image object to surface, only for objects that have
# been pinned into memory.
self.pin_cache = { }
# Images that we tried, and failed, to preload.
self.preload_blacklist = set()
# The size of the cache, in pixels.
self.cache_limit = 0
# The preload thread.
self.preload_thread = threading.Thread(target=self.preload_thread_main, name="preloader")
self.preload_thread.setDaemon(True)
self.preload_thread.start()
# Have we been added this tick?
self.added = set()
# A list of (time, filename, preload) tuples. This is updated when
# config.developer is True and an image is loaded. Preload is a
# flag that is true if the image was loaded from the preload
# thread. The log is limited to 100 entries, and the newest entry
# is first.
#
# This is only updated when config.developer is True.
self.load_log = [ ]
def init(self):
"""
Updates the cache object to make use of settings that might be provided
by the game-maker.
"""
self.cache_limit = renpy.config.image_cache_size * renpy.config.screen_width * renpy.config.screen_height
def quit(self): #@ReservedAssignment
if not self.preload_thread.isAlive():
return
self.lock.acquire()
self.keep_preloading = False
self.lock.notify()
self.lock.release()
self.preload_thread.join()
# Clears out the cache.
def clear(self):
self.lock.acquire()
self.preloads = [ ]
self.pin_cache = { }
self.cache = { }
self.first_preload_in_tick = True
self.size_of_current_generation = 0
self.total_cache_size = 0
self.added.clear()
self.lock.release()
# Increments time, and clears the list of images to be
# preloaded.
def tick(self):
with self.lock:
self.time += 1
self.preloads = [ ]
self.first_preload_in_tick = True
self.size_of_current_generation = 0
self.added.clear()
if renpy.config.debug_image_cache:
renpy.display.ic_log.write("----")
filename, line = renpy.exports.get_filename_line()
renpy.display.ic_log.write("%s %d", filename, line)
# The preload thread can deal with this update, so we don't need
# to lock things.
def end_tick(self):
self.preloads = [ ]
# This returns the pygame surface corresponding to the provided
# image. It also takes care of updating the age of images in the
# cache to be current, and maintaining the size of the current
# generation of images.
def get(self, image, predict=False):
if not isinstance(image, ImageBase):
raise Exception("Expected an image of some sort, but got" + str(image) + ".")
if not image.cache:
surf = image.load()
renpy.display.render.mutated_surface(surf)
return surf
ce = None
# First try to grab the image out of the cache without locking it.
if image in self.cache:
ce = self.cache[image]
# Now, grab the cache and try again. This deals with the case where the image
# was already in the middle of preloading.
if ce is None:
self.lock.acquire()
ce = self.cache.get(image, None)
if ce is not None:
self.lock.release()
# Otherwise, we keep the lock, and load the image ourselves.
if ce is None:
try:
if image in self.pin_cache:
surf = self.pin_cache[image]
else:
surf = image.load()
except:
self.lock.release()
raise
ce = CacheEntry(image, surf)
self.total_cache_size += ce.size
self.cache[image] = ce
# Indicate that this surface had changed.
renpy.display.render.mutated_surface(ce.surf)
if renpy.config.debug_image_cache:
if predict:
renpy.display.ic_log.write("Added %r (%.02f%%)", ce.what, 100.0 * self.total_cache_size / self.cache_limit)
else:
renpy.display.ic_log.write("Total Miss %r", ce.what)
renpy.display.draw.load_texture(ce.surf)
self.lock.release()
# Move it into the current generation. This isn't protected by
# a lock, so in certain circumstances we could have an
# inaccurate size. But that's pretty unlikely, as the
# preloading thread should never run at the same time as an
# actual load from the normal thread.
if ce.time != self.time:
ce.time = self.time
self.size_of_current_generation += ce.size
# Done... return the surface.
return ce.surf
# This kills off a given cache entry.
def kill(self, ce):
# Should never happen... but...
if ce.time == self.time:
self.size_of_current_generation -= ce.size
self.total_cache_size -= ce.size
del self.cache[ce.what]
if renpy.config.debug_image_cache:
renpy.display.ic_log.write("Removed %r", ce.what)
def cleanout(self):
"""
Cleans out the cache, if it's gotten too large. Returns True
if the cache is smaller than the size limit, or False if it's
bigger and we don't want to continue preloading.
"""
# If we're within the limit, return.
if self.total_cache_size <= self.cache_limit:
return True
# If we're outside the cache limit, we need to go and start
# killing off some of the entries until we're back inside it.
for ce in sorted(self.cache.itervalues(), key=lambda a : a.time):
if ce.time == self.time:
# If we're bigger than the limit, and there's nothing
# to remove, we should stop the preloading right away.
return False
# Otherwise, kill off the given cache entry.
self.kill(ce)
# If we're in the limit, we're done.
if self.total_cache_size <= self.cache_limit:
break
return True
# Called to report that a given image would like to be preloaded.
def preload_image(self, im):
if not isinstance(im, ImageBase):
return
with self.lock:
if im in self.added:
return
self.added.add(im)
if im in self.cache:
self.get(im)
in_cache = True
else:
self.preloads.append(im)
self.lock.notify()
in_cache = False
if in_cache and renpy.config.debug_image_cache:
renpy.display.ic_log.write("Kept %r", im)
def preload_thread_main(self):
while self.keep_preloading:
self.lock.acquire()
self.lock.wait()
self.lock.release()
while self.preloads and self.keep_preloading:
# If the size of the current generation is bigger than the
# total cache size, stop preloading.
if self.size_of_current_generation > self.cache_limit:
if renpy.config.debug_image_cache:
for i in self.preloads:
renpy.display.ic_log.write("Overfull %r", i)
self.preloads = [ ]
break
with self.lock:
try:
image = self.preloads.pop(0)
if image not in self.preload_blacklist:
try:
self.get(image, True)
except:
self.preload_blacklist.add(image)
except:
pass
if not self.cleanout():
self.preloads = [ ]
# If we have time, preload pinned images.
if self.keep_preloading and not renpy.game.less_memory:
workset = set(renpy.store._cache_pin_set)
# Remove things that are not in the workset from the pin cache,
# and remove things that are already in the pin cache from the workset.
for i in self.pin_cache.keys():
if i in workset:
workset.remove(i)
else:
surf = self.pin_cache[i]
del self.pin_cache[i]
# For each image in the worklist...
for image in workset:
if image in self.preload_blacklist:
continue
# If we have normal preloads, break out.
if self.preloads:
break
try:
surf = image.load()
self.pin_cache[image] = surf
renpy.display.draw.load_texture(surf)
except:
self.preload_blacklist.add(image)
def add_load_log(self, filename):
if not renpy.config.developer:
return
preload = (threading.current_thread() is self.preload_thread)
self.load_log.insert(0, (time.time(), filename, preload))
while len(self.load_log) > 100:
self.load_log.pop()
# The cache object.
cache = Cache()
def free_memory():
"""
Frees some memory.
"""
renpy.display.draw.free_memory()
cache.clear()
class ImageBase(renpy.display.core.Displayable):
"""
This is the base class for all of the various kinds of images that
we can possibly have.
"""
__version__ = 1
def after_upgrade(self, version):
if version < 1:
self.cache = True
def __init__(self, *args, **properties):
self.rle = properties.pop('rle', None)
self.cache = properties.pop('cache', True)
properties.setdefault('style', 'image')
super(ImageBase, self).__init__(**properties)
self.identity = (type(self).__name__, ) + args
def __hash__(self):
return hash(self.identity)
def __eq__(self, other):
if not isinstance(other, ImageBase):
return False
return self.identity == other.identity
def __repr__(self):
return "<" + " ".join([repr(i) for i in self.identity]) + ">"
def load(self):
"""
This function is called by the image cache code to cause this
image to be loaded. It's expected that children of this class
would override this.
"""
assert False
def render(self, w, h, st, at):
im = cache.get(self)
texture = renpy.display.draw.load_texture(im)
w, h = im.get_size()
rv = renpy.display.render.Render(w, h)
rv.blit(texture, (0, 0))
return rv
def predict_one(self):
renpy.display.predict.image(self)
def predict_files(self):
"""
Returns a list of files that will be accessed when this image
operation is performed.
"""
return [ ]
class Image(ImageBase):
"""
This image manipulator loads an image from a file.
"""
def __init__(self, filename, **properties):
"""
@param filename: The filename that the image will be loaded from.
"""
super(Image, self).__init__(filename, **properties)
self.filename = filename
def get_mtime(self):
return renpy.loader.get_mtime(self.filename)
def load(self, unscaled=False):
cache.add_load_log(self.filename)
try:
if unscaled:
surf = renpy.display.pgrender.load_image_unscaled(renpy.loader.load(self.filename), self.filename)
else:
surf = renpy.display.pgrender.load_image(renpy.loader.load(self.filename), self.filename)
return surf
except Exception, e:
if renpy.config.missing_image_callback:
im = renpy.config.missing_image_callback(self.filename)
if im is None:
raise e
return im.load()
raise
def predict_files(self):
if renpy.loader.loadable(self.filename):
return [ self.filename ]
else:
if renpy.config.missing_image_callback:
im = renpy.config.missing_image_callback(self.filename)
if im is not None:
return im.predict_files()
return [ self.filename ]
class ZipFileImage(ImageBase):
def __init__(self, zipfilename, filename, mtime=0, **properties):
super(ZipFileImage, self).__init__(zipfilename, filename, mtime, **properties)
self.zipfilename = zipfilename
self.filename = filename
def load(self):
try:
zf = zipfile.ZipFile(self.zipfilename, 'r')
data = zf.read(self.filename)
sio = cStringIO.StringIO(data)
rv = renpy.display.pgrender.load_image(sio, self.filename)
zf.close()
return rv
except:
return renpy.display.pgrender.surface((2, 2), True)
def predict_files(self):
return [ ]
class Composite(ImageBase):
"""
:doc: im_im
This image manipulator composites multiple images together to
form a single image.
The `size` should be a (width, height) tuple giving the size
of the composed image.
The remaining positional arguments are interpreted as groups of
two. The first argument in a group should be an (x, y) tuple,
while the second should be an image manipulator. The image
produced by the image manipulator is composited at the location
given by the tuple.
::
image girl clothed happy = im.Composite(
(300, 600),
(0, 0), "girl_body.png",
(0, 0), "girl_clothes.png",
(100, 100), "girl_happy.png"
)
"""
def __init__(self, size, *args, **properties):
super(Composite, self).__init__(size, *args, **properties)
if len(args) % 2 != 0:
raise Exception("Composite requires an odd number of arguments.")
self.size = size
self.positions = args[0::2]
self.images = [ image(i) for i in args[1::2] ]
def get_mtime(self):
return min(i.get_mtime() for i in self.images)
def load(self):
if self.size:
size = self.size
else:
size = cache.get(self.images[0]).get_size()
rv = renpy.display.pgrender.surface(size, True)
for pos, im in zip(self.positions, self.images):
rv.blit(cache.get(im), pos)
return rv
def predict_files(self):
rv = [ ]
for i in self.images:
rv.extend(i.predict_files())
return rv
class Scale(ImageBase):
"""
:doc: im_im
An image manipulator that scales `im` (an image manipulator) to
`width` and `height`.
If `bilinear` is true, then bilinear interpolation is used for
the scaling. Otherwise, nearest neighbor interpolation is used.
::
image logo scale = im.Scale("logo.png", 100, 150)
"""
def __init__(self, im, width, height, bilinear=True, **properties):
im = image(im)
super(Scale, self).__init__(im, width, height, bilinear, **properties)
self.image = im
self.width = int(width)
self.height = int(height)
self.bilinear = bilinear
def get_mtime(self):
return self.image.get_mtime()
def load(self):
child = cache.get(self.image)
if self.bilinear:
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.scale.smoothscale(child, (self.width, self.height))
finally:
renpy.display.render.blit_lock.release()
else:
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.pgrender.transform_scale(child, (self.width, self.height))
finally:
renpy.display.render.blit_lock.release()
return rv
def predict_files(self):
return self.image.predict_files()
class FactorScale(ImageBase):
"""
:doc: im_im
An image manipulator that scales `im` (a second image manipulator)
to `width` times its original `width`, and `height` times its
original height. If `height` is omitted, it defaults to `width`.
If `bilinear` is true, then bilinear interpolation is used for
the scaling. Otherwise, nearest neighbor interpolation is used.
::
image logo doubled = im.FactorScale("logo.png", 1.5)
"""
def __init__(self, im, width, height=None, bilinear=True, **properties):
if height is None:
height = width
im = image(im)
super(FactorScale, self).__init__(im, width, height, bilinear, **properties)
self.image = im
self.width = width
self.height = height
self.bilinear = bilinear
def get_mtime(self):
return self.image.get_mtime()
def load(self):
surf = cache.get(self.image)
width, height = surf.get_size()
width = int(width * self.width)
height = int(height * self.height)
if self.bilinear:
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.scale.smoothscale(surf, (width, height))
finally:
renpy.display.render.blit_lock.release()
else:
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.pgrender.transform_scale(surf, (width, height))
finally:
renpy.display.render.blit_lock.release()
return rv
def predict_files(self):
return self.image.predict_files()
class Flip(ImageBase):
"""
:doc: im_im
An image manipulator that flips `im` (an image manipulator)
vertically or horizontally. `vertical` and `horizontal` control
the directions in which the image is flipped.
::
image eileen flip = im.Flip("eileen_happy.png", vertical=True)
"""
def __init__(self, im, horizontal=False, vertical=False, **properties):
if not (horizontal or vertical):
raise Exception("im.Flip must be called with a true value for horizontal or vertical.")
im = image(im)
super(Flip, self).__init__(im, horizontal, vertical, **properties)
self.image = im
self.horizontal = horizontal
self.vertical = vertical
def get_mtime(self):
return self.image.get_mtime()
def load(self):
child = cache.get(self.image)
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.pgrender.flip(child, self.horizontal, self.vertical)
finally:
renpy.display.render.blit_lock.release()
return rv
def predict_files(self):
return self.image.predict_files()
class Rotozoom(ImageBase):
"""
An image manipulator that applies a smooth rotation and zoom to another image manipulator.
"""
def __init__(self, im, angle, zoom, **properties):
"""
@param im: The image to be rotozoomed.
@param angle: The number of degrees counterclockwise the image is
to be rotated.
@param zoom: The zoom factor. Numbers that are greater than 1.0
lead to the image becoming larger.
"""
im = image(im)
super(Rotozoom, self).__init__(im, angle, zoom, **properties)
self.image = im
self.angle = angle
self.zoom = zoom
def get_mtime(self):
return self.image.get_mtime()
def load(self):
child = cache.get(self.image)
try:
renpy.display.render.blit_lock.acquire()
rv = renpy.display.pgrender.rotozoom(child, self.angle, self.zoom)
finally:
renpy.display.render.blit_lock.release()
return rv
def predict_files(self):
return self.image.predict_files()
class Crop(ImageBase):
"""
:doc: im_im
:args: (im, rect)
An image manipulator that crops `rect`, a (x, y, width, height) tuple,
out of `im`, an image manipulator.
::
image logo crop = im.Crop("logo.png", (0, 0, 100, 307))
"""
def __init__(self, im, x, y=None, w=None, h=None, **properties):
im = image(im)
if y is None:
(x, y, w, h) = x
super(Crop, self).__init__(im, x, y, w, h, **properties)
self.image = im
self.x = x
self.y = y
self.w = w
self.h = h
def get_mtime(self):
return self.image.get_mtime()
def load(self):
return cache.get(self.image).subsurface((self.x, self.y,
self.w, self.h))
def predict_files(self):
return self.image.predict_files()
ramp_cache = { }
def ramp(start, end):
"""
Returns a 256 character linear ramp, where the first character has
the value start and the last character has the value end. Such a
ramp can be used as a map argument of im.Map.
"""
rv = ramp_cache.get((start, end), None)
if rv is None:
chars = [ ]
for i in range(0, 256):
i = i / 255.0
chars.append(chr(int( end * i + start * (1.0 - i) ) ) )
rv = "".join(chars)
ramp_cache[start, end] = rv
return rv
identity = ramp(0, 255)
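# Illustrative note (editor's addition): ramp(0, 255) is the identity mapping used
# above, while ramp(255, 0) reverses a channel. Passed to im.Map, such a ramp remaps
# one channel while leaving the others untouched, e.g. inverting only the red channel
# of a hypothetical image file:
#
#     image eileen redless = im.Map("eileen_happy.png", rmap=ramp(255, 0))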
class Map(ImageBase):
"""
This adjusts the colors of the image that is its child. It takes
as arguments four 256-character strings. If a pixel channel has a
value of 192, then the value of the 192nd character in the string
is used for the mapped pixel component.
"""
def __init__(self, im, rmap=identity, gmap=identity, bmap=identity,
amap=identity, force_alpha=False, **properties):
im = image(im)
super(Map, self).__init__(im, rmap, gmap, bmap, amap, force_alpha, **properties)
self.image = im
self.rmap = rmap
self.gmap = gmap
self.bmap = bmap
self.amap = amap
self.force_alpha = force_alpha
def get_mtime(self):
return self.image.get_mtime()
def load(self):
surf = cache.get(self.image)
rv = renpy.display.pgrender.surface(surf.get_size(), True)
renpy.display.module.map(surf, rv,
self.rmap, self.gmap, self.bmap, self.amap)
return rv
def predict_files(self):
return self.image.predict_files()
class Twocolor(ImageBase):
"""
This takes as arguments two colors, white and black. The image is
mapped such that pixels in white have the white color, pixels in
black have the black color, and shades of gray are linearly
interpolated inbetween. The alpha channel is mapped linearly
between 0 and the alpha found in the white color, the black
color's alpha is ignored.
"""
def __init__(self, im, white, black, force_alpha=False, **properties):
white = renpy.easy.color(white)
black = renpy.easy.color(black)
im = image(im)
super(Twocolor, self).__init__(im, white, black, force_alpha, **properties)
self.image = im
self.white = white
self.black = black
self.force_alpha = force_alpha
def get_mtime(self):
return self.image.get_mtime()
def load(self):
surf = cache.get(self.image)
rv = renpy.display.pgrender.surface(surf.get_size(), True)
renpy.display.module.twomap(surf, rv,
self.white, self.black)
return rv
def predict_files(self):
return self.image.predict_files()
class Recolor(ImageBase):
"""
This adjusts the colors of the image that is its child. It takes as
arguments four numbers between 0 and 255, and maps each channel of the image
linearly between 0 and the supplied value for that channel.
"""
def __init__(self, im, rmul=255, gmul=255, bmul=255,
amul=255, force_alpha=False, **properties):
im = image(im)
super(Recolor, self).__init__(im, rmul, gmul, bmul, amul, force_alpha, **properties)
self.image = im
self.rmul = rmul + 1
self.gmul = gmul + 1
self.bmul = bmul + 1
self.amul = amul + 1
self.force_alpha = force_alpha
def get_mtime(self):
return self.image.get_mtime()
def load(self):
surf = cache.get(self.image)
rv = renpy.display.pgrender.surface(surf.get_size(), True)
renpy.display.module.linmap(surf, rv,
self.rmul, self.gmul, self.bmul, self.amul)
return rv
def predict_files(self):
return self.image.predict_files()
class MatrixColor(ImageBase):
"""
:doc: im_matrixcolor
An image operator that uses `matrix` to linearly transform the
image manipulator `im`.
`Matrix` should be a list, tuple, or :func:`im.matrix` that is 20
or 25 elements long. If the object has 25 elements, then elements
past the 20th are ignored.
When the four components of the source color are R, G, B, and A,
which range from 0.0 to 1.0; the four components of the transformed
color are R', G', B', and A', with the same range; and the elements
of the matrix are named::
[ a, b, c, d, e,
f, g, h, i, j,
k, l, m, n, o,
p, q, r, s, t ]
the transformed colors can be computed with the formula::
R' = (a * R) + (b * G) + (c * B) + (d * A) + e
G' = (f * R) + (g * G) + (h * B) + (i * A) + j
B' = (k * R) + (l * G) + (m * B) + (n * A) + o
A' = (p * R) + (q * G) + (r * B) + (s * A) + t
The components of the transformed color are clamped to the
range [0.0, 1.0].
"""
def __init__(self, im, matrix, **properties):
im = image(im)
if len(matrix) != 20 and len(matrix) != 25:
raise Exception("ColorMatrix expects a 20 or 25 element matrix, got %d elements." % len(matrix))
matrix = tuple(matrix)
super(MatrixColor, self).__init__(im, matrix, **properties)
self.image = im
self.matrix = matrix
def get_mtime(self):
return self.image.get_mtime()
def load(self):
surf = cache.get(self.image)
rv = renpy.display.pgrender.surface(surf.get_size(), True)
renpy.display.module.colormatrix(surf, rv, self.matrix)
return rv
def predict_files(self):
return self.image.predict_files()
class matrix(tuple):
"""
:doc: im_matrixcolor
Constructs an im.matrix object from `matrix`. The operations
supported by im.matrix objects are matrix multiplication, scalar
multiplication, element-wise addition, and element-wise
subtraction. These operations are invoked using the standard
mathematical operators (\\*, \\*, +, and -, respectively). If two
im.matrix objects are multiplied, matrix multiplication is
performed, otherwise scalar multiplication is used.
`matrix` is a 20 or 25 element list or tuple. If it is 20 elements
long, it is padded with (0, 0, 0, 0, 1) to make a 5x5 matrix,
suitable for multiplication.
"""
def __new__(cls, *args):
if len(args) == 1:
args = tuple(args[0])
if len(args) == 20:
args = args + (0, 0, 0, 0, 1)
if len(args) != 25:
raise Exception("Matrix expects to be given 20 or 25 entries, not %d." % len(args))
return tuple.__new__(cls, args)
def mul(self, a, b):
if not isinstance(a, matrix):
a = matrix(a)
if not isinstance(b, matrix):
b = matrix(b)
result = [ 0 ] * 25
for y in range(0, 5):
for x in range(0, 5):
for i in range(0, 5):
result[x + y * 5] += a[x + i * 5] * b[i + y * 5]
return matrix(result)
def scalar_mul(self, other):
other = float(other)
return matrix([ i * other for i in self ])
def vector_mul(self, o):
return (o[0]*self[0] + o[1]*self[1] + o[2]*self[2] + o[3]*self[3] + self[4],
o[0]*self[5] + o[1]*self[6] + o[2]*self[7] + o[3]*self[8] + self[9],
o[0]*self[10] + o[1]*self[11] + o[2]*self[12] + o[3]*self[13] + self[14],
o[0]*self[15] + o[1]*self[16] + o[2]*self[17] + o[3]*self[18] + self[19],
1)
def __add__(self, other):
if isinstance(other, (int, float)):
other = float(other)
return matrix([ i + other for i in self ])
other = matrix(other)
return matrix([ i + j for i, j in zip(self, other)])
__radd__ = __add__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __mul__(self, other):
if isinstance(other, (int, float)):
return self.scalar_mul(other)
return self.mul(self, other)
def __rmul__(self, other):
if isinstance(other, (int, float)):
return self.scalar_mul(other)
return self.mul(other, self)
def __repr__(self):
return """\
im.matrix(%f, %f, %f, %f, %f,
%f, %f, %f, %f, %f,
%f, %f, %f, %f, %f,
%f, %f, %f, %f, %f,
%f, %f, %f, %f, %f)""" % self
@staticmethod
def identity():
"""
:doc: im_matrixcolor
:name: im.matrix.identity
Returns an identity matrix, one that does not change color or
alpha.
"""
return matrix(1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0)
@staticmethod
def saturation(level, desat=(0.2126, 0.7152, 0.0722)):
"""
:doc: im_matrixcolor
:name: im.matrix.saturation
Returns an im.matrix that alters the saturation of an
image. The alpha channel is untouched.
`level`
The amount of saturation in the resulting image. 1.0 is
the unaltered image, while 0.0 is grayscale.
`desat`
This is a 3-element tuple that controls how much of the
red, green, and blue channels will be placed into all
three channels of a fully desaturated image. The default
is based on the constants used for the luminance channel
of an NTSC television signal. Since the human eye is
mostly sensitive to green, more of the green channel is
kept than the other two channels.
"""
r, g, b = desat
def I(a, b):
return a + (b - a) * level
return matrix(I(r, 1), I(g, 0), I(b, 0), 0, 0,
I(r, 0), I(g, 1), I(b, 0), 0, 0,
I(r, 0), I(g, 0), I(b, 1), 0, 0,
0, 0, 0, 1, 0)
@staticmethod
def desaturate():
"""
:doc: im_matrixcolor
:name: im.matrix.desaturate
Returns an im.matrix that desaturates the image (makes it
grayscale). This is equivalent to calling
im.matrix.saturation(0).
"""
return matrix.saturation(0.0)
@staticmethod
def tint(r, g, b):
"""
:doc: im_matrixcolor
:name: im.matrix.tint
Returns an im.matrix that tints an image, without changing
the alpha channel. `r`, `g`, and `b` should be numbers between
0 and 1, and control what fraction of the given channel is
placed into the final image. (For example, if `r` is .5, and
the value of the red channel is 100, the transformed color
will have a red value of 50.)
"""
return matrix(r, 0, 0, 0, 0,
0, g, 0, 0, 0,
0, 0, b, 0, 0,
0, 0, 0, 1, 0)
@staticmethod
def invert():
"""
:doc: im_matrixcolor
:name: im.matrix.invert
Returns an im.matrix that inverts the red, green, and blue
channels of the image without changing the alpha channel.
"""
return matrix(-1, 0, 0, 0, 1,
0, -1, 0, 0, 1,
0, 0, -1, 0, 1,
0, 0, 0, 1, 0)
@staticmethod
def brightness(b):
"""
:doc: im_matrixcolor
:name: im.matrix.brightness
Returns an im.matrix that alters the brightness of an image.
`b`
The amount of change in image brightness. This should be
a number between -1 and 1, with -1 the darkest possible
image and 1 the brightest.
"""
return matrix(1, 0, 0, 0, b,
0, 1, 0, 0, b,
0, 0, 1, 0, b,
0, 0, 0, 1, 0)
@staticmethod
def opacity(o):
"""
:doc: im_matrixcolor
:name: im.matrix.opacity
Returns an im.matrix that alters the opacity of an image. An
`o` of 0.0 is fully transparent, while 1.0 is fully opaque.
"""
return matrix(1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, o, 0)
@staticmethod
def contrast(c):
"""
:doc: im_matrixcolor
:name: im.matrix.contrast
Returns an im.matrix that alters the contrast of an image. `c` should
be greater than 0.0, with values between 0.0 and 1.0 decreasing contrast, and
values greater than 1.0 increasing contrast.
"""
return matrix.brightness(-.5) * matrix.tint(c, c, c) * matrix.brightness(.5)
# from http://www.gskinner.com/blog/archives/2005/09/flash_8_source.html
@staticmethod
def hue(h):
"""
:doc: im_matrixcolor
:name: im.matrix.hue
Returns an im.matrix that rotates the hue by `h` degrees, while
preserving luminosity.
"""
h = h * math.pi / 180
cosVal = math.cos(h)
sinVal = math.sin(h)
lumR = 0.213
lumG = 0.715
lumB = 0.072
return matrix(
lumR+cosVal*(1-lumR)+sinVal*(-lumR),lumG+cosVal*(-lumG)+sinVal*(-lumG),lumB+cosVal*(-lumB)+sinVal*(1-lumB),0,0,
lumR+cosVal*(-lumR)+sinVal*(0.143),lumG+cosVal*(1-lumG)+sinVal*(0.140),lumB+cosVal*(-lumB)+sinVal*(-0.283),0,0,
lumR+cosVal*(-lumR)+sinVal*(-(1-lumR)),lumG+cosVal*(-lumG)+sinVal*(lumG),lumB+cosVal*(1-lumB)+sinVal*(lumB),0,0,
0,0,0,1,0,
0,0,0,0,1
)
@staticmethod
def colorize(black_color, white_color):
"""
:doc: im_matrixcolor
:name: im.matrix.colorize
Returns an im.matrix that colorizes a black and white image.
`black_color` and `white_color` are Ren'Py style colors, so
they may be specified as strings or tuples of (0-255) color
values. ::
# This makes black colors red, and white colors blue.
image logo colored = im.MatrixColor(
"bwlogo.png",
im.matrix.colorize("#f00", "#00f"))
"""
(r0, g0, b0, _a0) = renpy.easy.color(black_color)
(r1, g1, b1, _a1) = renpy.easy.color(white_color)
r0 /= 255.0
g0 /= 255.0
b0 /= 255.0
r1 /= 255.0
g1 /= 255.0
b1 /= 255.0
return matrix((r1-r0), 0, 0, 0, r0,
0, (g1-g0), 0, 0, g0,
0, 0, (b1-b0), 0, b0,
0, 0, 0, 1, 0)
def Grayscale(im, desat=(0.2126, 0.7152, 0.0722), **properties):
"""
:doc: im_im
:args: (im, **properties)
An image manipulator that creates a desaturated version of the image
manipulator `im`.
"""
return MatrixColor(im, matrix.saturation(0.0, desat), **properties)
def Sepia(im, tint=(1.0, .94, .76), desat=(0.2126, 0.7152, 0.0722), **properties):
"""
:doc: im_im
:args: (im, **properties)
An image manipulator that creates a sepia-toned version of the image
manipulator `im`.
"""
return MatrixColor(im, matrix.saturation(0.0, desat) * matrix.tint(tint[0], tint[1], tint[2]), **properties)
def Color(im, color):
"""
This recolors the supplied image, mapping colors such that black is
black and white is the supplied color.
"""
r, g, b, a = renpy.easy.color(color)
return Recolor(im, r, g, b, a)
def Alpha(image, alpha, **properties):
"""
Returns an alpha-mapped version of the image. Alpha is the maximum
alpha that this image can have, a number between 0.0 (fully
transparent) and 1.0 (opaque).
If an image already has an alpha channel, values in that alpha
channel are reduced as appropriate.
"""
return Recolor(image, 255, 255, 255, int(255 * alpha), force_alpha=True, **properties)
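# Illustrative usage (editor's addition), with a hypothetical file name:
#
#     image logo faded = im.Alpha("logo.png", 0.5)   # at most 50% opaque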
class Tile(ImageBase):
"""
:doc: im_im
An image manipulator that tiles the image manipulator `im`, until
it is `size`.
`size`
If not None, a (width, height) tuple. If None, this defaults to
(:var:`config.screen_width`, :var:`config.screen_height`).
"""
def __init__(self, im, size=None, **properties):
im = image(im)
super(Tile, self).__init__(im, size, **properties)
self.image = im
self.size = size
def get_mtime(self):
return self.image.get_mtime()
def load(self):
size = self.size
if size is None:
size = (renpy.config.screen_width, renpy.config.screen_height)
surf = cache.get(self.image)
rv = renpy.display.pgrender.surface(size, True)
width, height = size
sw, sh = surf.get_size()
for y in range(0, height, sh):
for x in range(0, width, sw):
rv.blit(surf, (x, y))
return rv
def predict_files(self):
return self.image.predict_files()
class AlphaMask(ImageBase):
"""
:doc: im_im
An image manipulator that takes two image manipulators, `base` and
`mask`, as arguments. It replaces the alpha channel of `base` with
the red channel of `mask`.
This is used to provide an image's alpha channel in a second
image, like having one jpeg for color data, and a second one
for alpha. In some cases, two jpegs can be smaller than a
single png file.
"""
def __init__(self, base, mask, **properties):
super(AlphaMask, self).__init__(base, mask, **properties)
self.base = image(base)
self.mask = image(mask)
def get_mtime(self):
return max(self.base.get_mtime(), self.mask.get_mtime())
def load(self):
basesurf = cache.get(self.base)
masksurf = cache.get(self.mask)
if basesurf.get_size() != masksurf.get_size():
raise Exception("AlphaMask surfaces must be the same size.")
# Used to copy the surface.
rv = renpy.display.pgrender.copy_surface(basesurf)
renpy.display.module.alpha_munge(masksurf, rv, identity)
return rv
def predict_files(self):
return self.base.predict_files() + self.mask.predict_files()
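# Illustrative usage (editor's addition): color data in one JPEG, alpha taken from the
# red channel of a second JPEG (both file names are hypothetical):
#
#     image bg beach = im.AlphaMask("beach_color.jpg", "beach_alpha.jpg")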
def image(arg, loose=False, **properties):
"""
:doc: im_image
:name: Image
:args: (filename, **properties)
Loads an image from a file. `filename` is a
string giving the name of the file.
`filename` should be a JPEG or PNG file with an appropriate
extension.
"""
"""
(Actually, the user documentation is a bit misleading, as
this tries for compatibility with several older forms of
image specification.)
If the loose argument is False, then this will report an error if an
arbitrary argument is given. If it's True, then the argument is passed
through unchanged.
"""
if isinstance(arg, ImageBase):
return arg
elif isinstance(arg, basestring):
return Image(arg, **properties)
elif isinstance(arg, renpy.display.image.ImageReference):
arg.find_target()
return image(arg.target, loose=loose, **properties)
elif isinstance(arg, tuple):
params = [ ]
for i in arg:
params.append((0, 0))
params.append(i)
return Composite(None, *params)
elif loose:
return arg
if isinstance(arg, renpy.display.core.Displayable):
raise Exception("Expected an image, but got a general displayable.")
else:
raise Exception("Could not construct image from argument.")
def load_image(fn):
"""
This loads an image from the given filename, using the cache.
"""
surf = cache.get(image(fn))
return renpy.display.draw.load_texture(surf)
|
MSEMJEJME/Get-Dumped
|
renpy/display/im.py
|
Python
|
gpl-2.0
| 45,147
| 0.005759
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on November 19, 2016
@author: Denis Varise Bernardes & Eder Martioli
Description: this library provides the following functions:
mkDir_saveCombinedImages: through a call to the LeArquivoReturnLista function it obtains the list of all images acquired in the run;
it performs the subtraction between each pair of images, saving the result in a new directory 'Imagens_reduzidas'. Once that is done, the
function creates a list with the names of the new images through a call to the criaArquivo_listaImagensCombinadas function.
readArqDetector: this function receives the name of the file containing the detector PAR2 values, returning a vector with the measured values.
ImagemUnica_returnHeader: this function receives a single image from the list, returning its header so that information can be extracted.
LeArquivoReturnLista: this function reads the listaImagens file generated by the criaArq_listaImgInput function, returning
a list with the image names.
criaArquivo_listaImagensCombinadas: this function creates a file named listaImagensCombinadas containing the names of the combined
images generated by the mkDir_saveCombinedImages function.
LeArqFluxoCamera: this function reads the 'Fluxo camera.dat' file generated by the criaArqFluxoCamera function, returning two
vectors with the flux values and the standard deviations.
LeArq_curvaCalibDetector: given the name of the detector calibration-curve file and the number of image sets, this
function returns a vector containing the curve values when that option is provided; otherwise, the function returns a vector
containing the value 1.
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import astropy.io.fits as fits
import numpy as np
import os
from sys import exit
from math import sqrt
from geraArquivo import geraArquivo
def mkDir_saveCombinedImages(nImages, images_dir):
print('Criando diretorio: Mediana das Imagens')
    # receives a list of images and returns a directory with the combined images
    # NOTE: 'chdir' and 'cwd' are undefined in the original function; they are defined
    # below as an assumption, following the directory pattern used in mkDir_ImgPair.
    if not os.path.exists(images_dir + '\\' + 'Imagens_reduzidas'):
        os.makedirs(images_dir + '\\' + 'Imagens_reduzidas')
    chdir = images_dir + '\\' + 'Imagens_reduzidas'
    cwd = images_dir
    lista = LeArquivoReturnLista('listaImagens', images_dir)
n, i = 0, 0
VetorImagens = []
for i in range(len(lista)):
if i%nImages == nImages-1:
imagem = fits.getdata(images_dir + '\\' + lista[i])
VetorImagens.append(imagem)
os.chdir(chdir)
geraArquivo(VetorImagens, n)
os.chdir(cwd)
VetorImagens = []
n+=1
else:
imagem = fits.getdata(images_dir + '\\' + lista[i])
VetorImagens.append(imagem)
    criaArquivo_listaImagensReduzidas(images_dir)
return
def mkDir_ImgPair(tagPAR2, tagPAR1, ganho, images_dir):
print('Criando diretorio: Imagens reduzidas')
if not os.path.exists(images_dir + '\\' + 'Imagens_reduzidas'): os.makedirs(images_dir + '\\' + 'Imagens_reduzidas')
chdir = images_dir + '\\' + 'Imagens_reduzidas'
    # receives a list of images and returns a directory with the images reduced for cosmic rays and shutter error
listaPAR2 = LeArquivoReturnLista(tagPAR2+'List.txt', images_dir)
listaPAR1 = LeArquivoReturnLista(tagPAR1+'List.txt', images_dir)
VetorImagens = [[],[]]
i,n, string, VetorStdSignal = 0, 0, '', []
for i in range(len(listaPAR2)):
imagemPAR2 = fits.getdata(images_dir + '\\' + listaPAR2[i])[0].astype(float)
imagemPAR1 = fits.getdata(images_dir + '\\' + listaPAR1[i])[0].astype(float)
imgReducePAR = imagemPAR2 - imagemPAR1
VetorStdSignal.append(sqrt(sum(sum(imagemPAR2 + imagemPAR1))*ganho))
os.chdir(chdir)
        string = '%03i' % (n)
        print('ImagemReduzida_%s.fits'%(string))
fits.writeto('ImagemReduzida_%s.fits'%(string),imgReducePAR, overwrite=True)
os.chdir(images_dir)
VetorImagens = [[],[]]
n+=1
criaArquivo_StdDiffImagens(VetorStdSignal, images_dir)
criaArquivo_listaImagensReduzidas(images_dir)
return
def readArqDetector(name, images_dir):
valores=[]
with open(images_dir + '\\' + name) as arq:
Strvalores = arq.read().splitlines()
for valor in Strvalores[1:]:
valores.append(float(valor))
arq.close()
return valores
def ImagemUnica_returnHeader(tagPAR2, images_path):
with open(images_path + '\\' + tagPAR2+'List.txt') as arq:
imagem = arq.read().splitlines()[0].split(',')[0]
arq.close()
header = fits.getheader(images_path + '\\' + imagem)
return header
def LeArquivoReturnLista(arquivo, images_path):
with open(images_path + '\\' + arquivo) as arq:
lista = []
linhas = arq.read().splitlines()
for lin in linhas:
for img in lin.split(','):
lista.append(img)
arq.close()
return lista
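# Illustrative sketch (not part of the original script): LeArquivoReturnLista flattens a
# text file whose lines may contain comma-separated image names. The contents below are
# hypothetical.
#
#     listaImagens:
#         img001.fits,img002.fits
#         img003.fits
#
#     LeArquivoReturnLista('listaImagens', images_path)
#     # -> ['img001.fits', 'img002.fits', 'img003.fits']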
def criaArquivo_listaImagensReduzidas(images_path):
    nome = images_path + '\\Imagens_reduzidas\\listaImagensReduzidas'
    try: File = open(nome,'w')
    except:
        os.remove(nome)
        File = open(nome,'w')
    listaImagemCombinada = os.listdir(images_path + '\\Imagens_reduzidas')
listaImagemCombinada.sort()
for img in listaImagemCombinada:
if '.fits' in img:
File.write(img+'\n')
File.close()
def criaArquivo_StdDiffImagens(vetorStd, images_path):
nome = images_path + '\\' + 'StdDiffImages'
try: arq = open(nome,'w')
except:
        os.remove(nome)
arq = open(nome,'w')
arq.write('-Desvio padrao das imagens reduzidas:\n')
for std in vetorStd:
arq.write(' \t\t ' + str(std) + '\n')
arq.close()
def LeArqFluxoCamera(images_path):
vetorFluxoCamera, vetorSigmaBackground_Signal = [],[]
with open(images_path + '\\' + 'Fluxo camera.dat') as arq:
listaValores = arq.read().splitlines()
for linha in listaValores[1:]:
Fluxo_e_Sigma = linha.split('\t\t\t')
vetorFluxoCamera.append(float(Fluxo_e_Sigma[0]))
vetorSigmaBackground_Signal.append(float(Fluxo_e_Sigma[1]))
return vetorFluxoCamera, vetorSigmaBackground_Signal
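# Illustrative sketch (not part of the original script): 'Fluxo camera.dat' is expected to
# hold a header line followed by one flux value and one sigma value per line, separated by
# three tab characters. The values below are hypothetical.
#
#     <header line>
#     1234.5\t\t\t12.3
#     1230.8\t\t\t11.9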
def LeArq_curvaCalibFiltroDensidade(nome, numeroImagens, images_path):
VetorPAR2s=[]
if nome != '':
with open(images_path + '\\' + nome) as arq:
linhas = arq.read().splitlines()
arq.close()
for PAR2 in linhas[1:]:
if PAR2 == '':continue
VetorPAR2s.append(float(PAR2))
else:
for i in range(numeroImagens):
VetorPAR2s.append(1)
return VetorPAR2s
def LeArq_curvaEQFabricante(name, images_path):
espectro, vetorEQ = [], []
with open(images_path + '\\' + name) as arq:
linhas = arq.read().splitlines()
arq.close()
for linha in linhas:
if linha == '':continue
valores = linha.split('\t')
espectro.append(float(valores[0]))
vetorEQ.append(float(valores[1]))
return vetorEQ, espectro
|
DBernardes/ProjetoECC
|
Eficiência_Quântica/Codigo/QE_reduceImgs_readArq.py
|
Python
|
mit
| 7,313
| 0.012585
|
import os
from .PBX_Base_Reference import *
from ...Helpers import path_helper
class PBXLibraryReference(PBX_Base_Reference):
def __init__(self, lookup_func, dictionary, project, identifier):
super(PBXLibraryReference, self).__init__(lookup_func, dictionary, project, identifier);
|
samdmarshall/xcparse
|
xcparse/Xcode/PBX/PBXLibraryReference.py
|
Python
|
bsd-3-clause
| 307
| 0.019544
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastqvalidator(MakefilePackage):
"""The fastQValidator validates the format of fastq files."""
homepage = "http://genome.sph.umich.edu/wiki/FastQValidator"
url = "https://github.com/statgen/fastQValidator/archive/v0.1.1a.tar.gz"
version('2017-01-10', commit='6d619a34749e9d33c34ef0d3e0e87324ca77f320',
git='https://github.com/statgen/fastQValidator.git')
resource(
name='libStatGen',
git='https://github.com/statgen/libStatGen.git',
commit='9db9c23e176a6ce6f421a3c21ccadedca892ac0c'
)
@property
def build_targets(self):
return ['LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen'))]
@property
def install_targets(self):
return [
'INSTALLDIR={0}'.format(self.prefix.bin),
'LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen')),
'install'
]
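# Illustrative usage (not part of the original recipe): once this package file is present
# in a Spack repository, the tool is typically built and installed with
#
#     spack install fastqvalidator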
|
lgarren/spack
|
var/spack/repos/builtin/packages/fastqvalidator/package.py
|
Python
|
lgpl-2.1
| 2,230
| 0.000897
|
"""
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
iscarecrow/sb
|
server/wsgi.py
|
Python
|
mit
| 387
| 0.002584
|
00000 0 output/lattice.py.err
32074 1 output/lattice.py.out
|
Conedy/Conedy
|
testing/createNetwork/expected/sum_lattice.py
|
Python
|
gpl-2.0
| 68
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
import mock
import pytest
from .helper import broker_range
from kafka_utils.kafka_cluster_manager.cluster_info.error import RebalanceError
from kafka_utils.kafka_cluster_manager.cluster_info \
.partition_count_balancer import PartitionCountBalancer
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import calculate_partition_movement
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_broker_leader_counts
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_net_imbalance
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_replication_group_imbalance_stats
class TestPartitionCountBalancer(object):
@pytest.fixture
def create_balancer(self):
def build_balancer(cluster_topology, **kwargs):
args = mock.Mock(spec=Namespace)
args.balancer_args = []
args.configure_mock(**kwargs)
return PartitionCountBalancer(cluster_topology, args)
return build_balancer
def assert_valid(self, new_assignment, orig_assignment, orig_brokers):
"""Assert if new-assignment is valid based on given assignment.
Asserts the results for following parameters:
a) Asserts that keys in both assignments are same
b) Asserts that replication-factor of result remains same
c) Assert that new-replica-brokers are amongst given broker-list
"""
# Verify that partitions remain same
assert set(orig_assignment.keys()) == set(new_assignment.keys())
for t_p, new_replicas in new_assignment.iteritems():
orig_replicas = orig_assignment[t_p]
# Verify that new-replicas are amongst given broker-list
assert all([broker in orig_brokers for broker in new_replicas])
# Verify that replication-factor remains same
assert len(new_replicas) == len(orig_replicas)
def assert_leader_valid(self, orig_assignment, new_assignment):
"""Verify that new-assignment complies with just leader changes.
Following characteristics are verified for just leader-changes.
a) partitions remain same
b) replica set remains same
"""
# Partition-list remains unchanged
assert sorted(orig_assignment.keys()) == sorted(new_assignment.keys())
# Replica-set remains same
for partition, orig_replicas in orig_assignment.iteritems():
assert set(orig_replicas) == set(new_assignment[partition])
def test_rebalance_replication_groups(
self,
create_balancer,
create_cluster_topology,
default_assignment,
):
ct = create_cluster_topology()
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment is valid
self.assert_valid(
ct.assignment,
default_assignment,
ct.brokers.keys(),
)
def test_rebalance_replication_groups_balanced(
self,
create_balancer,
create_cluster_topology,
):
# Replication-group is already balanced
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(5))
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment same as previous
assert ct.assignment == assignment
def test_rebalance_replication_groups_error(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '9']), # broker 9 is not active
]
)
ct = create_cluster_topology(assignment, broker_range(5))
with pytest.raises(RebalanceError):
cb = create_balancer(ct)
cb.rebalance_replication_groups()
def test__rebalance_groups_partition_cnt_case1(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 6 partitions
# rg2 has 2 partitions
# Both rg's are balanced(based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '1', '2']),
((u'T1', 0), ['1']),
((u'T3', 0), ['1']),
((u'T2', 0), ['0', '1', '3']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Verify both replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains unaltered
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case2(
self,
create_balancer,
create_cluster_topology,
):
# 1 over-balanced, 2 under-balanced replication-groups
# rg1 has 4 partitions
# rg2 has 1 partition
# rg3 has 1 partition
# All rg's are balanced(based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '2']),
((u'T3', 1), ['0']),
((u'T3', 0), ['0']),
((u'T2', 0), ['0', '5']),
]
)
brokers = {
'0': mock.MagicMock(),
'2': mock.MagicMock(),
'5': mock.MagicMock(),
}
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance brokers
cb._rebalance_groups_partition_cnt()
# Verify all replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg3'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains 0
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case3(
self,
create_balancer,
create_cluster_topology,
):
# 1 over-balanced, 1 under-balanced, 1 opt-balanced replication-group
# rg1 has 3 partitions
# rg2 has 2 partitions
# rg3 has 1 partition
# All rg's are balanced(based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '2']),
((u'T3', 1), ['2']),
((u'T3', 0), ['0']),
((u'T2', 0), ['0', '5']),
]
)
brokers = {
'0': mock.MagicMock(),
'2': mock.MagicMock(),
'5': mock.MagicMock(),
}
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance brokers across replication-groups
cb._rebalance_groups_partition_cnt()
# Verify all replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg3'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements
assert total_movements == 1
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains 0
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case4(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 4 partitions
# rg2 has 2 partitions
# Both rg's are balanced(based on replica-count) initially
# Result: rg's couldn't be balanced partition-count since
# no available broker without partition movement
assignment = dict(
[
((u'T1', 1), ['0', '1', '2']),
((u'T2', 0), ['0', '1', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Verify no change in assignment
assert ct.assignment == assignment
def test__rebalance_groups_partition_cnt_case5(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 4 partitions
# rg2 has 2 partitions
# rg3 has 2 partitions
# Result: rg's will be balanced for partition-count
# All rg's will be balanced with just 1 partition-movement
brokers = {
"0": {"host": "host1"},
"1": {"host": "host2"},
"2": {"host": "host3"},
"3": {"host": "host4"},
"5": {"host": "host5"},
}
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T1', 0), ['1', '3']),
((u'T2', 0), ['0', '5']),
((u'T3', 0), ['1', '5']),
]
)
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Assert partition is moved from rg1 only
print(ct.assignment)
assert len(ct.rgs['rg1'].partitions) == 3
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 1
assert total_movements == 1
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains unaltered
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case6(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 5 partitions
# rg2 has 1 partitions
# rg3 has 1 partitions
# Result: rg's will be balanced for partition-count
# All rg's will be balanced with 2 partition-movements
# This test case covers the aspect that even if the partition
# count difference b/w the replication-groups is > 1,
# we still move onto next replication-group if either of the
# replication-groups reaches the optimal partition-count.
brokers = {
"0": {"host": "host1"},
"1": {"host": "host2"},
"2": {"host": "host3"},
"3": {"host": "host4"},
"5": {"host": "host5"},
}
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T1', 0), ['1', '0']),
((u'T2', 0), ['0', '5']),
((u'T3', 0), ['1']),
]
)
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Assert final partition counts in replication-groups
assert len(ct.rgs['rg1'].partitions) == 3
assert len(ct.rgs['rg2'].partitions) == 2
assert len(ct.rgs['rg3'].partitions) == 2
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
# Tests for leader-balancing
def test_rebalance_leaders_balanced_case1(
self,
create_balancer,
create_cluster_topology,
):
# Already balanced-assignment with evenly-distributed
# (broker-id: leader-count): {0: 1, 1:1, 2:1}
# opt-count: 3/3 = 1, extra-count: 3%3 = 0
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
((u'T1', 0), ['0', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
        # No change in the already-balanced assignment
assert orig_assignment == ct.assignment
# Assert leader-balanced
assert net_imbal == 0
def test_rebalance_leaders_balanced_case2(
self,
create_balancer,
create_cluster_topology,
):
# Already balanced-assignment NOT evenly-distributed
        # (broker-id: leader-count): {0: 0, 1: 1, 2: 1}
# opt-count: 2/3 = 0, extra-count: 2%3 = 2
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
        # No change in the already-balanced assignment
assert orig_assignment == ct.assignment
# Assert leader-balanced
assert net_imbal == 0
def test_rebalance_leaders_unbalanced_case1(
self,
create_balancer,
create_cluster_topology,
):
# Balance leader-imbalance successfully
# (broker-id: leader-count): {0: 0, 1:2, 2:1}
# Net-leader-imbalance: 1
# opt-count: 3/3 = 1, extra-count: 3%3 = 0
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
((u'T1', 0), ['1', '0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify if valid-leader assignment
self.assert_leader_valid(orig_assignment, ct.assignment)
        # The new leader imbalance count should be less than the previous imbalance count
new_leaders_per_broker = {
broker.id: broker.count_preferred_replica()
for broker in ct.brokers.itervalues()
}
new_leader_imbal = get_net_imbalance(new_leaders_per_broker.values())
# Verify leader-balanced
assert new_leader_imbal == 0
# Verify partitions-changed assignment
assert new_leaders_per_broker['0'] == 1
assert new_leaders_per_broker['1'] == 1
assert new_leaders_per_broker['2'] == 1
def test_rebalance_leaders_unbalanced_case2(
self,
create_balancer,
create_cluster_topology,
):
# (Broker: leader-count): {0: 2, 1: 1, 2:0}
# opt-count: 3/3 = 1, extra-count = 0
# Leader-imbalance-value: 1
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T1', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2a(
self,
create_balancer,
create_cluster_topology,
):
# (Broker: leader-count): {0: 2, 1: 1, 2:0, 3:1}
        # opt-count: 4/4 = 1, extra-count = 0
# Leader-imbalance-value: 1
# imbalanced-broker: 0,2; balanced-brokers: 1,3
assignment = dict(
[
((u'T0', 0), ['3', '2']),
((u'T0', 1), ['1', '3']),
((u'T1', 1), ['0', '1']),
((u'T1', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
# Verify that (T0, 1) also swapped even if 1 and 3 were balanced
# Rebalancing through non-followers
replica_ids = [b.id for b in ct.partitions[('T0', 1)].replicas]
assert replica_ids == ['3', '1']
def test_rebalance_leaders_unbalanced_case2b(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['3', '2']),
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2c(
self,
create_balancer,
create_cluster_topology,
):
# Broker-2 imbalance value: 2 with different brokers
# Broker-2 requests leadership from multiple brokers (0, 1) once
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T2', 1), ['0']),
((u'T3', 0), ['3', '2']),
((u'T3', 1), ['1', '3']),
((u'T4', 0), ['1']),
((u'T4', 2), ['3']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2d(
self,
create_balancer,
create_cluster_topology,
):
# Broker-2 imbalanced with same brokers
# Broker-2 requests leadership from same broker-1 twice
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T1', 2), ['0']),
((u'T1', 3), ['1', '2']),
((u'T1', 4), ['0', '1']),
((u'T1', 5), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2e(
self,
create_balancer,
create_cluster_topology,
):
# Imbalance-val 2
        # Multiple imbalanced brokers (2, 5) get non-follower balanced
# from multiple brokers (1,4)
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T3', 0), ['4', '5']),
((u'T3', 1), ['3', '4']),
((u'T4', 0), ['3']),
]
)
ct = create_cluster_topology(assignment, broker_range(6))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case3(
self,
create_balancer,
create_cluster_topology,
):
# Imbalanced 0 and 2. No re-balance possible.
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0']),
((u'T2', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify still leader-imbalanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 1
# No change in assignment
assert sorted(ct.assignment) == sorted(assignment)
def test_rebalance_leaders_unbalanced_case4(
self,
create_balancer,
create_cluster_topology,
):
# Imbalanced assignment
# Partial leader-imbalance possible
        # (Broker: leader-count): {0: 4, 1: 1, 2: 0}
# opt-count: 5/3 = 1, extra-count = 2
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['0', '2']),
((u'T1', 0), ['0']),
((u'T1', 1), ['0']),
((u'T1', 2), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
cb = create_balancer(ct)
cb.rebalance_leaders()
new_leaders_per_broker = {
broker.id: broker.count_preferred_replica()
for broker in ct.brokers.itervalues()
}
new_net_imbal = get_net_imbalance(new_leaders_per_broker.values())
# Verify that net-imbalance has reduced but not zero
assert new_net_imbal > 0 and new_net_imbal < net_imbal
# Verify the changes in leaders-per-broker count
assert new_leaders_per_broker['2'] == 1
assert new_leaders_per_broker['1'] == 1
assert new_leaders_per_broker['0'] == 3
def test_rebalance_leaders_unbalanced_case2f(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['2', '0']),
((u'T1', 0), ['2', '0']),
((u'T1', 1), ['0']),
((u'T2', 0), ['1']),
((u'T2', 1), ['2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case5(
self,
create_balancer,
create_cluster_topology,
):
# Special case, wherein none under-balanced
# but 0 is overbalanced
assignment = dict(
[
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T2', 1), ['0']),
((u'T3', 0), ['2', '3']),
((u'T3', 1), ['3', '1']),
((u'T4', 0), ['1']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
|
anthonysandrin/kafka-utils
|
tests/kafka_cluster_manager/partition_count_balancer_test.py
|
Python
|
apache-2.0
| 26,033
| 0
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
r"""Micro benchmark.
bazel run -c opt --config=cuda \
//third_party/tensorflow/python/ops/numpy_ops/benchmarks:micro_benchmarks -- \
--number=100 --repeat=100 \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
from absl import flags
from absl import logging
import numpy as np # pylint: disable=unused-import
import tensorflow.compat.v2 as tf
from tensorflow.python.ops import numpy_ops as tfnp # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import numpy_mlp
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import tf_numpy_mlp
FLAGS = flags.FLAGS
flags.DEFINE_integer('repeat', 100, '#Measurements per benchmark.')
flags.DEFINE_integer('number', 100, '#Runs per a measure.')
class MicroBenchmarks(tf.test.Benchmark):
"""Main micro benchmark class."""
def _benchmark_and_report(
self,
name,
fn,
repeat=None,
number=None):
"""Run fn repeat * number times, report time, and return fastest time."""
# Can't make these default above since the flags may not have been parsed
# at module import time.
repeat = repeat or int(FLAGS.repeat)
number = number or int(FLAGS.number)
# Warmup
fn()
times = []
for _ in range(repeat):
gc.disable()
start = time.time()
for _ in range(number):
fn()
times.append(time.time() - start)
gc.enable()
gc.collect()
# Regular benchmark to report numbers.
fastest_time_us = min(times) * 1e6 / number
total_time = sum(times)
self.report_benchmark(name=name,
wall_time=total_time,
extras={'fastest_time_us': fastest_time_us})
return fastest_time_us
def benchmark_tf_np_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def benchmark_tf_np_tf_function_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(
self._get_name(), tf.function(lambda: model.inference(x)))
def benchmark_numpy_mlp_inference_batch_1_cpu(self):
model = numpy_mlp.MLP()
x = np.random.uniform(size=(1, 10)).astype(np.float32, copy=False)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def _benchmark_np_and_tf_np(self, name, op, args, repeat=None): # pylint: disable=redefined-builtin
fn = getattr(np, op)
assert fn is not None
np_time = self._benchmark_and_report(
'{}_numpy'.format(name), lambda: fn(*args), repeat=repeat)
fn = getattr(tfnp, op)
assert fn is not None
with tf.device('CPU:0'):
tf_time = self._benchmark_and_report(
'{}_tfnp_cpu'.format(name), lambda: fn(*args), repeat=repeat)
return np_time, tf_time
def _print_times(self, op, sizes, times):
# For easy reporting.
print('For np.{}:'.format(op))
print('{:<15} {:>11} {:>11}'.format('Size', 'NP time', 'TF NP Time'))
for size, (np_time, tf_time) in zip(sizes, times):
print('{:<15} {:>10.5}us {:>10.5}us'.format(
str(size), np_time, tf_time))
print()
def _benchmark_np_and_tf_np_unary(self, op):
sizes = [(100,), (10000,), (1000000,)]
repeats = [FLAGS.repeat] * 2 + [10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(self._benchmark_np_and_tf_np(name, op, (x,), repeat))
self._print_times(op, sizes, times)
def benchmark_count_nonzero(self):
self._benchmark_np_and_tf_np_unary('count_nonzero')
def benchmark_log(self):
self._benchmark_np_and_tf_np_unary('log')
def benchmark_exp(self):
self._benchmark_np_and_tf_np_unary('exp')
def benchmark_tanh(self):
self._benchmark_np_and_tf_np_unary('tanh')
def benchmark_matmul(self):
sizes = [(2, 2), (10, 10), (100, 100), (200, 200), (1000, 1000)]
# Override repeat flag since this can be very slow.
repeats = [FLAGS.repeat] * 3 + [50, 10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(
self._benchmark_np_and_tf_np(name, 'matmul', (x, x), repeat=repeat))
self._print_times('matmul', sizes, times)
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
tf.enable_v2_behavior()
tf.test.main()
|
annarev/tensorflow
|
tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py
|
Python
|
apache-2.0
| 5,557
| 0.007558
|