| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
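Each row below pairs repository metadata with a source file split into `prefix`, `middle`, and `suffix` text segments, so the original file can be reassembled by straight concatenation. A minimal sketch under that assumption (the `row` dictionary simply re-uses values from the first record below, with the long text fields truncated for brevity):

```python
def reassemble(row: dict) -> str:
    """Join the three text segments of one row back into the full source file."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Values copied from the first record below; prefix/middle/suffix are truncated here.
row = {
    "repo_name": "hackerspace-silesia/cebulany-manager",
    "path": "migrations/versions/c19852e4dcda_add_unique_key_to_username.py",
    "language": "Python",
    "license": "mit",
    "size": 953,
    "score": 0.001049,
    "prefix": "with op.batch_alter_tab",
    "middle": "le('user', schema=None) as batch_op:",
    "suffix": "\n    batch_op.drop_index('ix_user_username')",
}
print(reassemble(row))
```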
hackerspace-silesia/cebulany-manager
|
migrations/versions/c19852e4dcda_add_unique_key_to_username.py
|
Python
|
mit
| 953
| 0.001049
|
"""add unique key to username
Revision ID: c19852e4dcda
Revises: 1478867a872a
Create Date: 2020-08-06 00:39:03.004053
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c19852e4dcda'
down_revision = '1478867a872a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_tab
|
le('user', schema=None) as batch_op:
batch_op.drop_index('ix_user_username')
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], uni
|
que=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_username'))
batch_op.create_index('ix_user_username', ['username'], unique=False)
# ### end Alembic commands ###
|
zbase/disk_mapper
|
dm_server/lib/urlmapper.py
|
Python
|
apache-2.0
| 2,103
| 0.000951
|
#!/bin/env python
# Copyright 2
|
013 Zynga Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re
|
quired by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module maps requests to functions based on the URL and HTTP method
"""
import re
import os
import cgi
import urlrelay
from cgi import parse_qs
from diskmapper import DiskMapper
@urlrelay.url('^.*$', 'GET')
def index(environ, start_response):
"""Handles GET requests
"""
query_string = parse_qs(environ.get("QUERY_STRING"))
status = '202 Accepted'
response_headers = [('Content-type', 'text/plain')]
dm = DiskMapper(environ, start_response)
if "action" in query_string:
action = query_string["action"]
if "get_host_config" in action:
return dm.get_host_config()
elif "get_all_config" in action:
return dm.get_all_config()
elif "get_vb_mapping" in action:
key = None
if "vbucket" in query_string:
key = query_string["vbucket"][0]
return dm.get_vbuckets("vbucket", key)
elif "get_ss_mapping" in action:
key = None
if "storage_server" in query_string:
key = query_string["storage_server"][0]
return dm.get_vbuckets("storage_server", key)
return dm.forward_request()
@urlrelay.url('^.*$', 'DELETE')
def delete(environ, start_response):
"""Handles GET requests
"""
dm = DiskMapper(environ, start_response)
return dm.forward_request()
@urlrelay.url('^.*$', 'POST')
def upload(environ, start_response):
dm = DiskMapper(environ, start_response)
return dm.upload()
|
HayaoSuzuki/django-tutorial
|
mysite/mysite/settings.py
|
Python
|
mit
| 2,134
| 0
|
"""
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'at0zvwnp1y=4sva38l)0)ejiaiq$aqap
|
8ehs7uld0g948yj-fy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware
|
.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Template files
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
cloudbase/nova-virtualbox
|
nova/tests/unit/conf_fixture.py
|
Python
|
apache-2.0
| 3,169
| 0
|
# Copyr
|
ight 2010 United States Government as repre
|
sented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from nova import config
from nova import ipv6
from nova import paths
from nova.tests.unit import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.openstack.common.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
"""Fixture to manage global conf settings."""
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver',
'nova.virt.fake.SmallFakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('vlan_interface', 'eth0')
self.conf.set_default('auth_strategy', 'noauth')
config.parse_args([], default_config_files=[])
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('fatal_exception_format_errors', True)
self.conf.set_default('enabled', True, 'osapi_v3')
self.conf.set_default('force_dhcp_release', False)
self.conf.set_default('periodic_enable', False)
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
|
openstenoproject/plover
|
plover/machine/base.py
|
Python
|
gpl-2.0
| 8,198
| 0.000732
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
# TODO: add tests for all machines
# TODO: add tests for new status callbacks
"""Base classes for machine types. Do not use directly."""
import binascii
import threading
import serial
from plover import _, log
from plover.machine.keymap import Keymap
from plover.misc import boolean
# i18n: Machine state.
STATE_STOPPED = _('stopped')
# i18n: Machine state.
STATE_INITIALIZING = _('initializing')
# i18n: Machine state.
STATE_RUNNING = _('connected')
# i18n: Machine state.
STATE_ERROR = _('disconnected')
class StenotypeBase:
"""The base class for all Stenotype classes."""
# Layout of physical keys.
KEYS_LAYOUT = ''
# And special actions to map to.
ACTIONS = ()
# Fallback to use as machine type for finding a compatible keymap
# if one is not already available for this machine type.
KEYMAP_MACHINE_TYPE = None
def __init__(self):
# Setup default keymap with no translation of keys.
keys = self.get_keys()
self.keymap = Keymap(keys, keys)
self.keymap.set_mappings(zip(keys, keys))
self.stroke_subscribers = []
self.state_subscribers = []
self.state = STATE_STOPPED
def set_keymap(self, keymap):
"""Setup machine keymap."""
self.keymap = keymap
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
pass
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
pass
def add_stroke_callback(self, callback):
"""Subscribe to output from the stenotype machine.
Argument:
callback -- The function to call whenever there is output from
the stenotype machine and output is being captured.
"""
self.stroke_subscribers.append(callback)
def remove_stroke_callback(self, callback):
"""Unsubscribe from output from the stenotype machine.
Argument:
callback -- A function that was previously subscribed.
"""
self.stroke_subscribers.remove(callback)
def add_state_callback(self, callback):
self.state_subscribers.append(callback)
def remove_state_callback(self, callback):
self.state_subscribers.remove(callback)
def _notify(self, steno_keys):
"""Invoke the callback of each subscriber with the given argument."""
for callback in self.stroke_subscribers:
callback(steno_keys)
def set_suppression(self, enabled):
'''Enable keyboard suppression.
This is only of use for the keyboard machine,
to suppress the keyboard when the engine is running.
'''
pass
def suppress_last_stroke(self, send_backspaces):
'''Suppress the last stroke key events after the fact.
This is only of use for the keyboard machine,
and only when the engine is resumed with a command stroke.
Argument:
send_backspaces -- The function to use to send backspaces.
'''
pass
def _set_state(self, state):
self.state = state
for callback in self.state_subscribers:
callback(state)
def _stopped(self):
self._set_state(STATE_STOPPED)
def _initializing(self):
self._set_state(STATE_INITIALIZING)
def _ready(self):
self._set_state(STATE_RUNNING)
def _error(self):
self._set_state(STATE_ERROR)
@classmethod
def get_actions(cls):
"""List of supported actions to map to."""
return cls.ACTIONS
@classmethod
def get_keys(cls):
return tuple(cls.KEYS_LAYOUT.split())
@classmethod
def get_option_info(cls):
"""Get the default options for this machine."""
return {}
class ThreadedStenotypeBase(StenotypeBase, threading.Thread):
"""Base class for thread based machines.
Subclasses should override run.
"""
def __init__(self):
threading.Thread.__init__(self)
self.name += '-machine'
StenotypeBase.__init__(self)
self.finished = threading.Event()
def run(self):
"""This method should be overridden by a subclass."""
pass
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
self.finished.clear()
self._initializing()
self.start()
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
self.finished.set()
try:
self.join()
except RuntimeError:
pass
self._stopped()
class SerialStenotypeBase(ThreadedStenotypeBase):
"""For use with stenotype machines that connect via serial port.
This class implements the three methods necessary for a standard
stenotype interface: start_capture, stop_capture, and
add_stroke_callback.
"""
# Default serial parameters.
SERIAL_PARAMS = {
'port': None,
'baudrate': 9600,
'bytesize': 8,
'parity': 'N',
'stopbits': 1,
'timeout': 2.0,
}
def __init__(self, serial_params):
"""Monitor the stenotype over a serial port.
The key-value pairs in the <serial_params> dict are the same
as the keyword arguments for a serial.Serial object.
"""
ThreadedStenotypeBase.__init__(self)
self.serial_port = None
self.serial_params = serial_params
def _close_port(self):
if self.serial_port is None:
return
self.serial_port.close()
self.serial_port = None
def start_capture(self):
self._close_port()
try:
self.serial_port = serial.Serial(**self.serial_params)
except (serial.SerialException, OSError):
log.warning('Can\'t open serial port', exc_info=True)
self._error()
return
if not self.serial_port.isOpen():
log.warning('Serial port is not open: %s', self.serial_params.get('port'))
self._error()
return
return ThreadedStenotypeBase.start_capture(self)
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
Threa
|
dedStenotypeBase.stop_capture(self)
|
self._close_port()
@classmethod
def get_option_info(cls):
"""Get the default options for this machine."""
sb = lambda s: int(float(s)) if float(s).is_integer() else float(s)
converters = {
'port': str,
'baudrate': int,
'bytesize': int,
'parity': str,
'stopbits': sb,
'timeout': float,
'xonxoff': boolean,
'rtscts': boolean,
}
return {
setting: (default, converters[setting])
for setting, default in cls.SERIAL_PARAMS.items()
}
def _iter_packets(self, packet_size):
"""Yield packets of <packets_size> bytes until the machine is stopped.
N.B.: to workaround the fact that the Toshiba Bluetooth stack
on Windows does not correctly handle the read timeout setting
(returning immediately if some data is already available):
- the effective timeout is re-configured to <timeout/packet_size>
- multiple reads are done (until a packet is complete)
- an incomplete packet will only be discarded if one of
those reads returns no data (but not on a short read)
"""
self.serial_port.timeout = max(
self.serial_params.get('timeout', 1.0) / packet_size,
0.01,
)
packet = b''
while not self.finished.is_set():
raw = self.serial_port.read(packet_size - len(packet))
if not raw:
if packet:
log.error('discarding incomplete packet: %s',
binascii.hexlify(packet))
packet = b''
continue
packet += raw
if len(packet) != packet_size:
continue
yield packet
|
ocefpaf/folium
|
folium/plugins/fast_marker_cluster.py
|
Python
|
mit
| 3,954
| 0.000506
|
# -*- coding: utf-8 -*-
from folium.plugins.marker_cluster import MarkerCluster
from folium.utilities import if_pandas_df_convert_to_numpy, validate_location
from jinja2 import Template
class FastMarkerCluster(MarkerCluster):
"""
Add marker clusters to a map using in-browser rendering.
Using FastMarkerCluster it is possible to render thousands of
points far more quickly than the MarkerCluster class.
Be aware that the FastMarkerCluster class passes an empty
list to the parent class' __init__ method during initialisation.
This means that the add_child method is never called, and
no reference to any marker data
|
are r
|
etained. Methods such
as get_bounds() are therefore not available when using it.
Parameters
----------
data: list of list with values
List of list of shape [[lat, lon], [lat, lon], etc.]
When you use a custom callback you could add more values after the
lat and lon. E.g. [[lat, lon, 'red'], [lat, lon, 'blue']]
callback: string, optional
A string representation of a valid Javascript function
that will be passed each row in data. See the
FastMarkerCluster for an example of a custom callback.
name : string, optional
The name of the Layer, as it will appear in LayerControls.
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
icon_create_function : string, default None
Override the default behaviour, making possible to customize
markers colors and sizes.
**kwargs
Additional arguments are passed to Leaflet.markercluster options. See
https://github.com/Leaflet/Leaflet.markercluster
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = (function(){
{{ this.callback }}
var data = {{ this.data|tojson }};
var cluster = L.markerClusterGroup({{ this.options|tojson }});
{%- if this.icon_create_function is not none %}
cluster.options.iconCreateFunction =
{{ this.icon_create_function.strip() }};
{%- endif %}
for (var i = 0; i < data.length; i++) {
var row = data[i];
var marker = callback(row);
marker.addTo(cluster);
}
cluster.addTo({{ this._parent.get_name() }});
return cluster;
})();
{% endmacro %}""")
def __init__(self, data, callback=None, options=None,
name=None, overlay=True, control=True, show=True, icon_create_function=None, **kwargs):
if options is not None:
kwargs.update(options) # options argument is legacy
super(FastMarkerCluster, self).__init__(name=name, overlay=overlay,
control=control, show=show,
icon_create_function=icon_create_function,
**kwargs)
self._name = 'FastMarkerCluster'
data = if_pandas_df_convert_to_numpy(data)
self.data = [[*validate_location(row[:2]), *row[2:]] # noqa: E999
for row in data]
if callback is None:
self.callback = """
var callback = function (row) {
var icon = L.AwesomeMarkers.icon();
var marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
return marker;
};"""
else:
self.callback = 'var callback = {};'.format(callback)
|
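The docstring in the row above describes the `FastMarkerCluster` parameters; a minimal usage sketch (the map centre and the two sample points are made-up illustration values, not from the original file) could look like this:

```python
import folium
from folium.plugins import FastMarkerCluster

m = folium.Map(location=[40.73, -73.99], zoom_start=11)
# data is a list of [lat, lon] rows, matching the docstring above.
FastMarkerCluster(data=[[40.733, -73.989], [40.748, -73.985]]).add_to(m)
m.save("clusters.html")  # writes a standalone HTML map with the clustered markers
```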
xumiao/pymonk
|
tests/kafka_tester.py
|
Python
|
mit
| 1,929
| 0.007258
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 01 10:45:09 2014
Training models remotely in cloud
@author: pacif_000
"""
from kafka.client import KafkaClient
from kafka.consumer import SimpleConsumer
import os
import platform
if platform.system() == 'Windows':
import win32api
else:
import signal
import thread
import traceback
kafkaHost = 'monkkafka.cloudapp.net:9092,monkkafka.cloudapp.net:9093,monkkafka.cloudapp.net:9094'
kafkaTopic = 'expr'
kafkaGroup = 'expr'
kafka = None
producer = None
consumer = None
def onexit():
global kafka, consumer,
|
producer
if consumer:
consumer.commit()
consumer.stop()
consumer = None
if producer:
producer.stop()
producer = None
|
if kafka:
kafka.close()
kafka = None
print('remote_trainer {0} is shutting down'.format(os.getpid()))
def handler(sig, hook = thread.interrupt_main):
global kafka, consumer, producer
if consumer:
consumer.commit()
consumer.stop()
consumer = None
if producer:
producer.stop()
producer = None
if kafka:
kafka.close()
kafka = None
print('remote_trainer {0} is shutting down'.format(os.getpid()))
exit(1)
def server():
global kafka, producer, consumer
if platform.system() == 'Windows':
win32api.SetConsoleCtrlHandler(handler, 1)
else:
signal.signal(signal.SIGINT, onexit)
try:
kafka = KafkaClient(kafkaHost,timeout=None)
consumer = SimpleConsumer(kafka, kafkaGroup, kafkaTopic, partitions=[0,1,2])
for message in consumer:
print(message)
except Exception as e:
print('Exception {0}'.format(e))
print('Can not consume actions')
print(traceback.format_exc())
except KeyboardInterrupt:
onexit()
finally:
onexit()
if __name__=='__main__':
while 1:
server()
|
adamcaudill/yawast
|
yawast/scanner/plugins/http/servers/rails.py
|
Python
|
mit
| 1,712
| 0.001752
|
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
#
|
See the LICENSE file or go to https://yawast.org/license/ for full license
|
details.
import re
from typing import List
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
_checked: List[str] = []
def reset():
global _checked
_checked = []
def check_cve_2019_5418(url: str) -> List[Result]:
global _checked
# this only applies to controllers, so skip the check unless the link ends with '/'
if not url.endswith("/") or url in _checked:
return []
results: List[Result] = []
_checked.append(url)
try:
res = network.http_get(
url, False, {"Accept": "../../../../../../../../../e*c/p*sswd{{"}
)
if network.response_body_is_text(res):
body = res.text
req = network.http_build_raw_request(res.request)
# check to see if "root" is in the string, then do the proper check
if "root:" in body:
pattern = r"root:[a-zA-Z0-9]+:0:0:.+$"
mtch = re.search(pattern, body)
if mtch:
results.append(
Result(
f"Rails CVE-2019-5418: File Content Disclosure: {url} - {mtch.group(0)}",
Vulnerabilities.SERVER_RAILS_CVE_2019_5418,
url,
[body, req],
)
)
except Exception:
output.debug_exception()
return results
|
MingfeiPan/leetcode
|
dp/213.py
|
Python
|
apache-2.0
| 1,077
| 0.003865
|
# Because the first and last houses are adjacent (the street is circular), split into two cases: rob the first house or skip it
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
if len(nums) == 1:
return nums[0]
if len(nums) == 2:
return max(nums)
l = []
l1 = []
|
# while nums[1] > nums[0] or nums[len(nums)-1] > nums[0]:
# temp = nums[0]
|
# del nums[0]
# nums.append(temp)
# print(nums)
# l = [0] * len(nums)
l.append(nums[0])
l1.append(0)
l.append(max(nums[0], nums[1]))
l1.append(nums[1])
for i in range(2, len(nums)):
if i == len(nums) - 1:
l.append(l[i-1])
else:
l.append(max(l[i-2] + nums[i], l[i-1]))
if i == 2:
l1.append(max(l1[i-1], nums[i]))
else:
l1.append(max(l1[i-2] + nums[i], l1[i-1]))
return max(max(l), max(l1))
|
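The comment at the top of the row above notes that, because the houses form a circle, the first house is either robbed or skipped and the two cases are solved separately. A compact restatement of that idea (a hypothetical alternative sketch, not the author's code) runs the linear house-robber DP twice and takes the better result:

```python
def rob_circular(nums):
    """House Robber II: the street is circular, so exclude either the last or the first house."""
    def rob_line(houses):
        take, skip = 0, 0
        for x in houses:
            # take: best total if we rob this house; skip: best total if we don't.
            take, skip = skip + x, max(take, skip)
        return max(take, skip)

    if not nums:
        return 0
    if len(nums) == 1:
        return nums[0]
    return max(rob_line(nums[:-1]), rob_line(nums[1:]))
```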
CartoDB/geocoder-api
|
server/lib/python/cartodb_services/test/test_tomtomisoline.py
|
Python
|
bsd-3-clause
| 992
| 0
|
import unittest
from mock import Mock
from cartodb_services.tomtom.isolines im
|
port TomTomIsolines, DEFAULT_PROFILE
from cartodb_services.tools import Coordinate
from cred
|
entials import tomtom_api_key
VALID_ORIGIN = Coordinate(-73.989, 40.733)
class TomTomIsolinesTestCase(unittest.TestCase):
def setUp(self):
self.tomtom_isolines = TomTomIsolines(apikey=tomtom_api_key(),
logger=Mock())
def test_calculate_isochrone(self):
time_ranges = [300, 900]
solution = self.tomtom_isolines.calculate_isochrone(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
time_ranges=time_ranges)
assert solution
def test_calculate_isodistance(self):
distance_range = 10000
solution = self.tomtom_isolines.calculate_isodistance(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
distance_range=distance_range)
assert solution
|
Storj/accounts
|
accounts/ledger.py
|
Python
|
mit
| 1,481
| 0.00135
|
# -*- coding: utf-8 -*-
class Ledger(object):
def __init__(self, db):
self.db = db
def balance(self, token):
cursor = self.db.cursor(
|
)
cursor.execute("""SELECT * FROM balances WHERE TOKEN = %s""", [token])
row = cursor.fetchone()
return 0 if row is None else row[2]
def deposit(self, token, amount):
cursor = self.db.cursor()
cursor.execute(
"""INSERT INTO balances (token, amount)
SELECT %s, 0
WHERE NOT EXISTS (SELECT 1 FROM balances WHERE token = %s)""
|
",
[token, token])
cursor.execute(
"""UPDATE balances SET amount = amount + %s WHERE token = %s""",
[amount, token])
cursor.execute(
"""INSERT INTO movements (token, amount) VALUES(%s, %s)""",
[token, amount])
self.db.commit()
return True
def withdraw(self, token, amount):
"""Remove the given amount from the token's balance."""
cursor = self.db.cursor()
cursor.execute("""
UPDATE balances
SET amount = amount - %s
WHERE token = %s AND amount >= %s""",
[amount, token, amount])
success = (cursor.rowcount == 1)
if success:
cursor.execute(
"""INSERT INTO movements (token, amount) VALUES(%s, %s)""",
[token, -amount])
self.db.commit()
return success
|
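The `withdraw` method in the row above combines a conditional `UPDATE ... AND amount >= %s` with a `rowcount` check so that overdrafts are rejected atomically. A hypothetical usage sketch (the psycopg2 connection is assumed for illustration; the original file never shows how `db` is created):

```python
import psycopg2  # any DB-API driver using %s placeholders would work the same way

db = psycopg2.connect("dbname=accounts")
ledger = Ledger(db)
ledger.deposit("token-123", 100)
print(ledger.balance("token-123"))        # current balance for the token
print(ledger.withdraw("token-123", 40))   # True: funds were available
print(ledger.withdraw("token-123", 500))  # False: the conditional UPDATE matched no row
```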
PaulWay/spacewalk
|
client/solaris/rhnclient/setup.py
|
Python
|
gpl-2.0
| 610
| 0.029508
|
#!/usr/bin/python
#
#
from distutils.core import setup
from spacewal
|
k.common.rhnConfig import CFG, initCFG
initCFG('web')
setup(name = "rhnclient",
version = "5.5.9",
|
description = CFG.PRODUCT_NAME + " Client Utilities and Libraries",
long_description = CFG.PRODUCT_NAME + """\
Client Utilities
Includes: rhn_check, action handler, and modules to allow
client packages to communicate with RHN.""",
author = 'Joel Martin',
author_email = 'jmartin@redhat.com',
url = 'http://rhn.redhat.com',
packages = ["rhn.actions", "rhn.client"],
license = "GPL",
)
|
Ultimaker/Cura
|
plugins/UltimakerMachineActions/__init__.py
|
Python
|
lgpl-3.0
| 366
| 0.008197
|
# Copyright (c) 2019 Ultimaker B.
|
V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import BedLevelMachineAction
from . import UMOUpgradeSelection
def getMetaData():
return {}
def register(app):
return { "machine_action": [
BedLevelMachineAction.BedLevelMachineAct
|
ion(),
UMOUpgradeSelection.UMOUpgradeSelection()
]}
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter/line/_width.py
|
Python
|
mit
| 523
| 0.001912
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="scatter.line", **kwargs):
|
super(WidthValidator, self).__init__(
plot
|
ly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
rohitjogson/pythonwork
|
assign27.09.py
|
Python
|
gpl-3.0
| 355
| 0.059155
|
n=int(input('Enter any number: '))
if n%2
|
!=0:
n=n
|
+1
for i in range(n):
for j in range(n):
if (i==int(n/2)) or j==int(n/2) or ((i==0)and (j>=int(n/2))) or ((j==0)and (i<=int(n/2))) or ((j==n-1)and (i>=int(n/2))) or ((i==n-1)and (j<=int(n/2))):
print('*',end='')
else:
print(' ',end='')
print()
|
bitmingw/FindYourSister
|
sloth/sloth/gui/utils.py
|
Python
|
bsd-2-clause
| 994
| 0.002012
|
from PyQt4.QtCore import QSize
from PyQt4.QtGui import QVBoxLayout
# This is really really ugly, but the QDockWidget for some reason does not notice when
# its child widget becomes smaller...
# Therefore we manually set its minimum size when our own minimum size changes
class
|
MyVBoxLayout(QVBoxLayout):
def __init__(self, parent=None):
QVBoxLayout.__init__(self, parent)
self._last_size = QSize(0, 0)
def setGeometry(self, r):
QVBoxLayout.setGeometry(self, r)
try:
wid = self.parentWidget().parentWidget()
new_size = self.minimumSize()
if new_s
|
ize == self._last_size: return
self._last_size = new_size
twid = wid.titleBarWidget()
if twid is not None:
theight = twid.sizeHint().height()
else:
theight = 0
new_size += QSize(0, theight)
wid.setMinimumSize(new_size)
except Exception:
pass
|
barnsnake351/neutron
|
neutron/tests/unit/extensions/test_agent.py
|
Python
|
apache-2.0
| 7,027
| 0
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from webob import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
LBAAS_HOSTA = 'hosta'
LBAAS_HOSTB = 'hostb'
class AgentTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
return agent.Agent.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is just for testing
class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
supported_extension_aliases = ["agent"]
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
neutron_context=None,
query_string=None):
agent_res = self._list('agents',
neutron_context=neutron_context,
query_params=query_string)
if expected_res_status:
self.assertEqual(agent_res.status_int, expected_res_status)
return agent_res
def _register_agent_states(self, lbaas_agents=False):
"""Register two L3 agents and two DHCP agents."""
l3_hosta = helpers._get_l3_agent_dict(
L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
l3_hostb = helpers._get_l3_agent_dict(
L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
helpers.register_l3_agent(host=L3_HOSTA)
helpers.register_l3_agent(host=L3_HOSTB)
helpers.register_dhcp_agent(host=DHCP_HOSTA)
helpers.register_dhcp_agent(host=DHCP_HOSTC)
res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
if lbaas_agents:
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
lbaas_hostb = copy.deepcopy(lbaas_hosta)
lbaas_hostb['host'] = LBAAS_HOSTB
ca
|
llback = agents_db.AgentExtRpcCallback()
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hosta},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hostb},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT
|
))
res += [lbaas_hosta, lbaas_hostb]
return res
def _register_dvr_agents(self):
dvr_snat_agent = helpers.register_l3_agent(
host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
dvr_agent = helpers.register_l3_agent(
host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR)
return [dvr_snat_agent, dvr_agent]
class AgentDBTestCase(AgentDBTestMixIn,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
fmt = 'json'
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
self.useFixture(tools.AttributeMapMemento())
ext_mgr = AgentTestExtensionManager()
super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
self.adminContext = context.get_admin_context()
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
_req.environ['neutron.context'] = context.Context(
'', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_list_agent(self):
agents = self._register_agent_states()
res = self._list('agents')
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent')
self.assertEqual(2, len(agents['agents']))
agent = self._show('agents', agents['agents'][0]['id'])
self.assertEqual('neutron-l3-agent', agent['agent']['binary'])
def test_update_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
agent = self._show('agents', com_id)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
self._update('agents', com_id, new_agent)
agent = self._show('agents', com_id)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
def test_dead_agent(self):
cfg.CONF.set_override('agent_down_time', 1)
self._register_agent_states()
time.sleep(1.5)
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertFalse(agents['agents'][0]['alive'])
|
zachary-williamson/ITK
|
Utilities/Maintenance/VNL_ModernizeNaming.py
|
Python
|
apache-2.0
| 3,528
| 0.014172
|
#!/bin/env python
# \author Hans J. Johnson
#
# Prepare for the future by recommending
# use of itk::Math:: functions over
# vnl_math:: functions.
# Rather than converting vnl_math_ to vnl_math::
# this prefers to convert directly to itk::Math::
# namespace. In cases where vnl_math:: is simply
# an alias to std:: functions, itk::Math directly
# uses the std:: version of the function.
import os
import sys
from collections import OrderedDict
## slight modification from grep command
info_for_conversion="""
XXXX,vnl_math_isnan,itk::Math::isnan
XXXX,vnl_math_isinf,itk::Math::isinf
XXXX,vnl_math_isfinite,itk::Math::isfinite
XXXX,vnl_math_isnormal,itk::Math::isnormal
XXXX,vnl_math_max,std::max
XXXX,vnl_math_min,std::min
XXXX,vnl_math_cuberoot,itk::Math::cbrt
XXXX,vnl_math_hypot,itk::Math::hypot
XXXX,vnl_math_angle_0_to_2pi,itk::Math::angle_0_to_2pi
XXXX,vnl_math_angle_minuspi_to_pi,itk::Math::angle_minuspi_to_pi
XXXX,vnl_math_rnd_halfinttoeven,itk::Math::halfinttoeven
XXXX,vnl_math_rnd_halfintup,itk::Math::rnd_halfintup
XXXX,vnl_math_rnd,itk::Math::rnd
XXXX,vnl_math_floor,itk::Math::floor
XXXX,vnl_math_ceil,itk::Math::ceil
XXXX,vnl_math_abs,itk::Math::abs
XXXX,vnl_math_sqr,itk::Math::sqr
XXXX,vnl_math_cube,itk::Math::cube
XXXX,vnl_math_sgn,itk::Math::sgn
XXXX,vnl_math_sgn0,itk::Math::sgn0
XXXX,vnl_math_squared_magnitude,itk::Math::squared_magnitude
XXXX,vnl_math_remainder_truncated,itk::Math::remainder_truncated
XXXX,vnl_math_remainder_floored,itk::Math::remainder_floored
"""
ITK_replace_head_names = OrderedDict()
ITK_replace_functionnames = OrderedDict()
ITK_replace_manual = OrderedDict()
ITK_replace_manual['"vnl/vnl_math.h"']='"itkMath.h"'
ITK_replace_manual['<vnl/vnl_math.h>']='<itkMath.h>'
for line in info_for_conversion.splitlines():
linevalues = line.split(",")
if len(linevalues) != 3:
#print("SKIPPING: " + str(linevalues))
continue
fname=linevalues[0]
new_name=fname.replace("ITK_","").replace(".h","")
ITK_replace_head_names['#inclu
|
de "{0}"'.format(fname)]="""#if !defined( ITK_LEGACY_FUTURE_REMOVE )
# include "{0}"
#endif
#include <{1}>""".format(fname,new_name)
ITK_replace_head_names['#include <{0}>'.format(fname)]="""#if !defined( ITK_LEGACY_FUTURE_REMOVE )
# include <{0}>
#endif
#include <{1}>""".format(fname,new_name)
ITK_pat=linevalues[1]
|
new_pat=linevalues[2]
ITK_replace_functionnames[ITK_pat]=new_pat
# Need to fix the fact that both std::ios is a base and a prefix
if "std::ios::" in new_pat:
ITK_replace_manual[new_pat.replace("std::ios::","std::ios_")] = new_pat
#print(ITK_replace_head_names)
#print(ITK_replace_functionnames)
cfile=sys.argv[1]
file_as_string=""
with open(cfile,"r") as rfp:
original_string=rfp.read()
file_as_string=original_string
required_header="" ## For ITK, this is always empty
for searchval,replaceval in ITK_replace_head_names.items():
file_as_string_new = file_as_string.replace(searchval,required_header+replaceval)
if file_as_string_new != file_as_string:
required_header=""
file_as_string=file_as_string_new
for searchval,replaceval in ITK_replace_functionnames.items():
file_as_string = file_as_string.replace(searchval,replaceval)
for searchval,replaceval in ITK_replace_manual.items():
file_as_string = file_as_string.replace(searchval,replaceval)
if file_as_string != original_string:
print("Processing: {0}".format(cfile))
with open(cfile,"w") as wfp:
wfp.write(file_as_string)
else:
print("SKIPPING: {0}".format(cfile))
|
pllim/astropy
|
astropy/samp/errors.py
|
Python
|
bsd-3-clause
| 637
| 0
|
# Licensed under a 3-
|
clause BSD style license - see LICENSE.rst
"""
Defines custom errors and exceptions used in `astropy.samp`.
"""
import xmlrpc.client as xmlrpc
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SAMPWarning', 'SAMPHubError', 'SAMPClientError', 'SAMPProxyError']
class SAMPWarning(AstropyUserWarning):
"""
SAMP-specific Astropy warning class
"""
class SAMPHubError(Exception):
"""
SAMP Hub exception.
"""
class SAMPClientError(Exception):
|
"""
SAMP Client exceptions.
"""
class SAMPProxyError(xmlrpc.Fault):
"""
SAMP Proxy Hub exception
"""
|
mmolero/pcloudpy
|
pcloudpy/gui/components/ObjectInspectorWidget.py
|
Python
|
bsd-3-clause
| 1,774
| 0.012401
|
#Author: Miguel Molero <miguel.molero@gmail.com>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class ObjectInspectorWidget(QWidget):
def __init__(self, parent = None):
super(ObjectInspectorWidget, self).__init__(parent)
layout = QVBoxLayout()
self.tab = QTabWidget()
self.properties_tree = QTreeWidget()
self.properties_tree.setHeaderLabels(["",""])
self.properties_tree.setAlternatingRowColors(True)
self.properties_tree.setColumnCount(2)
self.properties_tree.header().resizeSection(0, 200)
self.tab.addTab(self.properties_tree, "Properties")
layout.addWidget(self.tab)
self.setLayout(layout)
self.setGeometry(0,0,100, 400)
def u
|
pdate(self, props):
self.properties_tree.clear()
data_tree = QTreeWidgetItem(self.properties_tree)
data_tree.setText(0,"Data")
#data_tree.setFont(0,QFont(c.FONT_NAME, c.FONT_SIZE_1, QFont.Bold))
labels = props.keys()
values = props.values()
self.populateTree(data_tree, labels, values)
def populateTree(self, parent,labels,values):
for i,j in zip(labe
|
ls,values):
if j is None:
continue
item = QTreeWidgetItem(parent)
item.setText(0,i)
#item.setFont(0,QFont(c.FONT_NAME, c.FONT_SIZE_2, QFont.Normal))
if isinstance(j,bool):
if j is True:
item.setText(1, c.MARK)
else:
item.setText(1, c.CROSS)
else:
item.setText(1,str(j))
#item.setFont(1,QFont(c.FONT_NAME, c.FONT_SIZE_3, QFont.Normal))
self.properties_tree.expandItem(parent)
|
olbat/o1b4t
|
coding/crypto/test_hmac.py
|
Python
|
gpl-3.0
| 10,162
| 0.000098
|
import unittest
from collections import namedtuple
from io import BytesIO
import codecs
import sha2
import hmac
class TestSHA2(unittest.TestCase):
# test vectors from https://csrc.nist.gov/projects/cryptographic-standards-and-guidelines/example-values
TestVector = namedtuple('TestVector', ['digestcls', 'text', 'key', 'mac'])
TEST_VECTORS = (
# SHA-224 based HMACs
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F'
'10111213' '14151617' '18191A1B' '1C1D1E1F'
'20212223' '24252627' '28292A2B' '2C2D2E2F'
'30313233' '34353637' '38393A3B' '3C3D3E3F',
'hex',
),
mac=codecs.decode(
'C7405E3A' 'E058E8CD' '30B08B41' '40248581' 'ED174CB3'
'4E1224BC' 'C1EFC81B',
'hex',
),
),
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B',
'hex',
),
mac=codecs.decode(
'E3D249A8' 'CFB67EF8' 'B7A169E9' 'A0A59971' '4A2CECBA'
'65999A51' 'BEB8FBBE',
'hex',
),
),
TestVector(
digestcls=sha2.SHA224,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263',
'hex',
),
mac=codecs.decode(
'91C52509' 'E5AF8531' '601AE623' '0099D90B' 'EF88AAEF'
'B961F408' '0ABC014D',
'hex',
),
),
# SHA-256 based HMACs
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F10111213' '14151617'
'18191A1B' '1C1D1E1F' '20212223' '2425262728292A2B' '2C2D2E2F'
'30313233' '34353637' '38393A3B' '3C3D3E3F',
'hex',
),
mac=codecs.decode(
'8BB9A1DB' '9806F20DF7F77B82' '138C7914' 'D174D59E' '13DC4D01'
'69C9057B' '133E1D62',
'hex',
),
),
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '0405060708090A0B' '0C0D0E0F' '10111213' '14151617'
'18191A1B' '1C1D1E1F',
'hex',
),
mac=codecs.decode(
'A28CF431' '30EE696A98F14A37' '678B56BC' 'FCBDD9E5' 'CF69717F'
'ECF5480F' '0EBDF790',
'hex',
),
),
TestVector(
digestcls=sha2.SHA256,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263',
'hex',
),
mac=codecs.decode(
'BDCCB6C7' '2DDEADB5' '00AE7683' '86CB38CC' '41C63DBB'
'0878DDB9' 'C7A38A43' '1B78378D',
'hex',
),
),
# SHA-384 based HMACs
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F',
'hex',
),
mac=codecs.decode(
'63C5DAA5' 'E651847C' 'A897C958' '14AB830B' 'EDEDC7D2'
'5E83EEF9' '195CD458' '57A37F44' '8947858F' '5AF50CC2'
'B1B730DD' 'F29671A9',
'hex',
),
),
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'1415161718191A1B' '1C1D1E1F' '20212223' '24252627' '28292A2B'
'2C2D2E2F',
'hex',
),
mac=codecs.decode(
'6EB242BD' 'BB582CA1' '7BEBFA48' '1B1E2321' '1464D2B7'
'F8C20B9FF2201637' 'B93646AF' '5AE9AC31' '6E98DB45' 'D9CAE773'
'675EEED0',
'hex',
),
),
TestVector(
digestcls=sha2.SHA384,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F' '80818283' '84858687' '88898A8B'
'8C8D8E8F' '90919293' '94959697' '98999A9B' '9C9D9E9F'
'A0A1A2A3' 'A4A5A6A7' 'A8A
|
9AAAB' 'ACADAEAF' 'B0B1B2B3'
'B4B5B6B7' 'B8B9BABB' 'BCBDBEBF' 'C0C1C2C3' 'C4C5C6C7',
'hex',
|
),
mac=codecs.decode(
'5B664436' 'DF69B0CA' '22551231' 'A3F0A3D5' 'B4F97991'
'713CFA84' 'BFF4D079' '2EFF96C2' '7DCCBBB6' 'F79B65D5'
'48B40E85' '64CEF594',
'hex',
),
),
# SHA-512 based HMACs
TestVector(
digestcls=sha2.SHA512,
text=b'Sample message for keylen=blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F' '40414243' '44454647' '48494A4B' '4C4D4E4F'
'50515253' '54555657' '58595A5B' '5C5D5E5F' '60616263'
'64656667' '68696A6B' '6C6D6E6F' '70717273' '74757677'
'78797A7B' '7C7D7E7F',
'hex',
),
mac=codecs.decode(
'FC25E240' '658CA785' 'B7A811A8' 'D3F7B4CA' '48CFA26A'
'8A366BF2' 'CD1F836B' '05FCB024' 'BD368530' '81811D6C'
'EA4216EB' 'AD79DA1C' 'FCB95EA4' '586B8A0C' 'E356596A'
'55FB1347',
'hex',
),
),
TestVector(
digestcls=sha2.SHA512,
text=b'Sample message for keylen<blocklen',
key=codecs.decode(
'00010203' '04050607' '08090A0B' '0C0D0E0F' '10111213'
'14151617' '18191A1B' '1C1D1E1F' '20212223' '24252627'
'28292A2B' '2C2D2E2F' '30313233' '34353637' '38393A3B'
'3C3D3E3F',
'hex',
|
wyzekid/Python_Projects
|
Perceptron/Rosenblatt_perceptron.py
|
Python
|
gpl-3.0
| 1,928
| 0.015659
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, clf, res=0.02):
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, res),
np.arange(y_min, y_max, res))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
plt.xlim(xx.min(), xx.max())
plt.yli
|
m(yy.min(), yy.max())
class Perceptron(object):
def __init__(self, eta=0.01, epochs=50):
self.eta = eta
self.epochs = epochs
|
def train(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.epochs):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
# Correct perceptron outputs for the given data set
y = np.array([[1],[1],[1],[1],[-1],[-1],[-1],[-1]]).reshape(8,1)
# Input data array for the perceptron
X = np.array([[0,3],[1,2],[2,2],[4,0],[-1,2],[2,0],[3,-1],[4,-1]]).reshape(8,2)
ppn = Perceptron(epochs=10, eta=0.1)
ppn.train(X, y)
plot_decision_regions(X, y, clf=ppn)
plt.title('Perceptron')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Misclassifications')
plt.show()
|
fusionbox/satchless
|
satchless/contrib/checkout/singlestep/tests/__init__.py
|
Python
|
bsd-3-clause
| 7,298
| 0.00274
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import Client
from .....checkout.tests import BaseCheckoutAppTests
from .....delivery.tests import TestDeliveryProvider
from .....order import handler as order_handler
from .....payment import ConfirmationFormNeeded
from .....payment.tests import TestPaymentProvider
from .....pricing import handler as pricing_handler
from .....product import handler as product_handler
from .....product.tests import DeadParrot
from .....product.tests.pricing import FiveZlotyPriceHandler
from ..app import checkout_app
from .....cart.tests import TestCart
from .....order.tests import TestOrder
class TestPaymentProviderWithConfirmation(TestPaymentProvider):
def confirm(self, order, typ=None):
raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')
class App(BaseCheckoutAppTests):
checkout_app = checkout_app
urls = BaseCheckoutAppTests.MockUrls(checkout_app=checkout_app)
def setUp(self):
checkout_app.cart_model = TestCart
checkout_app.order_model = TestOrder
self.parrot = DeadParrot.objects.create(slug='parrot',
species='Hyacinth Macaw')
self.dead_parrot = self.parrot.variants.create(color='blue',
looks_alive=False)
satchless_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
self.custom_settings = {
'SATCHLESS_PRODUCT_VIEW_HANDLERS': ('satchless.cart.add_to_cart_handler',),
'TEMPLATE_DIRS': (os.path.join(satchless_dir, 'category', 'templates'),
os.path.join(satchless_dir, 'order', 'templates'),
os.path.join(satchless_dir, 'cart', 'templates'),
os.path.join(satchless_dir, 'cart', 'templates'),
os.path.join(os.path.join(os.path.dirname(__file__),
'templates')),
os.path.join(os.path.join(os.path.dirname(__file__), '..',
'templates'))),
'TEMPLATE_LOADERS': (
'django.template.loaders.filesystem.Loader',
)
}
self.original_settings = self._setup_settings(self.custom_settings)
product_handler.init_queue()
order_handler.delivery_queue = order_handler.DeliveryQueue(TestDeliveryProvider)
order_handler.payment_queue = order_handler.PaymentQueue(TestPaymentProviderWithConfirmation)
self.anon_client = Client()
self.original_pricing_handlers = settings.SATCHLESS_PRICING_HANDLERS
pricing_handler.pricing_queue = pricing_handler.PricingQueue(FiveZlotyPriceHandler)
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
product_handler.init_queue()
pricing_handler.pricing_queue = pricing_handler.PricingQueue(*self.original_pricing_handlers)
def test_checkout_view_passes_with_correct_data(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
response = self._test_status(reverse('checkout:checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': 'foo@example.com'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo
|
1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = 'foo@example.com'
response = self._test_status(self.checkout_app.reverse('ch
|
eckout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
order = self.checkout_app.order_model.objects.get(pk=order.pk)
self.assertRedirects(response, reverse('checkout:confirmation',
kwargs={'order_token':
order.token}))
self.assertEqual(order.status, 'payment-pending')
def test_confirmation_view_redirects_when_order_or_payment_is_missing(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
# without payment
self._test_status(reverse('checkout:confirmation',
kwargs={'order_token': order.token}),
client_instance=self.anon_client, status_code=302)
# finish checkout view
response = self._test_status(self.checkout_app.reverse('checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': 'foo@example.com'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = 'foo@example.com'
response = self._test_status(self.checkout_app.reverse('checkout',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
self._test_status(self.checkout_app.reverse('confirmation',
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
|
La0/mozilla-relengapi
|
src/shipit/api/tests/conftest.py
|
Python
|
mpl-2.0
| 1,089
| 0.000918
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not d
|
istributed with this
# file, You can obtain one at htt
|
p://mozilla.org/MPL/2.0/.
import os
import pytest
import backend_common
@pytest.fixture(scope='session')
def app():
'''Load shipit_api in test mode
'''
import shipit_api
config = backend_common.testing.get_app_config({
'SQLALCHEMY_DATABASE_URI': 'sqlite://',
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'AUTH_CLIENT_ID': 'dummy_id',
'AUTH_CLIENT_SECRET': 'dummy_secret',
'AUTH_DOMAIN': 'auth.localhost',
'AUTH_REDIRECT_URI': 'http://localhost/login',
'OIDC_USER_INFO_ENABLED': True,
'OIDC_CLIENT_SECRETS': os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
'TASKCLUSTER_CLIENT_ID': 'something',
'TASKCLUSTER_ACCESS_TOKEN': 'something',
})
app = shipit_api.create_app(config)
with app.app_context():
backend_common.testing.configure_app(app)
yield app
|
MarineDataTools/pymqdatastream
|
pymqdatastream/connectors/todl/tools/todl_quickview.py
|
Python
|
gpl-3.0
| 9,129
| 0.011173
|
import sys
import argparse
import numpy as np
import pylab as pl
import netCDF4
import logging
import pymqdatastream.connectors.todl.todl_data_processing as todl_data_processing
try:
from PyQt5 import QtCore, QtGui, QtWidgets
except:
from qtpy import QtCore, QtGui, QtWidgets
#https://matplotlib.org/3.1.0/gallery/user_interfaces/embedding_in_qt_sgskip.html
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger('todl_quickview')
logger.setLevel(logging.DEBUG)
# FP07 Polynom hack
T = np.asarray([1.4, 9.01, 20.96, 27.55,34.77])
V = np.asarray([2.95, 2.221, 1.508, 1.26, 1.07])
P = np.polyfit(V,T,2)
#print('Polynom',P)
#https://stackoverflow.com/questions/18539679/embedding-the-matplotlib-toolbar-in-pyqt4-using-matplotlib-custom-widget#18563884
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class MplWidget(QtWidgets.QWidget):
def __init__(self, parent = None):
QtWidgets.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.vbl.addWidget(self.mpl_toolbar)
self.setLayout(self.vbl)
class todlquickviewMainWindow(QtWidgets.QMainWindow):
"""The main interface of the TODL-Quickview gui
"""
def __init__(self,fname):
QtWidgets.QMainWindow.__init__(self)
self.all_widgets = []
mainMenu = self.menuBar()
self.setWindowTitle("TODL Quickview")
#self.setWindowIcon(QtGui.QIcon('logo/pymqdatastream_logo_v0.2.svg.png'))
extractAction = QtWidgets.QAction("&Quit", self)
extractAction.setShortcut("Ctrl+Q")
extractAction.setStatusTip('Closing the program')
extractAction.triggered.connect(self.close_application)
fileMenu = mainMenu.addMenu('&File')
fileMenu.addAction(extractAction)
self.statusBar()
self.mainwidget = todlquickviewWidget(fname)
self.setCentralWidget(self.mainwidget)
self.width_orig = self.frameGeometry().width()
self.height_orig = self.frameGeometry().height()
self.width_main = self.width_orig
self.height_main = self.height_orig
def close_application(self):
logger.debug('Goodbye!')
self.close()
for w in self.mainwidget.plotWidgets:
w.close()
self.mainwidget.close()
class todlquickviewWidget(QtWidgets.QWidget):
"""
"""
def __init__(self,fname=None):
QtWidgets.QMainWindow.__init__(self)
layout = QtWidgets.QGridLayout()
self.plotWidgets = []
self.data = {}
self.layout = layout
self.setLayout(layout)
|
self.plot_button = QtWidgets.QPushButton('Plot')
self.plot_button.clicked.connect(self.plot_data)
self.var_combo = QtWidgets.QComboBox(self)
self.layout.addWidget(self.var_combo,0,0)
self.layout.addWidget(self.plot_button,0,1)
if(fname is not None):
logger.debug('Opening file:' + fname)
self.read_ncfile(fname)
self.show()
def plot_data(self):
print('Plotting')
pl
|
otvar_y = self.var_combo.currentText()
plotdata_y = self.data[plotvar_y][plotvar_y][:]
plotdata_x = self.data[plotvar_y]['x0'][:]
try:
lab_unit = '[' + self.data[plotvar_y][plotvar_y].units + ']'
except:
lab_unit = ''
ylabel = plotvar_y + lab_unit
#if('ch1' in plotvar_y):
if False:
print('Calculating temperature from polynom')
plotdata_y = np.polyval(P,plotdata_y)
plotdata_y = np.ma.masked_where((plotdata_y > T.max()) | (plotdata_y < T.min()),plotdata_y)
#print(T.max(),T.min())
# Calculate the frequency
fi = 1/(np.diff(plotdata_x).mean())
plotFrame = MplWidget()
ax = plotFrame.canvas.ax
plotFrame.canvas.ax.plot(plotdata_x,plotdata_y)
ax.set_title('Frequency:' + str(fi))
ax.set_xlabel('t [s]')
ax.set_ylabel(ylabel)
plotFrame.show()
self.plotWidgets.append(plotFrame)
def read_ncfile(self,fname):
nc = netCDF4.Dataset(fname)
# Try to read ADC data
try:
nca = nc.groups['adc']
except:
nca = None
pass
if(nca is not None):
for varname in nca.variables:
vartmp = nca.variables[varname]
print(vartmp)
print(vartmp.dimensions[0])
if(not "cnt" in varname):
self.data[vartmp.name] = {vartmp.name:vartmp,vartmp.dimensions[0]:nca.variables[vartmp.dimensions[0]]}
self.data[vartmp.name]['x0'] = self.data[vartmp.name][vartmp.dimensions[0]]
# Add to the gui
self.var_combo.addItem(varname)
#self.FLAG_CH1=True
#print('Found ch1 ADC data')
else:
print('cnt ...')
# Read in PyroScience data
print('Trying Firesting data')
try:
ncp = nc.groups['pyro']
cnt10ks_p = ncp.variables['cnt10ks_pyro'][:]
#time_p = netCDF4.num2date(ncp.variables['time'][:],units = ncp.variables['time'].units)
fp = 1/(np.diff(cnt10ks_p).mean())
# Add to the gui
self.var_combo.addItem('phi')
#phi = ncp.variables['phi'][:]
# Add to the data
self.data['phi'] = {'phi':ncp.variables['phi'],'cnt10ks_p':ncp.variables['cnt10ks_pyro']}
self.data['phi']['x0'] = self.data['phi']['cnt10ks_p']
self.FLAG_PYRO=True
print('Found Pyro data')
except Exception as e:
print('Pyro:' + str(e))
self.FLAG_PYRO=False
# Read in IMU
print('Trying IMU data')
try:
self.FLAG_IMU = True
for g in nc.groups:
print(g)
if('imu' in g):
nci = nc.groups[g]
try:
cntvar = 'cnt10ks_imu'
nci.variables[cntvar][:]
except:
cntvar = 'cnt10ks'
nci.variables[cntvar][:]
cnt10ks_imu = nci.variables[cntvar][:]
#time_imu = netCDF4.num2date(nci.variables['time'][:],units=nci.variables['time'].units)
fi = 1/(np.diff(cnt10ks_imu).mean())
for vartmp in nci.variables:
print(vartmp)
if(not "cnt" in vartmp):
varname = g + ' ' + vartmp
print('reading')
self.var_combo.addItem(varname)
self.data[varname] = {varname:nci.variables[vartmp],'cnt10ks':nci.variables[cntvar]}
self.data[varname]['x0'] = self.data[varname][cntvar]
#accx = nci.variables['accx'][:]
#accy = nci.variables['accy'][:]
#accz = nci.variables['accz'][:]
#gyrox = nci.variables['gyrox'][:]
|
glennmckechnie/weewx-wxobs
|
bin/user/wxobs.py
|
Python
|
gpl-3.0
| 27,446
| 0.000911
|
# Copyright (c) 2017-2020 Glenn McKechnie <glenn.mckechnie@gmail.com>
# Credit to Tom Keffer <tkeffer@gmail.com>, Matthew Wall and the core
# weewx team, all from whom I've borrowed heavily.
# Mistakes are mine, corrections and or improvements welcomed
# https://github.com/glennmckechnie/weewx-wxobs
#
# rsync code based on weeutil/rsyncupload.py by
# Will Page <companyguy@gmail.com> and
#
# See the file LICENSE.txt for your full rights.
#
#
# added text
import subprocess
import time
import errno
import os
import weewx.engine
from weeutil.weeutil import to_bool
from weewx.cheetahgenerator import SearchList
wxobs_version = "0.7.4"
try:
# weewx4 logging
import weeutil.logger
import logging
    log = logging.getLogger(__name__)
def logdbg(msg):
log.debug(msg)
def loginf(msg):
log.info(msg)
def logerr(msg):
log.error(msg)
except ImportError:
# old-style weewx logging
import syslog
def logmsg(level, msg):
syslog.syslog(level, 'wxobs: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
def wxrsync(rsync_user, rsync_server, rsync_options, rsync_loc_file,
rsync_loc_file2, rsync_ssh_str, rem_path, wxobs_debug,
log_success):
"""
rsync_user
rsync_server
rsync_options
rsync_loc_file
rsync_loc_file2 # maybe empty
rsync_ssh_str
rem_path
wxobs_debug
log_success
"""
t_1 = time.time()
# construct the command argument
cmd = ['rsync']
cmd.extend([rsync_options])
# cmd.extend(["-tOJrl"])
# provide some stats on the transfer
cmd.extend(["--stats"])
cmd.extend(["--compress"])
cmd.extend([rsync_loc_file])
cmd.extend([rsync_loc_file2])
cmd.extend([rsync_ssh_str])
try:
# perform the actual rsync transfer...
if wxobs_debug == 2:
loginf("rsync cmd is ... %s" % (cmd))
# rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT, close_fds=True)
rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = rsynccmd.communicate()[0]
stroutput = stdout.decode("utf-8").strip()
# rsyncpid = rsynccmd.pid
# loginf(" pre.wait rsync pid is %s" % rsyncpid)
# rsynccmd.wait()
# rsyncpid = rsynccmd.pid
# loginf(" post.wait rsync pid is %s" % rsyncpid)
# subprocess.call( ('ps', '-l') )
except OSError as e:
# print "EXCEPTION"
if e.errno == errno.ENOENT:
logerr("rsync does not appear to be installed on this system. \
(errno %d, \"%s\")" % (e.errno, e.strerror))
raise
# we have some output from rsync so generate an appropriate message
if stroutput.find('rsync error:') < 0:
# no rsync error message so parse rsync --stats results
rsyncinfo = {}
for line in iter(stroutput.splitlines()):
if line.find(':') >= 0:
(n, v) = line.split(':', 1)
rsyncinfo[n.strip()] = v.strip()
# get number of files and bytes transferred and produce an
# appropriate message
try:
if 'Number of regular files transferred' in rsyncinfo:
n_ber = rsyncinfo['Number of regular files transferred']
else:
n_ber = rsyncinfo['Number of files transferred']
n_bytes = rsyncinfo['Total file size']
n_sent = rsyncinfo['Literal data']
if n_ber is not None and n_bytes is not None:
rsync_message = ("rsync'd %s of %s files (%s) in "
"%%0.2f seconds" % (n_sent, n_ber, n_bytes))
else:
rsync_message = "rsync executed in %0.2f seconds"
# loginf("%s " % (rsync_message))
except:
rsync_message = ("rsync exception raised:"
"executed in %0.2f seconds")
loginf(" ERR %s " % (rsync_message))
else:
# suspect we have an rsync error so tidy stroutput
# and display a message
stroutput = stroutput.replace("\n", ". ")
stroutput = stroutput.replace("\r", "")
# Attempt to catch a few errors that may occur and deal with them
# see man rsync for EXIT VALUES
rsync_message = ("rsync command failed after %0.2f secs (set"
"set 'wxobs_debug = 2' in skin.conf),")
if "code 1)" in stroutput:
if wxobs_debug == 2:
logerr("rsync code 1 - %s" % stroutput)
rsync_message = ('syntax error in rsync command'
'- set debug = 1 - ! FIX ME !')
loginf(" ERR %s " % (rsync_message))
rsync_message = ("code 1, syntax error, failed"
" rsync executed in %0.2f seconds")
elif ("code 23" and "Read-only file system") in stroutput:
# read-only file system
            # sadly, won't be detected until after first successful transfer
# but it's useful then!
if wxobs_debug == 2:
logerr("rsync code 23 - %s" % stroutput)
loginf("ERR Read only file system ! FIX ME !")
rsync_message = ("code 23, read-only, rsync failed"
" executed in %0.2f seconds")
elif ("code 23" and "link_stat") in stroutput:
# likely to be that a local path doesn't exist - possible typo?
if wxobs_debug == 2:
logdbg("rsync code 23 found %s" % stroutput)
rsync_message = ("rsync code 23 : is %s correct?"
"! FIXME !" % (rsync_loc_file))
loginf(" ERR %s " % rsync_message)
rsync_message = ("code 23, link_stat, rsync failed"
" executed in %0.2f seconds")
elif "code 11" in stroutput:
# directory structure at remote end is missing - needs creating
# on this pass. Should be Ok on next pass.
if wxobs_debug == 2:
loginf("rsync code 11 - %s" % stroutput)
rsync_message = ("rsync code 11 found Creating %s"
" as a fix?, space issue?" % (rem_path))
loginf("%s" % rsync_message)
# laborious but apparently necessary, the only way the command
# will run!? build the ssh command - n.b: spaces cause wobblies!
cmd = ['ssh']
cmd.extend(["%s@%s" % (rsync_user, rsync_server)])
mkdirstr = "mkdir -p"
cmd.extend([mkdirstr])
cmd.extend([rem_path])
if wxobs_debug == 2:
loginf("sshcmd %s" % cmd)
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
rsync_ssh_str = rem_path
rsync_message = ("code 11, rsync mkdir cmd executed"
" in % 0.2f seconds")
elif ("code 12") and ("Permission denied") in stroutput:
if wxobs_debug == 2:
logdbg("rsync code 12 - %s" % stroutput)
rsync_message = ("Permission error in rsync command, probably at"
" remote end authentication ! FIX ME !")
loginf(" ERR %s " % (rsync_message))
rsync_message = "code 12, rsync failed, executed in % 0.2f seconds"
elif ("code 12") and ("No route to host") in stroutput:
if wxobs_debug == 2:
logdbg("rsync code 12 - %s" % stroutput)
rsync_message = "No route to host error in rsync command ! FIX ME!"
loginf(" ERR %s " % (rsync_message))
rsync_message = "code 12, rsync failed, executed in % 0.2f seconds"
else:
logerr("rsync [%s] reported this error: %s" % (cmd, stroutput))
if log_success:
if wxobs_debug == 0:
t_o = ''
rsync_ssh_str
|
OCA/margin-analysis
|
account_invoice_margin/__init__.py
|
Python
|
agpl-3.0
| 194
| 0
|
# © 2017 Sergio Teruel <sergio.teruel@tecnativa.com>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from .hooks import pre_init_hook
from . import models
from . import report
|
befelix/GPy
|
GPy/core/parameterization/transformations.py
|
Python
|
bsd-3-clause
| 194
| 0
|
# Copyright (c) 2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from paramz.transformations import *
from paramz.transformations import __fixed__
|
mganeva/mantid
|
scripts/AbinsModules/CalculateS.py
|
Python
|
gpl-3.0
| 2,205
| 0.004989
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import AbinsModules
class CalculateS(object):
"""
Class producer for generating required S calculator
    Currently available S calculators:
* SPowderSemiEmpiricalCalculator
"""
@staticmethod
    def init(filename=None, temperature=None, sample_form=None, abins_data=None, instrument=None,
quantum_order_num=None, bin_width=1.0):
"""
:param filename: name of input DFT file (CASTEP: foo.phonon)
:param temperature: temperature in K for which calculation of S should be done
:param sample_form: form in which experimental sample is: Powder or SingleCrystal (str)
:param abins_data: object of type AbinsData with data from phonon file
:param instrument: object of type Instrument for which simulation should be performed
:param quantum_order_num: number of quantum order events taken into account during the simulation
:param bin_width: width of bins in wavenumber
"""
if sample_form in AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS:
if sample_form == "Powder":
return AbinsModules.SPowderSemiEmpiricalCalculator(filename=filename, temperature=temperature,
abins_data=abins_data, instrument=instrument,
quantum_order_num=quantum_order_num,
bin_width=bin_width)
# TODO: implement numerical powder averaging
# elif sample == "SingleCrystal": #TODO implement single crystal scenario
else:
raise ValueError("Only implementation for sample in the form of powder is available.")
else:
raise ValueError("Invalid sample form %s" % sample_form)
|
atomic83/youtube-dl
|
youtube_dl/extractor/iqiyi.py
|
Python
|
unlicense
| 9,558
| 0.000745
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import math
import random
import time
import uuid
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import ExtractorError
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
_VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
'md5': '2cb594dc2781e6c941a110d8f358118b',
'info_dict': {
'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
'title': '美国德州空中惊现奇异云团 酷似UFO',
'ext': 'f4v',
}
}, {
'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb',
'title': '名侦探柯南第752集',
},
'playlist': [{
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}],
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
'only_matching': True,
}, {
'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
'only_matching': True,
}, {
'url': 'http://yule.iqiyi.com/pcb.html',
'only_matching': True,
}]
_FORMATS_MAP = [
('1', 'h6'),
('2', 'h5'),
('3', 'h4'),
('4', 'h3'),
('5', 'h2'),
('10', 'h1'),
]
@staticmethod
def md5_text(text):
return hashlib.md5(text.encode('utf-8')).hexdigest()
def construct_video_urls(self, data, video_id, _uuid):
def do_xor(x, y):
a = y % 3
if a == 1:
return x ^ 121
if a == 2:
return x ^ 72
return x ^ 103
def get_encode_code(l):
a = 0
b = l.split('-')
c = len(b)
s = ''
for i in range(c - 1, -1, -1):
a = do_xor(int(b[c - i - 1], 16), i)
s += chr(a)
return s[::-1]
def get_path_key(x, format_id, segment_index):
mg = ')(*&^flash@#$%a'
tm = self._download_json(
'http://data.video.qiyi.com/t?tn=' + str(random.random()), video_id,
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
return self.md5_text(t + mg + x)
video_urls_dict = {}
for format_item in data['vp']['tkl'][0]['vs']:
if 0 < int(format_item['bid']) <= 10:
format_id = self.get_format(format_item['bid'])
else:
continue
video_urls = []
video_urls_info = format_item['fs']
if not format_item['fs'][0]['l'].startswith('/'):
t = get_encode_code(format_item['fs'][0]['l'])
if t.endswith('mp4'):
video_urls_info = format_item['flvs']
for segment_index, segment in enumerate(video_urls_info):
vl = segment['l']
if not vl.startswith('/'):
vl = get_encode_code(vl)
key = get_path_key(
vl.split('/')[-1].split('.')[0], format_id, segment_index)
filesize = segment['b']
base_url = data['vp']['du'].split('/')
base_url.insert(-1, key)
base_url = '/'.join(base_url)
param = {
'su': _uuid,
'qyid': uuid.uuid4().hex,
'client': '',
'z': '',
'bt': '',
'ct': '',
'tn': str(int(time.time()))
}
api_video_url = base_url + vl + '?' + \
compat_urllib_parse.urlencode(param)
js = self._download_json(
api_video_url, video_id,
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
video_url = js['l']
video_urls.append(
(video_url, filesize))
video_urls_dict[format_id] = video_urls
return video_urls_dict
def get_format(self, bid):
matched_format_ids = [_format_id for _bid, _format_id in self._FORMATS_MAP if _bid == str(bid)]
return matched_format_ids[0] if len(matched_format_ids) else None
def get_bid(self, format_id):
        matched_bids = [_bid for _bid, _format_id in self._FORMATS_MAP if _format_id == format_id]
return matched_bids[0] if len(matched_bids) else None
def get_raw_data(self, tvid, video_id, enc_key, _uuid):
tm = str(int(time.time()))
tail = tm + tvid
param = {
'key': 'fvip',
'src': self.md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
'enc': self.md5_text(enc_key + tail),
            'qyid': _uuid,
'tn': random.random(),
'um': 0,
'authkey': self.md5_text(self.md5_text('') + tail),
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
compat_urllib_parse.urlencode(param)
raw_data = self._download_json(api_url, video_id)
return raw_data
def get_enc_key(self, swf_url, video_id):
# TODO: automatic key extraction
# last update at 2015-12-18 for Zombie::bite
enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1]
return enc_key
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
tvid = self._search_regex(
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
video_id = self._search_regex(
r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
swf_url = self._search_regex(
r'(http://[^\'"]+MainPlayer[^.]+\.swf)', webpage, 'swf player URL')
_uuid = uuid.uuid4().hex
enc_key = self.get_enc_key(swf_url, video_id)
raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
if raw_data['code'] != 'A000000':
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
if not raw_data['data']['vp']['tkl']:
            raise ExtractorError('iQiyi VIP videos are not supported')
data = raw_data['data']
title = data['vi']['vn']
# generate video_urls_dict
video_urls_dict = self.construct_video_urls(
data, video_id, _uuid)
# c
|
upconsulting/IsisCB
|
isiscb/isisdata/migrations/0020_auto_20160615_1630.py
|
Python
|
mit
| 15,648
| 0.000831
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-15 16:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0019_auto_20160427_1520'),
]
operations = [
migrations.AddField(
model_name='aarelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='aarelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='aarelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_first',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_last',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='personal_name_suffix',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='acrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='attribute',
name='type_qualifier',
field=models.CharField(blank=True, choices=[(b'BGN', b'Began'), (b'END', b'Ended'), (b'OCR', b'Occurred')], max_length=3, null=True),
),
migrations.AddField(
model_name='authority',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='authority',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='authority',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='data_display_order',
field=models.FloatField(default=1.0, help_text=b'Position at which the citation should be displayed in the citation detail view. Whole numbers or decimals can be used.'),
),
migrations.AddField(
model_name='ccrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='ccrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='additional_titles',
field=models.TextField(blank=True, help_text=b'Additional titles (not delimited, free text).', null=True),
),
migrations.AddField(
model_name='citation',
name='book_series',
field=models.CharField(blank=True, help_text=b'Used for books, and potentially other works in a series.', max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='citation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_first',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_last',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='personal_name_suffix',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalacrelation',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='record_status_explanation',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalattribute',
name='type_qualifier',
field=models.CharField(blank=True, choices=[(b'BGN', b'Began'), (b'END', b'Ended'), (b'OCR', b'Occurred')], max_length=3, null=True),
),
migrations.AddField(
model_name='historicalauthority',
name='dataset',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalauthority',
name='record_status_explanation',
            field=models.CharField(blank=True, max_length=255, null=True),
),
        migrations.AddField(
model_name='historicalauthority',
name='record_status_value',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='historicalccrelation',
name='data_display_order',
field=models.FloatField(default=1.0, help_text=b'Position at which th
|
kyunooh/JellyBlog
|
lifeblog/migrations/0001_initial.py
|
Python
|
apache-2.0
| 900
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-14 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
                ('time', models.DateTimeField(auto_now_add=True)),
                ('meta_tag', models.CharField(max_length=150)),
('view_count', models.IntegerField(default=0, editable=False)),
('public_doc', models.BooleanField()),
('update_time', models.DateTimeField(auto_now=True)),
],
),
]
|
Azure/azure-sdk-for-python
|
sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/upload_certificate_response.py
|
Python
|
mit
| 3,198
| 0.001876
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UploadCertificateResponse(Model):
"""The upload registration certificate response.
All required parameters must be populated in order to send to Azure.
:param auth_type: Specifies authentication type. Possible values include:
'Invalid', 'AzureActiveDirectory'
:type auth_type: str or ~azure.mgmt.edgegateway.models.AuthenticationType
:param resource_id: Required. The resource ID of the Data Box Edge/Gateway
device.
:type resource_id: str
:param aad_authority: Required. Azure Active Directory tenant authority.
:type aad_authority: str
:param aad_tenant_id: Required. Azure Active Directory tenant ID.
:type aad_tenant_id: str
:param service_principal_client_id: Required. Azure Active Directory
service principal client ID.
:type service_principal_client_id: str
:param service_principal_object_id: Required. Azure Active Directory
service principal object ID.
:type service_principal_object_id: str
:param azure_management_endpoint_audience: Required. The azure management
endpoint audience.
    :type azure_management_endpoint_audience: str
"""
    _validation = {
        'resource_id': {'required': True},
'aad_authority': {'required': True},
'aad_tenant_id': {'required': True},
'service_principal_client_id': {'required': True},
'service_principal_object_id': {'required': True},
'azure_management_endpoint_audience': {'required': True},
}
_attribute_map = {
'auth_type': {'key': 'authType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'aad_authority': {'key': 'aadAuthority', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'service_principal_client_id': {'key': 'servicePrincipalClientId', 'type': 'str'},
'service_principal_object_id': {'key': 'servicePrincipalObjectId', 'type': 'str'},
'azure_management_endpoint_audience': {'key': 'azureManagementEndpointAudience', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UploadCertificateResponse, self).__init__(**kwargs)
self.auth_type = kwargs.get('auth_type', None)
self.resource_id = kwargs.get('resource_id', None)
self.aad_authority = kwargs.get('aad_authority', None)
self.aad_tenant_id = kwargs.get('aad_tenant_id', None)
self.service_principal_client_id = kwargs.get('service_principal_client_id', None)
self.service_principal_object_id = kwargs.get('service_principal_object_id', None)
self.azure_management_endpoint_audience = kwargs.get('azure_management_endpoint_audience', None)
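# A hedged construction sketch showing how the required keyword arguments map
# onto the attribute map above; every value below is a placeholder, not a real
# Azure identifier.
def _example_upload_certificate_response():
    return UploadCertificateResponse(
        auth_type='AzureActiveDirectory',
        resource_id='/subscriptions/xxx/resourceGroups/rg/providers/'
                    'Microsoft.DataBoxEdge/dataBoxEdgeDevices/example-device',
        aad_authority='https://login.microsoftonline.com',
        aad_tenant_id='00000000-0000-0000-0000-000000000000',
        service_principal_client_id='00000000-0000-0000-0000-000000000000',
        service_principal_object_id='00000000-0000-0000-0000-000000000000',
        azure_management_endpoint_audience='https://management.azure.com')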
|
codetalkio/TelegramIRCImageProxy
|
asyncirc/ircbot.py
|
Python
|
mit
| 5,983
| 0.00234
|
'''Todo:
* Add multiple thread support for async_process functions
* Potentially thread each handler function? idk
'''
import sys
import socket
import re
import threading
import logging
import time
if sys.hexversion < 0x03000000:
#Python 2
import Queue as queue
BlockingIOError = socket.error
else:
import queue
from .ircclient import IRCClient
logger = logging.getLogger(__name__)
#Somewhat complex regex that accurately matches nick!username@host, with named groups for easy parsing and usage
user_re = re.compile(r'(?P<nick>[\w\d<-\[\]\^\{\}\~]+)!(?P<user>[\w\d<-\[\]\^\{\}\~]+)@(?P<host>.+)')
class IRCBot(IRCClient):
    '''See `IRCClient` for basic client usage; below is usage for the bot system.
Handler notation:
on_join(self, nick, host, channel)
on_topic(self, nick, host, channel, topic)
on_part(self, nick, host, channel, message)
on_msg(self, nick, host, channel, message)
on_privmsg(self, nick, host, message)
on_chanmsg(self, nick, host, channel, message)
on_notice(self, nick, host, channel, message)
on_nick(self, nick, new_nick, host)
'''
_handlers = {
'join': [],
'part': [],
'kick': [],
'topic': [],
'msg': [],
'privmsg': [],
'chanmsg': [],
'notice': [],
'nick': []
}
_process_thread = None
def _async_process(self):
while not self._stop_event.is_set():
time.sleep(0.01)
try:
args = self._in_queue.get_nowait()
#These "msg"s will be raw irc received lines, which have several forms
# basically, we should be looking for
# :User!Name@host COMMAND <ARGS>
userhost = user_re.search(args[0][1:])
if userhost:
                    nick, user, host = userhost.groups()
command = args[1]
if command == 'JOIN':
channel = args[2][1:] #JOIN Channels are : prefixed
for handler in self._handlers['join']:
handler(self, nick, host, channel)
elif command == 'TOPIC':
channel = args[2]
topic = ' '.join(args[3:])
for handler in self._handlers['topic']:
handler(self, nick, host, channel, topic)
elif command == 'PART':
channel = args[2]
message = ' '.join(args[3:])
for handler in self._handlers['part']:
handler(self, nick, host, channel, message)
elif command == 'PRIVMSG':
channel = args[2]
message = ' '.join(args[3:])[1:]
for handler in self._handlers['msg']:
handler(self, nick, host, channel, message)
if channel[0] == '#':
#this is a channel
for handler in self._handlers['chanmsg']:
handler(self, nick, host, channel, message)
else:
#private message
for handler in self._handlers['privmsg']:
handler(self, nick, host, message)
elif command == 'KICK':
channel = args[2]
kicked_nick = args[3]
reason = ' '.join(args[4:])[1:]
for handler in self._handlers['kick']:
handler(self, nick, host, channel, kicked_nick, reason)
elif command == 'NICK':
                        new_nick = args[2][1:]
for handler in self._handlers['nick']:
handler(self, nick, new_nick, host)
elif command == 'NOTICE':
#:nick!user@host NOTICE <userchan> :message
channel = args[2]
message = ' '.join(args[3:])
for handler in self._handlers['notice']:
                            handler(self, nick, host, channel, message)
else:
logger.warning("Unhandled command %s" % command)
self._in_queue.task_done()
except queue.Empty as e: pass
except Exception as e:
logger.exception("Error while handling message " + str(args))
def start(self):
IRCClient.start(self)
self._process_thread = threading.Thread(target=self._async_process)
self._process_thread.start()
def on(self, type):
'''Decorator function'''
        def decorator(func):
'''decorated functions should be written as class methods
@on('join')
def on_join(self, channel):
print("Joined channel %s" % channel)
'''
self._handlers[type].append(func)
return func
return decorator
def on_join(self, func):
self._handlers['join'].append(func)
return func
def on_part(self, func):
self._handlers['part'].append(func)
return func
def on_kick(self, func):
self._handlers['kick'].append(func)
return func
def on_msg(self, func):
self._handlers['msg'].append(func)
return func
def on_privmsg(self, func):
self._handlers['privmsg'].append(func)
return func
def on_chanmsg(self, func):
self._handlers['chanmsg'].append(func)
return func
def on_notice(self, func):
self._handlers['notice'].append(func)
return func
def on_nick(self, func):
self._handlers['nick'].append(func)
return func
__all__ = ['IRCBot']
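# A hedged sketch of the handler registration described in the IRCBot
# docstring. `bot` is assumed to be an IRCBot that has already been
# constructed and connected elsewhere; only the registration pattern is shown.
def _example_register_handlers(bot):
    @bot.on_chanmsg
    def log_channel_message(client, nick, host, channel, message):
        # on_chanmsg handlers receive (self, nick, host, channel, message)
        logger.info("%s said %r in %s", nick, message, channel)
    return log_channel_message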
|
saurabhVisie/appserver
|
rest_auth/registration/serializers.py
|
Python
|
mit
| 6,316
| 0.000792
|
from django.http import HttpRequest
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from allauth.account import app_settings as allauth_settings
from allauth.utils import (email_address_exists,
get_username_max_length)
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
from rest_framework import serializers
from requests.exceptions import HTTPError
# Import is needed only if we are using social login, in which
# case the allauth.socialaccount will be declared
if 'allauth.socialaccount' in settings.INSTALLED_APPS:
from allauth.socialaccount.helpers import complete_social_login
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:returns: A populated instance of the
`allauth.socialaccount.SocialLoginView` instance
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
_("View is not defined, pass it as a context variable")
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError(_("Define adapter_class in view"))
adapter = adapter_class(request)
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# Case 1: We received the access_token
if attrs.get('access_token'):
access_token = attrs.get('access_token')
# Case 2: We received the authorization code
elif attrs.get('code'):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
_("Define callback_url in view")
)
if not self.client_class:
raise serializers.ValidationError(
_("Define client_class in view")
)
code = attrs.get('code')
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
else:
raise serializers.ValidationError(
_("Incorrect input. access_token or code is required."))
social_token = adapter.parse_token({'access_token': access_token})
social_token.app = app
try:
login = self.get_social_login(adapter, app, social_token, access_token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError(_('Incorrect value'))
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
class RegisterSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=get_username_max_length(),
min_length=allauth_settings.USERNAME_MIN_LENGTH,
required=allauth_settings.USERNAME_REQUIRED
)
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
password1 = serializers.CharField(write_only=True)
password2 = serializers.CharField(write_only=True)
def validate_username(self, username):
username = get_adapter().clean_username(username)
return username
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_("A user is already registered with this e-mail address."))
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(_("The two password fields didn't match."))
return data
def custom_signup(self, request, user):
pass
|
def get_cleaned_data(self):
return {
            'username': self.validated_data.get('username', ''),
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', '')
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
self.custom_signup(request, user)
setup_user_email(request, user, [])
return user
class VerifyEmailSerializer(serializers.Serializer):
key = serializers.CharField()
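# Hedged illustration of the two request payload shapes that
# SocialLoginSerializer.validate() accepts (token/code values below are
# placeholders, not real provider credentials):
def _example_social_login_payloads():
    # Case 1: the client already holds a provider-issued access token.
    case_access_token = {'access_token': '<provider-access-token>'}
    # Case 2: the client only has an authorization code; the view must then
    # define callback_url and client_class so the code can be exchanged.
    case_code = {'code': '<provider-authorization-code>'}
    return case_access_token, case_code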
|
HyperloopTeam/FullOpenMDAO
|
bin/rst2odt_prepstyles.py
|
Python
|
gpl-2.0
| 237
| 0.004219
|
#!/Users/shreyashirday/Personal/openmdao-0.13.0/bin/python
# EASY-INSTALL-SCRIPT: 'docutils==0.10','rst2odt_prepstyles.py'
__requires__ = 'docutils==0.10'
__import__('pkg_resources').run_script('docutils==0.10', 'rst2odt_prepstyles.py')
| |
tst-mswartz/earthenterprise
|
earth_enterprise/src/server/wsgi/search/common/exceptions.py
|
Python
|
apache-2.0
| 1,602
| 0.006866
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all exception's which search services may raise."""
from search.common import utils
class Error(Exception):
  """Generic error."""
def ToString(self, error_prefix):
"""Builds error message string escaping it for HTML.
Args:
error_prefix: an error prefix.
Returns:
HTML escaped error message.
"""
if error_prefix:
return utils.HtmlEscape(
"{0}: {1}".format(error_prefix, str("\n".join(self.args))))
else:
return utils.HtmlEscape("Error: {0}".format(str("\n".join(self.args))))
def __str__(self):
return self.ToString("Error")
class BadQueryException(Error):
"""BadQueryException error."""
def __str__(self):
return self.ToString("BadQueryException")
# Places search service pool exception.
class PoolConnectionException(Error):
"""PoolConnectionException error."""
def __str__(self):
return self.ToString("PoolConnectionException")
def main():
pass
if __name__ == "__main__":
main()
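# A hedged usage sketch of the exception classes above; the query text is a
# placeholder. __str__ routes through ToString(), which HTML-escapes the
# message via utils.HtmlEscape.
def _example_bad_query_message():
  try:
    raise BadQueryException("Unsupported search operator: <foo>")
  except BadQueryException as e:
    return str(e)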
|
BaileySN/Raspberry-Pi-Shutdown-Button
|
shutdown_script.py
|
Python
|
gpl-3.0
| 586
| 0.030717
|
#! /usr/bin/env python
# coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
import os
#config
#change the GPIO Port number
gpioport=24
sdate = time.strftime("%H:%M:%S")
stime = time.strftime("%Y-%m-%d")
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpioport, GPIO.IN)
def sysshutdown(channel):
msg="System shutdown GPIO.Low state"
logpath="/var/log/shutdown.log"
print("System shutdown")
f = open(logpath, "a")
f.write(str(sdate)+";"+str(stime)+";"+str(msg)+";")
f.close()
os.system("shutdown -h now")
while True:
if(GPIO.input(gpioport)):
sysshutdown("1")
break
time.sleep(2)
|
cmhill/q-compression
|
src/compress.py
|
Python
|
mit
| 50,746
| 0.005656
|
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from collections import deque
from itertools import islice
#from subprocess import call
import subprocess
from optparse import OptionParser
from tempfile import mkstemp
import glob
import os
import random
import re
import shlex
import shutil
import sys
import tempfile
import time
import resource
import locale
#import file
locale.setlocale(locale.LC_ALL, "C")
FNULL = open('/dev/null', 'w')
base_path = os.path.dirname(sys.argv[0])[:-len('src/')]
dry_run = False
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def call(call_arr, stdout=sys.stdout, stderr=sys.stderr):
if not dry_run:
subprocess.call(call_arr, stdout=stdout, stderr=stderr)
def sort_reads_command(options,reads_filename):
"""
Sort the incoming FASTQ filename.
"""
SORT_CMD = "fastq-sort " + reads_filename
call_arr = SORT_CMD.split()
output_fp = open(options.output_dir + '/sorted/' + os.path.basename(reads_filename), 'w')
out_cmd(output_fp.name, FNULL.name, call_arr)
call(call_arr, stdout=output_fp, stderr=FNULL)
return output_fp.name
def sort_reads(options):
"""
Sort the FASTQ reads and update the options accordingly.
"""
ensure_dir(options.output_dir + '/sorted/')
if options.unpaired_reads_filenames:
new_filenames = []
for reads_filenames in options.unpaired_reads_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.unpaired_reads_filenames = ','.join(new_filenames)
if options.first_mate_filenames:
new_filenames = []
for reads_filenames in options.first_mate_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.first_mate_filenames = ','.join(new_filenames)
if options.second_mate_filenames:
new_filenames = []
for reads_filenames in options.second_mate_filenames.split(','):
new_filenames.append(sort_reads_command(options, reads_filenames))
options.second_mate_filenames = ','.join(new_filenames)
def compress(options):
"""
Compress the reads using all methods.
"""
ensure_dir(options.output_dir + '/original/')
ensure_dir(options.output_dir + '/goodbad/')
ensure_dir(options.output_dir + '/maxqual/')
ensure_dir(options.output_dir + '/minqual/')
std_err_file = open('compress.log', 'w')
    # Basic command line scripts to run the individual compression schemes.
GB_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -c 2 -b 0 -i [COMPRESSED_FILE]"
    MAX_VALUE_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -g [MAX_QV] -b 40 -c 2 -i [COMPRESSED_FILE]"
MIN_VALUE_COMPRESSION_CMD = "./src/good_bad_coding.py -r [READ] -g 0 -b 0 -c 2 -i [COMPRESSED_FILE]"
POLY_REGRESSION_CMD = "Rscript src/poly_regression_parallel.R [READ] [OUTPUT] [DEGREE] [COMPRESSED_FILE] [NUM_THREADS] [MAX_QV]"
PROFILE_COMPRESSION_CMD = "Rscript src/profile_parallel.R [READ] [OUTPUT] [TRAINING_SIZE] [NUM_PROFILES] [COMPRESSED_FILE] [NUM_THREADS]"
QUALCOMP_COMPRESS_CMD = "./runCompress.sh -i [READ] -c [CLUSTERS] -r [RATE]"
QUALCOMP_DECOMPRESS_CMD = "./runDecompress.sh -p [DIR] -c [CLUSTERS] -r [RATE]"
RQS_COMPRESS_CMD = "./src/run_rqs.sh [READ] [OUTPUT]"
#qvz -c 3 -r .10 -v test_results4/original/frag_1.fastq.quals tmp/test_1_c3_r.10
QVZ_COMPRESS_CMD = "[QVZ]/qvz -c [CLUSTERS] -r [RATE] -v [READ] [OUTPUT]"
QVZ_DECOMPRESS_CMD = "[QVZ]/qvz -x -v [INPUT] [OUTPUT]"
# Store which compression directories we created.
options.compressed_dirs = []
options.compressed_dirs.append('original')
options.compressed_dirs.append('goodbad')
options.compressed_dirs.append('maxqual')
options.compressed_dirs.append('minqual')
for reads_filename in options.reads_filenames:
# Copy the original sequences over.
out_cmd("", std_err_file.name, ["cp", reads_filename, options.output_dir + '/original/' + os.path.basename(reads_filename)])
shutil.copyfile(reads_filename, options.output_dir + '/original/' + os.path.basename(reads_filename))
# Good/bad binary compression.
call_arr = GB_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/goodbad/' + os.path.basename(reads_filename) + '.comp').split()
output_fp = open(options.output_dir + '/goodbad/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/goodbad/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
# Max/min quality value compression. We can use good_bad.py script to do this.
call_arr = MAX_VALUE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/maxqual/' + os.path.basename(reads_filename) + '.comp')\
.replace('[MAX_QV]', options.max_quality).split()
output_fp = open(options.output_dir + '/maxqual/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/maxqual/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
call_arr = MIN_VALUE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/minqual/' + os.path.basename(reads_filename) + '.comp').split()
output_fp = open(options.output_dir + '/minqual/' + os.path.basename(reads_filename), 'w')
out_cmd(options.output_dir + '/minqual/' + os.path.basename(reads_filename), std_err_file.name, call_arr)
call(call_arr, stdout=output_fp, stderr=std_err_file)
#continue
# Polynomial regression.
if options.poly_degrees:
for degree in options.poly_degrees.split(','):
ensure_dir(options.output_dir + '/degree_' + degree + '/')
if 'degree_' + degree not in options.compressed_dirs:
options.compressed_dirs.append('degree_' + degree)
#continue
call_arr = POLY_REGRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[OUTPUT]', options.output_dir + '/degree_' + degree + '/' + os.path.basename(reads_filename))\
.replace('[DEGREE]', degree)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/degree_' + degree +'/' + os.path.basename(reads_filename) + '.comp')\
.replace('[NUM_THREADS]', options.threads)\
.replace('[MAX_QV]', options.max_quality).split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Profile compression using k-means.
if options.profile_sizes:
for profiles in options.profile_sizes.split(','):
ensure_dir(options.output_dir + '/profile_' + profiles + '/')
if 'profile_' + profiles not in options.compressed_dirs:
options.compressed_dirs.append('profile_' + profiles)
#continue
call_arr = PROFILE_COMPRESSION_CMD.replace('[READ]', reads_filename)\
.replace('[OUTPUT]', options.output_dir + '/profile_' + profiles + '/' + os.path.basename(reads_filename))\
.replace('[NUM_PROFILES]', profiles)\
.replace('[TRAINING_SIZE]', options.training_size)\
.replace('[COMPRESSED_FILE]', options.output_dir + '/profile_' + profiles +'/' + os.path.basename(reads_filename) + '.comp')\
.replace('[NUM_THREADS]', options.threads).split()
out_cmd("", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
# Compress using QualComp.
if options.rates:
|
yongwen/makahiki
|
makahiki/apps/widgets/popular_tasks/views.py
|
Python
|
mit
| 997
| 0.008024
|
"""Prepare rendering of popular smart grid actions widget"""
from apps.widgets.smartgrid import smartgrid
def supply(request, page_name):
"""Supply view_objects content, which are the popular actions from the smart grid game."""
_ = request
num_results = 5 if page_name != "status" else None
    #construct a dictionary containing the most popular tasks.
    #The keys are the type of the task and the values are a list of tasks.
popular_tasks = {
"Activity": smartgrid.get_popular_actions("activity", "approved", num_results),
"Commitment": smartgrid.get_popular_actions("commitment", "approved", num_results),
"Event": smartgrid.get_popul
|
ar_actions("event", "pend
|
ing", num_results),
"Excursion": smartgrid.get_popular_actions("excursion", "pending", num_results),
}
count = len(popular_tasks)
return {
"popular_tasks": popular_tasks,
"no_carousel": page_name == "status",
"range": count,
}
|
openplans/shareabouts-api
|
src/sa_api_v2/migrations/0005_add_dimensions_to_attachments.py
|
Python
|
gpl-3.0
| 702
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-02-29 16:58
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sa_api_v2', '0004_django_19_updates'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='height',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
            model_name='attachment',
            name='width',
field=models.IntegerField(blank=True, null=True),
),
]
|
meng-sun/hil
|
haas/auth.py
|
Python
|
apache-2.0
| 4,101
| 0.000244
|
"""Authentication and authorization."""
from haas.errors import AuthorizationError
from haas import model
from abc import ABCMeta, abstractmethod
import sys
_auth_backend = None
class AuthBackend(object):
"""An authentication/authorization backend.
Extensions which implement authentication/authorization backends should
inherit from this class, and invoke ``set_auth_backend()`` on an instance
of the subclass
Subclasses of AuthBackend must override `authenticate`, `_have_admin`,
and `_have_project_access`, and nothing else. Users of the AuthBackend must
not invoke `_have_admin` and `_have_project_access`, preferring
`have_admin` and `have_project_access`.
"""
__metaclass__ = ABCMeta
@abstractmethod
def authenticate(self):
"""Authenticate the api call, and prepare for later authorization checks.
This method will be invoked inside of a flask request context,
with ``haas.rest.local.db`` initialized to a valid database session.
It is responsible for authenticating the request, and storing any
data it will need later to determine whether the requested operation
is authorized.
The attribute ``haas.rest.local.auth`` is reserved for use by auth
backends; A backend may store any information it needs as that
attribute.
This method must return a boolean indicating whether or not
authentication was successful -- True if so, False if not.
"""
@abstractmethod
def _have_admin(self):
"""Check if the request is authorized to act as an administrator.
Return True if so, False if not. This will be called sometime after
``authenticate()``.
"""
@abstractmethod
def _have_project_access(self, project):
"""Check if the request is authorized to act as the given project.
Each backend must implement this method. The backend does not need
to deal with the case where the authenticated user is an admin here;
the `have_*` and `require_*` wrappers handle this.
"""
def have_admin(self):
"""Check if the request is authorized to act as an administrator.
        Return True if so, False if not. This will be called sometime after
``authenticate()``.
"""
return self._have_admin()
def have_project_access(self, project):
"""Check if the request is authorized to act as the given project.
        Return True if so, False if not. This will be called sometime after
``authenticate()``.
``project`` will be a ``Project`` object, *not* the name of the
project.
        Note that have_admin implies have_project_access.
"""
assert isinstance(project, model.Project)
return self._have_admin() or self._have_project_access(project)
def require_admin(self):
"""Ensure the request is authorized to act as an administrator.
Raises an ``AuthorizationError`` on failure, instead of returning
        False. This is a convenience wrapper around ``have_admin``,
and should not be overwritten by subclasses.
"""
if not self.have_admin():
            raise AuthorizationError("This operation is administrator-only.")
def require_project_access(self, project):
"""Like ``require_admin()``, but wraps ``have_project_access()``."""
if not self.have_project_access(project):
raise AuthorizationError(
"You do not have access to the required project.")
def set_auth_backend(backend):
"""Set the authentication backend to ``backend``.
This should be called exactly once, on startup, with an instance of
    ``AuthBackend`` as its argument.
"""
global _auth_backend
if _auth_backend is not None:
sys.exit("Fatal Error: set_auth_backed() called twice. Make sure "
"you don't have conflicting extensions loaded.")
_auth_backend = backend
def get_auth_backend():
"""Return the current auth backend."""
return _auth_backend
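# A minimal, hedged sketch of the subclassing contract described in the
# AuthBackend docstring. The "allow everything" policy below is purely
# illustrative; it is not a backend shipped with this module.
class _ExampleNullAuthBackend(AuthBackend):
    """Illustrative backend that treats every request as an admin."""

    def authenticate(self):
        # A real backend would inspect the flask request and stash whatever it
        # needs on haas.rest.local.auth; here we simply accept the request.
        return True

    def _have_admin(self):
        return True

    def _have_project_access(self, project):
        return True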
|
shamidreza/unitselection
|
experiment.py
|
Python
|
gpl-2.0
| 3,464
| 0.018764
|
"""
Author: Seyed Hamidreza Mohammadi
This file is part of the shamidreza/unitselection software.
Please refer to the LICENSE provided alongside the software (which is GPL v2,
http://www.gnu.org/licenses/gpl-2.0.html).
This file includes the code for putting all the pieces together.
"""
from utils import *
from extract_unit_info import *
from search import *
from generate_speech import *
if __name__ == "__main__":
if 0: # test pit2gci
pit_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.lf0'
target_gci = pit2gci(pit_file)
if 1: # test read_dur,pit,for methods
dur_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.dur'
for_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.for'
pit_file='/Users/hamid/Code/hts/HTS-demo_CMU-ARCTIC-SLT2/gen/qst001/ver1/2mix/2/alice01.lf0'
#a=read_hts_for(for_file)
#b=read_hts_pit(pit_file)
#c=read_hts_dur(dur_file)
fname = 'arctic_a0001'
lab_name=corpus_path+'/lab/'+fname+'.lab'
wav_name=corpus_path+'/wav/'+fname+'.wav'
pm_name=corpus_path+'/pm/'+fname+'.pm'
##target_units = load_input(lab_name)
#times, labs = read_lab(lab_name)
##tmp_units=extract_info(lab_name, wav_name, 0,0)
times, pits, vox_times, vox_vals = read_hts_pit(pit_file)
frm_time, frm_val = read_hts_for(for_file)
gcis=pit2gci(times, pits, vox_times, vox_vals)
tmp_units, times=read_input_lab(dur_file, pit_file)
#tmp_units = tmp_units[128:140]##
target_units = np.zeros(len(tmp_units), 'object')
for j in xrange(len(tmp_units)):
target_units[j] = tmp_units[j]
if 0:
units, fnames=load_units()
units = units[:int(units.shape[0]*(100.0/100.0))]
best_units_indice=search(target_units, units,limit=20)
best_units = units[best_units_indice]
f=open('tmp2.pkl','w+')
import pickle
pickle.dump(best_units,f)
pickle.dump(fnames,f)
f.flush()
f.close()
else:
f=open('tmp2.pkl','r')
import pickle
best_units=pickle.load(f)
fnames=pickle.load(f)
#best_units = best_units[128:140]##
f.close()
for i in xrange(target_units.shape[0]):
print target_units[i].phone, best_units[i].phone, best_units[i].unit_id
#wavs=concatenate_units_overlap(best_units, fnames)
#gcis = gcis[(gcis>times[128]) * (gcis<times[140])]
#gcis -= times[128]
##$frm_time, frm_val = units2for(best_units, fnames, times, frm_time, frm_val)
frm_time *= 16000.0
gcis=units2gci(best_units, fnames)##$
gcis = np.array(gcis)
##$gcis *= 16000
gcis = gcis.astype(np.uint32)
old_times = np.array(times).copy()
old_times *= 16000.0
times=units2dur(best_units, fnames)##$
times = np.array(times)
##$times *= 16000
times = times.astype(np.uint32)
#times = times[128:141]##
#aa=times[0]##
#for i in range(len(times)):##
#times[i] -= aa##
#frm_time *= 16000
wavs=concatenate_units_psola_har_overlap(best_units, fnames, old_times, times, gcis, frm_time, frm_val, overlap=0.5)
#wavs=concatenate_units_nooverlap(best_units, fnames)
ftime, fval = get_formant(wavs, 16000)
        from scipy.io.wavfile import write as wwrite
wwrite('out.wav', 16000, wavs)
        print 'successfully saved out.wav'
|
hackaugusto/raiden
|
raiden/tests/utils/app.py
|
Python
|
mit
| 351
| 0.002849
|
import os
import os.path
from raiden.constants import RAIDEN_DB_VERSION
def database_from_privatekey(base_dir, app_number):
""" Format a database path based on the p
|
rivate key and app number. """
dbpath = os.path.join(base_dir, f"app{app_number}", f"v{RAIDEN_DB_VERSION}_log.db")
    os.makedirs(os.path.dirname(dbpath))
return dbpath
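# A hedged usage note: despite the name, the helper above only uses the app
# number, e.g. database_from_privatekey("/tmp/raiden-tests", 0) creates
# /tmp/raiden-tests/app0/ and returns
# "/tmp/raiden-tests/app0/v{RAIDEN_DB_VERSION}_log.db" (the base directory
# here is a placeholder).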
|
5monkeys/django-formapi
|
formapi/tests.py
|
Python
|
mit
| 8,355
| 0.001078
|
# coding=utf-8
from datetime import datetime, date, time
from decimal import Decimal
import json
import django
from django.forms import IntegerField
from django.test import TransactionTestCase, Client
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy
import pytz
from formapi.api import DjangoJSONEncoder
from formapi.compat import smart_u, get_user_model
from formapi.models import APIKey
from formapi.utils import get_sign
TOTAL_TESTS = 19
class SignedRequestTest(TransactionTestCase):
def setUp(self):
self.api_key = APIKey.objects.create(email="test@example.com")
self.api_key_revoked = APIKey.objects.create(email="test3@example.com", revoked=True)
self.client = Client()
self.user = get_user_model().objects.create(email="user@example.com", username="räksmörgås")
self.user.set_password("rosebud")
self.user.save()
self.authenticate_url = '/api/v1.0.0/user/authenticate/'
self.language_url = '/api/v1.0.0/comp/lang/'
def send_request(self, url, data, key=None, secret=None, req_method="POST"):
if not key:
key = self.api_key.key
if not secret:
secret = self.api_key.secret
sign = get_sign(secret, **data)
data['key'] = key
data['sign'] = sign
if req_method == 'POST':
return self.client.post(url, data)
elif req_method == 'GET':
return self.client.get(url, data)
def test_api_key(self):
smart_u(self.api_key)
def test_valid_auth(self):
response = self.send_request(self.authenticate_url, {'username': self.user.username, 'password': 'rosebud'})
self.assertEqual(response.status_code, 200)
        response_data = json.loads(smart_u(response.content))
        self.assertEqual(response_data['errors'], {})
self.assertTrue(response_data['success'])
self.assertIsNotNone(response_data['data'])
def test_invalid_call(self):
response = self.send_request('/api/v1.0.0/math/subtract/', {'username': self.user.username, 'password': 'rosebud'})
self.assertEqual(response.status_code, 404)
def test_unsigned_auth(self):
data = {'username': self.user.username, 'password': 'rosebud'}
response = self.client.post(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_invalid_sign(self):
data = {'username': self.user.username, 'password': 'rosebud'}
sign = get_sign(self.api_key.secret, **data)
data['key'] = self.api_key.key
data['sign'] = sign + "bug"
response = self.client.post(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_invalid_password(self):
data = {'username': self.user.username, 'password': '1337hax/x'}
response = self.send_request(self.authenticate_url, data)
self.assertEqual(response.status_code, 400)
response_data = json.loads(smart_u(response.content))
self.assertGreater(len(response_data['errors']), 0)
self.assertFalse(response_data['success'])
self.assertFalse(response_data['data'])
def test_invalid_parameters(self):
data = {'email': self.user.email, 'password': 'rosebud'}
response = self.send_request(self.authenticate_url, data)
self.assertEqual(response.status_code, 401)
def test_revoked_api_key(self):
data = {'username': self.user.username, 'password': 'rosebud'}
response = self.send_request(self.authenticate_url, data, self.api_key_revoked.key, self.api_key_revoked.secret)
self.assertEqual(response.status_code, 401)
def test_get_call(self):
data = {'username': self.user.username, 'password': '1337haxx'}
response = self.send_request(self.authenticate_url, data, req_method='GET')
self.assertEqual(response.status_code, 200)
def test_multiple_values(self):
data = {'languages': ['python', 'java']}
response = self.send_request(self.language_url, data, req_method='GET')
self.assertEqual(response.status_code, 200)
class HMACTest(TransactionTestCase):
def setUp(self):
self.api_key = APIKey.objects.create(email="test@example.com")
def test_parameter_sign(self):
# test unicode
url_params = u'first_name=mårten&last_name=superkebab'
dict_params = {'first_name': u'mårten', 'last_name': u'superkebab'}
self.assert_equal_signs(url_params, dict_params)
# test string
url_params = 'first_name=mårten&last_name=superkebab'
dict_params = {'first_name': 'mårten', 'last_name': 'superkebab'}
self.assert_equal_signs(url_params, dict_params)
# test integer
url_params = u'dividend=4&divisor=2'
dict_params = {'dividend': 4, 'divisor': 2}
self.assert_equal_signs(url_params, dict_params)
# test boolean
url_params = u'secure=True'
dict_params = {'secure': True}
self.assert_equal_signs(url_params, dict_params)
def assert_equal_signs(self, url_params, dict_params):
sign1 = get_sign(self.api_key.secret, querystring=url_params)
sign2 = get_sign(self.api_key.secret, **dict_params)
self.assertEqual(sign1, sign2)
class UnsignedRequestTest(TransactionTestCase):
def setUp(self):
self.client = Client()
self.divide_url = '/api/v1.0.0/math/divide/'
def test_ok_call(self):
data = {'dividend': 7, 'divisor': 2}
response = self.client.post(self.divide_url, data)
self.assertEqual(response.status_code, 200)
response_data = json.loads(smart_u(response.content))
self.assertEqual(response_data['data'], 3.5)
def test_invalid_call(self):
data = {'dividend': "a", 'divisor': 2}
response = self.client.post(self.divide_url, data)
self.assertEqual(response.status_code, 400)
response_data = json.loads(smart_u(response.content))
dividend_error = response_data['errors']['dividend']
self.assertEqual(dividend_error[0], smart_u(IntegerField().error_messages['invalid']))
self.assertGreater(len(response_data['errors']), 0)
self.assertFalse(response_data['success'])
self.assertFalse(response_data['data'])
def test_error_call(self):
data = {'dividend': "42", 'divisor': 0}
response = self.client.post(self.divide_url, data)
response_data = json.loads(smart_u(response.content))
self.assertFalse(response_data['success'])
class JSONEncoderTest(TransactionTestCase):
def setUp(self):
self.dumps = curry(json.dumps, cls=DjangoJSONEncoder)
def test_datetime_encode(self):
naive_micro_datetime = {'datetime': datetime.now(), 'int': 1}
self.dumps(naive_micro_datetime)
naive_second_datetime = {'datetime': datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
self.dumps(naive_second_datetime)
tz_utc_datetime = {'datetime': datetime.now().replace(tzinfo=pytz.UTC)}
self.dumps(tz_utc_datetime)
datetime_date = {'datetime': date.today()}
self.dumps(datetime_date)
naive_datetime_time = {'datetime': time()}
self.dumps(naive_datetime_time)
naive_datetime_micro_time = {'datetime': time(microsecond=100)}
self.dumps(naive_datetime_micro_time)
def test_decimal_encode(self):
decimal_data = {'decimal': Decimal("1.504")}
self.dumps(decimal_data)
def test_queryset(self):
user_manager = get_user_model().objects
user_manager.create(username="test", email="test@example.com")
queryset = {'queryset': user_manager.all()}
self.dumps(queryset)
self.dumps(user_manager.all())
def test_values_list(self):
if django.VERSION < (1, 9):
user_manager = get_user_model().objects
user_manager.create(username="test", email="test@example.com")
values = user_manager.values('id', 'email')
self.dumps(values)
values_list = user_manager.values_list('id', flat=True)
|
Jim-Rod/csv_summary
|
csv_summary.py
|
Python
|
mit
| 2,979
| 0.006378
|
'''
20140213
Import CSV Data - Dict
Save as JASON?
Basic Stats
Save to file
Find Key Words
Generate Reports...
Generate Plots
'''
import csv
import numpy as np
import matplotlib as mpl
from scipy.stats import nanmean
filename = '20140211_ING.csv'
###____________ Helper ___________###
def number_fields(data):
'''gets numeric fields from loaded csv data'''
names = data.dtype.names
dtypes = data.dtype
NUM_FIELDS = []
for i in range(len(names)):
if ('float' in str(dtypes[i])) or ('int' in str(dtypes[i])):
NUM_FIELDS.append(str(names[i]))
return NUM_FIELDS
def string_fields(data):
'''gets text fields from loaded csv data'''
names = data.dtype.names
dtypes = data.dtype
STRING_FIELDS = []
for i in range(len(names)):
if 'S' in str(dtypes[i]):
STRING_FIELDS.append(str(names[i]))
return STRING_FIELDS
def count_vals(array):
vals = len(array)
for i in array:
if np.isnan(i):
vals = vals - 1
return vals
def number_summary(data, num_fields):
'''take data and numeric feilds and do stuff'''
sum_dict = {}
for i in num_fields:
sum_dict[i] = {}
sum_dict[i]['Mean'] = nanmean(data[i])
sum_dict[i]['#Values'] = count_vals(data[i])
sum_dict[i]['Max'] = np.nanmax(data[i])
sum_dict[i]['Min'] = np.nanmin(data[i])
return sum_dict
###________ reports _________###
def basic_report(filename):
'''prints summary report form file'''
data = np.recfromcsv(filename)
NUM_COL = len(data.dtype.names)
NUM_ROW = len(data)
NAMES = data.dtype.names
DTYPES = data.dtype
print('--------------------')
print('---- CSV REPORT ----')
print('--------------------')
print('')
print('Filename: \t %s' % filename)
print('')
print('# records: \t %s' % NUM_ROW)
print('# columns: \t %s' % NUM_COL)
print('')
print('--------------------')
print('- name - data type ')
for i in range(len(NAMES)):
print('-- %s \t %s --' % (NAMES[i], DTYPES[i]))
print('--------------------')
def numeric_report(filename):
data = np.recfromcsv(filename)
fields = number_fields(data)
d = number_summary(data, fields)
print('------------------------')
print('---- NUMERIC REPORT ----')
print('------------------------')
print('')
print('Filename: \t %s' % filename)
print('')
    print('--------------------')
for i in fields:
print('FIELD: \t\t %s' % i)
print('#Values: \t %s' % d[i]['#Values'])
print('Max: \t\t %s' % d[i]['Max'])
print('Min: \t\t %s' % d[i]['Min'])
print('Mean: \t\t %s' % round(d[i]['Mean'], 2))
print('--------------------')
print('')
###________ main _________###
def main(filename):
basic_report(filename)
    print("")
numeric_report(filename)
main(filename)
|
hsoft/musicguru
|
qt/ignore_box.py
|
Python
|
bsd-3-clause
| 1,816
| 0.008811
|
# -*- coding: utf-8 -*-
# Created By: Virgil Dupras
# Created On: 2009-09-19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from PyQt4.QtCore import SIGNAL, Qt
from PyQt4.QtGui import QWidget, QHeaderView
from fs_model import FSModel, IGNORE_BOX_NAME
from ui.ignore_box_ui import Ui_IgnoreBox
class IgnoreBoxModel(FSModel):
def __init__(self, app):
        FSModel.__init__(self, app, app.board.ignore_box, IGNORE_BOX_NAME)
self.connect(self.app, SIGNAL('ignoreBoxChanged()'), self.ignoreBoxChanged)
#--- Events
    def ignoreBoxChanged(self):
self.reset()
class IgnoreBox(QWidget, Ui_IgnoreBox):
def __init__(self, app):
QWidget.__init__(self, None)
self.app = app
self.boxModel = IgnoreBoxModel(app)
self._setupUi()
self.connect(self.browserView.selectionModel(), SIGNAL('selectionChanged(QItemSelection,QItemSelection)'), self.browserSelectionChanged)
def _setupUi(self):
self.setupUi(self)
self.setWindowFlags(Qt.Tool)
self.browserView.setModel(self.boxModel)
h = self.browserView.header()
h.setResizeMode(QHeaderView.Fixed)
h.resizeSection(1, 120)
h.setResizeMode(0, QHeaderView.Stretch)
#--- Events
def browserSelectionChanged(self, selected, deselected):
selectedIndexes = self.browserView.selectionModel().selectedRows()
nodes = [index.internalPointer() for index in selectedIndexes]
items = [node.ref for node in nodes]
self.app.selectBoardItems(items)
|
WaveBlocks/WaveBlocksND
|
WaveBlocksND/Interface/EigentransformWavefunction.py
|
Python
|
bsd-3-clause
| 2,041
| 0.00294
|
"""The WaveBlocks Project
Compute the transformation to the eigen basis for wavefunction.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocksND import BlockFactory
from WaveBlocksND import WaveFunction
from WaveBlocksND import BasisTransformationWF
def transform_wavefunction_to_eigen(iomin, iomout, blockidin=0, blockidout=0):
|
"""Compute the transformation to the eigenbasis for a wavefunction.
Save the result back to a file.
:param iomin: An :py:class:`IOManager: instance providing the simulation data.
:param iomout: An :py:class:`IOManager: instance for saving the transformed data.
:param blockidin: The data block from which the values are read. Default is `0`.
:param blockidout: The data block to which the values are written. Default is `0`.
"""
parameters = iomin.load_parameters()
# Number of time steps we saved
timesteps = iomin.load_wavefunction_timegrid(blockid=blockidin)
nrtimesteps = timesteps.shape[0]
iomout.add_wavefunction(parameters, timeslots=nrtimesteps, blockid=blockidout)
# The grid on the domain
grid = BlockFactory().create_grid(parameters)
# The potential used
Potential = BlockFactory().create_potential(parameters)
# Basis transformator
BT = BasisTransformationWF(Potential)
BT.set_grid(grid)
# And two empty wavefunctions
WF = WaveFunction(parameters)
WF.set_grid(grid)
# Iterate over all timesteps
for i, step in enumerate(timesteps):
print(" Compute eigentransform at timestep %d" % step)
# Retrieve simulation data
values = iomin.load_wavefunction(timestep=step, blockid=blockidin)
values = [values[j, ...] for j in range(parameters["ncomponents"])]
WF.set_values(values)
# Project wavefunction values to eigenbasis
BT.transform_to_eigen(WF)
# Save the transformed values
iomout.save_wavefunction(WF.get_values(), timestep=step, blockid=blockidout)
|
|
emilydolson/forestcat
|
pyrobot/tools/sound.py
|
Python
|
agpl-3.0
| 6,306
| 0.014589
|
try:
import ossaudiodev
except:
print "ossaudiodev not installed"
ossaudiodev = None
try:
import FFT
except:
print "FFT not installed"
ossaudiodev = None
try:
import Numeric
except:
print "Numeric not installed"
ossaudiodev = None
import struct, math, time, threading, copy
def add(s1, s2):
    return minmax([(v1 + v2) for (v1, v2) in zip(s1, s2)])
def minmax(vector):
return [min(max(v,0),255) for v in vector]
def scale(sample, value):
    return minmax([((s - 128) * value) + 128 for s in sample])
def sine(freqs, seconds, volume = 1.0, sample_rate = 8000.0):
sample = [128] * int(sample_rate * seconds)
if type(freqs) == type(0):
freqs = [freqs]
for freq in freqs:
for n in range(len(sample)):
sample[n] += int(127 * math.sin(n * 2 * math.pi * freq/sample_rate) * volume)
return minmax(sample)
class SoundThread(threading.Thread):
def __init__(self, parent, name = "sound thread"):
threading.Thread.__init__(self, name = name)
self.parent = parent
self.event = threading.Event()
self.start()
def run(self):
while not self.event.isSet():
self.parent.lock.acquire()
buffer = copy.copy(self.parent.buffer)
self.parent.buffer = None
self.parent.lock.release()
if buffer != None:
self.parent.dev.write("".join(map(chr,buffer)))
self.parent.dev.flush()
self.event.wait(.001)
def join(self, timeout=None):
self.event.set()
threading.Thread.join(self, timeout)
class SoundDevice:
def __init__(self, device, async = 0, cache = 1):
self.device = device
self.async = async
self.cache = cache
self.cacheDict = {}
self.status = "closed"
self.number_of_channels= 1
self.sample_rate= 8000
self.sample_width= 1
self.minFreq = 20
self.maxFreq = 3500
self.debug = 0
self.buffer = None
if ossaudiodev != None:
self.format = ossaudiodev.AFMT_U8
if self.debug:
self.setFile("770.txt")
if self.async:
self.lock = threading.Lock()
self.thread = SoundThread(self)
def initialize(self, mode):
if ossaudiodev == None: return
self.dev = ossaudiodev.open("/dev/dsp", mode)
self.dev.setparameters(self.format,
self.number_of_channels,
self.sample_rate)
self.status = mode
def play(self, sample):
"""
"""
if ossaudiodev == None: return
if self.status != "w":
self.initialize("w")
if self.async:
self.lock.acquire()
self.buffer = sample
self.lock.release()
else:
self.dev.write("".join(map(chr,sample)))
self.dev.flush()
def playTone(self, freqs, seconds, volume = 1.0):
"""
freq example: playTone([550,400], .1, volume=.5) # middle C for .1 seconds, half volume
"""
if ossaudiodev == None: return
if type(freqs) == type(0):
freqs = [freqs]
if self.status != "w":
self.initialize("w")
sample = [128] * int(self.sample_rate * seconds)
for freq in freqs:
if self.cache and (freq,seconds) in self.cacheDict:
sample = self.cacheDict[(freq,seconds)]
else:
for n in range(len(sample)):
sample[n] = min(max(sample[n] + int(127 * math.sin(n * 2 * math.pi * freq/self.sample_rate) * volume), 0),255)
self.cacheDict[(freq,seconds)] = sample
if self.async:
self.lock.acquire()
self.buffer = sample
self.lock.release()
else:
self.dev.write("".join(map(chr,sample)))
self.dev.flush()
def read(self, seconds):
if ossaudiodev == None: return
if self.status != "r":
self.initialize("r")
buffer = self.dev.read(int(self.sample_rate * seconds))
size = len(buffer)
return struct.unpack(str(size) + "B", buffer)
def setFile(self, filename):
if ossaudiodev == None: return
self.filename = filename
self.fp = open(self.filename, "r")
def readFile(self, seconds):
if ossaudiodev == None: return
data = None
try:
data = eval(self.fp.readline())
except:
self.fp = open(self.filename, "r")
try:
data = eval(self.fp.readline())
except:
print "Failed reading file '%s'" % self.filename
time.sleep(seconds)
return data[:int(seconds * self.sample_rate)]
def getFreq(self, seconds):
# change to read from the buffer, rather than block
if ossaudiodev == None: return
if self.debug:
data = self.readFile(1)
else:
data = self.read(seconds)
transform = FFT.real_fft(data).real
minFreqPos = self.minFreq
maxFreqPos = self.maxFreq
freq = Numeric.argmax(transform[1+minFreqPos:maxFreqPos])
value = transform[1+minFreqPos:maxFreqPos][freq]
domFreq = (freq + self.minFreq) / seconds
if self.debug and abs(value) > 8000 and self.minFreq < domFreq < self.maxFreq:
print "Frequence:", domFreq, "Value:", value, "Volume:", transform[0]
return (domFreq, value, transform[0])
def close(self):
if ossaudiodev == None: return
if self.status != "closed":
self.dev.close()
self.status = "closed"
if __name__ == "__main__":
sd = SoundDevice("/dev/dsp", async = 1)
sd.playTone(500, 1)
## DTMF Tones
## 1209 Hz 1336 Hz 1477 Hz 1633 Hz
## ABC DEF
## 697 Hz 1 2 3 A
## GHI JKL MNO
## 770 Hz 4 5 6 B
## PRS TUV WXY
## 852 Hz 7 8 9 C
## oper
## 941 Hz * 0 # D
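## Added usage note (not part of the original file): a DTMF digit is the mix
## of its row and column frequencies from the table above, and playTone()
## accepts a list of frequencies, so digit "5" (770 Hz row + 1336 Hz column)
## could be played, assuming an OSS /dev/dsp device is available, with:
##     sd.playTone([770, 1336], 0.2)   # 0.2 s burst of DTMF "5"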
|
kaarolch/ansible
|
test/units/parsing/vault/test_vault_editor.py
|
Python
|
gpl-3.0
| 6,376
| 0.000941
|
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# (c) 2014, James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import tempfile
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible import errors
from ansible.parsing.vault import VaultLib
from ansible.parsing.vault import VaultEditor
from ansible.module_utils._text import to_bytes, to_text
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
v10_data = """$ANSIBLE_VAULT;1.0;AES
53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
83c62ffb04c2512995e815de4b4d29ed"""
v11_data = """$ANSIBLE_VAULT;1.1;AES256
62303130653266653331306264616235333735323636616539316433666463323964623162386137
3961616263373033353631316333623566303532663065310a393036623466376263393961326530
64336561613965383835646464623865663966323464653236343638373165343863623638316664
3631633031323837340a396530313963373030343933616133393566366137363761373930663833
3739"""
class TestVaultEditor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_methods_exist(self):
v = VaultEditor(None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
@patch.object(VaultEditor, '_editor_shell_command')
def test_create_file(self, mock_editor_shell_command):
def sc_side_effect(filename):
return ['touch', filename]
mock_editor_shell_command.side_effect = sc_side_effect
tmp_file = tempfile.NamedTemporaryFile()
os.unlink(tmp_file.name)
ve = VaultEditor("ansible")
ve.create_file(tmp_file.name)
self.assertTrue(os.path.exists(tmp_file.name))
def test_decrypt_1_0(self):
# Skip testing decrypting 1.0 files if we don't have access to AES, KDF or Counter.
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v10_file.name)
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = to_text(f.read())
f.close()
os.unlink(v10_file.name)
assert error_hit is False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v11_file = tempfile.NamedTemporaryFile(delete=False)
with v11_file as f:
f.write(to_bytes(v11_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v11_file.name)
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v11_file.name, "rb")
fdata = to_text(f.read())
f.close()
os.unlink(v11_file.name)
assert error_hit is False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_rekey_migration(self):
        # Skip testing rekeying files if we don't have access to AES, KDF or Counter.
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
        v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file(v10_file.name, 'ansible2')
except errors.AnsibleError:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = f.read()
f.close()
assert error_hit is False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError:
error_hit = True
os.unlink(v10_file.name)
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit is False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == b"foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
|
kumarisneha/practice_repo
|
techgig_rstrip.py
|
Python
|
mit
| 83
| 0.024096
|
def main():
|
a=raw_input()
|
print a.lstrip()
print "Hello world"
main()
|
ericam/sidesaddle
|
modules/typogrify.py
|
Python
|
mit
| 620
| 0.001613
|
from __future__ import absolute_import
from jinja2 import Markup
from rstblog.programs import RSTProgram
import typogrify
class TypogrifyRSTProgram(RSTProgram):
def get_fragments(self):
if self._fragment_cache is not None:
return self._fragment_cache
with self.context.open_source_file() as f:
self.get_header(f)
rv = self.context.render_rst(f.read().decode('utf-8'))
        rv['fragment'] = Markup(typogrify.typogrify(rv['fragment']))
self._fragment_cache = rv
return rv
def setup(builder):
    builder.programs['rst'] = TypogrifyRSTProgram
|
abelcarreras/aiida_extensions
|
plugins/launcher/launch_lammps_md_si.py
|
Python
|
mit
| 2,634
| 0.002278
|
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory
import numpy as np
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
codename = 'lammps_md@boston'
############################
# Define input parameters #
############################
a = 5.404
cell = [[a, 0, 0],
[0, a, 0],
[0, 0, a]]
symbols=['Si'] * 8
scaled_positions = [(0.875, 0.875, 0.875),
(0.875, 0.375, 0.375),
(0.375, 0.875, 0.375),
(0.375, 0.375, 0.875),
(0.125, 0.125, 0.125),
(0.125, 0.625, 0.625),
(0.625, 0.125, 0.625),
(0.625, 0.625, 0.125)]
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
structure.store()
# Silicon(C) Tersoff
tersoff_si = {'Si Si Si ': '3.0 1.0 1.7322 1.0039e5 16.218 -0.59826 0.78734 1.0999e-6 1.7322 471.18 2.85 0.15 2.4799 1830.8'}
potential ={'pair_style': 'tersoff',
'data': tersoff_si}
lammps_machine = {
'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16}
parameters_md = {'timestep': 0.001,
'temperature' : 300,
'thermostat_variable': 0.5,
'equilibrium_steps': 100,
'total_steps': 2000,
'dump_rate': 1}
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=lammps_machine)
calc.label = "test lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_potential(ParameterData(dict=potential))
calc.use_parameters(ParameterData(dict=parameters_md))
test_only = False
if test_only: # It will not be submitted
import os
subfolder, script_filename = calc.submit_test()
print "Test_submit for calculation (u
|
uid='{}')".format(calc.uuid)
print "Submit file in {}".format(os.path.join(
os.path.relpath(subfolder.abspath),
script_filename))
else:
calc.store_all()
print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc.uuid, calc.dbnode.pk)
calc.submit()
print "submitted calculation; calc=Calculation(uuid='{}') #
|
ID={}".format(
calc.uuid, calc.dbnode.pk)
|
DataDog/integrations-extras
|
riak_repl/setup.py
|
Python
|
bsd-3-clause
| 2,385
| 0.000839
|
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'riak_repl', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
def parse_pyproject_array(name):
import os
import re
from ast import literal_eval
pattern = r'^{} = (\[.*?\])$'.format(name)
with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
# Windows \r\n prevents match
contents = '\n'.join(line.rstrip() for line in f.readlines())
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
return literal_eval(array)
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
name='datadog-riak_repl',
version=ABOUT['__version__'],
description='The Riak_repl check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent riak_repl check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='Britt Treece',
author_email='britt.treece@gmail.com',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks', 'datadog_checks.riak_repl'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
    extras_require={'deps': parse_pyproject_array('deps')},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
khosrow/metpx
|
sundew/doc/pds_conversion/routing_step1.py
|
Python
|
gpl-2.0
| 1,705
| 0.039883
|
import os,sys,re
# EXTRACTING ALL FILENAMES AND THEIR CLIENTS
# ---------------------------------------------------
# read in the log
# ---------------------------------------------------
f=open(sys.argv[1],'rb')
data=f.readlines()
f.close()
n=0
t=len(data)
clients = []
filename = None
for l in data :
n = n + 1
parts = l.split()
# new file to ingest
if parts[6] == 'Read' :
        # all products will have its first client as "allproducts"
if filename != None :
if len(clients) == 0 :
clients.append('allproducts')
else :
clients.sort()
clients.insert(0,'allproducts')
print("%s %s" % (filename,','.join(clients)) )
filepath = parts[-1]
filename = filepath.split('/')[-1]
fparts = filename.split(':')
        # :20070409000009 trailing get rid of it
if fparts[-1][:2] == '20' and len(fparts[-1]) == 14 :
fparts = fparts[:-1]
# '::' trailing get rid of it
if fparts[-1] == '' :
fparts = fparts[:-1]
filename = ':'.join(fparts)
clients = []
if parts[6] == 'Written' :
filepath = parts[-1]
client = 'conversion_' +filepath.split('/')[1]
if client == 'conversion_ppmtogif' : client = 'cvt_togif'
if client == 'conversion_rawtodfx' : continue
clients.append(client)
if parts[6] == 'create_link:' :
filepath = parts[-1]
client = filepath.split('/')[4]
clients.append(client)
if len(clients) == 0 :
clients.append('allproducts')
else :
clients.sort()
clients.insert(0,'allproducts')
print("%s %s" % (filename,','.join(clients)) )
|
nortd/bomfu
|
admin/users.py
|
Python
|
lgpl-3.0
| 4,033
| 0.002727
|
# -*- coding: utf-8 -*-
import webapp2
from boilerplate import models
from boilerplate import forms
from boilerplate.handlers import BaseHandler
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from google.appengine.api import users as googleusers
from collections import OrderedDict, Counter
from wtforms import fields
class Logout(BaseHandler):
def get(self):
self.redirect(googleusers.create_logout_url(dest_url=self.uri_for('home')))
class Geochart(BaseHandler):
def get(self):
users = models.User.query().fetch(projection=['country'])
users_by_country = Counter()
for user in users:
if user.country:
users_by_country[user.country] += 1
params = {
"data": users_by_country.items()
}
return self.render_template('admin/geochart.html', **params)
class EditProfileForm(forms.EditProfileForm):
activated = fields.BooleanField('Activated')
class List(BaseHandler):
def get(self):
p = self.request.get('p')
q = self.request.get('q')
c = self.request.get('c')
forward = True if p not in ['prev'] else False
cursor = Cursor(urlsafe=c)
if q:
qry = models.User.query(ndb.OR(models.User.last_name == q,
models.User.email == q,
models.User.username == q))
else:
qry = models.User.query()
PAGE_SIZE = 5
if forward:
users, next_cursor, more = qry.order(models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
if next_cursor and more:
self.view.next_cursor = next_cursor
if c:
self.view.prev_cursor = cursor.reversed()
else:
            users, next_cursor, more = qry.order(-models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
users = list(reversed(users))
if next_cursor and more:
self.view.prev_cursor = next_cursor
self.view.next_cursor = cursor.reversed()
|
def pager_url(p, cursor):
params = OrderedDict()
if q:
params['q'] = q
if p in ['prev']:
params['p'] = p
if cursor:
params['c'] = cursor.urlsafe()
return self.uri_for('user-list', **params)
self.view.pager_url = pager_url
self.view.q = q
params = {
"list_columns": [('username', 'Username'),
('last_name', 'Last Name'),
('email', 'E-Mail'),
('country', 'Country')],
"users" : users,
"count" : qry.count()
}
# FIXME: admin_user should probably go into BaseHandler
params['admin_user'] = googleusers.is_current_user_admin()
return self.render_template('admin/users.html', **params)
class Edit(BaseHandler):
def get_or_404(self, user_id):
try:
user = models.User.get_by_id(long(user_id))
if user:
return user
except ValueError:
pass
self.abort(404)
def edit(self, user_id):
if self.request.POST:
user = self.get_or_404(user_id)
if self.form.validate():
self.form.populate_obj(user)
user.put()
self.add_message("Changes saved!", 'success')
return self.redirect_to("user-edit", user_id=user_id)
else:
self.add_message("Could not save changes!", 'error')
else:
user = self.get_or_404(user_id)
self.form.process(obj=user)
params = {
'user' : user
}
return self.render_template('admin/edituser.html', **params)
@webapp2.cached_property
def form(self):
return EditProfileForm(self)
|
erasmospunk/electrumx
|
tests/server/test_api.py
|
Python
|
mit
| 2,877
| 0
|
import asyncio
from unittest import mock
from aiorpcx import RPCError
from server.env import Env
from server.controller import Controller
loop = asyncio.get_event_loop()
def set_env():
env = mock.create_autospec(Env)
env.coin = mock.Mock()
env.loop_policy = None
env.max_sessions = 0
env.max_subs = 0
env.max_send = 0
env.bandwidth_limit = 0
env.identities = ''
env.tor_proxy_host = env.tor_proxy_port = None
env.peer_discovery = env.PD_SELF = False
env.daemon_url = 'http://localhost:8000/'
return env
async def coro(res):
return res
def raise_exception(msg):
raise RPCError(1, msg)
def ensure_text_exception(test, exception):
res = err = None
try:
res = loop.run_until_complete(test)
except Exception as e:
err = e
assert isinstance(err, exception), (res, err)
def test_transaction_get():
async def test_verbose_ignore_by_backend():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro('11'*32)
res = await sut.transaction_get('ff'*32, True)
assert res == '11'*32
async def test_verbose_ok():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = {
"hex": "00"*32,
"blockhash": "ff"*32
}
        sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
response = {
"hex": "00"*32,
"blockhash": None
}
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
async def test_no_verbose():
|
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = 'cafebabe'*64
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32)
assert res == response
async def test_verbose_failure():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro(
raise_exception('some unhandled error'))
await sut.transaction_get('ff' * 32, True)
async def test_wrong_txhash():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
await sut.transaction_get('cafe')
sut.daemon_request.assert_not_called()
loop.run_until_complete(asyncio.gather(
*[
test_verbose_ignore_by_backend(),
test_verbose_ok(),
test_no_verbose()
]
))
for error_test in [test_verbose_failure, test_wrong_txhash]:
ensure_text_exception(error_test(), RPCError)
|
stadelmanma/netl-ap-map-flow
|
apmapflow/data_processing/histogram_logscale.py
|
Python
|
gpl-3.0
| 2,486
| 0
|
"""
================================================================================
Logscaled Histogram
================================================================================
| Calculates a logarithmically spaced histogram for a data map.
| Written By: Matthew Stadelman
| Date Written: 2016/03/07
| Last Modifed: 2016/10/20
"""
import scipy as sp
from .histogram import Histogram
class HistogramLogscale(Histogram):
r"""
Performs a histogram where the bin limits are logarithmically spaced
based on the supplied scale factor. If there are negative values then
the first bin contains everything below 0, the next bin will contain
everything between 0 and 1.
kwargs include:
scale_fact - numeric value to generate axis scale for bins. A
scale fact of 10 creates bins: 0-1, 1-10, 10-100, etc.
"""
def __init__(self, field, **kwargs):
super().__init__(field)
self.args.update(kwargs)
self.output_key = 'hist_logscale'
self.action = 'histogram_logscale'
@classmethod
def _add_subparser(cls, subparsers, parent):
r"""
Adds a specific action based sub-parser to the supplied arg_parser
instance.
"""
parser = subparsers.add_parser(cls.__name__,
aliases=['histlog'],
parents=[parent],
help=cls.__doc__)
#
parser.add_argument('scale_fact', type=float, nargs='?', default=10.0,
help='base to generate logscale from')
parser.set_defaults(func=cls)
def define_bins(self, **kwargs):
r"""
This defines the bins for a logscaled histogram
"""
self.data_vector.sort()
sf = self.args['scale_fact']
num_bins = int(sp.logn(sf, self.data_vector[-1]) + 1)
#
# generating initial bins from 1 - sf**num_bins
low = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[:-1]
high = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[1:]
#
# Adding "catch all" bins for anything between 0 - 1 and less than 0
if self.data_vector[0] < 1.0:
low.insert(0, 0.0)
high.insert(0, 1.0)
if self.data_vector[0] < 0.0:
low.insert(0, self.data_vector[0])
high.insert(0, 0.0)
#
self.bins = [bin_ for bin_ in zip(low, high)]
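        # Added worked example (not part of the original module): with the
        # default scale_fact of 10 and a data vector ranging from 0.5 to
        # roughly 5000, num_bins = int(log10(5000) + 1) = 4, so define_bins()
        # yields the logarithmic bins (1, 10), (10, 100), (100, 1000),
        # (1000, 10000), plus the catch-all bin (0, 1) for values below 1.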
|
dergraaf/xpcc
|
tools/device_file_generator/avr_generator.py
|
Python
|
bsd-3-clause
| 1,677
| 0.019678
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import sys
import glob
# add python module logger to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'logger'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'device_files'))
from logger import Logger
from dfg.device import Device
from dfg.merger import DeviceMerger
from dfg.avr.avr_reader import AVRDeviceReader
from dfg.avr.avr_writer import AVRDeviceWriter
if __name__ == "__main__":
"""
Some test code
"""
level = 'info'
logger = Logger(level)
devices = []
for arg in sys.argv[1:]:
if arg in ['error', 'warn', 'info', 'debug', 'disabled']:
level = arg
            logger.setLogLevel(level)
continue
xml_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'AVR_devices', (arg + '*'))
files = glob.glob(xml_path)
for file in files:
# deal with this here, rather than rewrite half the name merging
if os.path.basename(file) != "ATtiny28.xml":
part = AVRDeviceReader(file, logger)
device = Device(part, logger)
devices.append(device)
merger = DeviceMerger(devices, logger)
merger.mergedByPlatform('avr')
folder = os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'xpcc', 'architecture', 'platform', 'devices', 'avr')
for dev in merger.mergedDevices:
writer = AVRDeviceWriter(dev, logger)
writer.write(folder)
|
wagnerand/zamboni
|
mkt/feed/serializers.py
|
Python
|
bsd-3-clause
| 2,948
| 0.002035
|
from rest_framework import relations, serializers
import amo
import mkt.carriers
import mkt.regions
from addons.models import Category
from mkt.api.fields import SplitField, TranslationSerializerField
from mkt.api.serializers import URLSerializerMixin
from mkt.collections.serializers import (CollectionSerializer, SlugChoiceField,
SlugModelChoiceField)
from mkt.submit.serializers import PreviewSerializer
from mkt.webapps.api import AppSerializer
from .models import FeedApp, FeedItem
class FeedAppSerializer(URLSerializerMixin, serializers.ModelSerializer):
app = SplitField(relations.PrimaryKeyRelatedField(required=True),
AppSerializer())
description = TranslationSerializerField(required=False)
preview = SplitField(relations.PrimaryKeyRelatedField(required=False),
PreviewSerializer())
    pullquote_attribution = TranslationSerializerField(required=False)
pullquote_rating = serializers.IntegerField(required=False)
pullquote_text = TranslationSerializerField(required=False)
class Meta:
fields = ('app', 'description', 'id', 'preview',
                  'pullquote_attribution', 'pullquote_rating', 'pullquote_text',
'url')
model = FeedApp
url_basename = 'feedapp'
class FeedItemSerializer(URLSerializerMixin, serializers.ModelSerializer):
carrier = SlugChoiceField(required=False,
choices_dict=mkt.carriers.CARRIER_MAP)
region = SlugChoiceField(required=False,
choices_dict=mkt.regions.REGION_LOOKUP)
category = SlugModelChoiceField(required=False,
queryset=Category.objects.filter(type=amo.ADDON_WEBAPP))
item_type = serializers.SerializerMethodField('get_item_type')
# Types of objects that are allowed to be a feed item.
collection = SplitField(relations.PrimaryKeyRelatedField(required=False),
CollectionSerializer())
class Meta:
fields = ('carrier', 'category', 'collection', 'id', 'item_type',
'region', 'url')
item_types = ('collection',)
model = FeedItem
url_basename = 'feeditem'
def validate(self, attrs):
"""
Ensure that at least one object type is specified.
"""
item_changed = any(k for k in self.Meta.item_types if k in attrs.keys())
num_defined = sum(1 for item in self.Meta.item_types if attrs.get(item))
if item_changed and num_defined != 1:
message = ('A valid value for exactly one of the following '
'parameters must be defined: %s' % ','.join(
self.Meta.item_types))
raise serializers.ValidationError(message)
return attrs
def get_item_type(self, obj):
for item_type in self.Meta.item_types:
if getattr(obj, item_type):
return item_type
return
|
jhosmer/PySmile
|
tests/pysmile_tests.py
|
Python
|
apache-2.0
| 11,679
| 0.004196
|
#!/usr/bin/env python
import os
import glob
import unittest
import pysmile
import json
__author__ = 'Jonathan Hosmer'
class PySmileTestDecode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.json_dir, 'json-org-sample3.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample4(self):
s = os.path.join(self.smile_dir, 'json-org-sample4.smile')
j = os.path.join(self.json_dir, 'json-org-sample4.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample5(self):
s = os.path.join(self.smile_dir, 'json-org-sample5.smile')
j = os.path.join(self.json_dir, 'json-org-sample5.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_4k(self):
s = os.path.join(self.smile_dir, 'numbers-int-4k.smile')
j = os.path.join(self.json_dir, 'numbers-int-4k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_64k(self):
s = os.path.join(self.smile_dir, 'numbers-int-64k.smile')
j = os.path.join(self.json_dir, 'numbers-int-64k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test1(self):
s = os.path.join(self.smile_dir, 'test1.smile')
j = os.path.join(self.json_dir, 'test1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
|
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test2(self):
        s = os.path.join(self.smile_dir, 'test2.smile')
j = os.path.join(self.json_dir, 'test2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
class PySmileTestEncode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.
|
atomman/nmrglue
|
examples/jbnmr_examples/s4_2d_plotting/plot_2d_pipe_spectrum.py
|
Python
|
bsd-3-clause
| 929
| 0
|
import nmrglue as ng
import matplotlib.pyplot as plt
# read in data
dic, data = ng.pipe.read("test.ft2")
# find PPM limits along each axis
uc_15n = ng.pipe.make_uc(dic, data, 0)
|
uc_13c = ng.pipe.make_uc(dic, data, 1)
x0, x1 = uc_13c.ppm_limits()
y0, y1 = uc_15n.ppm_limits()
# plot the spectrum
fig = plt.figure(figsize=(10, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
cl = [8.5e4 * 1.30 ** x for x in range(20)]
ax.contour(data, cl, colors='blue', extent=(x0, x1, y0, y1), linewidths=0.5)
# add 1D slices
x = uc_13c.ppm_scale()
s1 = data[uc_15n("105.52ppm"), :]
s2 = data[uc_15n("115.85ppm"), :]
s3 = data[uc_15n("130.07ppm"), :]
ax.plot(x, -s1 / 8e4 + 105.52, 'k-')
ax.plot(x, -s2 / 8e4 + 115.85, 'k-')
ax.plot(x, -s3 / 8e4 + 130.07, 'k-')
# label the axis and save
ax.set_xlabel("13C ppm", size=20)
ax.set_xlim(183.5, 167.5)
ax.set_ylabel("15N ppm", size=20)
ax.set_ylim(139.5, 95.5)
fig.savefig("spectrum_2d.png")
|
tsavola/concrete
|
python/concrete/tools.py
|
Python
|
lgpl-2.1
| 3,421
| 0.030108
|
#!/usr/bin/env python3
#
# Copyright (c) 2012 Timo Savola
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
import argparse
import struct
class Arena(object):
class Node(object):
def __init__(self, arena, address):
self.arena = arena
self.address = address
def __eq__(self, other):
return self.address == other.address
def __lt__(self, other):
return self.address < other.address
def _uint32(self, offset):
return self.arena._uint32(self.address + offset)
def _data(self, offset, size):
return self.arena._data(self.address + offset, size)
@property
def end(self):
return self.address + self.size
class Allocated(Node):
def __init__(self, arena, address, size):
super(Arena.Allocated, self).__init__(arena, address)
self.size = size
def __str__(self):
return "Allocated space at %u: %r" % (self.address, self.data)
@property
def data(self):
return self._data(0, self.size)
class Free(Node):
def __str__(self):
return "Free space from %u to %u" % (self.address, self.address + self.size)
@property
def size(self):
return self._uint32(0)
@property
def next_node(self):
address = self.next_address
return self.arena.free_nodes[address] if address else None
@property
def next_address(self):
return self._uint32(4)
_initial_address = 8
def __init__(self, data):
self.data = data
def init(self):
self.allocations = {}
self.free_nodes = {}
if self.size < self._initial_address:
return
last_node = None
next_addr = self._uint32(0)
while next_addr:
self.__init_allocated(last_node, next_addr)
node = self.Free(self, next_addr)
self.free_nodes[node.address] = node
last_node = node
next_addr = node.next_address
assert not next_addr or last_node.address < next_addr
self.__init_allocated(last_node, self.size)
def __init_allocated(self, prev_node, end):
address = prev_node.end if prev_node else self._initial_address
self.allocations[address] = self.Allocated(self, address, end - address)
@property
def size(self):
return len(self.data)
@property
def nodes(self):
nodes = {}
nodes.update(self.allocations)
nodes.update(self.free_nodes)
return nodes
def dump(self):
for node in sorted(self.nodes.values()):
print(node)
def _uint32(self, address):
return struct.unpack("<I", self._data(address, 4))[0]
    def _data(self, address, size):
if address + size > len(self.data):
raise Exception("address %u size %u out of arena (size %u)" % (address, size, len(self.data)))
return self.data[address:address+size]
def main():
parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
arena_parser = subparsers.add_parser("arena")
arena_parser.set_defaults(func=arena_command)
arena_parser.add_argument("filename", type=str, metavar="FILE")
arena_parser.add_argument("--dump", action="store_true")
args = parser.parse_args()
args.func(args)
def arena_command(args):
error = None
with open(args.filename, "rb") as file:
arena = Arena(file.read())
try:
arena.init()
except Exception as e:
error = e
if args.dump:
arena.dump()
if error:
raise error
if __name__ == "__main__":
main()
|
astropy/photutils
|
photutils/aperture/tests/test_photometry.py
|
Python
|
bsd-3-clause
| 32,618
| 0
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometry module.
"""
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_less)
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import NDData, StdDevUncertainty
from astropy.table import Table
import astropy.units as u
from astropy.wcs import WCS
from ..photometry import aperture_photometry
from ..circle import (CircularAperture, CircularAnnulus, SkyCircularAperture,
SkyCircularAnnulus)
from ..ellipse import (EllipticalAperture, EllipticalAnnulus,
SkyEllipticalAperture, SkyEllipticalAnnulus)
from ..rectangle import (RectangularAperture, RectangularAnnulus,
SkyRectangularAperture, SkyRectangularAnnulus)
from ...datasets import get_path, make_4gaussians_image, make_wcs, make_gwcs
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB # noqa
APERTURE_CL = [CircularAperture,
CircularAnnulus,
EllipticalAperture,
EllipticalAnnulus,
RectangularAperture,
RectangularAnnulus]
TEST_APERTURES = list(zip(APERTURE_CL, ((3.,),
(3., 5.),
(3., 5., 1.),
(3., 5., 4., 12./5., 1.),
|
(5, 8, np.pi / 4),
(8, 12, 8, 16./3., np.pi / 8))))
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_outside_array(aperture_class, params):
    data = np.ones((10, 10), dtype=float)
aperture = aperture_class((-60, 60), *params)
fluxtable = aperture_photometry(data, aperture)
# aperture is fully outside array:
assert np.isnan(fluxtable['aperture_sum'])
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_inside_array_simple(aperture_class, params):
data = np.ones((40, 40), dtype=float)
aperture = aperture_class((20., 20.), *params)
table1 = aperture_photometry(data, aperture, method='center',
subpixels=10)
table2 = aperture_photometry(data, aperture, method='subpixel',
subpixels=10)
table3 = aperture_photometry(data, aperture, method='exact', subpixels=10)
true_flux = aperture.area
assert table1['aperture_sum'] < table3['aperture_sum']
if not isinstance(aperture, (RectangularAperture, RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_aperture_plots(aperture_class, params):
# This test should run without any errors, and there is no return
# value.
# TODO: check the content of the plot
aperture = aperture_class((20., 20.), *params)
aperture.plot()
def test_aperture_pixel_positions():
pos1 = (10, 20)
pos2 = [(10, 20)]
pos3 = u.Quantity((10, 20), unit=u.pixel)
pos4 = u.Quantity([(10, 20)], unit=u.pixel)
r = 3
ap1 = CircularAperture(pos1, r)
ap2 = CircularAperture(pos2, r)
ap3 = CircularAperture(pos3, r)
ap4 = CircularAperture(pos4, r)
assert not np.array_equal(ap1.positions, ap2.positions)
assert_allclose(ap1.positions, ap3.positions)
assert_allclose(ap2.positions, ap4.positions)
class BaseTestAperturePhotometry:
def test_array_error(self):
# Array error
error = np.ones(self.data.shape, dtype=float)
if not hasattr(self, 'mask'):
mask = None
true_error = np.sqrt(self.area)
else:
mask = self.mask
# 1 masked pixel
true_error = np.sqrt(self.area - 1)
table1 = aperture_photometry(self.data,
self.aperture, method='center',
mask=mask, error=error)
table2 = aperture_photometry(self.data,
self.aperture,
method='subpixel', subpixels=12,
mask=mask, error=error)
table3 = aperture_photometry(self.data,
self.aperture, method='exact',
mask=mask, error=error)
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], self.true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
assert np.all(table1['aperture_sum'] < table3['aperture_sum'])
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum_err'], true_error)
assert_allclose(table2['aperture_sum_err'],
table3['aperture_sum_err'], atol=0.1)
assert np.all(table1['aperture_sum_err'] < table3['aperture_sum_err'])
class TestCircular(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.true_flux = self.area
class TestCircularArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestCircularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.true_flux = self.area
class TestCircularAnnulusArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestElliptical(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a = 10.
b = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAperture(position, a, b, theta=theta)
self.area = np.pi * a * b
self.true_flux = self.area
class TestEllipticalAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a_in = 5.
a_out = 8.
b_out = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAnnulus(position, a_in, a_out, b_out,
theta=theta)
self.area = (np.pi * (a_out * b_out) -
np.pi * (a_in * b_out * a_in / a_out))
self.true_flux = self.area
class TestRectangularAperture(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
h = 5.
w = 8.
theta = np.pi / 4.
self.aperture = RectangularAperture(position, w, h, theta=theta)
self.area = h * w
self.true_flux = self.area
class TestRectangularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
|
FinalAngel/django-cms
|
cms/tests/test_cache.py
|
Python
|
bsd-3-clause
| 37,055
| 0.001538
|
# -*- coding: utf-8 -*-
import time
from django.conf import settings
from django.template import Context
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page, create_title
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.cache.placeholder import (
_get_placeholder_cache_version_key,
_get_placeholder_cache_version,
_set_placeholder_cache_version,
_get_placeholder_cache_key,
set_placeholder_cache,
get_placeholder_cache,
clear_placeholder_cache,
)
from cms.exceptions import PluginAlreadyRegistered
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import (
DateTimeCacheExpirationPlugin,
|
    LegacyCachePlugin,
NoCachePlugin,
SekizaiPlugin,
TimeDeltaCacheExpirationPlugin,
TTLCacheExpirationPlugin,
VaryCacheOnPlugin,
)
|
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting
from cms.utils.helpers import get_timezone_name
class CacheTestCase(CMSTestCase):
def tearDown(self):
from django.core.cache import cache
super(CacheTestCase, self).tearDown()
cache.clear()
def setUp(self):
from django.core.cache import cache
super(CacheTestCase, self).setUp()
cache.clear()
def test_cache_placeholder(self):
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(5, 9)):
self.render_template_obj(template, {}, request)
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = False
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(1):
self.render_template_obj(template, {}, request)
# toolbar
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.show_toolbar = True
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(3):
self.render_template_obj(template, {}, request)
page1.publish('en')
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict(
CMS_PAGE_CACHE=False
)
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(13, 25)):
self.client.get('/en/')
with self.assertNumQueries(FuzzyInt(5, 11)):
self.client.get('/en/')
overrides['CMS_PLACEHOLDER_CACHE'] = False
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(7, 15)):
self.client.get('/en/')
def test_no_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot='body')[0]
placeholder2 = page1.placeholders.filter(slot='right-column')[0]
try:
plugin_pool.register_plugin(NoCachePlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, 'TextPlugin', 'en', body="English")
add_plugin(placeholder2, 'TextPlugin', 'en', body="Deutsch")
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict()
if getattr(settings, 'MIDDLEWARE', None):
overrides['MIDDLEWARE'] = [mw for mw in settings.MIDDLEWARE if mw not in exclude]
else:
overrides['MIDDLEWARE_CLASSES'] = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(**overrides):
# Request the page without the 'no-cache' plugin
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(18, 25)):
response1 = self.client.get('/en/')
content1 = response1.content
# Fetch it again, it is cached.
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
response2 = self.client.get('/en/')
content2 = response2.content
self.assertEqual(content1, content2)
# Once again with PAGE_CACHE=False, to prove the cache can
# be disabled
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(5, 24)):
response3 = self.client.get('/en/')
content3 = response3.content
self.assertEqual(content1, content3)
# Add the 'no-cache' plugin
add_plugin(placeholder1, "NoCachePlugin", 'en')
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(4, 6)):
output = self.render_template_obj(template, {}, request)
with self.assertNumQueries(FuzzyInt(14, 24)):
response = self.client.get('/en/')
self.assertTrue("no-cache" in response['Cache-Control'])
resp1 = response.content.decode('utf8').split("$$$")[1]
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(4):
output2 = self.render_template_obj(template, {}, request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(8, 14)):
response = self.client.get('/en/')
resp2 = response.content.decode('utf8').split("$$$")[1]
self.assertNotEqual(output, output2)
self.assertNotEqual(resp1, resp2)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_timedelta_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page
|
Zex/Starter
|
cgi-bin/leave_message.py
|
Python
|
mit
| 1,575
| 0.010159
|
#!/usr/bin/python
import cgi
from redis import Connection
from socket import gethostname
from navi import *
fields = cgi.FieldStorage()
title = "Message Box"
msg_prefix = 'custom.message.'
def insert_msg(cust, tm, msg):
conn = Connection(host=gethostname(),port=6379)
conn.send_command('set', msg_prefix+cust+'--'+tm, msg)
conn.disconnect()
def read_msg():
ret = ''
conn = Connection(host=gethostname(),port=6379)
conn.send_command('keys', msg_prefix+'*')
keys = conn.read_response()
vals = []
if len(keys) != 0:
conn.send_command('mget', *keys)
vals = conn.read_response()
ret += "<h2>" + "Message
|
log" + "</h2>"
for k, v in zip(keys, vals):
ret += "<span>" + k.replace(msg_prefix, '').replace('--', ' ') + "</span>"
ret += "<pre readonly=\"true\">" + v + "</pre>"
conn.disconnect()
ret += "<br>"
return ret
def reply():
import time, os
ret = ""
|
    ret += "Content-Type: text/html\n\n"
ret += "<!DOCTYPE html>"
ret += "<html>"
ret += default_head(title)
ret += default_navigator()
ret += "<body>"
ret += "<div class=\"content\">"
ret += "<h2>Welcome, " + os.environ["REMOTE_ADDR"] + "!</h2>"
ret += "<span>" + os.environ["HTTP_USER_AGENT"] + "</span><br><br>"
if fields.has_key('msgbox'):
insert_msg(os.environ["REMOTE_ADDR"], time.strftime(time.asctime()), fields['msgbox'].value)
ret += read_msg()
ret += "</div>"
ret += "</body>"
ret += "</html>"
print ret
reply()
|
OCA/social
|
website_mass_mailing_name/__init__.py
|
Python
|
agpl-3.0
| 91
| 0
|
|
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
|
from . import controllers
|
wglass/zoonado
|
tests/protocol/test_response.py
|
Python
|
apache-2.0
| 731
| 0
|
import struct
import unittest
from zoonado.protocol import response, primitives
class ResponseTests(unittest.TestCase):
def test_deserialize(self):
class FakeResponse(response.Response):
opcode = 99
parts = (
("first", primitives.Int),
("second", primitives.UString),
)
|
        # note that the xid and opcode are omitted, they're part of a preamble
|
        # that a connection would use to determine which Response to use
# for deserializing
raw = struct.pack("!ii6s", 3, 6, b"foobar")
result = FakeResponse.deserialize(raw)
self.assertEqual(result.first, 3)
self.assertEqual(result.second, u"foobar")
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py
|
Python
|
mpl-2.0
| 1,603
| 0.004367
|
import os
|
MOZ_OBJDIR = 'obj-firefox'
config = {
'default_actions': [
'clobber',
'clone-tools',
'checkout-sources',
#'setup-mock',
'build',
#'upload-files',
#'sendchange',
'check-test',
'valgrind-test',
#'generate-build-stats',
#'update',
],
'stage_platform': 'linux64-valgrind',
|
    'publish_nightly_en_US_routes': False,
'build_type': 'valgrind',
'tooltool_manifest_src': "browser/config/tooltool-manifests/linux64/\
releng.manifest",
'platform_supports_post_upload_to_latest': False,
'enable_signing': False,
'enable_talos_sendchange': False,
'perfherder_extra_options': ['valgrind'],
#### 64 bit build specific #####
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'DISPLAY': ':2',
'HG_SHARE_BASE_DIR': '/builds/hg-shared',
'MOZ_OBJDIR': 'obj-firefox',
'TINDERBOX_OUTPUT': '1',
'TOOLTOOL_CACHE': '/builds/tooltool_cache',
'TOOLTOOL_HOME': '/builds',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'CCACHE_DIR': '/builds/ccache',
'CCACHE_COMPRESS': '1',
'CCACHE_UMASK': '002',
'LC_ALL': 'C',
## 64 bit specific
'PATH': '/tools/buildbot/bin:/usr/local/bin:/usr/lib64/ccache:/bin:\
/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/tools/git/bin:/tools/python27/bin:\
/tools/python27-mercurial/bin:/home/cltbld/bin',
},
'src_mozconfig': 'browser/config/mozconfigs/linux64/valgrind',
#######################
}
|
akademikbilisim/ab-kurs-kayit
|
abkayit/training/migrations/0007_auto_20160628_1243.py
|
Python
|
gpl-3.0
| 617
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0006_auto_20160627_1620'),
]
operations = [
migrations.RemoveField(
|
            model_name='trainesscourserecord',
name='approvedby',
),
migrations.RemoveField(
model_name='trainesscourserecord',
name='createdby',
),
migrations.RemoveField(
model_name='trainesscourserecord',
name='createtimestamp',
),
]
|
shabab12/edx-platform
|
lms/djangoapps/course_api/blocks/transformers/proctored_exam.py
|
Python
|
agpl-3.0
| 2,327
| 0.003008
|
"""
Proctored Exams Transformer
"""
from django.conf import settings
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.models import ProctoredExamStudentAttemptStatus
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
class ProctoredExamTransformer(FilteringTransformerMixin, BlockStructureTransformer):
"""
Exclude proctored exams unless the user is not a verified student or has
declined taking the exam.
"""
VERSION = 1
BLOCK_HAS_PROCTORED_EXAM = 'has_proctored_exam'
@classmethod
def name(cls):
return "proctored_exam"
@classmethod
def collect(cls, block_structure):
"""
Computes any information for each XBlock that's necessary to execute
this transformer's transform method.
Arguments:
block_structure (BlockStructureCollectedData)
"""
block_structure.request_xblock_fields('is_proctored_enabled')
block_structure.request_xblock_fields('is_practice_exam')
def transform_block_filters(self, usage_info, block_structure):
if not settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False):
return [block_structure.create_universal_filter()]
def is_proctored_exam_for_user(block_key):
"""
Test whether the block is a proctored exam for the user in
question.
"""
if (
block_key.block_type == 'sequential' and (
block_structure.get_xblock_field(block_key, 'is_proctored_enabled') or
block_structure.get_xblock_field(block_key, 'is_practice_exam')
|
)
):
# This section is an exam. It should be excluded unless the
# user is not a verified student or has declined taking the exam.
user_exam_summary = get_attempt_status_summary(
|
                    usage_info.user.id,
unicode(block_key.course_key),
unicode(block_key),
)
return user_exam_summary and user_exam_summary['status'] != ProctoredExamStudentAttemptStatus.declined
return [block_structure.create_removal_filter(is_proctored_exam_for_user)]
|
guille0/space-chess
|
config.py
|
Python
|
mit
| 5,079
| 0.024808
|
from panda3d.core import LPoint3
# EDIT GAMEMODE AT THE BOTTOM (CHESS VARIANTS)
# COLORS (for the squares)
BLACK = (0, 0, 0, 1)
WHITE = (1, 1, 1, 1)
HIGHLIGHT = (0, 1, 1, 1)
HIGHLIGHT_MOVE = (0, 1, 0, 1)
HIGHLIGHT_ATTACK = (1, 0, 0, 1)
# SCALE (for the 3D representation)
SCALE = 0.5
PIECE_SCALE = 0.3
BOARD_HEIGHT = 1.5
# MODELS
MODEL_PAWN = "models/pawn.obj"
MODEL_ROOK = "models/rook.obj"
MODEL_KNIGHT = "models/knight.obj"
MODEL_BISHOP = "models/bishop.obj"
MODEL_QUEEN = "models/queen.obj"
MODEL_KING = "models/king.obj"
MODEL_UNICORN = "models/unicorn.obj"
# MODEL TEXTURES
TEXTURE_WHITE = "models/light_wood.jpg"
TEXTURE_BLACK = "models/dark_wood.jpg"
# HELPER FUNCTIONS
def square_position(x, y, z, board_size):
# Gives the 3d position of a square based on x, y, z
xx, yy, zz = board_size
x = (x - (3.5/8)*xx) * SCALE
y = (y - (3.5/8)*yy) * SCALE
z = z*BOARD_HEIGHT * SCALE
return LPoint3(x, y, z)
def square_color(x, y, z):
# Checks whether a square should be black or white
if (x+y+z) % 2 == 0:
return BLACK
else:
return WHITE
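# A tiny sanity check for illustration only: the colour depends solely on the
# parity of x + y + z, so one step along any single axis always flips it.
assert square_color(0, 0, 0) == BLACK
assert square_color(1, 0, 0) == WHITE
assert square_color(1, 1, 0) == BLACK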
# BOARDS
# 1 = Pawn
# 2 = Rook
# 3 = Knight
# 4 = Bishop
# 5 = Queen
# 6 = King
# 7 = Unicorn
# + = white
# - = black
# First array = lowest level
# Highest part of the array = front (white pieces)
PIECES = {
0: 'empty space',
-1: 'black pawn',
-2: 'black rook',
-3: 'black knight',
-4: 'black bishop',
-5: 'black queen',
-6: 'black king',
-7: 'black unicorn',
1: 'white pawn',
2: 'white rook',
3: 'white knight',
4: 'white bishop',
5: 'white queen',
6: 'white king',
7: 'white unicorn',
}
RAUMSCHACH_PAWN_2STEP = False
RAUMSCHACH_BOARD = [
[
[ 2, 3, 6, 3, 2],
[ 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 4, 7, 5, 7, 4],
[ 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1],
[-4,-7,-5,-7,-4],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1],
[-2,-3,-6,-3,-2],
],
]
SMALL_RAUMSCHACH_PAWN_2STEP = False
SMALL_RAUMSCHACH_BOARD = [
[
[ 2, 4, 6, 4, 2],
|
[ 3, 1, 1, 1, 3],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
|
        [ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[-3,-1,-1,-1,-3],
[-2,-4,-6,-4,-2],
],
]
CARD_PAWN_2STEP = True
CARD_BOARD = [
[
[ 2, 5, 6, 2],
[ 1, 1, 1, 1],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
],
[
[ 4, 3, 3, 4],
[ 1, 1, 1, 1],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-1,-1,-1,-1],
[-4,-3,-3,-4],
],
[
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-1,-1,-1,-1],
[-2,-5,-6,-2],
],
]
CLASSIC_PAWN_2STEP = True
CLASSIC_BOARD = [
[
[ 2, 3, 4, 5, 6, 4, 3, 2],
[ 1, 1, 1, 1, 1, 1, 1, 1],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[-1,-1,-1,-1,-1,-1,-1,-1],
[-2,-3,-4,-5,-6,-4,-3,-2],
],
]
# NOTE: PAWN_2STEP is whether the pawn can take 2 steps if it's on the second line (bool)
RAUMSCHACH = (RAUMSCHACH_BOARD, RAUMSCHACH_PAWN_2STEP)
SMALL_RAUMSCHACH = (SMALL_RAUMSCHACH_BOARD, SMALL_RAUMSCHACH_PAWN_2STEP)
CARD = (CARD_BOARD, CARD_PAWN_2STEP)
CLASSIC = (CLASSIC_BOARD, CLASSIC_PAWN_2STEP)
TEST_PAWN_2STEP = True
TEST_BOARD = [
[
[ 0, 1, 6, 0],
[ 0, 0, 0, 0],
[ 0, 0,-2,-2],
[ 0, 0, 0, 0],
],
[
[ 0, 1, 6, 0],
[ 0, 0, 0, 0],
[ 0, 0,-2,-2],
[ 0, 0, 0, 0],
],
]
TEST = (TEST_BOARD, TEST_PAWN_2STEP)
# Edit gamemode here
GAMEMODE = SMALL_RAUMSCHACH
# Edit players here
HUMANS = (1, )
AIS = (-1, )
BOARD, PAWN_2STEP = GAMEMODE
BOARD_SIZE = (len(BOARD[0][0]), len(BOARD[0]), len(BOARD))
TEST = True
|
V11/volcano
|
server/sqlmap/plugins/dbms/sqlite/connector.py
|
Python
|
mit
| 3,003
| 0.00333
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import sqlite3
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
|
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pysqlite.googlecode.com/ and http://packages.ubuntu.com/quantal/python-sqlite
User guide: http://docs.python.org/release/2.5/lib/module-sqlite3.html
API: http://docs.python.org/library/sqlite3.html
|
    Debian package: python-sqlite (SQLite 2), python-pysqlite3 (SQLite 3)
License: MIT
Possible connectors: http://wiki.python.org/moin/SQLite
"""
def __init__(self):
GenericConnector.__init__(self)
self.__sqlite = sqlite3
def connect(self):
self.initConnection()
self.checkFileDb()
try:
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
cursor = self.connector.cursor()
cursor.execute("SELECT * FROM sqlite_master")
cursor.close()
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
warnMsg = "unable to connect using SQLite 3 library, trying with SQLite 2"
logger.warn(warnMsg)
try:
try:
import sqlite
except ImportError:
errMsg = "sqlmap requires 'python-sqlite' third-party library "
errMsg += "in order to directly connect to the database '%s'" % self.db
raise SqlmapMissingDependence(errMsg)
self.__sqlite = sqlite
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
raise SqlmapConnectionException(msg[0])
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
return None
def execute(self, query):
try:
self.cursor.execute(utf8encode(query))
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
except self.__sqlite.DatabaseError, msg:
raise SqlmapConnectionException(msg[0])
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
Nebucatnetzer/tamagotchi
|
pygame/lib/python3.4/site-packages/mixer/backend/sqlalchemy.py
|
Python
|
gpl-2.0
| 8,887
| 0.000675
|
""" SQLAlchemy support. """
from __future__ import absolute_import
import datetime
from types import GeneratorType
import decimal
from sqlalchemy import func
# from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.sql.type_api import TypeDecorator
try:
from sqlalchemy.orm.relationships import RelationshipProperty
except ImportError:
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.types import (
BIGINT, BOOLEAN, BigInteger, Boolean, CHAR, DATE, DATETIME, DECIMAL, Date,
DateTime, FLOAT, Float, INT, INTEGER, Integer, NCHAR, NVARCHAR, NUMERIC,
Numeric, SMALLINT, SmallInteger, String, TEXT, TIME, Text, Time, Unicode,
UnicodeText, VARCHAR, Enum)
from .. import mix_types as t
from ..main import (
SKIP_VALUE, LOGGER, TypeMixer as BaseTypeMixer, GenFactory as BaseFactory,
Mixer as BaseMixer, partial, faker)
class GenFactory(BaseFactory):
""" Map a sqlalchemy classes to simple types. """
types = {
(String, VARCHAR, Unicode, NVARCHAR, NCHAR, CHAR): str,
(Text, UnicodeText, TEXT): t.Text,
(Boolean, BOOLEAN): bool,
|
(Date, DATE): datetime.date,
(DateTime, DATETIME): datetime.datetime,
(Time, TIME): datetime.time,
(DECIMAL, Numeric, NUMERIC): decimal.Decimal,
(Float, FLOAT): float,
(Integer, INTEGER, INT): int,
(BigInteger, BIGINT): t.BigInteger,
(SmallInteger, SMALLINT): t.SmallInteger,
}
class TypeMixer(BaseTypeMixer):
""" TypeMixer for SQLAlche
|
my. """
factory = GenFactory
def __init__(self, cls, **params):
""" Init TypeMixer and save the mapper. """
super(TypeMixer, self).__init__(cls, **params)
self.mapper = self.__scheme._sa_class_manager.mapper
def postprocess(self, target, postprocess_values):
""" Fill postprocess values. """
mixed = []
for name, deffered in postprocess_values:
value = deffered.value
if isinstance(value, GeneratorType):
value = next(value)
if isinstance(value, t.Mix):
mixed.append((name, value))
continue
if isinstance(getattr(target, name), InstrumentedList) and not isinstance(value, list):
value = [value]
setattr(target, name, value)
for name, mix in mixed:
setattr(target, name, mix & target)
if self.__mixer:
target = self.__mixer.postprocess(target)
return target
@staticmethod
def get_default(field):
""" Get default value from field.
:return value: A default value or NO_VALUE
"""
column = field.scheme
if isinstance(column, RelationshipProperty):
column = column.local_remote_pairs[0][0]
if not column.default:
return SKIP_VALUE
if column.default.is_callable:
return column.default.arg(None)
return getattr(column.default, 'arg', SKIP_VALUE)
def gen_select(self, field_name, select):
""" Select exists value from database.
:param field_name: Name of field for generation.
:return : None or (name, value) for later use
"""
if not self.__mixer or not self.__mixer.params.get('session'):
return field_name, SKIP_VALUE
relation = self.mapper.get_property(field_name)
session = self.__mixer.params.get('session')
value = session.query(
relation.mapper.class_
).filter(*select.choices).order_by(func.random()).first()
return self.get_value(field_name, value)
@staticmethod
def is_unique(field):
""" Return True is field's value should be a unique.
:return bool:
"""
scheme = field.scheme
if isinstance(scheme, RelationshipProperty):
scheme = scheme.local_remote_pairs[0][0]
return scheme.unique
@staticmethod
def is_required(field):
""" Return True is field's value should be defined.
:return bool:
"""
column = field.scheme
if isinstance(column, RelationshipProperty):
column = column.local_remote_pairs[0][0]
if field.params:
return True
# According to the SQLAlchemy docs, autoincrement "only has an effect for columns which are
# Integer derived (i.e. INT, SMALLINT, BIGINT) [and] Part of the primary key [...]".
return not column.nullable and not (column.autoincrement and column.primary_key and
isinstance(column.type, Integer))
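    # Worked example (hypothetical model, for illustration only): given
    #     class Note(Base):
    #         id = Column(Integer, primary_key=True, autoincrement=True)
    #         title = Column(String(50), nullable=False)
    # is_required() skips `id`, because the database fills an autoincrementing
    # integer primary key, but reports `title` as required (NOT NULL and not
    # such a key).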
def get_value(self, field_name, field_value):
""" Get `value` as `field_name`.
:return : None or (name, value) for later use
"""
field = self.__fields.get(field_name)
if field and isinstance(field.scheme, RelationshipProperty):
return field_name, t._Deffered(field_value, field.scheme)
return super(TypeMixer, self).get_value(field_name, field_value)
def make_fabric(self, column, field_name=None, fake=False, kwargs=None): # noqa
""" Make values fabric for column.
:param column: SqlAlchemy column
:param field_name: Field name
:param fake: Force fake data
:return function:
"""
kwargs = {} if kwargs is None else kwargs
if isinstance(column, RelationshipProperty):
return partial(type(self)(
column.mapper.class_, mixer=self.__mixer, fake=self.__fake, factory=self.__factory
).blend, **kwargs)
ftype = type(column.type)
# augmented types created with TypeDecorator
# don't directly inherit from the base types
if TypeDecorator in ftype.__bases__:
ftype = ftype.impl
stype = self.__factory.cls_to_simple(ftype)
if stype is str:
fab = super(TypeMixer, self).make_fabric(
stype, field_name=field_name, fake=fake, kwargs=kwargs)
return lambda: fab()[:column.type.length]
if ftype is Enum:
return partial(faker.random_element, column.type.enums)
return super(TypeMixer, self).make_fabric(
stype, field_name=field_name, fake=fake, kwargs=kwargs)
def guard(self, *args, **kwargs):
""" Look objects in database.
:returns: A finded object or False
"""
try:
session = self.__mixer.params.get('session')
assert session
except (AttributeError, AssertionError):
raise ValueError('Cannot make request to DB.')
qs = session.query(self.mapper).filter(*args, **kwargs)
count = qs.count()
if count == 1:
return qs.first()
if count:
return qs.all()
return False
def reload(self, obj):
""" Reload object from database. """
try:
session = self.__mixer.params.get('session')
session.expire(obj)
session.refresh(obj)
return obj
except (AttributeError, AssertionError):
raise ValueError('Cannot make request to DB.')
def __load_fields(self):
""" Prepare SQLALchemyTypeMixer.
Select columns and relations for data generation.
"""
mapper = self.__scheme._sa_class_manager.mapper
relations = set()
if hasattr(mapper, 'relationships'):
for rel in mapper.relationships:
relations |= rel.local_columns
yield rel.key, t.Field(rel, rel.key)
for key, column in mapper.columns.items():
if column not in relations:
yield key, t.Field(column, key)
class Mixer(BaseMixer):
""" Integration with SQLAlchemy. """
type_mixer_cls = TypeMixer
def __init__(self, session=None, commit=True, **params):
"""Initialize the SQLAlchemy Mixer.
:param fake: (True) Generate fake data instead of random data.
:param session: SQLAlchemy session. Using for commits.
:par
|
NcLang/vimrc
|
sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/jedihttp/compatibility.py
|
Python
|
mit
| 2,108
| 0.019924
|
# Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
|
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.version_info[0] >= 3:
basestring = str
unicode = str
def encode_string( value ):
return value.encode('utf-8') if isinstance(value, unicode) else value
def decode_string(value):
return value if isinstance(value, basestring) else value.decode('utf-8')
# hmac.compare_digest was introduced in python 2.7.7
if sys.version_info >= ( 2, 7, 7 ):
from hmac import compare_digest as SecureStringsEqual
else:
# This is the compare_digest function from python 3.4, adapted for 2.6:
# http://hg.python.org/cpython/file/460407f35aa9/Lib/hmac.py#l16
#
# Stolen from https://github.com/Valloric/ycmd
def SecureStringsEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
if not ( isinstance( a, str ) and isinstance( b, str ) ):
raise TypeError( "inputs must be str instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= ord( x ) ^ ord( y )
return result == 0
def compare_digest( a, b ):
return SecureStringsEqual( a, b )
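# A minimal usage sketch (the digest values are made up; only compare_digest
# above is real). Unlike a plain '==', the comparison touches every byte, so
# its timing does not reveal where the first mismatch occurs.
if __name__ == '__main__':
    expected_digest = 'a' * 40           # hypothetical stored HMAC hex digest
    supplied_digest = 'a' * 39 + 'b'     # hypothetical client-supplied digest
    print( compare_digest( expected_digest, supplied_digest ) )   # -> False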
|
blattms/opm-common
|
python/tests/test_emodel.py
|
Python
|
gpl-3.0
| 5,037
| 0.010125
|
import unittest
import sys
import numpy as np
from opm.util import EModel
try:
|
from tests.utils import test_path
except ImportError:
from utils import test_path
class TestEModel(unittest.TestCase):
def test_open_model(self):
refArrList = ["PORV", "CELLVOL", "DEPTH", "DX", "DY", "DZ", "PORO", "PERMX", "PERMY", "PERMZ", "NTG", "TRANX",
"TRANY", "TRANZ", "ACTNUM", "ENDNUM", "EQLNUM", "FIPNUM", "FLUXNUM",
|
"IMBNUM", "PVTNUM",
"SATNUM", "SWL", "SWCR", "SGL", "SGU", "ISWL", "ISWCR", "ISGL", "ISGU", "PPCW", "PRESSURE",
"RS", "RV", "SGAS", "SWAT", "SOMAX", "SGMAX"]
self.assertRaises(RuntimeError, EModel, "/file/that/does_not_exists")
self.assertRaises(ValueError, EModel, test_path("data/9_EDITNNC.EGRID"))
self.assertRaises(ValueError, EModel, test_path("data/9_EDITNNC.UNRST"))
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
arrayList = mod1.get_list_of_arrays()
for n, element in enumerate(arrayList):
self.assertEqual(element[0], refArrList[n])
celvol1 = mod1.get("CELLVOL")
self.assertEqual(len(celvol1), 2794)
def test_add_filter(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
celvol1 = mod1.get("CELLVOL")
depth1 = mod1.get("DEPTH")
self.assertTrue(isinstance(celvol1, np.ndarray))
self.assertEqual(celvol1.dtype, "float32")
refVol1 = 2.79083e8
self.assertTrue( abs((sum(celvol1) - refVol1)/refVol1) < 1.0e-5)
mod1.add_filter("EQLNUM","eq", 1);
mod1.add_filter("DEPTH","lt", 2645.21);
refVol2 = 1.08876e8
refPorvVol2 = 2.29061e7
porv2 = mod1.get("PORV")
celvol2 = mod1.get("CELLVOL")
self.assertTrue( abs((sum(celvol2) - refVol2)/refVol2) < 1.0e-5)
self.assertTrue( abs((sum(porv2) - refPorvVol2)/refPorvVol2) < 1.0e-5)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 2);
mod1.add_filter("DEPTH","in", 2584.20, 2685.21);
refPorvVol3 = 3.34803e7
porv3 = mod1.get("PORV")
self.assertTrue( abs((sum(porv3) - refPorvVol3)/refPorvVol3) < 1.0e-5)
mod1.reset_filter()
mod1.add_filter("I","lt", 10);
mod1.add_filter("J","between", 3, 15);
mod1.add_filter("K","between", 2, 9);
poro = mod1.get("PORO")
self.assertEqual(len(poro), 495)
def test_paramers(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
self.assertFalse("XXX" in mod1)
self.assertTrue("PORV" in mod1)
self.assertTrue("PRESSURE" in mod1)
self.assertTrue("RS" in mod1)
self.assertTrue("RV" in mod1)
self.assertEqual(mod1.active_report_step(), 0)
rsteps = mod1.get_report_steps()
self.assertEqual(rsteps, [0, 4, 7, 10, 15, 20, 27, 32, 36, 39])
mod1.set_report_step(7)
# parameter RS and RV is missing in report step number 7
self.assertFalse("RS" in mod1)
self.assertFalse("RV" in mod1)
mod1.set_report_step(15)
self.assertTrue("RS" in mod1)
self.assertTrue("RV" in mod1)
arrayList = mod1.get_list_of_arrays()
def test_rsteps_steps(self):
pres_ref_4_1_10 = [272.608, 244.461, 228.503, 214.118, 201.147, 194.563, 178.02, 181.839, 163.465, 148.677]
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
mod1.add_filter("I","eq", 4);
mod1.add_filter("J","eq", 1);
mod1.add_filter("K","eq", 10);
self.assertTrue(mod1.has_report_step(4))
self.assertFalse(mod1.has_report_step(2))
rsteps = mod1.get_report_steps()
for n, step in enumerate(rsteps):
mod1.set_report_step(step)
pres = mod1.get("PRESSURE")
self.assertTrue(abs(pres[0] - pres_ref_4_1_10[n])/pres_ref_4_1_10[n] < 1.0e-5)
def test_grid_props(self):
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
nI,nJ,nK = mod1.grid_dims()
self.assertEqual((nI,nJ,nK), (13, 22, 11))
nAct = mod1.active_cells()
self.assertEqual(nAct, 2794)
def test_hc_filter(self):
nAct_hc_eqln1 = 1090
nAct_hc_eqln2 = 1694
mod1 = EModel(test_path("data/9_EDITNNC.INIT"))
porv = mod1.get("PORV")
mod1.set_depth_fwl([2645.21, 2685.21])
mod1.add_hc_filter()
porv = mod1.get("PORV")
self.assertEqual(len(porv), nAct_hc_eqln1 + nAct_hc_eqln2)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 1);
mod1.add_filter("DEPTH","lt", 2645.21);
porv1 = mod1.get("PORV")
self.assertEqual(len(porv1), nAct_hc_eqln1)
mod1.reset_filter()
mod1.add_filter("EQLNUM","eq", 2);
mod1.add_filter("DEPTH","lt", 2685.21);
porv2 = mod1.get("PORV")
self.assertEqual(len(porv2), nAct_hc_eqln2)
ivect = mod1.get("I")
if __name__ == "__main__":
unittest.main()
|
filipenf/ansible
|
lib/ansible/cli/console.py
|
Python
|
gpl-3.0
| 16,263
| 0.003136
|
# (c) 2014, Nandor Sivok <dominis@haxor.hu>
# (c) 2016, Redhat Inc
#
# ansible-console is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ansible-console is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
# ansible-console is an interactive REPL shell for ansible
# with built-in tab completion for all the documented modules
#
# Available commands:
# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
# list - list available hosts in the current path
# forks - change fork
# become - become
# ! - forces shell module instead of the ansible module (!yum update -y)
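#
# A hypothetical session for illustration (host/group names are made up; the
# prompt format matches set_prompt() below):
#
#   $ ansible-console webservers
#   user@webservers (4)[f:5]$ ping
#   user@webservers (4)[f:5]$ !systemctl status nginx
#   user@webservers (4)[f:5]$ forks 10
#   user@webservers (4)[f:10]$ cd app*.dc1:!app01*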
import atexit
import cmd
import getpass
import readline
import os
import sys
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from ansible.utils import module_docs
from ansible.utils.color import stringc
from ansible.utils.unicode import to_unicode, to_str
from ansible.plugins import module_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ConsoleCLI(CLI, cmd.Cmd):
modules = []
def __init__(self, args):
super(ConsoleCLI, self).__init__(args)
self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
self.groups = []
self.hosts = []
self.pattern = None
self.variable_manager = None
self.loader = None
self.passwords = dict()
self.modules = None
cmd.Cmd.__init__(self)
|
    def parse(self):
self.parser = CLI.base_parser(
usage='%prog <host-pattern> [options]',
runas_opts=True,
inventory_opts=True,
connect_opts=True,
check_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
)
# options unique to shell
self.parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
|
self.parser.set_defaults(cwd='*')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
return True
def get_names(self):
return dir(self)
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
self.do_exit(self)
def set_prompt(self):
login_user = self.options.remote_user or getpass.getuser()
self.selected = self.inventory.list_hosts(self.options.cwd)
prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks)
if self.options.become and self.options.become_user in [None, 'root']:
prompt += "# "
color = C.COLOR_ERROR
else:
prompt += "$ "
color = C.COLOR_HIGHLIGHT
self.prompt = stringc(prompt, color)
def list_modules(self):
modules = set()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
module_paths = module_loader._get_paths()
for path in module_paths:
if path is not None:
modules.update(self._find_modules_in_path(path))
return modules
def _find_modules_in_path(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self._find_modules_in_path(module)
elif module.startswith('__'):
continue
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = module.replace('_', '', 1)
module = os.path.splitext(module)[0] # removes the extension
yield module
def default(self, arg, forceshell=False):
""" actually runs modules """
if arg.startswith("#"):
return False
if not self.options.cwd:
display.error("No host found")
return False
if arg.split()[0] in self.modules:
module = arg.split()[0]
module_args = ' '.join(arg.split()[1:])
else:
module = 'shell'
module_args = arg
if forceshell is True:
module = 'shell'
module_args = arg
self.options.module_name = module
result = None
try:
check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
play_ds = dict(
name = "Ansible Shell",
hosts = self.options.cwd,
gather_facts = 'no',
tasks = [ dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))]
)
play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
except Exception as e:
display.error(u"Unable to build command: %s" % to_unicode(e))
return False
try:
cb = 'minimal' #FIXME: make callbacks configurable
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
options=self.options,
passwords=self.passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
)
result = self._tqm.run(play)
finally:
if self._tqm:
self._tqm.cleanup()
if self.loader:
self.loader.cleanup_all_tmp_files()
if result is None:
display.error("No hosts found")
return False
except KeyboardInterrupt:
display.error('User interrupted execution')
return False
except Exception as e:
display.error(to_unicode(e))
#FIXME: add traceback in very very verbose mode
return False
def emptyline(self):
return
def do_shell(self, arg):
"""
You can run shell commands through the shell module.
eg.:
shell ps uax | grep java | wc -l
shell killall python
shell halt -n
You can use the ! to force the shell module. eg.:
!ps aux | grep java | wc -l
"""
self.default(arg, True)
|
Sybrand/digital-panda
|
digitalpanda/bucket/abstract.py
|
Python
|
mit
| 2,411
| 0
|
from abc import ABCMeta, abstractmethod
class ProgressMessage(object):
def __init__(self, path, bytes_per_second, bytes_read, bytes_expected):
self._path = path
self._bytes_per_second = bytes_per_second
self._bytes_read = bytes_read
self._bytes_expected = bytes_expected
@property
def path(self):
return self._path
@property
def bytes_per_second(self):
return self._bytes_per_second
@property
def bytes_read(self):
return self._bytes_read
@property
def bytes_expected(self):
return self._bytes_expected
class BucketFile(object):
"""
    This class defines the contract for a file that is used across
    all buckets.
"""
def __init__(self, path, name, folder, contentType=None):
self._path = path
self._name = name
self._folder = folder
self._contentType = contentType
self._hash = None
|
        self._dateModified = None
def get_hash(self):
return self._hash
def set_hash(self, value):
self._hash = value
def get_dateModified(self):
return self._dateModified
def set_dateModified(self, value):
self._dateModified = value
def get_content_type(self):
return self._contentType
def set_content_type(self, value):
self._contentType = value
|
@property
def path(self):
return self._path
@property
def name(self):
return self._name
@property
def isFolder(self):
return self._folder
contentType = property(get_content_type, set_content_type)
hash = property(get_hash, set_hash)
dateModified = property(get_dateModified, set_dateModified)
class AbstractProvider:
"""
This class defines a contract for all our different storage sources
e.g: Amazon S3, Local Files, Openstack Swift etc. etc.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_object(self, path):
return NotImplemented
@abstractmethod
def list_dir(self, path):
return NotImplemented
@abstractmethod
def authenticate(self):
"""
Return True is it works, False if it fails
"""
return False
@abstractmethod
def download_object(self, sourcePath, targetPath):
"""
Download source to target
"""
return NotImplemented
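# Illustrative sketch only: LocalFileProvider is a hypothetical backend, not
# part of this code base; it just shows how a concrete storage source is
# expected to satisfy the AbstractProvider contract defined above.
import os
import shutil
class LocalFileProvider(AbstractProvider):
    """Toy provider that serves objects from a local directory."""
    def __init__(self, root):
        self._root = root
    def authenticate(self):
        # Nothing to log in to; just check that the root directory exists.
        return os.path.isdir(self._root)
    def list_dir(self, path):
        full = os.path.join(self._root, path)
        return [BucketFile(os.path.join(path, name), name,
                           os.path.isdir(os.path.join(full, name)))
                for name in os.listdir(full)]
    def delete_object(self, path):
        os.remove(os.path.join(self._root, path))
    def download_object(self, sourcePath, targetPath):
        # "Download" degenerates to a local copy when the source is local.
        shutil.copyfile(os.path.join(self._root, sourcePath), targetPath)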
|
ksmit799/Toontown-Source
|
toontown/parties/activityFSMs.py
|
Python
|
mit
| 3,442
| 0.004067
|
from direct.directnotify import DirectNotifyGlobal
from BaseActivityFSM import BaseActivityFSM
from activityFSMMixins import IdleMixin
from activityFSMMixins import RulesMixin
from activityFSMMixins import ActiveMixin
from activityFSMMixins import DisabledMixin
|
from activityFSMMixins import ConclusionMixin
from activityFSMMixins import WaitForEnoughMixin
from activityFSMMixins import WaitToStartMixin
from activityFSMMixins import WaitClientsReadyMixin
from activityFSMMixins import WaitForServerMixin
class FireworksActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('FireworksActivityFSM')
def __init__(self, activity):
|
        FireworksActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Active', 'Disabled'],
'Active': ['Disabled'],
'Disabled': []}
class CatchActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('CatchActivityFSM')
def __init__(self, activity):
CatchActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Active', 'Conclusion'],
'Active': ['Conclusion'],
'Conclusion': ['Idle']}
class TrampolineActivityFSM(BaseActivityFSM, IdleMixin, RulesMixin, ActiveMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TrampolineActivityFSM')
def __init__(self, activity):
TrampolineActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Idle': ['Rules', 'Active'],
'Rules': ['Active', 'Idle'],
'Active': ['Idle']}
class DanceActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('DanceActivityFSM')
def __init__(self, activity):
DanceActivityFSM.notify.debug('__init__')
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'Active': ['Disabled'],
'Disabled': ['Active']}
class TeamActivityAIFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, WaitClientsReadyMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TeamActivityAIFSM')
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
self.notify.debug('__init__')
self.defaultTransitions = {'WaitForEnough': ['WaitToStart'],
'WaitToStart': ['WaitForEnough', 'WaitClientsReady'],
'WaitClientsReady': ['WaitForEnough', 'Active'],
'Active': ['WaitForEnough', 'Conclusion'],
'Conclusion': ['WaitForEnough']}
class TeamActivityFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, RulesMixin, WaitForServerMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory('TeamActivityFSM')
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {'WaitForEnough': ['WaitToStart'],
'WaitToStart': ['WaitForEnough', 'Rules'],
'Rules': ['WaitForServer', 'Active', 'WaitForEnough'],
'WaitForServer': ['Active', 'WaitForEnough'],
'Active': ['Conclusion', 'WaitForEnough'],
'Conclusion': ['WaitForEnough']}
|
thergames/thergames.github.io
|
lib/tomorrow-pygments/styles/tomorrownightblue.py
|
Python
|
mit
| 5,509
| 0.000363
|
# -*- coding: utf-8 -*-
"""
tomorrow night blue
---------------------
Port of the Tomorrow Night Blue colour scheme https://github.com/chriskempson/tomorrow-theme
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#002451"
CURRENT_LINE = "#00346e"
SELECTION = "#003f8e"
FOREGROUND = "#ffffff"
COMMENT = "#7285b7"
RED = "#ff9da4"
ORANGE = "#ffc58f"
YELLOW = "#ffeead"
GREEN = "#d1f1a9"
AQUA = "#99ffff"
BLUE = "#bbdaff"
PURPLE = "#ebbbff"
class TomorrownightblueStyle(Style):
"""
Port of the Tomorrow Night Blue colour scheme https://github.com/chriskempson/tomorrow-theme
"""
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
Other: "", # class 'x'
Comment: COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: FOREGROUND, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
|
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
|
        Number.Float:              "",                        # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: RED, # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
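# A minimal usage sketch, assuming standard Pygments; the snippet string and
# variable names are made up, while the calls themselves are ordinary
# Pygments API.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    # A Style subclass can be passed directly as the formatter's style option.
    formatter = HtmlFormatter(style=TomorrownightblueStyle, full=True)
    print(highlight("print('hello tomorrow')", PythonLexer(), formatter))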
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
unittests/Applications/test_ConvertToCSV.py
|
Python
|
gpl-2.0
| 106
| 0.009434
|
import unittest
|
from PyFoam.Applications.ConvertToCSV import ConvertToCSV
|
theSuite=unittest.TestSuite()
|
dmlc/tvm
|
tests/python/contrib/test_ethosn/test_mean.py
|
Python
|
apache-2.0
| 2,066
| 0.001452
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration mean tests"""
import numpy as np
import tvm
from tvm import relay
|
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, axis, keepdims, input_zp, input_sc, output_zp, output_sc, dtype):
|
    a = relay.var("a", shape=shape, dtype=dtype)
casted = relay.op.cast(a, "int32")
mean = relay.mean(casted, axis, keepdims)
model = relay.qnn.op.requantize(
mean,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
def test_mean():
trials = [(1, 7, 7, 2048), (1, 8, 8)]
np.random.seed(0)
for shape in trials:
inputs = {
"a": tvm.nd.array(np.random.randint(0, high=255, size=shape, dtype="uint8")),
}
outputs = []
for npu in [False, True]:
model = _get_model(shape, [1, 2], True, 128, 0.0784314, 128, 0.0784314, "uint8")
mod = tei.make_module(model, [])
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, "uint8", 1)
|
HonzaKral/warehouse
|
warehouse/legacy/tables.py
|
Python
|
apache-2.0
| 12,359
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: Tables that exist here should not be used anywhere, they only exist
# here for migration support with alembic. If any of these tables end up
# being used they should be moved outside of warehouse.legacy. The goal
# is that once the legacy PyPI code base is gone, that these tables
# can just be deleted and a migration made to drop them from the
# database.
from citext import CIText
from sqlalchemy import (
CheckConstraint, Column, ForeignKey, ForeignKeyConstraint, Index, Table,
UniqueConstraint,
Boolean, Date, DateTime, Integer, LargeBinary, String, Text,
)
from warehouse import db
accounts_gpgkey = Table(
"accounts_gpgkey",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column(
"user_id",
Integer(),
ForeignKey(
"accounts_user.id",
deferrable=True,
initially="DEFERRED",
),
nullable=False,
),
Column("key_id", CIText(), nullable=False),
Column("verified", Boolean(), nullable=False),
UniqueConstraint("key_id", name="accounts_gpgkey_key_id_key"),
CheckConstraint(
"key_id ~* '^[A-F0-9]{8}$'::citext",
name="accounts_gpgkey_valid_key_id",
),
)
Index("accounts_gpgkey_user_id", accounts_gpgkey.c.user_id)
browse_tally = Table(
"browse_tally",
db.metadata,
Column("trove_id", Integer(), primary_key=True, nullable=False),
Column("tally", Integer()),
)
cheesecake_main_indices = Table(
"cheesecake_main_indices",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("absolute", Integer(), nullable=False),
Column("relative", Integer(), nullable=False),
)
cheesecake_subindices = Table(
"cheesecake_subindices",
db.metadata,
Column(
"main_index_id",
Integer(),
ForeignKey("cheesecake_main_indices.id"),
primary_key=True,
nullable=False,
),
Column("name", Text(), primary_key=True, nullable=False),
Column("value", Integer(), nullable=False),
Column("details", Text(), nullable=False),
)
comments = Table(
"comments",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column(
"rating",
Integer(),
ForeignKey("ratings.id", ondelete="CASCADE"),
),
Column(
"user_name",
CIText(),
ForeignKey("accounts_user.username", ondelete="CASCADE"),
),
Column("date", DateTime(timezone=False)),
Column("message", Text()),
Column(
"in_reply_to",
Integer(),
ForeignKey("comments.id", ondelete="CASCADE"),
),
)
comments_journal = Table(
"comments_journal",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("id", Integer()),
Column(
"submitted_by",
CIText(),
ForeignKey("accounts_user.username", ondelete="CASCADE"),
),
Column("date", DateTime(timezone=False)),
Column("action", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
ondelete="CASCADE",
),
)
cookies = Table(
"cookies",
db.metadata,
Column("cookie", Text(), primary_key=True, nullable=False),
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
Column("last_seen", DateTime(timezone=False)),
)
Index("cookies_last_seen", cookies.c.last_seen)
csrf_tokens = Table(
"csrf_tokens",
db.metadata,
Column(
"name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
primary_key=True,
nullable=False,
),
Column("token", Text()),
Column("end_date", DateTime(timezone=False)),
)
description_urls = Table(
"description_urls",
db.metadata,
Column("id", Integer(), primary_key=True, nullable=False),
Column("name", Text()),
Column("version", Text()),
Column("url", Text()),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
Index("description_urls_name_idx", description_urls.c.name)
Index(
"description_urls_name_version_idx",
description_urls.c.name,
description_urls.c.version,
)
dual = Table(
"dual",
db.metadata,
Column("dummy", Integer()),
)
mirrors = Table(
"mirrors",
db.metadata,
Column("ip", Text(), primary_key=True, nullable=False),
Column("user_name", CIText(), ForeignKey("accounts_user.username")),
Column("index_url", Text()),
Column("last_modified_url", Text()),
Column("local_stats_url", Text()),
Column("stats_url", Text()),
Column("mirrors_url", Text()),
)
oauth_access_tokens = Table(
"oauth_access_tokens",
db.metadata,
Column("token", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("consumer", String(32), nullable=False),
Column("date_created", Date(), nullable=False),
Column("last_modified", Date(), nullable=False),
Column(
"user_name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
)
oauth_consumers = Table(
"oauth_consumers",
db.metadata,
Column("consumer", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("date_created", Date(), nullable=False),
Column(
"created_by",
CIText(),
|
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
),
),
Column("last_modified", Date(), nullable=False),
Column("description", String(2
|
55), nullable=False),
)
oauth_nonce = Table(
"oauth_nonce",
db.metadata,
Column("timestamp", Integer(), nullable=False),
Column("consumer", String(32), nullable=False),
Column("nonce", String(32), nullable=False),
Column("token", String(32)),
)
oauth_request_tokens = Table(
"oauth_request_tokens",
db.metadata,
Column("token", String(32), primary_key=True, nullable=False),
Column("secret", String(64), nullable=False),
Column("consumer", String(32), nullable=False),
Column("callback", Text()),
Column("date_created", Date(), nullable=False),
Column(
"user_name",
CIText(),
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
ondelete="CASCADE",
),
),
)
oid_associations = Table(
"oid_associations",
db.metadata,
Column("server_url", String(2047), primary_key=True, nullable=False),
Column("handle", String(255), primary_key=True, nullable=False),
Column("secret", LargeBinary(128), nullable=False),
Column("issued", Integer(), nullable=False),
Column("lifetime", Integer(), nullable=False),
Column("assoc_type", String(64), nullable=False),
CheckConstraint(
"length(secret) <= 128",
name="secret_length_constraint",
),
)
oid_nonces = Table(
"oid_nonces",
db.metadata,
Column("server_url", String(2047), primary_key=True, nullable=False),
Column("timestamp", Integer(), primary_key=True, nullable=False),
Column("salt", String(40), primary_key=True, nullable=False),
)
openid_discovered = Table(
|
tectronics/mpmath
|
doc/run_doctest.py
|
Python
|
bsd-3-clause
| 222
| 0.004505
|
#!/usr/bin/env python
import os
import os.path
path = "source"
import doctest
for f in os.listdir(path):
if f.endswith(".txt"):
print f
doctest.testfile(os.path.join(path, f), module_relative=False)
|
ssalevan/cobbler
|
koan/qcreate.py
|
Python
|
gpl-2.0
| 7,315
| 0.008612
|
"""
Virtualization installation functions.
Copyright 2007-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
module for creating fullvirt guests via KVM/kqemu/qemu
requires python-virtinst-0.200.
"""
import os, sys, time, stat
import tempfile
import random
from optparse import OptionParser
import exceptions
import errno
import re
import tempfile
import shutil
import virtinst
import app as koan
import sub_process as subprocess
import utils
def random_mac():
"""
from xend/server/netif.py
Generate a random MAC address.
Uses OUI 00-16-3E, allocated to
Xensource, Inc. Last 3 fields are random.
return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
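# Hedged illustration (not in the original koan source): random_mac() always
# begins with the Xensource OUI noted in the docstring, so a quick
# doctest-style check would be:
#
#     >>> mac = random_mac()
#     >>> mac.startswith("00:16:3e") and len(mac.split(":")) == 6
#     True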
def start_install(name=None,
ram=None,
disks=None,
mac=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None,
arch=None,
no_gfx=False,
fullvirt=True,
bridge=None,
virt_type=None,
virt_auto_boot=False):
vtype = "qemu"
if virtinst.util.is_kvm_capable():
vtype = "kvm"
arch = None # let virtinst.FullVirtGuest() default to the host arch
elif virtinst.util.is_kqemu_capable():
vtype = "kqemu"
print "- using qemu hypervisor, type=%s" % vtype
if arch is not None and arch.lower() in ["x86","i386"]:
arch = "i686"
guest = virtinst.FullVirtGuest(hypervisorURI="qemu:///system",type=vtype, arch=arch)
if not profile_data.has_key("file"):
# images don't need to source this
if not profile_data.has_key("install_tree"):
raise koan.InfoException("Cannot find install source in kickstart file, aborting.")
if not profile_data["install_tree"].endswith("/"):
profile_data["install_tree"] = profile_data["install_tree"] + "/"
# virt manager doesn't like nfs:// and just wants nfs:
# (which cobbler should fix anyway)
profile_data["install_tree"] = profile_data["install_tree"].replace("nfs://","nfs:")
if profile_data.has_key("file"):
# this is an image based installation
input_path = profile_data["file"]
print "- using image location %s" % input_path
if input_path.find(":") == -1:
# this is not an NFS path
guest.cdrom = input_path
else:
(tempdir, filename) = utils.nfsmount(input_path)
guest.cdrom = os.path.join(tempdir, filename)
kickstart = profile_data.get("kickstart","")
if kickstart != "":
# we have a (windows?) answer file we have to provide
# to the ISO.
print "I want to make a floppy for %s" % kickstart
floppy_path = utils.make_floppy(kickstart)
guest.disks.append(virtinst.VirtualDisk(device=virtinst.VirtualDisk.DEVICE_FLOPPY, path=floppy_path))
else:
guest.location = profile_data["install_tree"]
extra = extra.replace("&","&amp;")
guest.extraargs = extra
if profile_data.has_key("breed"):
breed = profile_data["breed"]
if breed != "other" and breed != "":
if breed in [ "debian", "suse", "redhat" ]:
guest.set_os_type("linux")
elif breed in [ "windows" ]:
guest.set_os_type("windows")
else:
guest.set_os_type("unix")
if profile_data.has_key("os_version"):
# FIXME: when os_version is not defined and it's linux, do we use generic24/generic26 ?
version = profile_data["os_version"]
if version != "other" and version != "":
try:
guest.set_os_variant(version)
except:
print "- virtinst library does not understand variant %s, treating as generic" % version
pass
guest.set_name(name)
guest.set_memory(ram)
guest.set_vcpus(vcpus)
# for KVM, we actually can't disable this, since it's the only
# console it has other than SDL
guest.set_graphics("vnc")
if uuid is not None:
guest.set_uuid(uuid)
for d in disks:
print "- adding disk: %s of size %s" % (d[0], d[1])
if d[1] != 0 or d[0].startswith("/dev"):
guest.disks.append(virtinst.VirtualDisk(d[0], size=d[1]))
else:
raise koan.InfoException("this virtualization type does not work without a disk image, set virt-size in Cobbler to non-zero")
if profile_data.has_key("interfaces"):
counter = 0
interfaces = profile_data["interfaces"].keys()
interfaces.sort()
vlanpattern = re.compile("[a-zA-Z0-9]+\.[0-9]+")
for iname in interfaces:
intf = profile_data["interfaces"][iname]
if intf["bonding"] == "master" or vlanpattern.match(iname) or iname.find(":") != -1:
continue
mac = intf["mac_address"]
if mac == "":
mac = random_mac()
if bridge is None:
profile_bridge = profile_data["virt_bridge"]
intf_bridge = intf["virt_bridge"]
if intf_bridge == "":
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
intf_bridge = profile_bridge
else:
if bridge.find(",") == -1:
intf_bridge = bridge
else:
bridges = bridge.split(",")
intf_bridge = bridges[counter]
nic_obj = virtinst.VirtualNetworkInterface(macaddr=mac, bridge=intf_bridge)
guest.nics.append(nic_obj)
counter = counter + 1
else:
if bridge is not None:
profile_bridge = bridge
else:
profile_bridge = profile_data["virt_bridge"]
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
nic_obj = virtinst.VirtualNetworkInterface(macaddr=random_mac(), bridge=profile_bridge)
guest.nics.append(nic_obj)
guest.start_install()
return "use virt-manager and connect to qemu to manage guest: %s" % name
|
ai-se/parGALE
|
epoal_src/parallelfeaturesplitGIA.py
|
Python
|
unlicense
| 33,034
| 0.009293
|
'''
Created on Nov 21, 2013
@author: ezulkosk
'''
from FeatureSplitConfig import ers_optional_names, bdb_optional_names, \
webportal_optional_names, eshop_optional_names, ers_config_split_names, \
webportal_config_split_names, eshop_config_split_names, bdb_config_split_names
from consts import METRICS_MAXIMIZE, METRICS_MINIMIZE
from npGIAforZ3 import GuidedImprovementAlgorithm, \
GuidedImprovementAlgorithmOptions
from src.FeatureSplitConfig import ers_better_config_names, \
eshop_better_config_names, webportal_better_config_names
from z3 import *
import argparse
import csv
import importlib
import itertools
import math
import multiprocessing
import operator
import os
import sys
import time
#from Z3ModelEmergencyResponseUpdateAllMin import *
#from Z3ModelWebPortal import *
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, totalTime,CurrentNotDomConstraints_queuelist, index, outputFileParentName, num_consumers, s, extraConstraint):
multiprocessing.Process.__init__(self)
s.add(extraConstraint)
self.task_queue = task_queue
self.result_queue = result_queue
self.CurrentNotDomConstraints_queuelist = CurrentNotDomConstraints_queuelist
self.totalTime = totalTime
self.index = index
self.outputFileParentName = outputFileParentName
self.num_consumers = num_consumers
# each group has an individual model and has two member consumers running on the model
self.groupid = self.index / 2
self.memberid= self.index % 2
# split the objective space in terms of num_groups = num_consumers / 2
# maximum 30 cores -> minimum 3 degrees, so we use range [degree, degree)
num_groups = self.num_consumers / 2
degree = 90.0 / num_groups
# radian = degree * math.pi / 180.0
self.GIAOptions = GuidedImprovementAlgorithmOptions(verbosity=0, \
incrementallyWriteLog=False, \
writeTotalTimeFilename="timefile.csv", \
writeRandomSeedsFilename="randomseed.csv", useCallLogs=False)
self.GIAAlgorithm = GuidedImprovementAlgorithm(s, metrics_variables, \
metrics_objective_direction, FeatureVariable, options=self.GIAOptions)
self.count_sat_calls = 0
self.count_unsat_calls = 0
self.count_paretoPoints = 0
self.startTime = time.time()
def run(self):
while True:
if self.task_queue[self.groupid].empty() == True:
break
else:
next_task = self.task_queue[self.groupid].get(False)
if next_task is None:
self.task_queue[self.groupid].task_done()
self.totalTime.put(str(time.time()-self.startTime))
outputFileChild = open(str(str(self.outputFileParentName)+'C'+str(self.index)+'.csv'), 'a')
try:
outputFileChild.writelines(str(self.index)+','+
str(self.count_paretoPoints) + ',' +
str(self.count_sat_calls) + ',' +
str(self.count_unsat_calls) + ',' +
str(time.time()-self.startTime) +',' +
'\n')
finally:
outputFileChild.close()
break
# execute a task, i.e., find a Pareto point
# 1) update CurrentNotDomConstraints
while self.CurrentNotDomConstraints_queuelist[self.index].empty() != True:
strconstraintlist = self.CurrentNotDomConstraints_queuelist[self.index].get()
ConvertedZ3ConstraintList = list()
for constraint in strconstraintlist:
constraintSplitList = []
if constraint.find('>') != -1:
constraintSplitList = constraint.split('>')
#print constraintSplitList
if constraintSplitList[1].find('/') != -1:
ConvertedZ3ConstraintList.append( Real(constraintSplitList[0].strip()) > RealVal(constraintSplitList[1].strip()))
else:
ConvertedZ3ConstraintList.append( Int(constraintSplitList[0].strip()) > IntVal(constraintSplitList[1].strip()))
#print ConvertedZ3ConstraintList
else:
constraintSplitList = constraint.split('<')
#print constraintSplitList
if constraintSplitList[1].find('/') != -1:
ConvertedZ3ConstraintList.append( Real(constraintSplitList[0].strip()) < RealVal(constraintSplitList[1].strip()))
else:
ConvertedZ3ConstraintList.append( Int(constraintSplitList[0].strip()) < IntVal(constraintSplitList[1].strip()))
#print ConvertedZ3ConstraintList
#print Or(ConvertedZ3ConstraintList)
tmpNotDominatedByNextParetoPoint = Or(ConvertedZ3ConstraintList)
#print tmpNotDominatedByNextParetoPoint
self.GIAAlgorithm.s.add(tmpNotDominatedByNextParetoPoint)
# 2) if find all Pareto points, add a poison pill; otherwise find a Pareto point
start_time = time.time()
if self.GIAAlgorithm.s.check() != sat:
self.count_unsat_calls += 1
self.task_queue[self.groupid].put(None)
else:
self.count_sat_calls += 1
self.task_queue[self.groupid].put("Task")
prev_solution = self.GIAAlgorithm.s.model()
self.GIAAlgorithm.s.push()
NextParetoPoint, local_count_sat_calls, local_count_unsat_calls = self.GIAAlgorithm.ranToParetoFront(prev_solution)
end_time = time.time()
self.count_sat_calls += local_count_sat_calls
self.count_unsat_calls += local_count_unsat_calls
self.count_paretoPoints += 1
# RecordPoint
strNextParetoPoint = list((d.name(), str(NextParetoPoint[d])) for d in NextParetoPoint.decls())
if RECORDPOINT:
strNextParetoPoint = list((d.name(), str(NextParetoPoint[d])) for d in NextParetoPoint.decls())
outputFileChild = open(str(str(self.outputFileParentName)+'C'+str(self.index)+'.csv'), 'a')
try:
outputFileChild.writelines(str(self.index)+','+
str(self.count_paretoPoints) + ',' +
str(self.count_sat_calls) + ',' +
str(end_time-start_time) +',' +
str(strNextParetoPoint) +',' +
'\n')
finally:
outputFileChild.close()
self.GIAAlgorithm.s.pop()
tmpNotDominatedByNextParetoPoint = self.GIAAlgorithm.ConstraintNotDominatedByX(NextParetoPoint)
self.GIAAlgorithm.s.add(tmpNotDominatedByNextParetoPoint)
# picklize and store Pareto point and constraints
self.result_queue.put(strNextParetoPoint)
constraintlist = self.GIAAlgorithm.Etra
|
lbybee/vc_network_learning_project
|
code/gen_load_data.py
|
Python
|
gpl-2.0
| 177
| 0
|
import load_data as ld
import sys
import os
f_list = os.listdir(sys.argv[1])
data = ld.loadIntoPandas(ld.processAllDocuments(sys.argv[1], f_list))
data.to_pickle(sys.argv[2])
|
denisbalyko/checkio-solution
|
gcd.py
|
Python
|
mit
| 635
| 0.001575
|
from fractions import gcd
def greatest_common_divisor(*args):
args = list(args)
a, b = args.pop(), args.pop()
gcd_local = gcd(a, b)
while len(args):
gcd_local = gcd(gcd_local, args.pop())
return gcd_local
def test_function():
assert greatest_common_divisor(6, 10, 15) == 1, "12"
assert greatest_common_divisor(6, 4) == 2, "Simple"
assert greatest_common_divisor(2, 4, 8) == 2, "Three arguments"
assert greatest_common_divisor(2, 3, 5, 7, 11) == 1, "Prime numbers"
assert greatest_common_divisor(3, 9, 3, 9) == 3, "Repeating arguments"
if __name__ == '__main__':
test_function()
|
angelapper/odoo
|
addons/account/models/account_bank_statement.py
|
Python
|
agpl-3.0
| 47,237
| 0.004573
|
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'coin_value'
_order = 'coin_value'
@api.one
@api.depends('coin_value', 'number')
def _sub_total(self):
""" Calculates Sub total"""
self.subtotal = self.coin_value * self.number
coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
cashbox_id = fields.Many2one('account.bank.statement.cashbox', string="Cashbox")
class AccountBankStmtCashWizard(models.Model):
"""
Account Bank Statement popup that allows entering cash details.
"""
_name = 'account.bank.statement.cashbox'
_description = 'Account Bank Statement Cashbox Details'
cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('bank_statement_id', False) or self.env.context.get('active_id', False)
bnk_stmt = self.env['account.bank.statement'].browse(bnk_stmt_id)
total = 0.0
for lines in self.cashbox_lines_ids:
total += lines.subtotal
if self.env.context.get('balance', False) == 'start':
#starting balance
bnk_stmt.write({'balance_start': total, 'cashbox_start_id': self.id})
else:
#closing balance
bnk_stmt.write({'balance_end_real': total, 'cashbox_end_id': self.id})
return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
"""
Account Bank Statement wizard that check that closing balance is correct.
"""
_name = 'account.bank.statement.closebalance'
_description = 'Account Bank Statement closing balance'
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('active_id', False)
if bnk_stmt_id:
self.env['account.bank.statement'].browse(bnk_stmt_id).button_confirm_bank()
return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
@api.one
@api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
def _end_balance(self):
self.total_entry_encoding = sum([line.amount for line in self.line_ids])
self.balance_end = self.balance_start + self.total_entry_encoding
self.difference = self.balance_end_real - self.balance_end
@api.multi
def _is_difference_zero(self):
for bank_stmt in self:
bank_stmt.is_difference_zero = float_is_zero(bank_stmt.difference, precision_digits=bank_stmt.currency_id.decimal_places)
@api.one
@api.depends('journal_id')
def _compute_currency(self):
self.currency_id = self.journal_id.currency_id or self.company_id.currency_id
@api.one
@api.depends('line_ids.journal_entry_ids')
def _check_lines_reconciled(self):
self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])
@api.model
def _default_journal(self):
journal_type = self.env.context.get('journal_type', False)
company_id = self.env['res.company']._company_default_get('account.bank.statement').id
if journal_type:
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
return journals[0]
return False
@api.multi
def _get_opening_balance(self, journal_id):
last_bnk_stmt = self.search([('journal_id', '=', journal_id)], limit=1)
if last_bnk_stmt:
return last_bnk_stmt.balance_end
return 0
@api.multi
def _set_opening_balance(self, journal_id):
self.balance_start = self._get_opening_balance(journal_id)
@api.model
def _default_opening_balance(self):
#Search last bank statement and set current opening balance as closing balance of previous one
journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
if journal_id:
return self._get_opening_balance(journal_id)
return 0
_name = "account.bank.statement"
_description = "Bank Statement"
_order = "date desc, id desc"
_inherit = ['mail.thread']
name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False,
default=fields.Date.context_today)
date_done = fields.Datetime(string="Closed On")
balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency', string="Currency")
journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
cashbox_start_id = fields.Many2one('account.bank.statement.cashbox', string="Starting Cashbox")
cashbox_end_id = fields.Many2one('account.bank.statement.cashbox', string="Ending Cashbox")
is_difference_zero = fields.Boolean(compute='_is_difference_zero', string='Is zero', help="Check if difference is zero.")
@api.onchange('journal_id')
def onchange_journal_id(self):
self._set_opening_balance(self.journal_id.id)
@api.multi
def _balance_check(self):
for stmt in self:
if not stmt.currency_id.is_zero(stmt.difference):
if stmt.journal_type == 'cash':
if stmt.difference < 0.0:
account = stmt.journal_id.loss_account_id
name = _('Loss')
else:
# statement.difference > 0.0
account = stmt.journal_id.profit_account_id
name = _('Profit')
if not account:
r
|
cnelson/python-fleet
|
fleet/v1/errors.py
|
Python
|
apache-2.0
| 1,594
| 0.002509
|
class APIError(Exception):
"""Represents an error returned in a response to a fleet API call
This exception will be raised any time a response code >= 400 is returned
Attributes:
code (int): The response code
message(str): The message included with the error response
http_error(googleapiclient.errors.HttpError): The underlying exception that caused this exception to be raised
If you need access to the raw response, this is where you'll find
it.
"""
def __init__(self, code, message, http_error):
"""Construct an exception representing an error returned by fleet
Args:
code (int): The response code
message(str): The message included with the error response
http_error(googleapiclient.errors.HttpError): The underlying exception that caused this exception
to be raised.
"""
self.code = code
self.message = message
self.http_error = http_error
def __str__(self):
# Return a string like r'Some bad thing happened(400)'
return '{1} ({0})'.format(
self.code,
self.message
)
def __repr__(self):
# Retun a string like r'<Fleetv1Error; Code: 400; Message: Some bad thing happened>'
return '<{0}; Code: {1}; Message: {2}>'.format(
self.__class__.__name__,
self.code,
self.message
)
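# Hedged usage sketch (not part of the original fleet/v1/errors.py): calling code
# typically surfaces the attributes documented above; `client.list_units()` is a
# hypothetical fleet API call standing in for any method that may raise APIError.
#
# try:
#     units = client.list_units()
# except APIError as exc:
#     print('fleet returned {0}: {1}'.format(exc.code, exc.message))
#     raise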
|
johnjohnlin/nicotb
|
sim/ahb/Ahb_test.py
|
Python
|
gpl-3.0
| 2,569
| 0.024912
|
# Copyright (C) 2018, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb import *
from nicotb.utils import Scoreboard, BusGetter
from nicotb.protocol import Ahb
import operator as op
import numpy as np
from os import getenv
def main():
N = 10
scb = Scoreboard()
test = scb.GetTest("ahb", ne=op.ne, max_err=10)
bg = BusGetter(callbacks=[test.Get])
ms = Ahb.Master(hsel, haddr, hwrite, htrans, hsize, hburst, hready, hresp, rd, wd, ck_ev)
yield rs_ev
for i in range(10):
yield ck_ev
def rng(magic):
while True:
magic = (magic*199 + 12345) & 65535
yield magic
r = rng(25251)
MAGIC = next(r)
ADR = 0
print(
"Test Single R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
test.Expect(MAGIC)
yield from ms.Write(ADR, MAGIC)
read_v = yield from ms.Read(ADR)
test.Get(read_v)
yield ck_ev
MAGIC = next(r)
ADR = 100
print(
"Test Pipelined R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
wcmd = [(True, ADR+i*4, MAGIC+i) for i in range(N)]
rcmd = [(False, ADR+i*4) for i in range(N)]
test.Expect([MAGIC+i for i in range(N)])
read_v = yield from ms.IssueCommands(wcmd + rcmd)
test.Get(read_v)
yield ck_ev
MAGIC = next(r)
ADR = 200
print(
"Test Pipelined Interleaved R/W\n"
f"MAGIC/ADR is {MAGIC}/{ADR}"
)
wcmd = [(True, ADR+i*4, MAGIC+i) for i in range(N)]
rcmd = [(False, ADR+i*4) for i in range(N)]
cmd = [v for p in zip(wcmd, rcmd) for v in p]
test.Expect([MAGIC+i for i in range(N)])
read_v = yield from ms.IssueCommands(cmd)
test.Get(read_v)
for i in range(10):
yield ck_ev
wd, rd = CreateBuses([("wd",), ("rd",),])
hsel, haddr, hwrite, htrans, hsize, hburst, hready, hresp = CreateBuses([
(("u_dut", "HSEL"),),
(("u_dut", "HADDR"),),
(("u_dut", "HWRITE"),),
(("u_dut", "HTRANS"),),
(("u_dut", "HSIZE"),),
(("u_dut", "HBURST"),),
(("u_dut", "HREADY"),),
(("u_dut", "HRESP"),),
])
ck_ev, rs_ev = CreateEvents(["ck_ev", "rst_out",])
RegisterCoroutines([
main(),
])
|
ChinaMassClouds/copenstack-server
|
openstack/src/horizon-2014.2/horizon/browsers/breadcrumb.py
|
Python
|
gpl-2.0
| 1,803
| 0
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from horizon.utils import html
class Breadcrumb(html.HTMLElement):
def __init__(self, request, template, root,
subfolder_path, url, attr=None):
super(Breadcrumb, self).__init__()
self.template = template
self.request = request
self.root = root
self.subfolder_path = subfolder_path
self.url = url
self._subfolders = []
def get_subfolders(self):
if self.subfolder_path and not self._subfolders:
(parent, slash, folder) = self.subfolder_path.strip('/') \
.rpartition('/')
while folder:
path = "%s%s%s/" % (parent, slash, folder)
self._subfolders.insert(0, (folder, path))
(parent, slash, folder) = parent.rpartition('/')
return self._subfolders
def render(self):
"""Renders the table using the template from the table options."""
breadcrumb_template = template.loader.get_template(self.template)
extra_context = {"breadcrumb": self}
context = template.RequestContext(self.request, extra_context)
return breadcrumb_template.render(context)
|
MSEMJEJME/Get-Dumped
|
renpy/display/im.py
|
Python
|
gpl-2.0
| 45,147
| 0.005759
|
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains the new image code, which includes provisions for
# size-based caching and constructing images from operations (like
# cropping and scaling).
import renpy.display
import math
import zipfile
import cStringIO
import threading
import time
# This is an entry in the image cache.
class CacheEntry(object):
def __init__(self, what, surf):
# The object that is being cached (which needs to be
# hashable and comparable).
self.what = what
# The pygame surface corresponding to the cached object.
self.surf = surf
# The size of this image.
w, h = surf.get_size()
self.size = w * h
# The time when this cache entry was last used.
self.time = 0
# This is the singleton image cache.
class Cache(object):
def __init__(self):
# The current arbitrary time. (Increments by one for each
# interaction.)
self.time = 0
# A map from Image object to CacheEntry.
self.cache = { }
# A list of Image objects that we want to preload.
self.preloads = [ ]
# False if this is not the first preload in this tick.
self.first_preload_in_tick = True
# The total size of the current generation of images.
self.size_of_current_generation = 0
# The total size of everything in the cache.
self.total_cache_size = 0
# A lock that must be held when updating the above.
self.lock = threading.Condition()
# Is the preload_thread alive?
self.keep_preloading = True
# A map from image object to surface, only for objects that have
# been pinned into memory.
self.pin_cache = { }
# Images that we tried, and failed, to preload.
self.preload_blacklist = set()
# The size of the cache, in pixels.
self.cache_limit = 0
# The preload thread.
self.preload_thread = threading.Thread(target=self.preload_thread_main, name="preloader")
self.preload_thread.setDaemon(True)
self.preload_thread.start()
# Have we been added this tick?
self.added = set()
# A list of (time, filename, preload) tuples. This is updated when
# config.developer is True and an image is loaded. Preload is a
# flag that is true if the image was loaded from the preload
# thread. The log is limited to 100 entries, and the newest entry
# is first.
#
# This is only updated when config.developer is True.
self.load_log = [ ]
def init(self):
"""
Updates the cache object to make use of settings that might be provided
by the game-maker.
"""
self.cache_limit = renpy.config.image_cache_size * renpy.config.screen_width * renpy.config.screen_height
def quit(self): #@ReservedAssignment
if not self.preload_thread.isAlive():
return
self.lock.acquire()
self.keep_preloading = False
self.lock.notify()
self.lock.release()
self.preload_thread.join()
# Clears out the cache.
def clear(self):
self.lock.acquire()
self.preloads = [ ]
self.pin_cache = { }
self.cache = { }
self.first_preload_in_tick = True
self.size_of_current_generation = 0
self.total_cache_size = 0
self.added.clear()
self.lock.release()
# Increments time, and clears the list of images to be
# preloaded.
def tick(self):
with self.lock:
self.time += 1
self.preloads = [ ]
self.first_preload_in_tick = True
self.size_of_current_generation = 0
self.added.clear()
if renpy.config.debug_image_cache:
renpy.display.ic_log.write("----")
filename, line = renpy.exports.get_filename_line()
renpy.display.ic_log.write("%s %d", filename, line)
# The preload thread can deal with this update, so we don't need
# to lock things.
def end_tick(self):
self.preloads = [ ]
# This returns the pygame surface corresponding to the provided
# image. It also takes care of updating the age of images in the
# cache to be current, and maintaining the size of the current
# generation of images.
def get(self, image, predict=False):
if not isinstance(image, ImageBase):
raise Exception("Expected an image of some sort, but got" + str(image) + ".")
if not image.cache:
surf = image.load()
renpy.display.render.mutated_surface(surf)
return surf
ce = None
# First try to grab the image out of the cache without locking it.
if image in self.cache:
ce = self.cache[image]
# Now, grab the cache and try again. This deals with the case where the image
# was already in the middle of preloading.
if ce is None:
self.lock.acquire()
ce = self.cache.get(image, None)
if ce is not None:
self.lock.release()
# Otherwise, we keep the lock, and load the image ourselves.
if ce is None:
try:
if image in self.pin_cache:
surf = self.pin_cache[image]
else:
surf = image.load()
except:
self.lock.release()
raise
ce = CacheEntry(image, surf)
self.total_cache_size += ce.size
self.cache[image] = ce
# Indicate that this surface had changed.
renpy.display.render.mutated_surface(ce.surf)
if renpy.config.debug_image_cache:
if predict:
renpy.display.ic_log.write("Added %r (%.02f%%)", ce.what, 100.0 * self.total_cache_size / self.cache_limit)
else:
renpy.display.ic_log.write("Total Miss %r", ce.what)
renpy.display.draw.load_texture(ce.surf)
self.lock.release()
# Move it into the current generation. This isn't protected by
# a lock, so in certain circumstances we could have an
# inaccurate size. But that's pretty unlikely, as the
# preloading thread should never run at the same time as an
# actual load from the normal thread.
if ce.time != self.time:
ce.time = self.time
self.size_of_current_generation += ce.size
# Done... return the surface.
return ce.surf
# This kills off a given cache entry.
def kill(self, ce):
# Should never happen... but...
if ce.time == self.time:
self.size_of_current_genera
|
DBernardes/ProjetoECC
|
Eficiência_Quântica/Codigo/QE_reduceImgs_readArq.py
|
Python
|
mit
| 7,313
| 0.012585
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on November 19, 2016
@author: Denis Varise Bernardes & Eder Martioli
Description: this library provides the following functions:
mkDir_saveCombinedImages: through a call to LeArquivoReturnLista it obtains the list of all images acquired in the run;
it subtracts each pair of images, saving the result in a new 'Imagens_reduzidas' directory. Once that is done, the
function builds a list with the names of the new images by calling criaArquivo_listaImagensCombinadas.
readArqDetector: this function receives the name of the file containing the detector PAR2s, returning a vector with the measured values.
ImagemUnica_returnHeader: this function receives a single image from the list, returning its header so information can be extracted.
LeArquivoReturnLista: this function reads the listaImagens file generated by criaArq_listaImgInput, returning
a list with the image names.
criaArquivo_listaImagensCombinadas: this function creates a file named listaImagensCombinadas containing the names of the
combined images generated by mkDir_saveCombinedImages.
LeArqFluxoCamera: this function reads the 'Fluxo camera.dat' file generated by criaArqFluxoCamera, returning two
vectors with the flux values and the standard deviations.
LeArq_curvaCalibDetector: given the file name of the detector calibration curve and the number of the image set, this
function returns a vector containing the curve values if that option is provided; otherwise, the function returns a vector
containing the value 1.
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import astropy.io.fits as fits
import numpy as np
import os
from sys import exit
from math import sqrt
from geraArquivo import geraArquivo
def mkDir_saveCombinedImages(nImages, images_dir):
print('Criando diretorio: Mediana das Imagens')
#recebe uma lista de imagens de retorna um diretorio com as imagens combinadas
lista = LeArquivoReturnLista('listaImagens', images_dir)
n, i = 0, 0
VetorImagens = []
for i in range(len(lista)):
if i%nImages == nImages-1:
imagem = fits.getdata(images_dir + '\\' + lista[i])
VetorImagens.append(imagem)
os.chdir(chdir)
geraArquivo(VetorImagens, n)
os.chdir(cwd)
VetorImagens = []
n+=1
else:
imagem = fits.getdata(images_dir + '\\' + lista[i])
VetorImagens.append(imagem)
criaArquivo_listaImagensReduzidas()
return
def mkDir_ImgPair(tagPAR2, tagPAR1, ganho, images_dir):
print('Criando diretorio: Imagens reduzidas')
if not os.path.exists(images_dir + '\\' + 'Imagens_reduzidas'): os.makedirs(images_dir + '\\' + 'Imagens_reduzidas')
chdir = images_dir + '\\' + 'Imagens_reduzidas'
#recebe uma lista de imagens de retorna um diretorio com as imagens reduzidas de raios cosmicos e erro do shutter
listaPAR2 = LeArquivoReturnLista(tagPAR2+'List.txt', images_dir)
listaPAR1 = LeArquivoReturnLista(tagPAR1+'List.txt', images_dir)
VetorImagens = [[],[]]
i,n, string, VetorStdSignal = 0, 0, '', []
for i in range(len(listaPAR2)):
imagemPAR2 = fits.getdata(images_dir + '\\' + listaPAR2[i])[0].astype(float)
imagemPAR1 = fits.getdata(images_dir + '\\' + listaPAR1[i])[0].astype(float)
imgReducePAR = imagemPAR2 - imagemPAR1
VetorStdSignal.append(sqrt(sum(sum(imagemPAR2 + imagemPAR1))*ganho))
os.chdir(chdir)
if n < 10: string = '00%i'%(n)
if 10 <= n < 100: string = '0%i'%(n)
if n >= 100: string = '%i'%(n)
print('ImagemReduzida%s.fits'%(string))
fits.writeto('ImagemReduzida_%s.fits'%(string),imgReducePAR, overwrite=True)
os.chdir(images_dir)
VetorImagens = [[],[]]
n+=1
criaArquivo_StdDiffImagens(VetorStdSignal, images_dir)
criaArquivo_listaImagensReduzidas(images_dir)
return
def readArqDetector(name, images_dir):
valores=[]
with open(images_dir + '\\' + name) as arq:
Strvalores = arq.read().splitlines()
for valor in Strvalores[1:]:
valores.append(float(valor))
arq.close()
return valores
def ImagemUnica_returnHeader(tagPAR2, images_path):
with open(images_path + '\\' + tagPAR2+'List.txt') as arq:
imagem = arq.read().splitlines()[0].split(',')[0]
arq.close()
header = fits.getheader(images_path + '\\' + imagem)
return header
def LeArquivoReturnLista(arquivo, images_path):
with open(images_path + '\\' + arquivo) as arq:
lista = []
linhas = arq.read().splitlines()
for lin in linhas:
for img in lin.split(','):
lista.append(img)
arq.close()
return lista
def criaArquivo_listaImagensReduzidas(images_path):
nome = images_path + '\Imagens_reduzidas\listaImagensReduzidas'
try: File = open(nome,'w')
except:
nome.remove()
File = open(nome,'w')
listaImagemCombinada = os.listdir(images_path + '\Imagens_reduzidas')
listaImagemCombinada.sort()
for img in listaImagemCombinada:
if '.fits' in img:
File.write(img+'\n')
File.close()
def criaArquivo_StdDiffImagens(vetorStd, images_path):
nome = images_path + '\\' + 'StdDiffImages'
try: arq = open(nome,'w')
except:
nome.remove()
arq = open(nome,'w')
arq.write('-Desvio padrao das imagens reduzidas:\n')
for std in vetorStd:
arq.write(' \t\t ' + str(std) + '\n')
arq.close()
def LeArqFluxoCamera(images_path):
vetorFluxoCamera, vetorSigmaBackground_Signal = [],[]
with open(images_path + '\\' + 'Fluxo camera.dat') as arq:
listaValores = arq.read().splitlines()
for linha in listaValores[1:]:
Fluxo_e_Sigma = linha.split('\t\t\t')
vetorFluxoCamera.append(float(Fluxo_e_Sigma[0]))
vetorSigmaBackground_Signal.append(float(Fluxo_e_Sigma[1]))
return vetorFluxoCamera, vetorSigmaBackground_Signal
def LeArq_curvaCalibFiltroDensidade(nome, numeroImagens, images_path):
VetorPAR2s=[]
if nome != '':
with open(images_path + '\\' + nome) as arq:
linhas = arq.read().splitlines()
arq.close()
for PAR2 in linhas[1:]:
if PAR2 == '':continue
VetorPAR2s.append(float(PAR2))
else:
for i in range(numeroImagens):
VetorPAR2s.append(1)
return VetorPAR2s
def LeArq_curvaEQFabricante(name, images_path):
espectro, vetorEQ = [], []
with open(images_path + '\\' + name) as arq:
linhas = arq.read().splitlines()
arq.close()
for linha in linhas:
if linha == '':continue
valores = linha.split('\t')
espectro.append(float(valores[0]))
vetorEQ.append(float(valores[1]))
return vetorEQ, espectro
|
samdmarshall/xcparse
|
xcparse/Xcode/PBX/PBXLibraryReference.py
|
Python
|
bsd-3-clause
| 307
| 0.019544
|
import os
from .PBX_Base_Reference import *
from ...Helpers import path_helper
class PBXLibraryReference(PBX_Base_Reference):
def __init__(self, lookup_func, dictionary, project, identifier):
super(PBXLibraryReference, self).__init__(lookup_func, dictionary, project, identifier);
|
lgarren/spack
|
var/spack/repos/builtin/packages/fastqvalidator/package.py
|
Python
|
lgpl-2.1
| 2,230
| 0.000897
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastqvalidator(MakefilePackage):
"""The fastQValidator validates the format of fastq files."""
homepage = "http://genome.sph.umich.edu/wiki/FastQValidator"
url = "https://github.com/statgen/fastQValidator/archive/v0.1.1a.tar.gz"
version('2017-01-10', commit='6d619a34749e9d33c34ef0d3e0e87324ca77f320',
git='https://github.com/statgen/fastQValidator.git')
resource(
name='libStatGen',
git='https://github.com/statgen/libStatGen.git',
commit='9db9c23e176a6ce6f421a3c21ccadedca892ac0c'
)
@property
def build_targets(self):
return ['LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen'))]
@property
def install_targets(self):
return [
'INSTALLDIR={0}'.format(self.prefix.bin),
'LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen')),
'install'
]
|
iscarecrow/sb
|
server/wsgi.py
|
Python
|
mit
| 387
| 0.002584
|
"""
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Conedy/Conedy
|
testing/createNetwork/expected/sum_lattice.py
|
Python
|
gpl-2.0
| 68
| 0
|
00000 0 output/lattice.py.err
32074 1 output/lattice.py.out
|
anthonysandrin/kafka-utils
|
tests/kafka_cluster_manager/partition_count_balancer_test.py
|
Python
|
apache-2.0
| 26,033
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
import mock
import pytest
from .helper import broker_range
from kafka_utils.kafka_cluster_manager.cluster_info.error import RebalanceError
from kafka_utils.kafka_cluster_manager.cluster_info \
.partition_count_balancer import PartitionCountBalancer
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import calculate_partition_movement
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_broker_leader_counts
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_net_imbalance
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_replication_group_imbalance_stats
class TestPartitionCountBalancer(object):
@pytest.fixture
def create_balancer(self):
def build_balancer(cluster_topology, **kwargs):
args = mock.Mock(spec=Namespace)
args.balancer_args = []
args.configure_mock(**kwargs)
return PartitionCountBalancer(cluster_topology, args)
return build_balancer
def assert_valid(self, new_assignment, orig_assignment, orig_brokers):
"""Assert if new-assignment is valid based on given assignment.
Asserts the results for following parameters:
a) Asserts that keys in both assignments are same
b) Asserts that replication-factor of result remains same
c) Assert that new-replica-brokers are amongst given broker-list
"""
# Verify that partitions remain same
assert set(orig_assignment.keys()) == set(new_assignment.keys())
for t_p, new_replicas in new_assignment.iteritems():
orig_replicas = orig_assignment[t_p]
# Verify that new-replicas are amongst given broker-list
assert all([broker in orig_brokers for broker in new_replicas])
# Verify that replication-factor remains same
assert len(new_replicas) == len(orig_replicas)
def assert_leader_valid(self, orig_assignment, new_assignment):
"""Verify that new-assignment complies with just leader changes.
Following characteristics are verified for just leader-changes.
a) partitions remain same
b) replica set remains same
"""
# Partition-list remains unchanged
assert sorted(orig_assignment.keys()) == sorted(new_assignment.keys())
# Replica-set remains same
for partition, orig_replicas in orig_assignment.iteritems():
assert set(orig_replicas) == set(new_assignment[partition])
def test_rebalance_replication_groups(
self,
create_balancer,
create_cluster_topology,
default_assignment,
):
ct = create_cluster_topology()
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment is valid
self.assert_valid(
ct.assignment,
default_assignment,
ct.brokers.keys(),
)
def test_rebalance_replication_groups_balanced(
self,
create_balancer,
create_cluster_topology,
):
# Replication-group is already balanced
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(5))
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment same as previous
assert ct.assignment == assignment
def test_rebalance_replication_groups_error(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '9']), # broker 9 is not active
]
)
ct = create_cluster_topology(assignment, broker_range(5))
with pytest.raises(RebalanceError):
cb = create_balancer(ct)
cb.rebalance_replication_groups()
def test__rebalance_groups_partition_cnt_case1(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 6 partitions
# rg2 has 2 partitions
# Both rg's are balanced(based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '1', '2']),
((u'T1', 0), ['1']),
((u'T3', 0), ['1']),
((u'T2', 0), ['0', '1', '3']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Verify both replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains unaltered
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case2(
self,
create_balancer,
create_cluster_topology,
):
# 1 over-balanced, 2 under-balanced replication-groups
# rg1 has 4 partitions
# rg2 has 1 partition
# rg3 has 1 partition
# All rg's are balanced(based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '2']),
((u'T3', 1), ['0']),
((u'T3', 0), ['0']),
((u'T2', 0), ['0', '5']),
]
)
brokers = {
'0': mock.MagicMock(),
'2': mock.MagicMock(),
'5': mock.MagicMock(),
}
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance brokers
cb._rebalance_groups_partition_cnt()
# Verify all replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg3'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains 0
assert net_imbal == 0
|
annarev/tensorflow
|
tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py
|
Python
|
apache-2.0
| 5,557
| 0.007558
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
r"""Micro benchmark.
bazel run -c opt --config=cuda \
//third_party/tensorflow/python/ops/numpy_ops/benchmarks:micro_benchmarks -- \
--number=100 --repeat=100 \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
from absl import flags
from absl import logging
import numpy as np # pylint: disable=unused-import
import tensorflow.compat.v2 as tf
from tensorflow.python.ops import numpy_ops as tfnp # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import numpy_mlp
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import tf_numpy_mlp
FLAGS = flags.FLAGS
flags.DEFINE_integer('repeat', 100, '#Measurements per benchmark.')
flags.DEFINE_integer('number', 100, '#Runs per a measure.')
class MicroBenchmarks(tf.test.Benchmark):
"""Main micro benchmark class."""
def _benchmark_and_report(
self,
name,
fn,
repeat=None,
number=None):
"""Run fn repeat * number times, report time, and return fastest time."""
# Can't make these default above since the flags may not have been parsed
# at module import time.
repeat = repeat or int(FLAGS.repeat)
number = number or int(FLAGS.number)
# Warmup
fn()
times = []
for _ in range(repeat):
gc.disable()
start = time.time()
for _ in range(number):
fn()
times.append(time.time() - start)
gc.enable()
gc.collect()
# Regular benchmark to report numbers.
fastest_time_us = min(times) * 1e6 / number
total_time = sum(times)
self.report_benchmark(name=name,
wall_time=total_time,
extras={'fastest_time_us': fastest_time_us})
return fastest_time_us
def benchmark_tf_np_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def benchmark_tf_np_tf_function_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(
self._get_name(), tf.function(lambda: model.inference(x)))
def benchmark_numpy_mlp_inference_batch_1_cpu(self):
model = numpy_mlp.MLP()
x = np.random.uniform(size=(1, 10)).astype(np.float32, copy=False)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def _benchmark_np_and_tf_np(self, name, op, args, repeat=None): # pylint: disable=redefined-builtin
fn = getattr(np, op)
assert fn is not None
np_time = self._benchmark_and_report(
'{}_numpy'.format(name), lambda: fn(*args), repeat=repeat)
fn = getattr(tfnp, op)
assert fn is not None
with tf.device('CPU:0'):
tf_time = self._benchmark_and_report(
'{}_tfnp_cpu'.format(name), lambda: fn(*args), repeat=repeat)
return np_time, tf_time
def _print_times(self, op, sizes, times):
# For easy reporting.
print('For np.{}:'.format(op))
print('{:<15} {:>11} {:>11}'.format('Size', 'NP time', 'TF NP Time'))
for size, (np_time, tf_time) in zip(sizes, times):
print('{:<15} {:>10.5}us {:>10.5}us'.format(
str(size), np_time, tf_time))
print()
def _benchmark_np_and_tf_np_unary(self, op):
sizes = [(100,), (10000,), (1000000,)]
repeats = [FLAGS.repeat] * 2 + [10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(self._benchmark_np_and_tf_np(name, op, (x,), repeat))
self._print_times(op, sizes, times)
def benchmark_count_nonzero(self):
self._benchmark_np_and_tf_np_unary('count_nonzero')
def benchmark_log(self):
self._benchmark_np_and_tf_np_unary('log')
def benchmark_exp(self):
self._benchmark_np_and_tf_np_unary('exp')
def benchmark_tanh(self):
self._benchmark_np_and_tf_np_unary('tanh')
def benchmark_matmul(self):
sizes = [(2, 2), (10, 10), (100, 100), (200, 200), (1000, 1000)]
# Override repeat flag since this can be very slow.
repeats = [FLAGS.repeat] * 3 + [50, 10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(
self._benchmark_np_and_tf_np(name, 'matmul', (x, x), repeat=repeat))
self._print_times('matmul', sizes, times)
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
tf.enable_v2_behavior()
tf.test.main()
|