| repo_name (string, length 5–100) | path (string, length 4–231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, length 0–8.16k) | middle (string, length 3–512) | suffix (string, length 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
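This schema reads as a fill-in-the-middle (FIM) code corpus: each row carries a Python source file split into `prefix`, `middle`, and `suffix` spans, plus repository metadata and a quality `score`. As a minimal sketch (assuming PSM, i.e. prefix-suffix-middle, ordering; the sentinel tokens and the toy `row` dict are illustrative assumptions, not part of the dataset), one row could be assembled into a FIM training string like this:

```python
# Hypothetical FIM sentinels; real setups use whatever sentinel
# tokens the target model was trained with.
FIM_PREFIX = "<fim_prefix>"
FIM_SUFFIX = "<fim_suffix>"
FIM_MIDDLE = "<fim_middle>"

def to_fim_example(row):
    """Concatenate the prefix/suffix/middle columns in PSM order."""
    return (FIM_PREFIX + row["prefix"] +
            FIM_SUFFIX + row["suffix"] +
            FIM_MIDDLE + row["middle"])

# Toy row mirroring the schema above (spans shortened for illustration).
row = {"prefix": "def create(", "middle": "*args, ", "suffix": "**kwargs):"}
print(to_fim_example(row))
```

PSM ordering keeps the span to be predicted (`middle`) last, so a left-to-right model can condition on both surrounding contexts.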
| jeffrey4l/nova | nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py | Python | apache-2.0 | 13,826 | 0.000072 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
from webob import exc
from nova.api.openstack.compute import extensions
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v2
from nova import block_device
from nova.compute import api as compute_api
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
'osapi_v3')
self.no_bdm_v2_controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', '', 'osapi_v3')
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
self._setup_controller()
fake.stub_out_image_service(self.stubs)
self.bdm = [{
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake',
'device_name': 'vdb',
'delete_on_termination': False,
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False, override_controller=None):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dumps(body)
if override_controller:
override_controller.create(req, body=body).obj['server']
else:
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_block_device_mapping_disabled(self):
bdm = [{'device_name': 'foo'}]
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('block_device_mapping', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
self._test_create(params,
override_controller=self.no_bdm_v2_controller)
def test_create_instance_with_volumes_enabled_no_image(self):
"""Test that the create will fail if there is no image
and no bdms supplied in the request
"""
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, {}, no_image=True)
def test_create_instance_with_bdms_and_no_image(self):
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertThat(
block_device.BlockDeviceDict(self.bdm[0]),
matchers.DictMatches(kwargs['block_device_mapping'][0])
)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
compute_api.API._validate_bdm(
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(True)
compute_api.API._get_bdm_image_metadata(
mox.IgnoreArg(), mox.IgnoreArg(), False).AndReturn({})
self.mox.ReplayAll()
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self._test_create(params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_too_long(self):
self.bdm[0]['device_name'] = 'a' * 256
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_space_in_device_name(self):
self.bdm[0]['device_name'] = 'v da'
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_invalid_size(self):
self.bdm[0]['volume_size'] = 'hello world'
old_create = compute_api.API.create
def create(*args, **kwargs):
| ptitjes/quodlibet | quodlibet/ext/events/seekbar.py | Python | gpl-2.0 | 4,402 | 0 |
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import contextlib
from gi.repository import GObject, Gtk
from quodlibet import _
from quodlibet import app
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import Icons
from quodlibet.qltk.seekbutton import TimeLabel
from quodlibet.qltk.tracker import TimeTracker
from quodlibet.qltk import Align
from quodlibet.util import connect_destroy
class SeekBar(Gtk.Box):
def __init__(self, player, library):
super(SeekBar, self).__init__()
self._elapsed_label = TimeLabel()
self._remaining_label = TimeLabel()
scale = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL)
scale.set_adjustment(Gtk.Adjustment.new(0, 0, 0, 3, -15, 0))
scale.set_draw_value(False)
self._scale = scale
self.pack_start(Align(self._elapsed_label, border=6), False, True, 0)
self.pack_start(scale, True, True, 0)
self.pack_start(Align(self._remaining_label, border=6), False, True, 0)
for child in self.get_children():
child.show_all()
self._id = self._scale.connect(
'value-changed', self._on_user_changed, player)
self._scale.connect(
'value-changed', self._on_scale_value_changed, player)
self._tracker = TimeTracker(player)
self._tracker.connect('tick', self._on_tick, player)
connect_destroy(player, 'seek', self._on_player_seek)
connect_destroy(player, 'song-started', self._on_song_started)
connect_destroy(player, "notify::seekable", self._on_seekable_changed)
connect_destroy(
library, "changed", self._on_song_changed, player)
self.connect("destroy", self._on_destroy)
with self._inhibit():
self._update(player)
self._tracker.tick()
def _on_destroy(self, *args):
self._tracker.destroy()
@contextlib.contextmanager
def _inhibit(self):
with GObject.signal_handler_block(self._scale, self._id):
yield
def _on_user_changed(self, scale, player):
if player.seekable:
player.seek(scale.get_value() * 1000)
def _on_scale_value_changed(self, scale, player):
self._update(player)
def _on_tick(self, tracker, player):
position = player.get_position() // 1000
with self._inhibit():
self._scale.set_value(position)
def _on_seekable_changed(self, player, *args):
with self._inhibit():
self._update(player)
def _on_song_changed(self, library, songs, player):
if player.info in songs:
with self._inhibit():
self._update(player)
def _on_player_seek(self, player, song, ms):
with self._inhibit():
self._scale.set_value(ms // 1000)
self._update(player)
def _on_song_started(self, player, song):
with self._inhibit():
self._scale.set_value(0)
self._update(player)
def _update(self, player):
if player.info:
self._scale.set_range(0, player.info("~#length"))
else:
self._scale.set_range(0, 1)
if not player.seekable:
self._scale.set_value(0)
value = self._scale.get_value()
max_ = self._scale.get_adjustment().get_upper()
remaining = value - max_
self._elapsed_label.set_time(value)
self._remaining_label.set_time(remaining)
self._remaining_label.set_disabled(not player.seekable)
self._elapsed_label.set_disabled(not player.seekable)
self.set_sensitive(player.seekable)
class SeekBarPlugin(EventPlugin):
PLUGIN_ID = "SeekBar"
PLUGIN_NAME = _("Alternative Seek Bar")
PLUGIN_DESC = _("Alternative seek bar which is always visible and spans "
"the whole window width.")
PLUGIN_ICON = Icons.GO_JUMP
def enabled(self):
self._bar = SeekBar(app.player, app.librarian)
self._bar.show()
app.window.set_seekbar_widget(self._bar)
def disabled(self):
app.window.set_seekbar_widget(None)
self._bar.destroy()
del self._bar
| nrgaway/qubes-core-admin | qubes/vm/templatevm.py | Python | gpl-2.0 | 4,118 | 0.001457 |
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2014-2016 Wojtek Porczyk <woju@invisiblethingslab.com>
# Copyright (C) 2016 Marek Marczykowski <marmarek@invisiblethingslab.com>
# Copyright (C) 2016 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the TemplateVM implementation '''
import qubes
import qubes.config
import qubes.vm.qubesvm
import qubes.vm.mix.net
from qubes.config import defaults
from qubes.vm.qubesvm import QubesVM
class TemplateVM(QubesVM):
'''Template for AppVM'''
dir_path_prefix = qubes.config.system_path['qubes_templates_dir']
@property
def appvms(self):
''' Returns a generator containing all domains based on the current
TemplateVM.
'''
for vm in self.app.domains:
if hasattr(vm, 'template') and vm.template is self:
yield vm
netvm = qubes.VMProperty('netvm', load_stage=4, allow_none=True,
default=None,
# pylint: disable=protected-access
setter=qubes.vm.qubesvm.QubesVM.netvm._setter,
doc='VM that provides network connection to this domain. When '
'`None`, machine is disconnected.')
def __init__(self, *args, **kwargs):
assert 'template' not in kwargs, "A TemplateVM can not have a template"
self.volume_config = {
'root': {
'name': 'root',
'snap_on_start': False,
'save_on_stop': True,
'rw': True,
'source': None,
'size': defaults['root_img_size'],
},
'private': {
'name': 'private',
'snap_on_start': False,
'save_on_stop': True,
'rw': True,
'source': None,
'size': defaults['private_img_size'],
'revisions_to_keep': 0,
},
'volatile': {
'name': 'volatile',
'size': defaults['root_img_size'],
'snap_on_start': False,
'save_on_stop': False,
'rw': True,
},
'kernel': {
'name': 'kernel',
'snap_on_start': False,
'save_on_stop': False,
'rw': False
}
}
super(TemplateVM, self).__init__(*args, **kwargs)
@qubes.events.handler('property-set:default_user',
'property-set:kernel',
'property-set:kernelopts',
'property-set:vcpus',
'property-set:memory',
'property-set:maxmem',
'property-set:qrexec_timeout',
'property-set:shutdown_timeout',
'property-set:management_dispvm')
def on_property_set_child(self, _event, name, newvalue, oldvalue=None):
"""Send event about default value change to child VMs
(which use default inherited from the template).
This handler is supposed to be set for properties using
`_default_with_template()` function for the default value.
"""
if newvalue == oldvalue:
return
for vm in self.appvms:
if not vm.property_is_default(name):
continue
vm.fire_event('property-reset:' + name, name=name)
| MattPerron/esper | esper/query/management/commands/score.py | Python | apache-2.0 | 14,310 | 0.004892 |
from __future__ import print_function
from __future__ import division
from django.core.management.base import BaseCommand
from query.models import Video, Face, LabelSet, Frame
from scannerpy import ProtobufGenerator, Config
import os
import cv2
import math
import numpy as np
import tensorflow as tf
import align.detect_face
from collections import defaultdict
from array import *
from functools import wraps
import inspect
cfg = Config()
proto = ProtobufGenerator(cfg)
def initializer(func):
"""
Automatically assigns the parameters.
>>> class process:
... @initializer
... def __init__(self, cmd, reachable=False, user='root'):
... pass
>>> p = process('halt', True)
>>> p.cmd, p.reachable, p.user
('halt', True, 'root')
"""
names, varargs, keywords, defaults = inspect.getargspec(func)
@wraps(func)
def wrapper(self, *args, **kargs):
for name, arg in list(zip(names[1:], args)) + list(kargs.items()):
setattr(self, name, arg)
for name, default in zip(reversed(names), reversed(defaults)):
if not hasattr(self, name):
setattr(self, name, default)
func(self, *args, **kargs)
return wrapper
class VideoEvalStats(object):
@initializer
def __init__(self, video_id = 0, num_frames=0, tp_frames=0, fp_frames=0, fn_frames=0, mismatched_tp_frames=0, num_detections=0, tp_detections=0, fp_detections=0, fn_detections=0, num_males=0, num_females=0, gender_matches=0, male_mismatches=0, female_mismatches=0):
pass
def compute_precision_recall(self, tp, fp, fn):
if (tp + fp) != 0:
precision = tp / (tp + fp)
else:
precision = 0.0
if (tp + fn) != 0:
recall = tp / (tp + fn)
else:
recall = 0.0
return (precision, recall)
def compute_frame_acc_stats(self):
return self.compute_precision_recall(self.tp_frames, self.fp_frames, self.fn_frames)
def compute_det_acc_stats(self):
(det_precision, det_recall) = self.compute_precision_recall(self.tp_detections, self.fp_detections, self.fn_detections)
return (det_precision, det_recall)
def compute_gender_acc_stats(self):
if self.tp_detections != 0:
gender_precision = self.gender_matches / (self.num_males + self.num_females)
else:
gender_precision = 1.0
return gender_precision
def __str__(self):
frame_stats = "Video({})[FRAME SELECTION]: num_frames({}), tp({}), fp({}), fn({})".format(self.video_id, self.num_frames, self.tp_frames, self.fp_frames, self.fn_frames)
frame_acc_stats = "Video({})[FRAME SELECTION]: Frame selection precision({}), Frame selection recall({})".format(self.video_id, *self.compute_frame_acc_stats())
det_stats = "Video({})[DETECTION]: num_detections({}), tp({}), fp({}), fn({}), mismatched_frames({})".format(self.video_id, self.num_detections, self.tp_detections, self.fp_detections, self.fn_detections, self.mismatched_tp_frames)
det_acc_stats = "Video({})[DETECTION]: Detection precision({}), Detection recall({})".format(self.video_id, *self.compute_det_acc_stats())
gender_stats = "Video({})[GENDER]: males({}), females({}), gender_matches({}), male_mismatches({}), female_mismatches({})".format(self.video_id, self.num_males, self.num_females, self.gender_matches, self.male_mismatches, self.female_mismatches)
gender_acc_stats = "Video({})[GENDER]: Gender precision({})".format(self.video_id, self.compute_gender_acc_stats())
return frame_stats + "\n" + frame_acc_stats + "\n" + det_stats + "\n" + det_acc_stats + "\n" + gender_stats + "\n" + gender_acc_stats
def __add__(self, other):
num_frames = self.num_frames + other.num_frames
# frame selection
tp_frames = self.tp_frames + other.tp_frames
fp_frames = self.fp_frames + other.fp_frames
fn_frames = self.fn_frames + other.fn_frames
# face detection
num_detections = self.num_detections + other.num_detections
mismatched_tp_frames = self.mismatched_tp_frames + other.mismatched_tp_frames
tp_detections = self.tp_detections + other.tp_detections
fp_detections = self.fp_detections + other.fp_detections
fn_detections = self.fn_detections + other.fn_detections
# gender detection
num_males = self.num_males + other.num_males
num_females = self.num_females + other.num_females
gender_matches = self.gender_matches + other.gender_matches
male_mismatches = self.male_mismatches + other.male_mismatches
female_mismatches = self.female_mismatches + other.female_mismatches
return VideoEvalStats(self.video_id, num_frames, tp_frames, fp_frames, fn_frames, mismatched_tp_frames, num_detections, tp_detections, fp_detections, fn_detections, num_males, num_females, gender_matches, male_mismatches, female_mismatches)
class VideoStats(object):
@initializer
def __init__(self, video_id = 0, num_frames=0, selected_frames=0, num_detections=0, num_males=0, num_females=0):
pass
def __str__(self):
stats = "Video({}): num_frames({}), selected_frames({}), num_detections({}), num_males({}), num_females({})".format(self.video_id, self.num_frames, self.selected_frames, self.num_detections, self.num_males, self.num_females)
return stats
def __add__(self, other):
num_frames = self.num_frames + other.num_frames
selected_frames = self.selected_frames + other.selected_frames
num_detections = self.num_detections + other.num_detections
num_males = self.num_males + other.num_males
num_females = self.num_females + other.num_females
return VideoStats(self.video_id, num_frames, selected_frames, num_detections, num_males, num_females)
class Command(BaseCommand):
help = 'Detect faces in videos'
def add_arguments(self, parser):
parser.add_argument('command')
def bbox_area(self, bbox, video):
return ((bbox.x2 - bbox.x1)*video.width) * \
((bbox.y2 - bbox.y1)*video.height)
def compute_iou(self, bbox1, bbox2, video):
int_x1=max(bbox1.x1, bbox2.x1)
int_y1=max(bbox1.y1, bbox2.y1)
int_x2=min(bbox1.x2, bbox2.x2)
int_y2=min(bbox1.y2, bbox2.y2)
int_area = 0.0
if(int_x2 > int_x1 and int_y2 > int_y1):
int_area = ((int_x2 - int_x1)*video.width) * \
((int_y2 - int_y1)*video.height)
iou = int_area/(self.bbox_area(bbox1, video)+self.bbox_area(bbox2, video)-int_area)
return iou
def remove_duplicates(self, l):
s = set()
return [x for x in l
if x not in s and not s.add(x)]
def fetch_ground_truth(self, video, label = "Talking Heads"):
g_labelset = video.handlabeled_labelset() # ground truth
#g_faces = Face.objects.filter(frame__labelset=g_labelset).prefetch_related('frame').all()
g_faces = Face.objects.filter(frame__labelset=g_labelset, frame__labels__name="Talking Heads").prefetch_related('frame').all()
ground_truth_frames = []
g_faces_dict = defaultdict(list)
for g_face in g_faces:
g_faces_dict[g_face.frame.number].append(g_face)
ground_truth_frames.append(g_face.frame.number)
ground_truth_frames = self.remove_duplicates(ground_truth_frames)
return (ground_truth_frames, g_faces_dict)
def fetch_automatic_detections(self, video, label = "Talking Heads"):
d_labelset = video.detected_labelset() # prediction
#d_faces = Face.objects.filter(frame__labelset=d_labelset).prefetch_related('frame').all()
#d_faces = Face.objects.filter(frame__labelset=d_labelset, frame__number__in=ground_truth_frames).prefetch_related('frame').all()
d_faces = Face.objects.filter(frame__labelset=d_labelset).prefetch_related('frame').all()
detected_frames = []
d_faces_dict = defaultdict(list)
# metrics for automatic detection of frames with "talking heads
| reingart/gui2py | gui/event.py | Python | lgpl-3.0 | 6,795 | 0.003974 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's Event Model (encapsulates wx.Event)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# Initial implementation was inspired by PythonCard's event module, although
# it was almost completely discarded and re-written from scratch to make it
# simpler and mimic web (html/javascript) event models
# References
# https://developer.mozilla.org/en-US/docs/Mozilla_event_reference
# http://wxpython.org/docs/api/wx.Event-class.html
import time
class Event:
"Generic Event Object: holds actual event data (created by EventHandler)"
cancel_default = False # flag to avoid default (generic) handlers
def __init__(self, name="", wx_event=None):
self.wx_event = wx_event
# retrieve wxPython event properties:
wx_obj = self.wx_event.GetEventObject()
# look for gui object (compound control with wx child controls)
while wx_obj and not hasattr(wx_obj, "obj"):
wx_obj = wx_obj.Parent
self.target = wx_obj.obj if wx_obj else None
self.timestamp = wx_event.GetTimestamp()
# check the timestamp (wx reports it only for mouse or keyboard)
if not self.timestamp:
self.timestamp = time.time() # create a new timestamp if not given
self.name = name # name (type), i.e.: "click"
def prevent_default(self, cancel=True):
self.wx_event.Skip(not cancel)
self.cancel_default = cancel
def stop_propagation(self):
self.wx_event.StopPropagation()
class UIEvent(Event):
"General -window- related events (detail can hold additional data)"
names = ["load", "resize", "scroll", "paint", "unload"]
def __init__(self, name, detail=None, wx_event=None):
Event.__init__(self, name, wx_event)
self.detail = detail
# get the top level window:
obj = self.target
while obj and obj.parent:
obj = obj.get_parent()
self.window = obj
def prevent_default(self):
if self.name == 'unload':
if self.wx_event.CanVeto():
self.wx_event.Veto()
else:
raise RuntimeError("Cannot Veto!")
else:
Event.prevent_default(self) # call default implementation
class FocusEvent(Event):
"Focus related events"
names = ["focus", "blur"]
class FormEvent(UIEvent):
"Form HTML-like events "
names = ["select", "change", "reset", "submit", "invalid"]
cancel_default = True # command events should not escalate
class SubmitEvent(FormEvent):
"Form submission handler (includes HTML form data and field contents)"
def __init__(self, name, wx_event=None):
Event.__init__(self, name, wx_event)
self.form = wx_event.form
self.data = wx_event.data
class MouseEvent(Event):
"Mouse related events (wrapper for wx.MouseEvent)"
names = ["click", "dblclick", "mousedown", "mousemove",
"mouseout", "mouseover", "mouseup", "mousewheel"]
def __init__(self, name, wx_event=None):
Event.__init__(self, name, wx_event)
self.x = wx_event.GetX()
self.y = wx_event.GetY()
self.alt_key = wx_event.AltDown()
self.ctrl_key = wx_event.ControlDown()
self.shift_key = wx_event.ShiftDown()
self.meta_key = wx_event.MetaDown()
self.left_button = wx_event.LeftIsDown()
self.right_button = wx_event.RightIsDown()
self.middle_button = wx_event.MiddleIsDown()
if name=="mousewheel":
self.wheel_delta = wx_event.GetWheelDelta()
class KeyEvent(Event):
"Keyboard related event (wrapper for wx.KeyEvent)"
# only sent to the widget that currently has the keyboard focus
names = "onkeypress", "onkeydown", "onkeyup",
def __init__(self, name, wx_event=None):
Event.__init__(self, name, wx_event)
self.ctrl_key = wx_event.ControlDown()
self.shift_key = wx_event.ShiftDown()
self.alt_key = wx_event.AltDown()
self.meta_key = wx_event.MetaDown()
self.key = wx_event.KeyCode # virtual key code value
self.char = unichr(wx_event.GetUnicodeKey()) # Unicode character
class TimingEvent(Event):
"Time interval events"
names = ["idle", "timer"]
def __init__(self, name, interval=None, wx_event=None):
Event.__init__(self, name, wx_event)
self.interval = interval
def request_more(self):
self.wx_event.RequestMore(needMore=True)
class HtmlLinkEvent(UIEvent):
"Html hyperlink click event (href and target)"
def __init__(self, name, detail=None, wx_event=None):
UIEvent.__init__(self, name, wx_event=wx_event,
detail=wx_event.GetLinkInfo().GetHtmlCell())
self.href = wx_event.GetLinkInfo().GetHref()
self.target = wx_event.GetLinkInfo().GetTarget()
class HtmlCellEvent(MouseEvent):
"Html Cell click / hover events"
def __init__(self, name, detail=None, wx_event=None):
MouseEvent.__init__(self, name, wx_event.GetMouseEvent())
self.detail = wx_event.GetCell()
self.point = wx_event.GetPoint()
class HtmlCtrlClickEvent(UIEvent):
"Html Control click "
def __init__(self, name, detail=None, wx_event=None):
UIEvent.__init__(self, name, wx_event=wx_event,
detail=wx_event.ctrl)
class TreeEvent(UIEvent):
"Tree Control events (detail has the selected/extended/collapsed item)"
def __init__(self, name, detail=None, wx_event=None):
wx_tree = wx_event.GetEventObject()
model = wx_tree.obj.items
wx_item = wx_event.GetItem()
if not wx_item.IsOk():
wx_item = wx_tree.GetSelection()
UIEvent.__init__(self, name, wx_event=wx_event,
detail=model(wx_item))
class GridEvent(UIEvent):
"Grid Control events (mouse, size, edit, etc.)"
def __init__(self, name, detail=None, wx_event=None):
wx_grid = wx_event.GetEventObject()
model = wx_grid.obj.items
try:
self.row = wx_event.GetRow()
self.col = wx_event.GetCol()
self.position = wx_event.GetPosition()
except:
pass
UIEvent.__init__(self, name, wx_event=wx_event,
detail=model[self.row][self.col])
WIDGET_EVENTS = MouseEvent, FocusEvent, TimingEvent
| harisbal/pandas | pandas/core/apply.py | Python | bsd-3-clause | 12,744 | 0 |
import warnings
import numpy as np
from pandas import compat
from pandas._libs import reduction
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
is_dict_like,
is_list_like,
is_sequence)
from pandas.util._decorators import cache_readonly
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, 'reduce', 'broadcast', 'expand']:
raise ValueError("invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}")
if broadcast is not None:
warnings.warn("The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if broadcast:
result_type = 'broadcast'
if reduce is not None:
warnings.warn("The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if reduce:
if result_type is not None:
raise ValueError(
"cannot pass both reduce=True and result_type")
result_type = 'reduce'
self.result_type = result_type
# curry if needed
if ((kwds or args) and
not isinstance(func, (np.ufunc, compat.string_types))):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, compat.string_types):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = compat.signature(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.f(self.values)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result,
index=self.index,
columns=self.columns)
else:
return self.obj._constructor_sliced(result,
index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(result_values,
index=target.index,
columns=target.columns)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (self.result_type in ['reduce', None] and
not self.dtypes.apply(is_extension_type).any()):
# Create a dummy Series from an empty array
from pandas import Series
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
result = reduction.re
| census-instrumentation/opencensus-python | tests/unit/stats/test_measure_to_view_map.py | Python | apache-2.0 | 16,207 | 0 |
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.stats import measure_to_view_map as measure_to_view_map_module
from opencensus.stats.aggregation import CountAggregation
from opencensus.stats.measure import BaseMeasure, MeasureInt
from opencensus.stats.view import View
from opencensus.stats.view_data import ViewData
from opencensus.tags import tag_key as tag_key_module
METHOD_KEY = tag_key_module.TagKey("method")
REQUEST_COUNT_MEASURE = MeasureInt(
"request_count", "number of requests", "1")
REQUEST_COUNT_VIEW_NAME = "request_count_view"
COUNT = CountAggregation()
REQUEST_COUNT_VIEW = View(
REQUEST_COUNT_VIEW_NAME,
"number of requests broken down by methods",
[METHOD_KEY], REQUEST_COUNT_MEASURE, COUNT)
class TestMeasureToViewMap(unittest.TestCase):
@staticmethod
def _get_target_class():
return measure_to_view_map_module.MeasureToViewMap
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor(self):
measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
self.assertEqual({},
measure_to_view_map._measure_to_view_data_list_map)
self.assertEqual({}, measure_to_view_map._registered_views)
self.assertEqual({}, measure_to_view_map._registered_measures)
self.assertEqual(set(), measure_to_view_map.exported_views)
def test_get_view(self):
name = "testView"
description = "testDescription"
columns = mock.Mock()
measure = mock.Mock()
aggregation = mock.Mock()
view = View(
name=name,
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
timestamp = mock.Mock()
measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
measure_to_view_map._registered_views = {}
no_registered_views = measure_to_view_map.get_view(
view_name=name, timestamp=timestamp)
self.assertEqual(None, no_registered_views)
measure_to_view_map._registered_views = {name: view}
measure_to_view_map._measure_to_view_data_list_map = {
view.measure.name:
[ViewData(view=view, start_time=timestamp, end_time=timestamp)]
}
view_data = measure_to_view_map.get_view(
view_name=name, timestamp=timestamp)
self.assertIsNotNone(view_data)
measure_to_view_map._measure_to_view_data_list_map = {}
view_data = measure_to_view_map.get_view(
view_name=name, timestamp=timestamp)
self.assertIsNone(view_data)
measure_to_view_map._measure_to_view_data_list_map = {
view.measure.name: [
ViewData(
view=mock.Mock(), start_time=timestamp, end_time=timestamp)
]
}
view_data = measure_to_view_map.get_view(
view_name=name, timestamp=timestamp)
self.assertIsNone(view_data)
def test_filter_exported_views(self):
test_view_1_name = "testView1"
description = "testDescription"
columns = mock.Mock()
measure = mock.Mock()
aggregation = mock.Mock()
test_view_1 = View(
name=test_view_1_name,
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
test_view_2_name = "testView2"
test_view_2 = View(
name=test_view_2_name,
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
all_the_views = {test_view_1, test_view_2}
measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
views = measure_to_view_map.filter_exported_views(
all_views=all_the_views)
self.assertEqual(views, all_the_views)
def test_register_view(self):
name = "testView"
description = "testDescription"
columns = mock.Mock()
measure = MeasureInt("measure", "description", "1")
aggregation = mock.Mock()
view = View(
name=name,
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
timestamp = mock.Mock()
measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
measure_to_view_map._registered_views = {}
measure_to_view_map._registered_measures = {}
measure_to_view_map.register_view(view=view, timestamp=timestamp)
self.assertIsNone(measure_to_view_map.exported_views)
self.assertEqual(measure_to_view_map._registered_views[view.name],
view)
self.assertEqual(
measure_to_view_map._registered_measures[measure.name], measure)
self.assertIsNotNone(measure_to_view_map.
_measure_to_view_data_list_map[view.measure.name])
# Registers a view with an existing measure.
view2 = View(
name="testView2",
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
test_with_registered_measures = measure_to_view_map.register_view(
view=view2, timestamp=timestamp)
self.assertIsNone(test_with_registered_measures)
self.assertEqual(
measure_to_view_map._registered_measures[measure.name], measure)
# Registers a view with a measure that has the same name as an existing
# measure, but with different schema. measure2 and view3 should be
# ignored.
measure2 = MeasureInt("measure", "another measure", "ms")
view3 = View(
name="testView3",
description=description,
columns=columns,
measure=measure2,
aggregation=aggregation)
test_with_registered_measures = measure_to_view_map.register_view(
view=view3, timestamp=timestamp)
self.assertIsNone(test_with_registered_measures)
self.assertEqual(
measure_to_view_map._registered_measures[measure2.name], measure)
measure_to_view_map._registered_measures = {measure.name: None}
self.assertIsNone(
measure_to_view_map._registered_measures.get(measure.name))
measure_to_view_map.register_view(view=view, timestamp=timestamp)
# view is already registered, measure will not be registered again.
self.assertIsNone(
measure_to_view_map._registered_measures.get(measure.name))
self.assertIsNotNone(measure_to_view_map.
_measure_to_view_data_list_map[view.measure.name])
measure_to_view_map._registered_views = {name: view}
test_result_1 = measure_to_view_map.register_view(
view=view, timestamp=timestamp)
self.assertIsNone(test_result_1)
self.assertIsNotNone(measure_to_view_map.
_measure_to_view_data_list_map[view.measure.name])
def test_register_view_with_exporter(self):
exporter = mock.Mock()
name = "testView"
description = "testDescription"
columns = mock.Mock()
measure = MeasureInt("measure", "description", "1")
aggregation = mock.Mock()
view = View(
name=name,
description=description,
columns=columns,
measure=measure,
aggregation=aggregation)
| misnyo/searx | tests/unit/engines/test_btdigg.py | Python | agpl-3.0 | 19,083 | 0.002626 |
# -*- coding: utf-8 -*-
from collections import defaultdict
import mock
from searx.engines import btdigg
from searx.testing import SearxTestCase
class TestBtdiggEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 0
params = btdigg.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('btdigg.org', params['url'])
def test_response(self):
self.assertRaises(AttributeError, btdigg.response, None)
self.assertRaises(AttributeError, btdigg.response, [])
self.assertRaises(AttributeError, btdigg.response, '')
self.assertRaises(AttributeError, btdigg.response, '[]')
response = mock.Mock(text='<html></html>')
self.assertEqual(btdigg.response(response), [])
html = u"""
<div id="search_res">
<table>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
</td>
<td class="ttth">
<a href="https://btcloud.io/manager?cmd=add&info_hash=hash"
target="_blank" title="Ajouter à BTCloud">[cloud]</a>
</td>
<td>
<span class="attr_name">Taille:</span>
<span class="attr_val">8 B</span>
</td>
<td>
<span class="attr_name">Fichiers:</span>
<span class="attr_val">710</span>
</td>
<td>
<span class="attr_name">Téléchargements:</span>
<span class="attr_val">5</span>
</td>
<td>
<span class="attr_name">Temps:</span>
<span class="attr_val">417.8 jours</span>
</td>
<td>
<span class="attr_name">Dernière mise à jour:</span>
<span class="attr_val">5.3 jours</span>
</td>
<td>
<span class="attr_name">Faux:</span>
<span class="attr_val">Aucun</span>
</td>
</tr>
</table>
<pre class="snippet">
Content
</pre>
</td>
</tr>
</table>
</div>
"""
response = mock.Mock(text=html.encode('utf-8'))
results = btdigg.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Should be the title')
self.assertEqual(results[0]['url'], 'https://btdigg.org/url')
self.assertEqual(results[0]['content'], 'Content')
self.assertEqual(results[0]['seed'], 5)
self.assertEqual(results[0]['leech'], 0)
self.assertEqual(results[0]['filesize'], 8)
self.assertEqual(results[0]['files'], 710)
self.assertEqual(results[0]['magnetlink'], 'magnet:?xt=urn:btih:magnet&dn=Test')
html = """
<div id="search_res">
<table>
</table>
</div>
"""
response = mock.Mock(text=html.encode('utf-8'))
results = btdigg.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
html = u"""
<div id="search_res">
<table>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
</td>
<td class="ttth">
<a href="https://btcloud.io/manager?cmd=add&info_hash=hash"
target="_blank" title="Ajouter à BTCloud">[cloud]</a>
</td>
<td>
<span class="attr_name">Taille:</span>
<span class="attr_val">1 KB</span>
</td>
<td>
<span class="attr_name">Fichiers:</span>
<span class="attr_val">710</span>
</td>
<td>
<span class="attr_name">Téléchargements:</span>
<span class="attr_val">5</span>
</td>
<td>
<span class="attr_name">Temps:</span>
<span class="attr_val">417.8 jours</span>
</td>
<td>
<span class="attr_name">Dernière mise à jour:</span>
<span class="attr_val">5.3 jours</span>
</td>
<td>
<span class="attr_name">Faux:</span>
<span class="attr_val">Aucun</span>
</td>
</tr>
</table>
<pre class="snippet">
Content
</pre>
</td>
</tr>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
| acenario/Payable | Payable/PayableCore/admin.py | Python | mit | 217 | 0.013825 |
from django.contrib import admin
from django.contrib.auth import get_user_model
class SignUpAdmin(admin.ModelAdmin):
class Meta:
model = get_user_model()
admin.site.register(get_user_model(), SignUpAdmin)
| TechInvestLab/dot15926 | editor_qt/extensions/__init__.py | Python | lgpl-3.0 | 1,249 | 0.001601 |
"""Copyright 2012 TechInvestLab.ru dot15926@gmail.com
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
| behave/behave-django | tests/acceptance/steps/using_pageobjects.py | Python | mit | 1,207 | 0 |
from behave import then, when
from bs4 import BeautifulSoup
from bs4.element import Tag
from pageobjects.pages import About, Welcome
@when(u'I instantiate the Welcome page object')
def new_pageobject(context):
context.page = Welcome(context)
@then(u'it provides a valid Beautiful Soup document')
def pageobject_works(context):
assert context.page.response.status_code == 200
assert context.page.request == context.page.response.request
assert isinstance(context.page.document, BeautifulSoup)
assert 'Test App: behave-django' == context.page.document.title.string, \
"unexpected title: %s" % context.page.document.title.string
@then(u'get_link() returns the link subdocument')
def getlink_subdocument(context):
context.about_link = context.page.get_link('about')
assert isinstance(context.about_link, Tag), \
"should be instance
|
of %s (not %s)" % (
Tag.__name__, context.about_link.__class__.__name__)
@when('I call click() on the link')
def linkelement_click(context):
context.next_page = context.about_link.click()
@then('it loads a new PageObject')
def click_returns_pageobject(context):
assert About(context) == context.next_page
| mikeh77/mi-instrument | mi/platform/util/node_configuration.py | Python | bsd-2-clause | 2,911 | 0.012367 |
#!/usr/bin/env python
"""
@package ion.agents.platform.util.node_configuration
@file ion/agents/platform/util/node_configuration.py
@author Mike Harrington
@brief read node configuration files
"""
__author__ = 'Mike Harrington'
__license__ = 'Apache 2.0'
from ooi.logging import log
from mi.platform.exceptions import NodeConfigurationFileException
from mi.platform.util.NodeYAML import NodeYAML
import yaml
import logging
class NodeConfiguration(object):
"""
Various utilities for reading in node configuration yaml files.
"""
def __init__(self):
self._node_yaml = NodeYAML.factory(None)
@property
def node_meta_data(self):
return self._node_yaml.node_meta_data
@property
def node_streams(self):
return self._node_yaml.node_streams
@property
def node_port_info(self):
return self._node_yaml.node_port_info
def openNode(self,platform_id,node_config_filename):
"""
Opens up and parses the node configuration files.
@param platform_id - id to associate with this set of Node Configuration Files
@param node_config_file - yaml file with information about the platform
@raise NodeConfigurationException
"""
self._platform_id = platform_id
log.debug("%r: Open: %s", self._platform_id, node_config_filename)
try:
with open(node_config_filename, 'r') as node_config_file:
try:
node_config = yaml.load(node_config_file)
except Exception as e:
raise NodeConfigurationFileException(msg="%s Cannot
|
parse yaml node specific config file : %s" % (str(e),node_config_filename))
except Exception as e:
raise NodeConfigurationFileException(msg="%
|
s Cannot open node specific config file : %s" % (str(e),node_config_filename))
self._node_yaml = NodeYAML.factory(node_config)
self._node_yaml.validate()
def Print(self):
log.debug("%r Print Config File Information for: %s\n\n", self._platform_id, self.node_meta_data['node_id_name'])
log.debug("%r Node Meta data", self._platform_id)
for meta_data_key,meta_data_item in sorted(self.node_meta_data.iteritems()):
log.debug("%r %r = %r", self._platform_id, meta_data_key,meta_data_item)
log.debug("%r Node Port Info", self._platform_id)
for port_data_key,port_data_item in sorted(self.node_port_info.iteritems()):
log.debug("%r %r = %r", self._platform_id, port_data_key,port_data_item)
log.debug("%r Node stream Info", self._platform_id)
for stream_data_key,stream_data_item in sorted(self.node_streams.iteritems()):
log.debug("%r %r = %r", self._platform_id, stream_data_key,stream_data_item)
| zrhans/python | exemplos/wakari/scripts-examples-webplot_example.py | Python | gpl-2.0 | 885 | 0.00339 |
from webplot import p
p.use_doc('webplot example')
import numpy as np
import datetime
import time
x = np.arange(100) / 6.0
y = np.sin(x)
z = np.cos(x)
data_source = p.make_source(idx=range(100), x=x, y=y, z=z)
p.plot(x, y, 'orange')
p.figure()
p.plot('x', 'y', color='blue', data_source=data_source, title='sincos')
p.plot('x', 'z', color='green')
p.figure()
p.plot('x', 'y', data_source=data_source)
p.figure()
p.plot('x', 'z', data_source=data_source)
p.figure()
p.table(data_source, ['x', 'y', 'z'])
p.scatter('x', 'y', data_source=data_source)
p.figure()
p.scatter('x', 'z', data_source=data_source)
p.figure()
p.hold(False)
p.scatter('x', 'y', 'orange', data_source=data_source)
p.scatter('x', 'z', 'red', data_source=data_source)
p.plot('x', 'z', 'yellow', data_source=data_source)
p.plot('x', 'y', 'black', data_source=data_source)
print "click on the plots tab to see results"
| GarySparrow/mFlaskWeb | venv/Lib/site-packages/pygments/lexers/_openedge_builtins.py | Python | mit | 48,362 | 0 |
# -*- coding: utf-8 -*-
"""
pygments.lexers._openedge_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = (
'ABSOLUTE',
'ABS',
'ABSO',
'ABSOL',
'ABSOLU',
'ABSOLUT',
'ACCELERATOR',
'ACCUMULATE',
'ACCUM',
'ACCUMU',
'ACCUMUL',
'ACCUMULA',
'ACCUMULAT',
'ACTIVE-FORM',
'ACTIVE-WINDOW',
'ADD',
'ADD-BUFFER',
'ADD-CALC-COLUMN',
'ADD-COLUMNS-FROM',
'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM',
'ADD-FIRST',
'ADD-INDEX-FIELD',
'ADD-LAST',
'ADD-LIKE-COLUMN',
'ADD-LIKE-FIELD',
'ADD-LIKE-INDEX',
'ADD-NEW-FIELD',
'ADD-NEW-INDEX',
'ADD-SCHEMA-LOCATION',
'ADD-SUPER-PROCEDURE',
'ADM-DATA',
'ADVISE',
'ALERT-BOX',
'ALIAS',
'ALL',
'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION',
'ALTER',
'ALWAYS-ON-TOP',
'AMBIGUOUS',
'AMBIG',
'AMBIGU',
'AMBIGUO',
'AMBIGUOU',
'ANALYZE',
'ANALYZ',
'AND',
'ANSI-ONLY',
'ANY',
'ANYWHERE',
'APPEND',
'APPL-ALERT-BOXES',
'APPL-ALERT',
'APPL-ALERT-',
'APPL-ALERT-B',
'APPL-ALERT-BO',
'APPL-ALERT-BOX',
'APPL-ALERT-BOXE',
'APPL-CONTEXT-ID',
'APPLICATION',
'APPLY',
'APPSERVER-INFO',
'APPSERVER-PASSWORD',
'APPSERVER-USERID',
'ARRAY-MESSAGE',
'AS',
'ASC',
'ASCENDING',
'ASCE',
'ASCEN',
'ASCEND',
'ASCENDI',
'ASCENDIN',
'ASK-OVERWRITE',
'ASSEMBLY',
'ASSIGN',
'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT',
'ASYNC-REQUEST-HANDLE',
'AT',
'ATTACHED-PAIRLIST',
'ATTR-SPACE',
'ATTR',
'ATTRI',
'ATTRIB',
'ATTRIBU',
'ATTRIBUT',
'AUDIT-CONTROL',
'AUDIT-ENABLED',
'AUDIT-EVENT-CONTEXT',
'AUDIT-POLICY',
'AUTHENTICATION-FAILED',
'AUTHORIZATION',
'AUTO-COMPLETION',
'AUTO-COMP',
'AUTO-COMPL',
'AUTO-COMPLE',
'AUTO-COMPLET',
'AUTO-COMPLETI',
'AUTO-COMPLETIO',
'AUTO-ENDKEY',
'AUTO-END-KEY',
'AUTO-GO',
'AUTO-INDENT',
'AUTO-IND',
'AUTO-INDE',
'AUTO-INDEN',
'AUTOMATIC',
'AUTO-RESIZE',
'AUTO-RETURN',
'AUTO-RET',
'AUTO-RETU',
'AUTO-RETUR',
'AUTO-SYNCHRONIZE',
'AUTO-ZAP',
'AUTO-Z',
'AUTO-ZA',
'AVAILABLE',
'AVAIL',
'AVAILA',
'AVAILAB',
'AVAILABL',
'AVAILABLE-FORMATS',
'AVERAGE',
'AVE',
'AVER',
'AVERA',
'AVERAG',
'AVG',
'BACKGROUND',
'BACK',
'BACKG',
'BACKGR',
'BACKGRO',
'BACKGROU',
'BACKGROUN',
'BACKWARDS',
'BACKWARD',
'BASE64-DECODE',
'BASE64-ENCODE',
'BASE-ADE',
'BASE-KEY',
'BATCH-MODE',
'BATCH',
'BATCH-',
'BATCH-M',
'BATCH-MO',
'BATCH-MOD',
'BATCH-SIZE',
'BEFORE-HIDE',
'BEFORE-H',
'BEFORE-HI',
'BEFORE-HID',
'BEGIN-EVENT-GROUP',
'BEGINS',
'BELL',
'BETWEEN',
'BGCOLOR',
'BGC',
'BGCO',
'BGCOL',
'BGCOLO',
'BIG-ENDIAN',
'BINARY',
'BIND',
'BIND-WHERE',
'BLANK',
'BLOCK-ITERATION-DISPLAY',
'BORDER-BOTTOM-CHARS',
'BORDER-B',
'BORDER-BO',
'BORDER-BOT',
'BORDER-BOTT',
'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS',
'BORDER-BOTTOM-P',
'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX',
'BORDER-BOTTOM-PIXE',
'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS',
'BORDER-L',
'BORDER-LE',
'BORDER-LEF',
'BORDER-LEFT',
'BORDER-LEFT-',
'BORDER-LEFT-C',
'BORDER-LEFT-CH',
'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR',
'BORDER-LEFT-PIXELS',
'BORDER-LEFT-P',
'BORDER-LEFT-PI',
'BORDER-LEFT-PIX',
'BORDER-LEFT-PIXE',
'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS',
'BORDER-R',
'BORDER-RI',
'BORDER-RIG',
'BORDER-RIGH',
'BORDER-RIGHT',
'BORDER-RIGHT-',
'BORDER-RIGHT-C',
'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA',
'BORDER-RIGHT-CHAR',
'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P',
'BORDER-RIGHT-PI',
'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE',
'BORDER-RIGHT-PIXEL',
'BORDER-TOP-CHARS',
'BORDER-T',
'BORDER-TO',
'BORDER-TOP',
'BORDER-TOP-',
'BORDER-TOP-C',
'BORDER-TOP-CH',
'BORDER-TOP-CHA',
'BORDER-TOP-CHAR',
'BORDER-TOP-PIXELS',
'BORDER-TOP-P',
'BORDER-TOP-PI',
'BORDER-TOP-PIX',
'BORDER-TOP-PIXE',
'BORDER-TOP-PIXEL',
'BOX',
'BOX-SELECTABLE',
'BOX-SELECT',
'BOX-SELECTA',
'BOX-SELECTAB',
'BOX-SELECTABL',
'BREAK',
'BROWSE',
'BUFFER',
'BUFFER-CHARS',
'BUFFER-COMPARE',
'BUFFER-COPY',
'BUFFER-CREATE',
'BUFFER-DELETE',
'BUFFER-FIELD',
'BUFFER-HANDLE',
'BUFFER-LINES',
'BUFFER-NAME',
'BUFFER-RELEASE',
'BUFFER-VALUE',
'BUTTON',
'BUTTONS',
'BY',
'BY-POINTER',
'BY-VARIANT-POINTER',
'CACHE',
'CACHE-SIZE',
'CALL',
'CALL-NAME',
'CALL-TYPE',
'CANCEL-BREAK',
'CANCEL-BUTTON',
'CAN-CREATE',
'CAN-DELETE',
'CAN-DO',
'CAN-FIND',
'CAN-QUERY',
'CAN-READ',
'CAN-SET',
'CAN-WRITE',
'CAPS',
'CAREFUL-PAINT',
'CASE',
'CASE-SENSITIVE',
'CASE-SEN',
'CASE-SENS',
'CASE-SENSI',
'CASE-SENSIT',
'CASE-SENSITI',
'CASE-SENSITIV',
'CAST',
'CATCH',
'CDECL',
'CENTERED',
'CENTER',
'CENTERE',
'CHAINED',
'CHARACTER_LENGTH',
'CHARSET',
'CHECK',
'CHECKED',
'CHOOSE',
'CHR',
'CLASS',
'CLASS-TYPE',
'CLEAR',
'CLEAR-APPL-CONTEXT',
'CLEAR-LOG',
'CLEAR-SELECTION',
'CLEAR-SELECT',
'CLEAR-SELECTI',
'CLEAR-SELECTIO',
'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW',
'CLIENT-CONNECTION-ID',
'CLIENT-PRINCIPAL',
'CLIENT-TTY',
'CLIENT-TYPE',
'CLIENT-WORKSTATION',
'CLIPBOARD',
'CLOSE',
'CLOSE-LOG',
'CODE',
'CODEBASE-LOCATOR',
'CODEPAGE',
'CODEPAGE-CONVERT',
'COLLATE',
'COL-OF',
'COLON',
'COLON-ALIGNED',
'COLON-ALIGN',
'COLON-ALIGNE',
'COLOR',
'COLOR-TABLE',
'COLUMN',
'COL',
'COLU',
'COLUM',
'COLUMN-BGCOLOR',
'COLUMN-DCOLOR',
'COLUMN-FGCOLOR',
'COLUMN-FONT',
'COLUMN-LABEL',
'COLUMN-LAB',
'COLUMN-LABE',
'COLUMN-MOVABLE',
'COLUMN-OF',
'COLUMN-PFCOLOR',
'COLUMN-READ-ONLY',
'COLUMN-RESIZABLE',
'COLUMNS',
'COLUMN-SCROLLING',
'COMBO-BOX',
'COMMAND',
'COMPARES',
'COMPILE',
'COMPILER',
'COMPLETE',
'COM-SELF',
'CONFIG-NAME',
'CONNECT',
'CONNECTED',
'CONSTRUCTOR',
'CONTAINS',
'CONTENTS',
'CONTEXT',
'CONTEXT-HELP',
'CONTEXT-HELP-FILE',
'CONTEXT-HELP-ID',
'CONTEXT-POPUP',
'CONTROL',
'CONTROL-BOX',
'CONTROL-FRAME',
'CONVERT',
'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET',
'CONVERT-TO-OFFS',
'CONVERT-TO-OFFSE',
'COPY-DATASET',
'COPY-LOB',
'COPY-SAX-ATTRIBUTES',
'COPY-TEMP-TABLE',
'COUNT',
'COUNT-OF',
'CPCASE',
'CPCOLL',
'CPINTERNAL',
'CPLOG',
'CPPRINT',
'CPRCODEIN',
'CPRCODEOUT',
'CPSTREAM',
'CPTERM',
'CRC-VALUE',
'CREATE',
'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL',
'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY',
'CREATE-TEST-FILE',
'CURRENT',
'CURRENT_DATE',
'CURRENT-CHANGED',
'CURRENT-COLUMN',
'CURRENT-ENVIRONMENT',
'CURRENT-ENV',
'CURRENT-ENVI',
'CURRENT-ENVIR',
'CURRENT-ENVIRO',
'CURRENT-ENVIRON',
'CURRENT-ENVIRONM',
'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN',
'CURRENT-ITERATION',
'CURRENT-LANGUAGE',
'CURRENT-LANG',
'CURRENT-LANGU',
'CURRENT-LANGUA',
'CURRENT-LANGUAG',
'CURRENT-QUERY',
'CURRENT-RESULT-ROW',
'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE',
'CURRENT-WINDOW',
'CURSOR',
'CURS',
'CURSO',
'CURSOR-CHAR',
'CURSOR-LINE',
'CURSOR-OFFSET',
'DATABASE',
'DATA-BIND',
'DATA-ENTRY-RETURN',
'DATA-ENTRY-RET',
|
mark-in/securedrop-app-code
|
tests/test_unit_store.py
|
Python
|
agpl-3.0
| 1,563
| 0.00064
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import zipfile
# Set environment variable so config.py uses a test environment; this must
# happen before config is imported
os.environ['SECUREDROP_ENV'] = 'test'
import config
import store
import common
from db import db_session, Source
import crypto_util
class TestStore(unittest.TestCase):
"""The set of tests for store.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
    def test_verify(self):
with self.assertRaises(store.PathException):
store.verify(os.path.join(config.STORE_DIR, '..', 'etc', 'passwd'))
with self.assertRaises(store.PathException):
store.verify(config.STORE_DIR + "_backup")
def test_get_zip(self):
sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
source = Source(sid, crypto_util.display_id())
        db_session.add(source)
db_session.commit()
files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
filenames = common.setup_test_docs(sid, files)
archive = zipfile.ZipFile(store.get_bulk_archive(filenames))
archivefile_contents = archive.namelist()
for archived_file, actual_file in zip(archivefile_contents, filenames):
actual_file_content = open(actual_file).read()
zipped_file_content = archive.read(archived_file)
self.assertEquals(zipped_file_content, actual_file_content)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python
|
Module 2/1/image_filters.py
|
Python
|
mit
| 2,407
| 0.02742
|
# http://lodev.org/cgtutor/filtering.html
import cv2
import numpy as np
#img = cv2.imread('../images/input_sharp_edges.jpg', cv2.IMREAD_GRAYSCALE)
img = cv2.imread('../images/input_tree.jpg')
rows, cols = img.shape[:2]
#cv2.imshow('Original', img)
###################
# Motion Blur
size = 15
kernel_motion_blur = np.zeros((size, size))
kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
output = cv2.filter2D(img, -1, kernel_motion_blur)
#cv2.imshow('Motion Blur', output)
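# Note (illustrative, not part of the original tutorial): the motion-blur
# kernel is a single horizontal row of 1/size values, so filtering replaces
# each pixel with the average of its `size` horizontal neighbours. For size=3
# the kernel would be
#   [[0,   0,   0  ],
#    [1/3, 1/3, 1/3],
#    [0,   0,   0  ]]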
###################
# Sharpening
kernel_sharpen_1 = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
kernel_sharpen_2 = np.array([[1,1,1], [1,-7,1], [1,1,1]])
kernel_sharpen_3 = np.array([[-1,-1,-1,-1,-1],
[-1,2,2,2,-1],
[-1,2,8,2,-1],
[-1,2,2,2,-1],
[-1,-1,-1,-1,-1]]) / 8.0
output_1 = cv2.filter2D(img, -1, kernel_sharpen_1)
output_2 = cv2.filter2D(img, -1, kernel_sharpen_2)
output_3 = cv2.filter2D(img, -1, kernel_sharpen_3)
#cv2.imshow('Sharpening', output_1)
#cv2.imshow('Excessive Sharpening', output_2)
#cv2.imshow('Edge Enhancement', output_3)
###################
# Embossing
img_emboss_input = cv2.imread('../images/input_house.jpg')
kernel_emboss_1 = np.array([[0,-1,-1],
[1,0,-1],
                            [1,1,0]])
kernel_emboss_2 = np.array([[-1,-1,0],
[-1,0,1],
[0,1,1]])
kernel_emboss_3 = np.array([[1,0,0],
[0,0,0],
[0,0,-1]])
gray_img = cv2.cvtColor(img_emboss_input,cv2.COLOR_BGR2GRAY)
output_1 = cv2.filter2D(gray_img, -1, kernel_emboss_1)
output_2 = cv2.filter2D(gray_img, -1, kernel_emboss_2)
output_3 = cv2.filter2D(gray_img, -1, kernel_emboss_3)
cv2.imshow('Input', img_emboss_input)
cv2.imshow('Embossing - South West', output_1 + 128)
cv2.imshow('Embossing - South East', output_2 + 128)
cv2.imshow('Embossing - North West', output_3 + 128)
###################
# Erosion and dilation
img = cv2.imread('../images/input_morphology.png',0)
kernel = np.ones((5,5), np.uint8)
img_erosion = cv2.erode(img, kernel, iterations=1)
img_dilation = cv2.dilate(img, kernel, iterations=1)
#cv2.imshow('Input', img)
#cv2.imshow('Erosion', img_erosion)
#cv2.imshow('Dilation', img_dilation)
cv2.waitKey()
|
eneldoserrata/marcos_openerp
|
marcos_addons/marcos_ncf/account_invoice.py
|
Python
|
agpl-3.0
| 10,032
| 0.004288
|
# -*- encoding: utf-8 -*-
from openerp.osv import osv, fields
from idvalidator import is_ncf
from openerp.osv.osv import except_osv
from openerp import netsvc
from datetime import datetime
from openerp.tools.translate import _
import time
class account_invoice(osv.Model):
_inherit = "account.invoice"
_name = "account.invoice"
def _get_reference_type(self, cr, uid, context=None):
return [('none', u'Referencia libre / Nº Fact. Proveedor'),
('01', '01 - Gastos de personal'),
('02', '02 - Gastos por trabajo, suministros y servicios'),
('03', '03 - Arrendamientos'),
('04', '04 - Gastos de Activos Fijos'),
('05', u'05 - Gastos de Representación'),
('06', '06 - Otras Deducciones Admitidas'),
('07', '07 - Gastos Financieros'),
('08', '08 - Gastos Extraordinarios'),
('09', '09 - Compras y Gastos que forman parte del Costo de Venta'),
('10', '10 - Adquisiciones de Activos'),
('11', '11 - Gastos de Seguro')
]
def on_change_fiscal_position(self, cr, uid, ids, value):
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, value).fiscal_type
if fiscal_type in [u'informal', u'minor']:
ncf_required = False
else:
ncf_required = True
return {"value": {'reference_type': fiscal_type, 'ncf_required': ncf_required}}
def onchange_journal_id(self, cr, uid, ids, *args):
if args:
journal = self.pool.get("account.journal").browse(cr, uid, args[0])
ncf_required = True
if journal.ncf_special:
ncf_required = False
return {"value": {'ncf_required': ncf_required}}
else:
return {"value": {}}
def onchange_reference(self, cr, uid, ids, reference, ncf_required):
if not is_ncf(reference.encode("ascii")) and ncf_required:
raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
return False
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
if inv.journal_id.ncf_special in ['gasto', 'informal']:
self.write(cr, uid, [inv.id], {"reference": False})
if inv.type in ['in_invoice', 'in_refund'] and inv.ncf_required:
if inv.reference_type != 'none' and not is_ncf(inv.reference.encode("ascii")):
raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
            # TODO: if the stock picking for this order is still pending, warn the accountant
            # that the pending products must be received or cancelled; if the picking is
            # received partially, a draft credit note must be created
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
_columns = {
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=False),
'reference': fields.char('Invoice Reference', size=19, help="The partner reference of this invoice."),
'ipf': fields.boolean("Impreso", readonly=True),
'ncf_required': fields.boolean(),
"pay_to": fields.many2one("res.partner", "Pagar a")
}
_sql_constraints = [
# ('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!')
('number_uniq', 'unique(company_id, partner_id, number, journal_id)', u'El NCF para este relacionado ya fue utilizado!'),
]
_defaults = {
"ncf_required": True
}
def _get_journal_id(self, fiscal_type, shop_id, refund):
if refund:
return shop_id.notas_credito_id.id
elif fiscal_type == "final" or fiscal_type is None:
return shop_id.final_id.id
elif fiscal_type == "fiscal":
return shop_id.fiscal_id.id
elif fiscal_type == "special":
return shop_id.especiales_id.id
elif fiscal_type == "gov":
return shop_id.gubernamentales_id.id
else:
return False
def create(self, cr, uid, vals, context=None):
if not context:
context = {}
if context.get('active_model', False) == 'pos.order' and vals.get('type', False) in ["out_invoice", 'out_refund']:
pass
elif context.get('active_model', False) == 'stock.picking.in' and vals.get('type', False) == "out_refund":
pass
elif vals.get('type', False) == "out_invoice":
order_obj = self.pool.get('sale.order')
so_id = order_obj.search(cr, uid, [('name', '=', vals['origin'])])
so = order_obj.browse(cr, uid, so_id, context)[0]
if not vals['fiscal_position']: vals['fiscal_position'] = 2
fiscal_type = so.partner_id.property_account_position.fiscal_type or 'final'
vals['journal_id'] = self._get_journal_id(fiscal_type, so.shop_id, False)
elif vals.get('type', False) == "out_refund":
if vals.get('origin', False):
order_obj = self.pool.get('sale.order')
so_id = order_obj.search(cr, uid, [('name', '=', vals.get('origin', None))])
so = order_obj.browse(cr, uid, so_id, context)[0]
if not vals['fiscal_position']:
vals['fiscal_position'] = 2
vals['journal_id'] = self._get_journal_id(None, so.shop_id, True)
else:
vals['reference'] = u""
inv_obj = self.pool.get('account.invoice')
origin = inv_obj.read(cr, uid, context['active_id'], ['number'])
vals['origin'] = origin["number"]
        elif vals.get('type', False) == "in_invoice" and vals.get('fiscal_position', False):
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
vals['reference_type'] = fiscal_type
elif vals.get('type', False) == "in_refund" and vals.get('fiscal_position', False):
vals['reference'] = vals.get('origin', "")
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
vals['reference_type'] = fiscal_type
inv = super(account_invoice, self).create(cr, uid, vals, context)
return inv
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft', 'internal_number': False})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""
For each invoice line.
If amount of days since invoice is greater than 30.
For each tax on each invoice line.
If the tax is included in the price.
The tax is replaced with the corresponding tax exempt tax.
If tax is not include in price, no tax will show up in the refund.
"""
result = super(account_invoice, self)._refund_cleanup_lines(cr, uid, lines, context=context)
# For each invoice_line
for x, y, line in result:
inv_obj = self.pool.get('account.invoice').browse(cr, uid, line['invoice_id'], context=context)
inv_date = datetime.strptime(inv_obj['date_invoice'], "%Y-%m-%d").date()
days_diff = datetime.today().date() - inv_date
# If amount of days since invoice is greater than 30:
if days_diff.days > 30:
taxes_ids = []
# For each
|
Tong-Chen/genomics-tools
|
mapreduce-python/mapreduce/model.py
|
Python
|
apache-2.0
| 39,526
| 0.006274
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes describe a mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around by other means.
"""
# Disable "Invalid method name"
# pylint: disable=g-bad-name
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
from mapreduce.lib.graphy.backends import google_chart_api
from mapreduce.lib import simplejson
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from mapreduce import context
from mapreduce import hooks
from mapreduce import json_util
from mapreduce import util
# pylint: disable=protected-access
# Special datastore kinds for MR.
_MAP_REDUCE_KINDS = ("_AE_MR_MapreduceControl",
"_AE_MR_MapreduceState",
"_AE_MR_ShardState",
"_AE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
"""Model object to store task payload."""
payload = db.BlobProperty()
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_TaskPayload"
class HugeTask(object):
"""HugeTask is a taskqueue.Task-like class that can store big payloads.
Payloads are stored either in the task payload itself or in the datastore.
Task handlers should inherit from base_handler.HugeTaskHandler class.
"""
PAYLOAD_PARAM = "__payload"
PAYLOAD_KEY_PARAM = "__payload_key"
# Leave some wiggle room for headers and other fields.
MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
# Update version when payload handling is changed
# in a backward incompatible way.
PAYLOAD_VERSION = "1"
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
"""Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
"""
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
# TODO(user): Find a more space efficient way than urlencoding.
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
# Payload is small. Don't bother with anything.
if not compressed_payload:
self._payload = payload_str
# Compressed payload is small. Don't bother with datastore.
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s to big to be stored in database: %s" %
(self.name, len(compressed_payload)))
# Store payload in the datastore.
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
def add(self, queue_name, transactional=False):
"""Add task to the queue."""
task = self.to_task()
task.add(queue_name, transactional)
def to_task(self):
"""Convert to a taskqueue task."""
# Never pass params to taskqueue.Task. Use payload instead. Otherwise,
# it's up to a particular taskqueue implementation to generate
# payload from params. It could blow up payload size over limit.
return taskqueue.Task(
url=self.url,
payload=self._payload,
name=self.name,
eta=self.eta,
countdown=self.countdown,
headers=self._headers)
@classmethod
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding
# failed, we can't abort a mr.
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body)
@classmethod
def _decode_payload(cls, body):
compressed_payload_str = None
if body.startswith(cls.PAYLOAD_KEY_PARAM):
payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
payload_entity = _HugeTaskPayload.get(payload_key)
compressed_payload_str = payload_entity.payload
elif body.startswith(cls.PAYLOAD_PARAM):
compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
if compressed_payload_str:
payload_str = zlib.decompress(compressed_payload_str)
else:
payload_str = body
result = {}
for (name, value) in cgi.parse_qs(payload_str).items():
if len(value) == 1:
result[name] = value[0]
else:
result[name] = value
return result
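# Illustrative usage sketch (not part of the original module; `parent_entity`
# stands for a hypothetical db.Model instance supplied by the caller):
#
#   task = HugeTask(url="/mapreduce/worker",
#                   params={"shard_id": "0"},
#                   parent=parent_entity)
#   task.add(queue_name="default")
#
# Small payloads ride in the task body; oversized ones are compressed and, if
# still too large, spilled to the _HugeTaskPayload datastore entity.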
class CountersMap(json_util.JsonMixin):
"""Maintains map from counter name to counter value.
  The class is used to provide basic arithmetics of counter values (bulk
add/remove), increment individual values and store/load data from json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name):
"""Get current counter value.
Args:
counter_name: counter name as string.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, 0)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
    new_value = current_value + delta
    self.counters[counter_name] = new_value
    return new_value
|
nschloe/quadpy
|
tools/lebedev/import_lebedev.py
|
Python
|
mit
| 4,694
| 0.000639
|
"""
This little helper takes Lebedev point and weight data from [1] and produces JSON files.
[1]
https://people.sc.fsu.edu/~jburkardt/datasets/sphere_lebedev_rule/sphere_lebedev_rule.html
"""
import os
import re
import numpy as np
def read(filename):
data = np.loadtxt(filename)
azimuthal_polar = data[:, :2] / 180.0
weights = data[:, 2]
return azimuthal_polar, weights
def chunk_data(weights):
# divide the weight vector into chunks of 6, 8, 12, 24, or 48
chunks = []
k = 0
ref_weight = 0.0
tol = 1.0e-12
while k < len(weights):
if len(chunks) > 0 and abs(weights[k] - ref_weight) < tol:
chunks[-1].append(k)
else:
chunks.append([k])
ref_weight = weights[k]
k += 1
return chunks
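# Illustrative note (not part of the original script): consecutive equal
# weights are grouped into one chunk, e.g. for weights [0.1, 0.1, 0.2] the
# result is [[0, 1], [2]] -- index lists whose lengths should be 6, 8, 12,
# 24 or 48 for valid Lebedev data.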
def sort_into_symmetry_classes(weights, azimuthal_polar):
data = {"a1": [], "a2": [], "a3": [], "pq0": [], "llm": [], "rsw": []}
for c in chunks:
if len(c) == 6:
data["a1"].append([weights[c[0]]])
elif len(c) == 12:
data["a2"].append([weights[c[0]]])
elif len(c) == 8:
data["a3"].append([weights[c[0]]])
elif len(c) == 24:
if any(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12):
# polar == pi/2 => X == [p, q, 0].
# Find the smallest positive phi that's paired with `polar ==
# pi/2`; the symmetry is fully characterized by that phi.
                k = np.where(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12)[0]
assert len(k) == 8
k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
azimuthal_min = np.min(azimuthal_polar[c, 0][k][k2])
data["pq0"].append([weights[c[0]], azimuthal_min])
else:
# X = [l, l, m].
                # In this case, there must be exactly two phi with the value
# pi/4. Take the value of the smaller corresponding `polar`;
# all points are characterized by it.
k = np.where(abs(azimuthal_polar[c, 0] - 0.25) < 1.0e-12)[0]
assert len(k) == 2
k2 = np.where(azimuthal_polar[c, 1][k] > 0.0)[0]
polar_min = np.min(azimuthal_polar[c, 1][k][k2])
data["llm"].append([weights[c[0]], polar_min])
else:
assert len(c) == 48
# This most general symmetry is characterized by two angles; one
# could take any two here.
            # To make things easier later on, out of the points with the
            # smallest polar angle, take the one with the smallest positive phi.
min_polar = np.min(azimuthal_polar[c, 1])
k = np.where(abs(azimuthal_polar[c, 1] - min_polar) < 1.0e-12)[0]
k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
min_azimuthal = np.min(azimuthal_polar[c, 0][k][k2])
data["rsw"].append([weights[c[0]], min_azimuthal, min_polar])
return data
def write_json(filename, d):
    # Getting floats in scientific notation out of Python's json module is
    # almost impossible, so do some work here. Compare with
    # <https://stackoverflow.com/a/1733105/353337>.
class PrettyFloat(float):
def __repr__(self):
return f"{self:.16e}"
def pretty_floats(obj):
if isinstance(obj, float):
return PrettyFloat(obj)
elif isinstance(obj, dict):
return {k: pretty_floats(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return list(map(pretty_floats, obj))
return obj
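    # Sketch (illustrative): PrettyFloat overrides repr to force scientific
    # notation, e.g. repr(PrettyFloat(0.5)) == '5.0000000000000000e-01'.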
with open(filename, "w") as f:
string = (
pretty_floats(d)
.__repr__()
.replace("'", '"')
.replace("{", "{\n ")
.replace("[[", "[\n [")
.replace("], [", "],\n [")
.replace(']], "', ']\n ],\n "')
.replace("}", "\n}")
.replace("]]", "]\n ]")
)
f.write(string)
return
if __name__ == "__main__":
directory = "data/"
for k, file in enumerate(os.listdir(directory)):
filename = os.fsdecode(file)
m = re.match("lebedev_([0-9]+)\\.txt", filename)
degree = int(m.group(1))
azimuthal_polar, weights = read(os.path.join("data", filename))
chunks = chunk_data(weights)
data = sort_into_symmetry_classes(weights, azimuthal_polar)
delete_list = []
for key in data:
if len(data[key]) == 0:
delete_list.append(key)
for key in delete_list:
data.pop(key)
data["degree"] = degree
write_json(f"lebedev_{degree:03d}.json", data)
|
patjouk/djangogirls
|
applications/migrations/0002_auto_20150308_2229.py
|
Python
|
bsd-3-clause
| 1,002
| 0.002994
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='question',
            name='has_option_other',
field=models.BooleanField(default=False, help_text=b"Used only with 'Choices' question type", verbose_name=b"Allow for 'Other' answer?"),
),
migrations.AlterField(
            model_name='question',
name='is_multiple_choice',
field=models.BooleanField(default=False, help_text=b"Used only with 'Choices' question type", verbose_name=b'Are there multiple choices allowed?'),
),
migrations.AlterField(
model_name='question',
name='is_required',
field=models.BooleanField(default=True, verbose_name=b'Is the answer to the question required?'),
),
]
|
rhololkeolke/apo-website
|
src/application/generate_keys.py
|
Python
|
bsd-3-clause
| 2,126
| 0.003293
|
#!/usr/bin/env python
# encoding: utf-8
"""
generate_keys.py
Generate CSRF and Session keys, output to secret_keys.py file
Usage:
generate_keys.py [-f]
Outputs secret_keys.py file in current folder
By default, an existing secret_keys file will not be replaced.
Use the '-f' flag to force the new keys to be written to the file
"""
import string
import os.path
from optparse import OptionParser
from random import choice
from string import Template
# File settings
file_name = 'secret_keys.py'
file_template = Template('''# CSRF- and Session keys
CSRF_SECRET_KEY = '$csrf_key'
SESSION_KEY = '$session_key'
# Facebook Keys
FACEBOOK_APP_ID = '$facebook_app_id'
FACEBOOK_APP_SECRET = '$facebook_app_secret'
''')
# Get options from command line
parser = OptionParser()
parser.add_option("-f", "--force", dest="force",
help="force overwrite of existing secret_keys file", action="store_true")
parser.add_option("-r", "--randomness", dest="randomness",
help="length (randomness) of generated key; default = 24", default=24)
(options, args) = parser.parse_args()
def generate_randomkey(length):
"""Generate random key, given a number of characters"""
chars = string.letters + string.digits
return ''.join([choice(chars) for i in range(length)])
def write_file(contents):
f = open(file_name, 'wb')
f.write(contents)
f.close()
def generate_keyfile(csrf_key, session_key):
"""Generate random keys for CSRF- and session key"""
output = file_template.safe_substitute(dict(
csrf_key=csrf_key, session_key=session_key,
        facebook_app_id='FILL ME IN', facebook_app_secret='FILL ME IN'
))
if os.path.exists(file_name):
if options.force is None:
print "Warning: secret_keys.py file exists. Use '-f
|
' flag to force overwrite."
else:
write_file(output)
else:
write_file(output)
def main():
r = options.randomness
csrf_key = generate_randomkey(r)
session_key = generate_randomkey(r)
generate_keyfile(csrf_key, session_key)
if __name__ == "__main__":
main()
|
brian-team/brian2genn
|
brian2genn/correctness_testing.py
|
Python
|
gpl-2.0
| 1,039
| 0.005775
|
'''
Definitions of the configuration for correctness testing.
'''
import brian2
import os
import shutil
import sys
import brian2genn
from brian2.tests.features import (Configuration, DefaultConfiguration,
run_feature_tests, run_single_feature_test)
__all__ = ['GeNNConfiguration',
'GeNNConfigurationCPU',
'GeNNConfigurationOptimized']
class GeNNConfiguration(Configuration):
name = 'GeNN'
def before_run(self):
brian2.prefs.codegen.cpp.extra_compile_args = []
brian2.prefs._backup()
brian2.set_device('genn')
class GeNNConfigurationCPU(Configuration):
name = 'GeNN_CPU'
def before_run(self):
brian2.prefs.codegen.cpp.extra_compile_args = []
brian2.prefs._backup()
brian2.set_device('genn', use_GPU=False)
class GeNNConfigurationOptimized(Configuration):
name = 'GeNN_optimized'
def before_run(self):
brian2.prefs.reset_to_defaults()
brian2.prefs._backup()
brian2.set_device('genn')
|
fuzzycode/RoboPi
|
main.py
|
Python
|
mit
| 2,261
| 0.002655
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Björn Larsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import logging
import signal
from robot.application import App
import robot.utils.config as config
from twisted.python import log
__logger = logging.getLogger(__name__)
def setupLogging(args):
# Connect the twisted log with the python log
observer = log.PythonLoggingObserver()
observer.start()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Create main stream handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logging.getLogger('').addHandler(ch)
def main(args):
"""
Entry point for the application
Takes a list of command line arguments as parameter
:param args:
:return: return code
"""
setupLogging(args)
config.init()
app = App()
def SIGINT_handler(num, frame):
__logger.info("Signal handler triggered, purging application")
app.purge()
signal.signal(signal.SIGINT, SIGINT_handler)
signal.signal(signal.SIGHUP, SIGINT_handler)
app.setup(args)
app.run()
if __name__=='__main__':
main(sys.argv)
|
lefteye/superroutingchioce
|
create_loss_width_thoughtout.py
|
Python
|
gpl-2.0
| 1,854
| 0.006608
|
# -*- coding:UTF-8 -*-
# Generates throughput, available bandwidth, jitter and packet-loss rate
import csv
f = open('iperf_13.csv')
c = f.readlines()
csvfile1 = open('iperf_133.csv', 'ab')
writer1 = csv.writer(csvfile1)
for i in c:
# try:
t = i.split()
# print t
# print len(t)
ll = []
if len(t)==14:
        a = t[5]
# print type(a)
e=float(a)
# print type(a)
# print a
if e > 10:
# print "helloworld"
h = e * 0.001
k = str(h)
ll.append(k)
else:
a = t[5]
ll.append(a)
b = t[7]
c = t[-5]
d = t[-1]
ll.append(b)
ll.append(c)
if len(d) == 6:
t = d[1:4]
ll.append(t)
# writer1.writerow(d)
# print d
else:
t = d[1]
ll.append(t)
# writer1.writerow(d)
#ll.append(d)
elif len(t) == 13:
a = t[4]
b = t[6]
c = t[-5]
d = t[-1]
e = float(a)
# print type(a)
# print a
if e > 10:
# print "helloworld"
h = e * 0.001
k = str(h)
ll.append(k)
else:
a = t[4]
ll.append(a)
ll.append(b)
ll.append(c)
if len(d) == 6:
t = d[1:4]
ll.append(t)
writer1.writerow(ll)
# print d
else:
t = d[1]
ll.append(t)
writer1.writerow(ll)
else:
continue
#print ll
print "all complted"
csvfile1.close()
|
bp-kelley/rdkit
|
rdkit/ML/UnitTestBuildComposite.py
|
Python
|
bsd-3-clause
| 7,293
| 0.000137
|
# $Id$
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the BuildComposite functionality
"""
import io
import os
import unittest
from rdkit import RDConfig
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import BuildComposite
import pickle
class TestCase(unittest.TestCase):
def setUp(self):
self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = BuildComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def _init(self, refCompos, copyBounds=0):
BuildComposite._verbose = 0
conn = DbConnect(self.details.dbName, self.details.tableName)
cols = [x.upper() for x in conn.GetColumnNames()]
cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
self.assertEqual(cols, cDescs)
self.details.nModels = 10
self.details.lockRandom = 1
self.details.randomSeed = refCompos._randomSeed
self.details.splitFrac = refCompos._splitFrac
if self.details.splitFrac:
self.details.splitRun = 1
else:
self.details.splitRun = 0
if not copyBounds:
self.details.qBounds = [0] * len(cols)
else:
self.details.qBounds = refCompos.GetQuantBounds()[0]
def compare(self, compos, refCompos):
self.assertEqual(len(compos), len(refCompos))
cs = []
rcs = []
for i in range(len(compos)):
cs.append(compos[i])
rcs.append(refCompos[i])
cs.sort(key=lambda x: (x[2], x[2]))
rcs.sort(key=lambda x: (x[2], x[2]))
for i in range(len(compos)):
_, count, err = cs[i]
_, refCount, refErr = rcs[i]
self.assertEqual(count, refCount)
self.assertAlmostEqual(err, refErr, 4)
def test1_basics(self):
# """ basics """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
compos = BuildComposite.RunIt(self.details, saveIt=0)
# pickle.dump(compos,open(os.path.join(self.baseDir,refComposName), 'wb'))
# with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
# refCompos = pickle.load(pklF)
self.compare(compos, refCompos)
def test2_depth_limit(self):
# """ depth limit """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test3_depth_limit_less_greedy(self):
# """ depth limit + less greedy """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3_lessgreedy.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
      buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.lessGreedy = 1
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test4_more_trees(self):
# """ more trees """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_50_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.nModels = 50
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test5_auto_bounds(self):
# """ auto bounds """
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test6_auto_bounds_real_activity(self):
# """ auto bounds with a real valued activity"""
self.details.tableName = 'ferro_noquant_realact'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
self.details.activityBounds = [0.5]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test7_composite_naiveBayes(self):
# """ Test composite of naive bayes"""
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_NaiveBayes.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTFile:
buf = pklTFile.read().replace('\r\n', '\n').encode('utf-8')
pklTFile.close()
with io.BytesIO(buf) as pklFile:
refCompos = pickle.load(pklFile)
self._init(refCompos, copyBounds=1)
self.details.useTrees = 0
self.details.useNaiveBayes = 1
self.details.mEstimateVal = 20.0
self.details.qBounds = [0] + [2] * 6 + [0]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
kirkwor/py
|
buildingGui_2.py
|
Python
|
mit
| 1,732
| 0.008083
|
'''
Created on 13 Jul 2017
@author: T
https://www.youtube.com/watch?v=6isuF_bBiXs
Tkinter:
+ standard lib
+ lightweight
+ good enough
- limited widgets
- strange import
- ugly
Python 3x
- ttk module (theme for better)
* runs in an event loop
* widgets have parents
Geometry:
: absolute (avoid this)
: pack
+ simple
- not flexible
: grid
'''
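# Quick geometry-manager sketch (illustrative, not part of the tutorial code):
# pack stacks widgets, grid places them in rows/columns. Don't mix pack and
# grid inside the same container.
#
#   top = Frame(root); top.pack()
#   Label(top, text="stacked").pack(side="top")
#   table = Frame(root); table.pack()
#   Button(table, text="cell 0,0").grid(row=0, column=0)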
from tkinter import *
from tkinter.ttk import *
class WithoutTtk():
def __init__(self, root):
self.frame = Frame(root)
self.build_window()
self.frame.pack(fill='both')
menubar = Menu(root)
root['menu'] = menubar
menu_file = Menu(menubar)
        menu_file.add_command(label="Quit", command=self.quit)
menubar.add_cascade(menu=menu_file, label="File")
def build_window(self):
label = Label(self.frame, text="How do I look?")
# label.pack(side="top")
label.grid(row=0, column=1)
button_bad = Button(self.frame, text="Terrible", command=self.quit)
        # button_bad.pack(side="left")
button_bad.grid(row=0, column=0)
# button_bad.grid(row=0, column=0, sticky="E")
button_good = Button(self.frame, text="not bad", command=self.quit)
# button_good.pack(side="right")
button_good.grid(row=0, column=2)
# button_good.grid(row=0, column=2, sticky="W")
self.frame.rowconfigure(0,weight=2) # row 0 is the one which will grow
self.frame.columnconfigure(1, weight=2)
def quit(self):
self.frame.quit()
if __name__ == '__main__':
root = Tk()
myApp = WithoutTtk(root)
root.mainloop()
|
chuckbutler/shoutcast-charm
|
lib/charmhelpers/core/hookenv.py
|
Python
|
mit
| 14,883
| 0
|
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
#  Charm Helpers Developers <juju@lists.ubuntu.com>
import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()
cache = {}
def cached(func):
"""Cache return values for multiple executions of func + args
For example::
@cached
def unit_get(attribute):
pass
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
"""
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
def flush(key):
"""Flushes any entries from function cache where the
key is found in the function+args """
flush_list = []
for item in cache:
if key in item:
flush_list.append(item)
for item in flush_list:
del cache[item]
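# Illustrative note (not part of the original module): flush matches on the
# stringified cache key, so after unit_get('test') has been cached,
# flush('unit_get') evicts every cached call involving unit_get.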
def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
"""Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
# See if this object has attribute.
if attr in ("json", "yaml", "data"):
return self.__dict__[attr]
# Check for attribute in wrapped object.
got = getattr(self.data, attr, MARKER)
if got is not MARKER:
return got
# Proxy to the wrapped object via dict interface.
try:
return self.data[attr]
except KeyError:
raise AttributeError(attr)
def __getstate__(self):
# Pickle as a standard dictionary.
return self.data
def __setstate__(self, state):
# Unpickle into our wrapper.
self.data = state
def json(self):
"""Serialize the object to json"""
return json.dumps(self.data)
def yaml(self):
"""Serialize the object to yaml"""
return yaml.dump(self.data)
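# Illustrative usage sketch (not part of the original module):
#
#   s = Serializable({'name': 'unit/0'})
#   s.name      # -> 'unit/0' (dict keys are proxied as attributes)
#   s.json()    # -> '{"name": "unit/0"}'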
def execution_environment():
"""A convenient bundling of the current execution context"""
context = {}
context['conf'] = config()
if relation_id():
context['reltype'] = relation_type()
context['relid'] = relation_id()
context['rel'] = relation_get()
context['unit'] = local_unit()
context['rels'] = relations()
context['env'] = os.environ
return context
def in_relation_hook():
"""Determine whether we're running in a relation hook"""
return 'JUJU_RELATION' in os.environ
def relation_type():
"""The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"""The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None)
def local_unit():
"""Local unit ID"""
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"""The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT']
def service_name():
"""The name service group this unit belongs to"""
return local_unit().split('/')[0]
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
class Config(dict):
"""A dictionary representation of the charm's config.yaml, with some
extra features:
- See which values in the dictionary have changed since the previous hook.
- For values that have changed, see what the previous value was.
- Store arbitrary data for use in a later hook.
NOTE: Do not instantiate this object directly - instead call
``hookenv.config()``, which will return an instance of :class:`Config`.
Example usage::
>>> # inside a hook
>>> from charmhelpers.core import hookenv
>>> config = hookenv.config()
>>> config['foo']
'bar'
>>> # store a new key/value for later use
>>> config['mykey'] = 'myval'
>>> # user runs `juju set mycharm foo=baz`
>>> # now we're inside subsequent config-changed hook
>>> config = hookenv.config()
>>> config['foo']
'baz'
>>> # test to see if this val has changed since last hook
>>> config.changed('foo')
True
>>> # what was the previous value?
>>> config.previous('foo')
'bar'
>>> # keys/values that we add are preserved across hooks
>>> config['mykey']
'myval'
"""
CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def __getitem__(self, key):
"""For regular dict lookups, check the current juju config first,
then the previous (saved) copy. This ensures that user-saved values
will be returned by a dict lookup.
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return (self._prev_dict or {})[key]
def load_previous(self, path=None):
"""Load previous copy of config from disk.
In normal usage you don't need to call this method directly - it
is called automatically at object initialization.
:param path:
File path from which to load the previous config. If `None`,
config is loaded from the default location. If `path` is
specified, subsequent `save()` calls will write to the same
path.
"""
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
def changed(self, key):
"""Return True if the current value for this key is different from
the previous value.
"""
if self._prev_dict is None:
return True
return self.previous(key) != self.get(key)
def previous(self, key):
"""Return previous value for this key, or None if there
is no previous value.
"""
if self._prev_dict:
return self._prev_dict.get(key)
return None
def save(self):
"""Save this config to disk.
If the charm is using the :mod:`Services Framework <services.base>`
or :meth:'@hook <Hooks.hook>' decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
"""
if self._prev_dict:
for k, v in self._prev_dict.iteritems():
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
json.dump(self, f)
@cached
def config(scope=None):
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
config_data = json.loads(subprocess.check_output(config_cmd_line))
if scope is not None:
return config_data
return Config(config_data)
except ValueError:
return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None
    except CalledProcessError, e:
        if e.returncode == 2:
            return None
        raise
|
Risto-Stevcev/iac-protocol
|
iac/app/libreoffice/calc.py
|
Python
|
bsd-3-clause
| 3,484
| 0.003731
|
"""
To start UNO for both Calc and Writer:
(Note that if you use the current_document command, it will open Calc's current document since it's the first switch passed)
libreoffice "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
To start UNO without opening a libreoffice instance, use the --headless switch:
(Note that this doesn't allow use of the current_document command)
libreoffice --headless "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
"""
from uno import getComponentContext
from com.sun.star.connection import ConnectionSetupException
from com.sun.star.awt.FontWeight import BOLD
import sys
# For saving the file
from com.sun.star.beans import PropertyValue
from uno import systemPathToFileUrl
class Message(object):
connection_setup_exception = "Error: Please start the uno bridge first."
# Connect to libreoffice using UNO
UNO_PORT = 18100
try:
localContext = getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", localContext)
context = resolver.resolve(
"uno:socket,host=localhost,port=%d;urp;StarOffice.ComponentContext" % UNO_PORT)
except ConnectionSetupException:
print("%s\n" % Message.connection_setup_exception)
sys.exit(1)
# Get the desktop service
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
class Interface(object):
variables = {}
@staticmethod
def current_document():
"""current_document()"""
return desktop.getCurrentComponent()
@staticmethod
def load_document(path):
"""load_document(['path'])"""
url = systemPathToFileUrl(path)
return desktop.loadComponentFromURL(url ,"_blank", 0, ())
@staticmethod
def new_document():
"""new_document()"""
return desktop.loadComponentFromURL("private:factory/scalc","_blank", 0, ())
@staticmethod
def current_sheet(document):
"""[document].current_sheet()"""
return document.getCurrentController().getActiveSheet()
@staticmethod
def save_as(document, path):
"""[document].save_as(['path'])"""
url = systemPathToFileUrl(path)
# Set file to overwrite
property_value = PropertyValue()
property_value.Name = 'Overwrite'
property_value.Value = 'overwrite'
properties = (property_value,)
# Save to file
document.storeAsURL(url, properties)
return True
@staticmethod
def fetch_cell(sheet, cell_range):
"""[sheet].fetch_cell(['A1'])"""
return sheet.getCellRangeByName(cell_range)
@staticmethod
def set_text(cell, string):
"""[cell].set_text(['string'])"""
if (string.startswith('"') and string.endswith('"')) or \
           (string.startswith("'") and string.endswith("'")):
string = string[1:-1]
cell.setString(string)
return True
@staticmethod
def get_text(cell):
"""[cell].get_text()"""
return cell.getString()
@staticmethod
def weight(cell, bold):
"""[cell].weight(['bold'])"""
if bold.strip("'").strip('"') == "bold":
cell.CharWeight = BOLD
return True
else:
return False
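# Illustrative end-to-end sketch (assumes the UNO bridge above is running and
# that /tmp/out.ods is writable; not part of the original module):
#
#   doc = Interface.new_document()
#   sheet = Interface.current_sheet(doc)
#   cell = Interface.fetch_cell(sheet, "A1")
#   Interface.set_text(cell, "'hello'")
#   Interface.save_as(doc, "/tmp/out.ods")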
|
smart-classic/smart_server
|
smart/models/records.py
|
Python
|
apache-2.0
| 5,560
| 0.001079
|
"""
Records for SMART Reference EMR
Ben Adida & Josh Mandel
"""
from base import *
from django.utils import simplejson
from django.conf import settings
from smart.common.rdf_tools.rdf_ontology import ontology
from smart.common.rdf_tools.util import rdf, foaf, vcard, sp, serialize_rdf, parse_rdf, bound_graph, URIRef, Namespace
from rdflib import Literal  # used by RecordAlert.from_rdf's type check below
from smart.lib import utils
from smart.models.apps import *
from smart.models.accounts import *
from smart.triplestore import *
from string import Template
import re
import datetime
class Record(Object):
Meta = BaseMeta()
full_name = models.CharField(max_length=150, null=False)
def __unicode__(self):
return 'Record %s' % self.id
def generate_direct_access_token(self, account, token_secret=None):
u = RecordDirectAccessToken.objects.create(
record=self,
account=account,
token_secret=token_secret
)
u.save()
return u
@classmethod
def search_records(cls, query):
try:
c = TripleStore()
ids = parse_rdf(c.sparql(query))
except Exception, e:
return None
from smart.models.record_object import RecordObject
demographics = RecordObject[sp.Demographics]
subjects = [p[0] for p in ids.triples((None, rdf['type'],
sp.Demographics))]
ret = c.get_contexts(subjects)
return ret
@classmethod
def rdf_to_objects(cls, res):
if res is None:
return None
m = parse_rdf(res)
record_list = []
q = """
PREFIX sp:<http://smartplatforms.org/terms#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dcterms:<http://purl.org/dc/terms/>
PREFIX v:<http://www.w3.org/2006/vcard/ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
SELECT ?gn ?fn ?dob ?gender ?zipcode ?d
WHERE {
?d rdf:type sp:Demographics.
?d v:n ?n.
?n v:given-name ?gn.
?n v:family-name ?fn.
optional{?d foaf:gender ?gender.}
optional{?d v:bday ?dob.}
optional{
?d v:adr ?a.
?a rdf:type v:Pref.
?a v:postal-code ?zipcode.
}
optional{
?d v:adr ?a.
?a v:postal-code ?zipcode.
}
}"""
people = list(m.query(q))
for p in people:
record = Record()
record.id = re.search(
"\/records\/(.*?)\/demographics", str(p[5])).group(1)
record.fn, record.ln, record.dob, record.gender, record.zipcode = p[:5]
record_list.append(record)
return record_list
class AccountApp(Object):
account = models.ForeignKey(Account)
app = models.ForeignKey(PHA)
# uniqueness
class Meta:
app_label = APP_LABEL
unique_together = (('account', 'app'),)
# Not an OAuth token, but an opaque token that can be used to support
# auto-login via a direct link to a smart_ui_server.
class RecordDirectAccessToken(Object):
record = models.ForeignKey(
Record, related_name='direct_access_tokens', null=False)
account = models.ForeignKey(
Account, related_name='direct_record_shares', null=False)
token = models.CharField(max_length=40, unique=True)
token_secret = models.CharField(max_length=60, null=True)
    expires_at = models.DateTimeField(null=False)
def save(self, *args, **kwargs):
if not self.token:
self.token = utils.random_string(30)
if self.expires_at is None:
minutes_to_expire = 30
try:
minutes_to_expire = settings.MINUTES_TO_EXPIRE_DIRECT_ACCESS
except:
pass
self.expires_at = datetime.datetime.utcnow(
            ) + datetime.timedelta(minutes=minutes_to_expire)
super(RecordDirectAccessToken, self).save(*args, **kwargs)
class Meta:
app_label = APP_LABEL
class RecordAlert(Object):
record = models.ForeignKey(Record)
alert_text = models.TextField(null=False)
alert_time = models.DateTimeField(auto_now_add=True, null=False)
triggering_app = models.ForeignKey(
'OAuthApp', null=False, related_name='alerts')
acknowledged_by = models.ForeignKey('Account', null=True)
acknowledged_at = models.DateTimeField(null=True)
# uniqueness
class Meta:
app_label = APP_LABEL
@classmethod
def from_rdf(cls, rdfstring, record, app):
s = parse_rdf(rdfstring)
q = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX sp: <http://smartplatforms.org/terms#>
SELECT ?notes ?severity
WHERE {
?a rdf:type sp:Alert.
?a sp:notes ?notes.
?a sp:severity ?scv.
?scv sp:code ?severity.
}"""
r = list(s.query(q))
assert len(r) == 1, "Expected one alert in post, found %s" % len(r)
(notes, severity) = r[0]
assert type(notes) == Literal
spcodes = Namespace("http://smartplatforms.org/terms/code/alertLevel#")
assert severity in [spcodes.information, spcodes.warning,
spcodes.critical]
a = RecordAlert(
record=record,
alert_text=str(notes),
triggering_app=app
)
a.save()
return a
def acknowledge(self, account):
self.acknowledged_by = account
self.acknowledged_at = datetime.datetime.now()
self.save()
class LimitedAccount(Account):
records = models.ManyToManyField(Record, related_name="+")
|
ricjon/quarterapp
|
quarterapp/settings.py
|
Python
|
gpl-3.0
| 3,199
| 0.004689
|
#
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import storage
import tornado.database
class QuarterSettings(object):
"""
Application settings contains the settings specific to the application,
not the running server. I.e. port numbers and such should not be kept here
but in the application configuration file (quarterapp.conf).
These settings might be updated at runtime
"""
def __init__(self, db):
"""
Constructs the application settings and try to update the settings
from database
@param db The Tornado database object used to access the database
"""
self.db = db
self.settings = {}
self.update()
def update(self):
"""
Update the settings from the database, if cannot read from database the
old settings remain active
"""
logging.info("Updating settings")
settings = storage.get_settings(self.db)
if settings:
for row in settings:
self.settings[row.name] = row.value
else:
logging.warn("Could not find any settings in database - everything setup ok?")
def get_value(self, key):
"""
Get the setting value for the given key, if no setting exist for this key
None is returned
"""
if self.settings.has_key(key):
return self.settings[key]
else:
return None
def put_value(self, key, value):
"""
Updates the value for the given key. If this key does not exist to begin with
this function will not insert the value. I.e. this function will only update
existing values.
@param key The settings key to update value for
@param value The new value
"""
if self.settings.has_key(key):
            storage.put_setting(self.db, key, value)
self.settings[key] = value
else:
logging.warning("Trying to update a settings key t
|
hat does not exists! (%s)", key)
raise Exception("Trying to update a settings key that does not exists!")
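    # Illustrative note (not part of the original class): put_value only
    # updates keys already present in the settings table, e.g.
    #   settings.put_value("allow-signups", "1")  # ok if the key exists
    #   settings.put_value("brand-new-key", "x")  # raises Exception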
def create_default_config(path):
"""Create a quarterapp.conf file from the example config file"""
import shutil, os.path
target = os.path.join(path, 'quarterapp.conf')
if os.path.exists(target):
print('Cowardly refusing to overwrite configuration file')
else:
shutil.copyfile(os.path.join(os.path.dirname(__file__), 'resources', 'quarterapp.example.conf'), target)
|
JLLeitschuh/allwpilib
|
wpilibj/src/athena/cpp/nivision/get_struct_size.py
|
Python
|
bsd-3-clause
| 308
| 0.006494
|
from __future__ import print_function
import sys
def main():
    for line in sys.stdin:
line = line.strip()
        if not line.startswith("#STRUCT_SIZER"):
continue
line = line[14:]
line = line.replace("#", "")
print(line)
if __name__ == "__main__":
main()
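# Illustrative behaviour sketch (the input format here is assumed, not
# documented): for a stdin line such as
#   "#STRUCT_SIZER int size = 24#"
# the 14-character "#STRUCT_SIZER " prefix is dropped, any remaining '#'
# characters are removed, and "int size = 24" is printed.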
|
deepmind/acme
|
acme/tf/networks/distributions_test.py
|
Python
|
apache-2.0
| 2,618
| 0.00382
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.networks.distributions."""
from absl.testing import absltest
from absl.testing import parameterized
from acme.tf.networks import distributions
import numpy as np
from numpy import testing as npt
class DiscreteValuedDistributionTest(parameterized.TestCase):
@parameterized.parameters(
((), (), 5),
      ((2,), (), 5),
((), (3, 4), 5),
((2,), (3, 4), 5),
((2, 6), (3, 4), 5),
)
def test_constructor(self, batch_shape, event_shape, num_values):
logits_shape = batch_shape + event_shape + (num_values,)
logits_size = np.prod(logits_shape)
logits = np.arange(logits_size, dtype=float).reshape(logits_shape)
values = np.linspace(start=-np.ones(event_shape, dtype=float),
stop=np.ones(event_shape, dtype=float),
num=num_values,
axis=-1)
distribution = distributions.DiscreteValuedDistribution(values=values,
logits=logits)
# Check batch and event shapes.
self.assertEqual(distribution.batch_shape, batch_shape)
self.assertEqual(distribution.event_shape, event_shape)
self.assertEqual(distribution.logits_parameter().shape.as_list(),
list(logits.shape))
self.assertEqual(distribution.logits_parameter().shape.as_list()[-1],
logits.shape[-1])
# Test slicing
if len(batch_shape) == 1:
slice_0_logits = distribution[1:3].logits_parameter().numpy()
expected_slice_0_logits = distribution.logits_parameter().numpy()[1:3]
npt.assert_allclose(slice_0_logits, expected_slice_0_logits)
elif len(batch_shape) == 2:
slice_logits = distribution[0, 1:3].logits_parameter().numpy()
expected_slice_logits = distribution.logits_parameter().numpy()[0, 1:3]
npt.assert_allclose(slice_logits, expected_slice_logits)
else:
assert not batch_shape
if __name__ == '__main__':
absltest.main()
|
Akrog/cinder
|
cinder/tests/test_dellscapi.py
|
Python
|
apache-2.0
| 155,946
| 0
|
# Copyright (c) 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
import mock
from requests import models
import uuid
LOG = logging.getLogger(__name__)
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanAPITestCase(test.TestCase):
'''DellSCSanAPITestCase
Class to test the Storage Center API using Mock.
'''
SC = {u'IPv6ManagementIPPrefix': 128,
u'connectionError': u'',
u'instanceId': u'64702',
u'scSerialNumber': 64702,
u'dataProgressionRunning': False,
u'hostOrIpAddress': u'192.168.0.80',
u'userConnected': True,
u'portsBalanced': True,
u'managementIp': u'192.168.0.80',
u'version': u'6.5.1.269',
u'location': u'',
u'objectType': u'StorageCenter',
u'instanceName': u'Storage Center 64702',
u'statusMessage': u'',
u'status': u'Up',
u'flashOptimizedConfigured': False,
u'connected': True,
u'operationMode': u'Normal',
u'userName': u'Admin',
u'nonFlashOptimizedConfigured': True,
u'name': u'Storage Center 64702',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'serialNumber': 64702,
u'raidRebalanceRunning': False,
u'userPasswordExpired': False,
u'contact': u'',
u'IPv6ManagementIP': u'::'}
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
              u'volumeFolderPath': u'devstackvol/fcvm/',
              u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
INACTIVE_VOLUME = \
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
# ScServer where deletedAllowed=False (not allowed to be deleted)
SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
|
NoneGG/aredis
|
tests/cluster/conftest.py
|
Python
|
mit
| 3,388
| 0.000885
|
# -*- coding: utf-8 -*-
# python std lib
import asyncio
import os
import sys
import json
# rediscluster imports
from aredis import StrictRedisCluster, StrictRedis
# 3rd party imports
import pytest
from distutils.version import StrictVersion
# put our path in front so we can be sure we are testing locally not against the global package
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(1, basepath)
_REDIS_VERSIONS = {}
def get_versions(**kwargs):
key = json.dumps(kwargs)
if key not in _REDIS_VERSIONS:
client = _get_client(**kwargs)
loop = asyncio.get_event_loop()
info = loop.run_until_complete(client.info())
_REDIS_VERSIONS[key] = {key: value['redis_version'] for key, value in info.items()}
return _REDIS_VERSIONS[key]
def _get_client(cls=None, **kwargs):
if not cls:
cls = StrictRedisCluster
params = {
'startup_nodes': [{
'host': '127.0.0.1', 'port': 7000
}],
'stream_timeout': 10,
}
params.update(kwargs)
return cls(**params)
def _init_mgt_client(request, cls=None, **kwargs):
"""
"""
client = _get_client(cls=cls, **kwargs)
if request:
def teardown():
client.connection_pool.disconnect()
request.addfinalizer(teardown)
return client
def skip_if_not_password_protected_nodes():
"""
"""
return pytest.mark.skipif('TEST_PASSWORD_PROTECTED' not in os.environ, reason="")
def skip_if_server_version_lt(min_version):
"""
"""
versions = get_versions()
for version in versions.values():
if StrictVersion(version) < StrictVersion(min_version):
return pytest.mark.skipif(True, reason="")
return pytest.mark.skipif(False, reason="")
def skip_if_redis_py_version_lt(min_version):
"""
"""
import aredis
version = aredis.__version__
if StrictVersion(version) < StrictVersion(min_version):
return pytest.mark.skipif(True, reason="")
return pytest.mark.skipif(False, reason="")
@pytest.fixture()
def o(request, *args, **kwargs):
"""
Create a StrictRedisCluster instance with decode_responses set to True.
"""
params = {'decode_responses': True}
params.update(kwargs)
return _get_client(cls=StrictRedisCluster, **params)
@pytest.fixture()
def r(request, *args, **kwargs):
"""
Create a StrictRedisCluster instance with default settings.
"""
return _get_client(cls=StrictRedisCluster, **kwargs)
@pytest.fixture()
def ro(request, *args, **kwargs):
"""
Create a StrictRedisCluster instance with readonly mode
"""
params = {'readonly': True}
params.update(kwargs)
return _get_client(cls=StrictRedisCluster, **params)
@pytest.fixture()
def s(*args, **kwargs):
"""
Create a StrictRedisCluster instance with 'init_slot_cache' set to false
"""
s = _get_client(**kwargs)
assert s.connection_pool.nodes.slots == {}
assert s.connection_pool.nodes.nodes == {}
return s
@pytest.fixture()
def t(*args, **kwargs):
"""
Create a regular StrictRedis object instance
"""
return StrictRedis(*args, **kwargs)
@pytest.fixture()
def sr(request, *args, **kwargs):
"""
    Returns an instance of StrictRedisCluster
"""
return _get_client(reinitialize_steps=1, cls=StrictRedisCluster, **kwargs)
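# Editor's note: a hedged usage sketch (not part of the original conftest);
# test modules would typically apply the skip helpers above as decorators:
#
#     @skip_if_server_version_lt('2.9.0')
#     @pytest.mark.asyncio
#     async def test_set_get(r):
#         await r.set('key', 'value')
#         assert await r.get('key') == b'value'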
|
dchirikov/luna
|
luna/utils/ip.py
|
Python
|
gpl-3.0
| 5,972
| 0.002009
|
'''
Written by Dmitry Chirikov <dmitry@chirikov.ru>
This file is part of Luna, cluster provisioning tool
https://github.com/dchirikov/luna
This file is part of Luna.
Luna is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Luna is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Luna. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import socket
from binascii import hexlify, unhexlify
import logging
log = logging.getLogger(__name__)
af = {
4: socket.AF_INET,
6: socket.AF_INET6,
}
hex_format = {
4: '08x',
6: '032x'
}
def ntoa(num_ip, ver=4):
"""
Convert the IP numip from the binary notation
into the IPv4 numbers-and-dots form
"""
try:
ip = socket.inet_ntop(
af[ver],
unhexlify(format(num_ip, hex_format[ver]))
)
return ip
except:
err_msg = ("Cannot convert '{}' from C"
" to IPv{} format".format(num_ip, ver))
log.error(err_msg)
        raise RuntimeError(err_msg)
def aton(ip, ver=4):
"""
Convert the IP ip from the IPv4 numbers-and-dots
notation into binary form (in network byte order)
"""
try:
        absnum = int(hexlify(socket.inet_pton(af[ver], ip)), 16)
return long(absnum)
except:
err_msg = "Cannot convert IP '{}' to C format".format(ip)
log.error(err_msg)
        raise RuntimeError(err_msg)
def reltoa(num_net, rel_ip, ver):
"""
Convert a relative ip (a number relative to the base of the
network obtained using 'get_num_subnet') into an IPv4 address
"""
num_ip = int(num_net) + int(rel_ip)
return ntoa(num_ip, ver)
def atorel(ip, num_net, prefix, ver=4):
"""
Convert an IPv4 address into a number relative to the base of
the network obtained using 'get_num_subnet'
"""
num_ip = aton(ip, ver)
# Check if the ip address actually belongs to num_net/prefix
if not ip_in_net(ip, num_net, prefix, ver):
err_msg = ("Network '{}/{}' does not contain '{}'"
.format(ntoa(num_net, ver), prefix, ip))
log.error(err_msg)
        raise RuntimeError(err_msg)
relative_num = long(num_ip - num_net)
return relative_num
def get_num_subnet(ip, prefix, ver=4):
"""
Get the address of the subnet to which ip belongs in binary form
"""
maxbits = 32
if ver == 6:
maxbits = 128
try:
prefix = int(prefix)
except:
err_msg = "Prefix '{}' is invalid, must be 'int'".format(prefix)
log.error(err_msg)
        raise RuntimeError(err_msg)
if ver == 4 and prefix not in range(1, 31):
err_msg = "Prefix should be in the range [1..30]"
log.error(err_msg)
        raise RuntimeError(err_msg)
if ver == 6 and prefix not in range(1, 127):
err_msg = "Prefix should be in the range [1..126]"
log.error(err_msg)
        raise RuntimeError(err_msg)
if type(ip) is long or type(ip) is int:
num_ip = ip
else:
try:
num_ip = aton(ip, ver)
except socket.error:
err_msg = "'{}' is not a valid IP".format(ip)
log.error(err_msg)
            raise RuntimeError(err_msg)
num_mask = (((1 << maxbits) - 1)
^ ((1 << (maxbits+1 - prefix) - 1) - 1))
num_subnet = long(num_ip & num_mask)
return num_subnet
def ip_in_net(ip, num_net, prefix, ver=4):
"""
Check if an address (either in binary or IPv4 form) belongs to
num_net/prefix
"""
if type(ip) is long or type(ip) is int:
num_ip = ip
else:
num_ip = aton(ip, ver)
num_subnet1 = get_num_subnet(num_net, prefix, ver)
num_subnet2 = get_num_subnet(num_ip, prefix, ver)
return num_subnet1 == num_subnet2
def guess_ns_hostname():
"""
Try to guess the hostname to use for the nameserver
it supports hosts of the format host-N, hostN for HA
configurations. Returns the current hostname otherwise
"""
ns_hostname = socket.gethostname().split('.')[0]
if ns_hostname[-1:].isdigit():
guessed_name = re.match('(.*)[0-9]+$', ns_hostname).group(1)
if guessed_name[-1] == '-':
guessed_name = guessed_name[:-1]
try:
guessed_ip = socket.gethostbyname(guessed_name)
except:
guessed_ip = None
if guessed_ip:
log.info(("Guessed that NS server should be '%s', "
"instead of '%s'. "
"Please update if this is not correct.") %
(guessed_name, ns_hostname))
return guessed_name
# Return the current host's hostname if the guessed name could not
# be resolved
return ns_hostname
def get_ip_version(ip):
for ver in [4, 6]:
try:
int(hexlify(socket.inet_pton(af[ver], ip)), 16)
return ver
except:
pass
return None
def ipv6_unwrap(ip):
"""
    Returns IPv6 ip address in full form:
fe80:1:: => fe80:0001:0000:0000:0000:0000:0000:0000
2001:db8::ff00:42:8329 => 2001:0db8:0000:0000:0000:ff00:0042:8329
"""
ip = ntoa(aton(ip, 6), 6)
out = [''] * 8
start, end = ip.split('::')
start_splited = start.split(':')
end_splited = end.split(':')
out[:len(start_splited)] = start_splited
i = 1
for elem in reversed(end_splited):
out[-i] = elem
i += 1
for i in range(len(out)):
out[i] = '{:0>4}'.format(out[i])
return ":".join(out)
|
ProjectQ-Framework/ProjectQ
|
projectq/ops/_command_test.py
|
Python
|
apache-2.0
| 12,405
| 0.000564
|
# -*- coding: utf-8 -*-
# Copyright 2017, 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projectq.ops._command."""
import math
import sys
from copy import deepcopy
import pytest
from projectq import MainEngine
from projectq.cengines import DummyEngine
from projectq.meta import ComputeTag, canonical_ctrl_state
from projectq.ops import BasicGate, CtrlAll, NotMergeable, Rx, _command
from projectq.types import Qubit, Qureg, WeakQubitRef
@pytest.fixture
def main_engine():
return MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
def test_command_init(main_engine):
qureg0 = Qureg([Qubit(main_engine, 0)])
qureg1 = Qureg([Qubit(main_engine, 1)])
qureg2 = Qureg([Qubit(main_engine, 2)])
# qureg3 = Qureg([Qubit(main_engine, 3)])
# qureg4 = Qureg([Qubit(main_engine, 4)])
gate = BasicGate()
cmd = _command.Command(main_engine, gate, (qureg0, qureg1, qureg2))
assert cmd.gate == gate
assert cmd.tags == []
expected_tuple = (qureg0, qureg1, qureg2)
for cmd_qureg, expected_qureg in zip(cmd.qubits, expected_tuple):
assert cmd_qureg[0].id == expected_qureg[0].id
# Testing that Qubits are now WeakQubitRef objects
assert type(cmd_qureg[0]) == WeakQubitRef
assert cmd._engine == main_engine
# Test that quregs are ordered if gate has interchangeable qubits:
symmetric_gate = BasicGate()
symmetric_gate.interchangeable_qubit_indices = [[0, 1]]
symmetric_cmd = _command.Command(main_engine, symmetric_gate, (qureg2, qureg1, qureg0))
assert cmd.gate == gate
assert cmd.tags == []
expected_ordered_tuple = (qureg1, qureg2, qureg0)
for cmd_qureg, expected_qureg in zip(symmetric_cmd.qubits, expected_ordered_tuple):
assert cmd_qureg[0].id == expected_qureg[0].id
assert symmetric_cmd._engine == main_engine
def test_command_deepcopy(main_engine):
qureg0 = Qureg([Qubit(main_engine, 0)])
qureg1 = Qureg([Qubit(main_engine, 1)])
gate = BasicGate()
cmd = _command.Command(main_engine, gate, (qureg0,))
cmd.add_control_qubits(qureg1)
cmd.tags.append("MyTestTag")
copied_cmd = deepcopy(cmd)
# Test that deepcopy gives same cmd
assert copied_cmd.gate == gate
assert copied_cmd.tags == ["MyTestTag"]
assert len(copied_cmd.qubits) == 1
assert copied_cmd.qubits[0][0].id == qureg0[0].id
assert len(copied_cmd.control_qubits) == 1
assert copied_cmd.control_qubits[0].id == qureg1[0].id
# Engine should not be deepcopied but a reference:
assert id(copied_cmd.engine) == id(main_engine)
# Test that deepcopy is actually a deepcopy
cmd.tags = ["ChangedTag"]
assert copied_cmd.tags == ["MyTestTag"]
cmd.control_qubits[0].id == 10
assert copied_cmd.control_qubits[0].id == qureg1[0].id
cmd.gate = "ChangedGate"
assert copied_cmd.gate == gate
def test_command_get_inverse(main_engine):
qubit = main_engine.allocate_qubit()
ctrl_qubit = main_engine.allocate_qubit()
cmd = _command.Command(main_engine, Rx(0.5), (qubit,))
cmd.add_control_qubits(ctrl_qubit)
cmd.tags = [ComputeTag()]
inverse_cmd = cmd.get_inverse()
assert inverse_cmd.gate == Rx(-0.5 + 4 * math.pi)
assert len(cmd.qubits) == len(inverse_cmd.qubits)
assert cmd.qubits[0][0].id == inverse_cmd.qubits[0][0].id
assert id(cmd.qubits[0][0]) != id(inverse_cmd.qubits[0][0])
assert len(cmd.control_qubits) == len(inverse_cmd.control_qubits)
assert cmd.control_qubits[0].id == inverse_cmd.control_qubits[0].id
assert id(cmd.control_qubits[0]) != id(inverse_cmd.control_qubits[0])
assert cmd.tags == inverse_cmd.tags
assert id(cmd.tags[0]) != id(inverse_cmd.tags[0])
assert id(cmd.engine) == id(inverse_cmd.engine)
def test_command_get_merged(main_engine):
qubit = main_engine.allocate_qubit()
ctrl_qubit = main_engine.allocate_qubit()
cmd = _command.Command(main_engine, Rx(0.5), (qubit,))
cmd.tags = ["TestTag"]
cmd.add_control_qubits(ctrl_qubit)
# Merge two commands
cmd2 = _command.Command(main_engine, Rx(0.5), (qubit,))
cmd2.add_control_qubits(ctrl_qubit)
cmd2.tags = ["TestTag"]
merged_cmd = cmd.get_merged(cmd2)
expected_cmd = _command.Command(main_engine, Rx(1.0), (qubit,))
expected_cmd.add_control_qubits(ctrl_qubit)
expected_cmd.tags = ["TestTag"]
assert merged_cmd == expected_cmd
# Don't merge commands as different control qubits
cmd3 = _command.Command(main_engine, Rx(0.5), (qubit,))
cmd3.tags = ["TestTag"]
with pytest.raises(NotMergeable):
cmd.get_merged(cmd3)
# Don't merge commands as different tags
cmd4 = _command.Command(main_engine, Rx(0.5), (qubit,))
cmd4.add_control_qubits(ctrl_qubit)
with pytest.raises(NotMergeable):
cmd.get_merged(cmd4)
def test_command_is_identity(main_engine):
qubit = main_engine.allocate_qubit()
qubit2 = main_engine.allocate_qubit()
cmd = _command.Command(main_engine, Rx(0.0), (qubit,))
cmd2 = _command.Command(main_engine, Rx(0.5), (qubit2,))
inverse_cmd = cmd.get_inverse()
inverse_cmd2 = cmd2.get_inverse()
assert inverse_cmd.gate.is_identity()
assert cmd.gate.is_identity()
assert not inverse_cmd2.gate.is_identity()
assert not cmd2.gate.is_identity()
def test_command_order_qubits(main_engine):
qubit0 = Qureg([Qubit(main_engine, 0)])
qubit1 = Qureg([Qubit(main_engine, 1)])
qubit2 = Qureg([Qubit(main_engine, 2)])
qubit3 = Qureg([Qubit(main_engine, 3)])
qubit4 = Qureg([Qubit(main_engine, 4)])
qubit5 = Qureg([Qubit(main_engine, 5)])
gate = BasicGate()
gate.interchangeable_qubit_indices = [[0, 4, 5], [1, 2]]
input_tuple = (qubit4, qubit5, qubit3, qubit2, qubit1, qubit0)
expected_tuple = (qubit0, qubit3, qubit5, qubit2, qubit1, qubit4)
    cmd = _command.Command(main_engine, gate, input_tuple)
for ordered_qubit, expected_qubit in zip(cmd.qubits, expected_tuple):
assert ordered_qubit[0].id == expected_qubit[0].id
def test_command_interchangeable_qubit_indices(main_engine):
gate = BasicGate()
gate.interchangeable_qubit_indices = [[0, 4, 5], [1, 2]]
qubit0 = Qureg([Qubit(main_engine, 0)])
qubit1 = Qureg([Qubit(main_engine, 1)])
    qubit2 = Qureg([Qubit(main_engine, 2)])
qubit3 = Qureg([Qubit(main_engine, 3)])
qubit4 = Qureg([Qubit(main_engine, 4)])
qubit5 = Qureg([Qubit(main_engine, 5)])
input_tuple = (qubit4, qubit5, qubit3, qubit2, qubit1, qubit0)
cmd = _command.Command(main_engine, gate, input_tuple)
assert (
cmd.interchangeable_qubit_indices
== [
[0, 4, 5],
[1, 2],
]
or cmd.interchangeable_qubit_indices == [[1, 2], [0, 4, 5]]
)
@pytest.mark.parametrize(
'state',
[0, 1, '0', '1', CtrlAll.One, CtrlAll.Zero],
ids=['int(0)', 'int(1)', 'str(0)', 'str(1)', 'CtrlAll.One', 'CtrlAll.Zero'],
)
def test_commmand_add_control_qubits_one(main_engine, state):
qubit0 = Qureg([Qubit(main_engine, 0)])
qubit1 = Qureg([Qubit(main_engine, 1)])
cmd = _command.Command(main_engine, Rx(0.5), (qubit0,))
cmd.add_control_qubits(qubit1, state=state)
assert cmd.control_qubits[0].id == 1
assert cmd.control_state == canonical_ctrl_state(state, 1)
with pytest.raises(ValueError):
cmd.add_control_qubits(qubit0[0])
@pytest.mark.parametrize(
'state',
[0, 1, 2, 3, '00', '01', '10', '11', CtrlAll.One, CtrlAll.Zero],
ids=[
'int(0)',
'int(1)',
'int(2)',
'int(3)',
'str(00)',
'str(01)',
'str(10)',
|
bioidiap/bob.bio.spear
|
bob/bio/spear/config/extractor/cqcc20.py
|
Python
|
gpl-3.0
| 328
| 0.003049
|
import numpy
import bob.bio.spear
# The authors of CQCC features recommend to use only first 20 features, plus deltas and delta-deltas
# feature vector is 60 in this case
cqcc20 = bob.bio.spear.extractor.CQCCFeatures(
features_mask=numpy.r_[
        numpy.arange(0, 20), numpy.arange(30, 50), numpy.arange(60, 80)
]
)
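# Editor's note (illustrative, not in the original config): the mask keeps
# 20 static coefficients plus 20 deltas and 20 delta-deltas, yielding the
# 60-dimensional feature vector mentioned above:
#
#     assert len(numpy.r_[numpy.arange(0, 20),
#                         numpy.arange(30, 50),
#                         numpy.arange(60, 80)]) == 60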
|
beni55/SimpleCV
|
SimpleCV/MachineLearning/TestTemporalColorTracker.py
|
Python
|
bsd-3-clause
| 1,136
| 0.033451
|
from SimpleCV import Camera, Image, Color, TemporalColorTracker, ROI, Display
import matplotlib.pyplot as plt
cam = Camera(1)
tct = TemporalColorTracker()
img = cam.getImage()
roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
tct.train(cam,roi=roi,maxFrames=250,pkWndw=20)
# Matplotlib example plotting
plotc = {'r':'r','g':'g','b':'b','i':'m','h':'y'}
for key in tct.data.keys():
plt.plot(tct.data[key],plotc[key])
for pt in tct.peaks[key]:
plt.plot(pt[0],pt[1],'r*')
for pt in tct.valleys[key]:
plt.plot(pt[0],pt[1],'b*')
plt.grid()
plt.show()
disp = Display((800,600))
while disp.isNotDone():
img = cam.getImage()
result = tct.recognize(img)
plt.plot(tct._rtData,'r-')
plt.grid()
plt.savefig('temp.png')
plt.clf()
plotImg = Image('temp.png')
roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
roi.draw(width=3)
img.drawText(str(result),20,20,color=Color.RED,fontsize=32)
img = img.applyLayers()
img = img.blit(plotImg.resize(w=img.width,h=img.height),pos=(0,0),alpha=0.5)
img.save(disp)
|
butwhywhy/yamltempl
|
yamltempl/yaml_templates_command.py
|
Python
|
mit
| 1,406
| 0.000711
|
#! /usr/bin/env python
import argparse
import sys
from yamltempl import yamlutils, vtl
def main():
parser = argparse.ArgumentParser(
description="Merge yaml data into a Velocity Template Language template")
parser.add_argument('yamlfile',
metavar='filename.yaml',
type=argparse.FileType('r'),
help='the yaml file containing the data')
parser.add_argument('-t', '--template',
metavar='file',
type=argparse.FileType('r'),
                        default=sys.stdin,
help='the template file. If omitted, the template '
'is read from standard input')
parser.add_argument('-o', '--output',
metavar='file',
type=argparse.FileType('w'),
default=sys.stdout,
help='the output file, where the result should be '
'written. Standard output is used if omitted')
args = parser.parse_args()
yamldata = yamlutils.ordered_load(args.yamlfile)
args.yamlfile.close()
templ = args.template.read().decode('utf8')
args.template.close()
result = vtl.merge(yamldata, templ)
args.output.write(result.encode('utf8'))
args.output.close()
if __name__ == '__main__':
main()
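# Editor's note: example invocations (assumed, not from the source):
#     python yaml_templates_command.py data.yaml -t page.vtl -o page.html
#     cat page.vtl | python yaml_templates_command.py data.yaml > page.html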
|
x4dr/NossiNet
|
NossiSite/extra.py
|
Python
|
gpl-2.0
| 9,041
| 0.001106
|
import random
import time
from flask import (
request,
session,
flash,
redirect,
    url_for,
Response,
render_template,
)
from NossiPack.Cards import Cards
from NossiPack.User import Userlist
from NossiPack.VampireCharacter import VampireCharacter
from NossiPack.krypta import DescriptiveError
from NossiSite.base import app as defaultapp, log
from NossiSite.helpers import checklogin
def register(app=None):
if app is None:
app = defaultapp
@app.route("/setfromsource/")
def setfromsource():
checklogin()
source = request.args.get("source")
ul = Userlist()
u = ul.loaduserbyname(session.get("user"))
try:
new = VampireCharacter()
if new.setfromdalines(source[-7:]):
u.sheetid = u.savesheet(new)
ul.saveuserlist()
flash("character has been overwritten with provided Dalines sheet!")
else:
flash("problem with " + source)
except Exception:
log.exception("setfromsource:")
flash(
"Sorry "
+ session.get("user").capitalize()
+ ", I can not let you do that."
)
return redirect(url_for("charsheet"))
@app.route("/timetest")
def timetest():
return str(time.time())
@app.route("/boardgame<int:size>_<seed>.json")
@app.route("/boardgame<int:size>_.json")
def boardgamemap(size, seed=""):
if size > 100:
size = 100
rx = random.Random()
if seed:
rx.seed(str(size) + str(seed))
def r(a=4):
for _ in range(a):
yield rx.randint(1, 10)
def e(inp, dif):
for i in inp:
yield 2 if i == 10 else (1 if i >= dif else 0)
def fpik(inp, pref="FPIK"):
vals = list(inp)
vals = [(v if v != 2 else (2 if sum(vals) < 4 else 1)) for v in vals]
for i, p in enumerate(pref):
yield '"' + p + '": ' + str(vals[i])
def cell(): # i, j):
difficulty = 8
"""6 + (
(9 if i == j else
8)
if i in [0, size - 1] and j in [0, size - 1] else
(7 if j in [0, size - 1] else
(6 if j % 2 == 1 and (i in [0, size - 1] or j in [0, size - 1]) else
(5 if 0 < i < size - 1 else 8))))"""
for li in fpik(e(r(), difficulty)):
yield li
first = True
def notfirst():
nonlocal first
if first:
first = False
return True
return False
def resetfirst():
nonlocal first
first = True
def generate():
yield '{"board": ['
for x in range(size):
yield ("," if not first else "") + "["
resetfirst()
for y in range(size):
yield ("" if notfirst() else ",") + '{ "x":%d, "y":%d, ' % (
x,
y,
) + ",".join(
cell(
# x, y
)
) + "}"
yield "]"
yield "]}"
return Response(generate(), mimetype="text/json")
@app.route("/gameboard/<int:size>/")
@app.route("/gameboard/<int:size>/<seed>")
def gameboard(size, seed=""):
if size > 20:
size = 20
return render_template("gameboard.html", size=size, seed=seed)
@app.route("/chargen/standard")
def standardchar():
return redirect(
url_for("chargen", a=3, b=5, c=7, abia=5, abib=9, abic=13, shuffle=1)
)
@app.route("/cards/", methods=["GET"])
@app.route("/cards/<command>", methods=["POST", "GET"])
def cards(command: str = None):
checklogin()
deck = Cards.getdeck(session["user"])
try:
if request.method == "GET":
if command is None:
return deck.serialized_parts
elif request.method == "POST":
par = request.get_json()["parameter"]
if command == "draw":
return {"result": list(deck.draw(par))}
elif command == "spend":
return {"result": list(deck.spend(par))}
elif command == "returnfun":
return {"result": list(deck.pilereturn(par))}
elif command == "dedicate":
if ":" not in par:
par += ":"
return {"result": list(deck.dedicate(*par.split(":", 1)))}
elif command == "remove":
return {"result": list(deck.remove(par))}
elif command == "free":
message = deck.undedicate(par)
for m in message:
flash("Affected Dedication: " + m)
return {"result": "ok", "messages": list(message)}
elif command == "free":
affected, message = deck.free(par)
for m in message:
flash("Affected Dedication: " + m)
return {
"result": list(affected),
"messages": message,
}
else:
return {"result": "error", "error": f"invalid command {command}"}
return render_template("cards.html", cards=deck)
except DescriptiveError as e:
return {"result": "error", "error": e.args[0]}
except TypeError:
return {"result": "error", "error": "Parameter is not in a valid Format"}
finally:
Cards.savedeck(session["user"], deck)
@app.route("/chargen", methods=["GET", "POST"])
def chargen_menu():
if request.method == "POST":
f = dict(request.form)
if not f.get("vampire", None):
return redirect(
url_for(
"chargen",
a=f["a"],
b=f["b"],
c=f["c"],
abia=f["abia"],
abib=f["abib"],
abic=f["abic"],
shuffle=1 if f.get("shuffle", 0) else 0,
)
)
return redirect(
url_for(
"chargen",
a=f["a"],
b=f["b"],
c=f["c"],
abia=f["abia"],
abib=f["abib"],
abic=f["abic"],
shuffle=1 if f["shuffle"] else 0,
vamp=f["discipline"],
back=f["back"],
)
)
return render_template("generate_dialog.html")
@app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>")
@app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>,<vamp>,<back>")
def chargen(a, b, c, abia, abib, abic, shuffle, vamp=None, back=None):
"""
Redirects to the charactersheet/ editor(if logged in) of a randomly
generated character
:param a: points to be allocated in the first attribute group
:param b: points to be allocated in the second attribute group
:param c: points to be allocated in the third attribute group
:param abia: points to be allocated in the first ability group
:param abib: points to be allocated in the second ability group
:param abic: points to be allocated in the third ability group
:param shuffle: if the first/second/third groups should be shuffled (each)
:param vamp: if not None, character will be a vampire, int(vamp)
is the amount of discipline points
:param back: background points
"""
try:
char = VampireCharacter.makerandom(
1,
5,
|
EKT/pyrundeck
|
tests/unit_tests/test_native_conversion.py
|
Python
|
bsd-3-clause
| 34,251
| 0
|
# Copyright (c) 2015, National Documentation Centre (EKT, www.ekt.gr)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# Neither the name of the National Documentation Centre nor the
# names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os import path
from lxml import etree
import nose.tools as nt
from nose.tools import raises
from tests import config
import pyrundeck.rundeck_parser as xmlp
from pyrundeck.xml2native import ParseError
__author__ = "Panagiotis Koutsourakis <kutsurak@ekt.gr>"
class TestXMLToNativePython:
def setup(self):
self.bogus_xml = etree.fromstring('<foo/>')
self.parser = xmlp.RundeckParser()
def test_job_creates_single_job_correctly(self):
single_job = path.join(config.rundeck_test_data_dir,
'single_job_from_response.xml')
with open(single_job) as job_fl:
single_job_etree = etree.fromstring(job_fl.read())
expected = {
'id': "ea17d859-32ff-45c8-8a0d-a16ac1ea3566",
'name': 'long job',
'group': '',
'project': 'API_client_development',
'description': 'async testing'
}
nt.assert_equal(expected, xmlp.parse(single_job_etree, 'composite',
self.parser.job_parse_table))
@raises(ParseError)
def test_job_raises_if_not_job_tag(self):
xmlp.parse(self.bogus_xml, 'composite', self.parser.job_parse_table)
def test_job_raises_if_missing_mandatory(self):
missing_id = ('<job><name>long job</name><group/><project>'
'API_client_development</project><description>'
'async testing</description></job>')
nt.assert_raises(ParseError, xmlp.parse,
etree.fromstring(missing_id),
'composite',
self.parser.job_parse_table)
missing_name = ('<job id="foo"><group/><project>API_client_development'
'</project><description>async testing</description>'
'</job>')
nt.assert_raises(ParseError, xmlp.parse,
etree.fromstring(missing_name),
'composite',
self.parser.job_parse_table)
missing_project = ('<job id="foo"><name>foo</name><group/>'
'<description>asynctesting</description></job>')
nt.assert_raises(ParseError, xmlp.parse,
etree.fromstring(missing_project),
'composite',
self.parser.job_parse_table)
def test_jobs_creates_multiple_jobs_correctly(self):
multiple_jobs = path.join(config.rundeck_test_data_dir,
'multiple_jobs.xml')
with open(multiple_jobs) as jobs_fl:
multiple_jobs = etree.fromstring(jobs_fl.read())
expected = {
'count': 3,
'list': [
{
'id': "3b8a86d5-4fc3-4cc1-95a2-8b51421c2069",
'name': 'job_with_args',
'group': '',
'project': 'API_client_development',
'description': ''
},
{
'id': "ea17d859-32ff-45c8-8a0d-a16ac1ea3566",
'name': 'long job',
'group': '',
'project': 'API_client_development',
'description': 'async testing'
},
{
'id': "78f491e7-714f-44c6-bddb-8b3b3a961ace",
'name': 'test_job_1',
'group': '',
'project': 'API_client_development',
'description': ''
},
]
}
nt.assert_equal(expected, xmlp.parse(multiple_jobs, 'list',
self.parser.jobs_parse_table))
@raises(ParseError)
def test_jobs_raises_if_not_jobs_tag(self):
xmlp.parse(self.bogus_xml, 'list', self.parser.jobs_parse_table)
@raises(ParseError)
def test_jobs_raises_if_no_count(self):
xml_str = ('<jobs>'
'<job id="3b8a86d5-4fc3-4cc1-95a2-8b51421c2069">'
'<name>job_with_args</name>'
'<group/>'
'<project>API_client_development</project>'
'<description/>'
'</job>'
'<job id="ea17d859-32ff-45c8-8a0d-a16ac1ea3566">'
'<name>long job</name>'
'<group/>'
'<project>API_client_development</project>'
'<description>async testing</description>'
'</job>'
'<job id="78f491e7-714f-44c6-bddb-8b3b3a961ace">'
'<name>test_job_1</name>'
'<group/>'
'<project>API_client_development</project>'
'<description/>'
'</job>'
'</jobs>')
xml_tree = etree.fromstring(xml_str)
xmlp.parse(xml_tree, 'list', self.parser.jobs_parse_table)
@raises(ParseError)
def test_jobs_raises_if_count_neq_jobs_len(self):
xml_str = ('<jobs count="5">'
'<job id="3b8a86d5-4fc3-4cc1-95a2-8b51421c2069">'
'<name>job_with_args</name>'
'<group/>'
'<project>API_client_development</project>'
'<description/>'
'</job>'
'<job id="ea17d859-32ff-45c8-8a0d-a16ac1ea3566">'
'<name>long job</name>'
'<group/>'
'<project>API_client_development</project>'
'<description>async testing</description>'
'</job>'
'<job id="78f491e7-714f-44c6-bddb-8b3b3a961ace">'
'<name>test_job_1</name>'
'<group/>'
'<project>API_client_development</project>'
'<description/>'
'</job>'
'</jobs>')
xml_tree = etree.fromstring(xml_str)
xmlp.parse(xml_tree, 'list', self.parser.jobs_parse_table)
def test_execution_creates_single_execution_correctly(self):
nt.assert_equal.__self__.maxDiff = 1000
test_data_file = path.join(config.rundeck_test_data_dir,
'execution.xml')
with open(test_data_file) as ex_fl:
xml_str = ex_fl.read()
expected = {
'id': '117',
'href': 'http://192.168.50.2:4440/execution/follow/117',
'status': 'succeeded',
'pro
|
georgemarshall/django
|
tests/utils_tests/test_inspect.py
|
Python
|
bsd-3-clause
| 1,501
| 0.002665
|
import unittest
from django.utils import inspect
class Person:
def no_arguments(self):
return None
def one_argument(self, something):
return something
def just_args(self, *args):
return args
def all_kinds(self, name, address='home', age=25, *args, **kwargs):
return kwargs
class TestInspectMethods(unittest.TestCase):
def test_get_func_full_args_no_arguments(self):
self.assertEqual(inspect.get_func_full_args(Person.no_arguments), [])
def test_get_func_full_args_one_argument(self):
self.assertEqual(inspect.get_func_full_args(Person.one_argument), [('something',)])
def test_get_func_full_args_all_arguments(self):
arguments = [('name',), ('address', 'home'), ('age', 25), ('*args',), ('**kwargs',)]
self.assertEqual(inspect.get_func_full_args(Person.all_kinds), arguments)
def test_func_accepts_var_args_has_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.just_args), True)
def test_func_accepts_var_args_no_var_args(self):
        self.assertIs(inspect.func_accepts_var_args(Person.one_argument), False)
def test_method_has_no_args(self):
self.assertIs(inspect.method_has_no_args(Person.no_arguments), True)
self.assertIs(inspect.method_has_no_args(Person.one_argument), False)
self.assertIs(inspect.method_has_no_args(Person().no_arguments), True)
        self.assertIs(inspect.method_has_no_args(Person().one_argument), False)
|
zbuc/imaghost
|
ghost_exceptions/__init__.py
|
Python
|
bsd-2-clause
| 120
| 0
|
from __future__ import (absolute_import, print_function, division)
class NotImplementedException(Exception):
pass
|
owlabs/incubator-airflow
|
tests/test_utils/db.py
|
Python
|
apache-2.0
| 2,430
| 0.000823
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import (
Connection, DagModel, DagRun, DagTag, Pool, RenderedTaskInstanceFields, SlaMiss, TaskInstance, Variable,
errors,
)
from airflow.models.dagcode import DagCode
from airflow.utils.db import add_default_pool_if_not_exists, create_default_connections, \
    create_session
def clear_db_runs():
with create_session() as session:
session.query(DagRun).delete()
session.query(TaskInstance).delete()
def clear_db_dags():
with create_session() as session:
session.query(DagTag).delete()
session.query(DagModel).delete()
def clear_db_sla_miss():
with create_session() as session:
session.query(SlaMiss).delete()
def clear_db_errors():
with create_session() as session:
session.query(errors.ImportError).delete()
def clear_db_pools():
with create_session() as session:
session.query(Pool).delete()
add_default_pool_if_not_exists(session)
def clear_db_connections():
with create_session() as session:
session.query(Connection).delete()
create_default_connections(session)
def clear_db_variables():
with create_session() as session:
session.query(Variable).delete()
def clear_db_dag_code():
with create_session() as session:
session.query(DagCode).delete()
def set_default_pool_slots(slots):
with create_session() as session:
default_pool = Pool.get_default_pool(session)
default_pool.slots = slots
def clear_rendered_ti_fields():
with create_session() as session:
session.query(RenderedTaskInstanceFields).delete()
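# Editor's note: a hedged usage sketch (not part of the original module);
# test cases typically reset state with these helpers in setUp, e.g.:
#
#     class TestScheduler(unittest.TestCase):
#         def setUp(self):
#             clear_db_runs()
#             clear_db_dags()
#             clear_db_pools()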
|
svebk/DeepSentiBank_memex
|
cu_image_search/www/manager/locker.py
|
Python
|
bsd-2-clause
| 868
| 0.004608
|
import threading
# lock for each project or domain
# treat it as a singleton
# all file operation should be in lock region
class Locker(object):
_lock = {}
def acquire(self, name):
# create lock if it is not there
if name not in self._lock:
self._lock[name] = threading.Lock()
# acquire lock
self._lock[name].acquire()
def release(self, name):
# lock hasn't been created
if name not in self._lock:
return
try:
            self._lock[name].release()
except:
pass
def remove(self, name):
# acquire lock first!!!
if name not in self._lock:
return
try:
l = self._lock[name]
del self._lock[name] # remove lock name first, then release
l.release()
except:
pass
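# Editor's note: a minimal usage sketch (not in the original module); the
# project name keys one lock, serializing file operations per project.
if __name__ == '__main__':
    locker = Locker()
    locker.acquire('project-a')
    try:
        pass  # file operations for 'project-a' would go here
    finally:
        locker.release('project-a')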
|
tiborsimko/invenio-ext
|
invenio_ext/fixtures/registry.py
|
Python
|
gpl-2.0
| 1,417
| 0.000706
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry definition for fixture datasets."""
from flask_registry import RegistryProxy
from invenio_ext.registry import ModuleAutoDiscoveryRegistry
from invenio_utils.datastructures import LazyDict
fixtures_proxy = RegistryProxy(
'fixtures', ModuleAutoDiscoveryRegistry, 'fixtures')
def fixtures_loader():
"""Load fixtures datasets."""
out = {}
for fixture in fixtures_proxy:
for data in getattr(fixture, '__all__', dir(fixture)):
if data[-4:] != 'Data' or data in out:
continue
out[data] = getattr(fixture, data)
return out
fixtures = LazyDict(fixtures_loader)
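# Editor's note (illustrative, not in the original module): fixture classes
# whose names end in 'Data' are collected lazily, so a lookup resolves on
# first access; 'UserData' below is a hypothetical key:
#
#     from invenio_ext.fixtures.registry import fixtures
#     user_data = fixtures['UserData']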
|
Ban3/Limnoria
|
src/utils/crypt.py
|
Python
|
bsd-3-clause
| 1,697
| 0
|
###
# Copyright (c) 2008, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from hashlib import md5
from hashlib import sha1 as sha
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
eayunstack/fuel-ostf
|
fuel_health/tests/smoke/test_vcenter.py
|
Python
|
apache-2.0
| 20,161
| 0.000198
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import traceback
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import nmanager
from fuel_health import test
LOG = logging.getLogger(__name__)
class TestVcenter(nmanager.NovaNetworkScenarioTest):
"""Test suit verifies:
- Instance creation
- Floating ip creation
- Instance connectivity by floating IP
"""
@classmethod
def setUpClass(cls):
super(TestVcenter, cls).setUpClass()
if cls.manager.clients_initialized:
cls.tenant_id = cls.manager._get_identity_client(
cls.config.identity.admin_username,
cls.config.identity.admin_password,
cls.config.identity.admin_tenant_name).tenant_id
cls.keypairs = {}
cls.security_groups = {}
cls.network = []
cls.servers = []
cls.floating_ips = []
def setUp(self):
super(TestVcenter, self).setUp()
self.check_clients_state()
def tearDown(self):
super(TestVcenter, self).tearDown()
if self.manager.clients_initialized:
if self.servers:
for server in self.servers:
try:
self._delete_server(server)
self.servers.remove(server)
except Exception:
LOG.debug(traceback.format_exc())
LOG.debug("Server was already deleted.")
def test_1_vcenter_create_servers(self):
"""vCenter: Launch instance
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
3. Delete instance.
Duration: 200 s.
Available since release: 2014.2-6.1
Deployment tags: use_vcenter
"""
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25,
self._create_security_group,
1,
"Security group can not be created.",
'security group creation',
self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
img_name = 'TestVM-VMDK'
server = self.verify(
200,
self._create_server,
2,
"Creating instance using the new security group has failed.",
'image creation',
self.compute_client, name, security_groups, None, None, img_name
)
self.verify(30, self._delete_server, 3,
"Server can not be deleted.",
"server deletion", server)
def test_3_vcenter_check_public_instance_connectivity_from_instance(self):
"""vCenter: Check network connectivity from instance via floating IP
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
3. Create a new floating IP
4. Assign the new floating IP to the instance.
5. Check connectivity to the floating IP using ping command.
            6. Check that public IP 8.8.8.8 can be pinged from instance.
7. Disassociate server floating ip.
8. Delete floating ip
9. Delete server.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: nova_network, use_vcenter
"""
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation',
self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
img_name = 'TestVM-VMDK'
server = self.verify(250, self._create_server, 2,
"Server can not be created.",
"server creation",
self.compute_client, name, security_groups, None,
None, img_name)
floating_ip = self.verify(
20,
self._create_floating_ip,
3,
"Floating IP can not be created.",
'floating IP creation')
self.verify(20, self._assign_floating_ip_to_instance,
4, "Floating IP can not be assigned.",
'floating IP assignment',
self.compute_client, server, floating_ip)
self.floating_ips.append(floating_ip)
ip_address = floating_ip.ip
        LOG.info('ip address is {0}'.format(ip_address))
LOG.debug(ip_address)
self.verify(600, self._check_vm_connectivity, 5,
"VM connectivity doesn`t function properly.",
'VM connectivity checking', ip_address,
30, (6, 60))
self.verify(600, self._check_connectivity_from_vm,
6, ("Connectivity to 8.8.8.8 from the VM doesn`t "
"function properly."),
'public connectivity checking from VM', ip_address,
30, (6, 60))
self.verify(10, self.compute_client.servers.remove_floating_ip,
7, "Floating IP cannot be removed.",
"removing floating IP", server, floating_ip)
self.verify(10, self.compute_client.floating_ips.delete,
8, "Floating IP cannot be deleted.",
"floating IP deletion", floating_ip)
if self.floating_ips:
self.floating_ips.remove(floating_ip)
self.verify(30, self._delete_server, 9,
"Server can not be deleted. ",
"server deletion", server)
def test_2_vcenter_check_internet_connectivity_without_floatingIP(self):
"""vCenter: Check network connectivity from instance without floating \
IP
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
(if it doesn`t exist yet).
3. Check that public IP 8.8.8.8 can be pinged from instance.
4. Delete server.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: nova_network, use_vcenter
"""
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation', self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
img_name = 'TestVM-VMDK'
compute = None
server = self.verify(
250, self._create_server, 2,
"Server can not be created.",
'server creation',
self.compute_client, name, security_groups, None, None, img_name)
try:
for addr in server.addresses:
if addr.
|
vortex-ape/scikit-learn
|
sklearn/preprocessing/__init__.py
|
Python
|
bsd-3-clause
| 1,775
| 0
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import QuantileTransformer
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import quantile_transform
from .data import power_transform
from .data import PowerTransformer
from .data import PolynomialFeatures
from ._encoders import OneHotEncoder
from ._encoders import OrdinalEncoder
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from ._discretization import KBinsDiscretizer
from .imputation import Imputer
# stub, remove in version 0.21
from .data import CategoricalEncoder # noqa
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KBinsDiscretizer',
'KernelCenterer',
'LabelBinarizer',
    'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'QuantileTransformer',
'Normalizer',
'OneHotEncoder',
'OrdinalEncoder',
'PowerTransformer',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
'quantile_transform',
'power_transform',
]
|
enigmampc/catalyst
|
catalyst/testing/core.py
|
Python
|
apache-2.0
| 47,204
| 0.000021
|
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
from functools import wraps
import gzip
from inspect import getargspec
from itertools import (
combinations,
count,
product,
)
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
from sys import _getframe
import tempfile
from logbook import TestHandler
from mock import patch
from nose.tools import nottest
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import itervalues, iteritems, with_metaclass
from six.moves import filter, map
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from catalyst.assets import AssetFinder, AssetDBWriter
from catalyst.assets.synthetic import make_simple_equity_info
from catalyst.data.data_portal import DataPortal
from catalyst.data.loader import get_benchmark_filename, INDEX_MAPPING
from catalyst.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY
)
from catalyst.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentWriter,
)
from catalyst.finance.blotter import Blotter
from catalyst.finance.trading import TradingEnvironment
from catalyst.finance.order import ORDER_STATUS
from catalyst.lib.labelarray import LabelArray
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors import CustomFactor
from catalyst.pipeline.loaders.testing import make_seeded_random_loader
from catalyst.utils import security_list
from catalyst.utils.calendars import get_calendar
from catalyst.utils.input_validation import expect_dimensions
from catalyst.utils.numpy_utils import as_column, isnat
from catalyst.utils.pandas_utils import timedelta_to_integral_seconds
from catalyst.utils.paths import ensure_directory
from catalyst.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH)
def drain_catalyst(test, catalyst):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in catalyst:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def check_algo_results(test,
results,
expected_transactions_count=None,
expected_order_count=None,
expected_positions_count=None,
sid=None):
if expected_transactions_count is not None:
txns = flatten_list(results["transactions"])
test.assertEqual(expected_transactions_count, len(txns))
if expected_positions_count is not None:
raise NotImplementedError
if expected_order_count is not None:
# de-dup orders on id, because orders are put back into perf packets
        # whenever a txn is filled
orders = set([order['id'] for order in
flatten_list(results["orders"])])
test.assertEqual(expected_order_count, len(orders))
def flatten_list(list):
return [item for sublist in list for item in sublist]
def assert_single_position(test, catalyst):
output, transaction_count = drain_catalyst(test, catalyst)
if 'expected_transactions' in test.catalyst_test_config:
test.assertEqual(
test.catalyst_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.catalyst_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.catalyst_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
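# A sketch of the intended pairing (ticker symbols here are illustrative,
# not from the original):
#     with security_list_copy():
#         add_security_data(adds=['AAA'], deletes=['BBB'])
#         ...  # exercise code that reads the leveraged ETF list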
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from catalyst.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
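# For illustration (values assumed): list(product_upper_triangle([1, 2, 3]))
# yields [(1, 2), (1, 3), (2, 3)]; passing include_diagonal=True adds
# (1, 1), (2, 2), and (3, 3).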
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
        for start, stop in product_upper_triangle(range(len(index) + 1))
    )
|
jaybo/OpenCVGraph
|
TemcaGraphPy/temca_graph.py
|
Python
|
apache-2.0
| 21,780
| 0.007714
|
"""
Python wrapper for functionality exposed in the TemcaGraph dll.
@author: jayb
"""
from ctypes import *
import logging
import threading
import time
import os
import sys
import numpy as np
from pytemca.image.imageproc import fit_sin
from numpy.ctypeslib import ndpointer
if sys.flags.debug:
rel = "../x64/Debug/TemcaGraphDLL.dll"
else:
rel = "../x64/Release/TemcaGraphDLL.dll"
dll_path = os.path.join(os.path.dirname(__file__), rel)
class StatusCallbackInfo(Structure):
_fields_ = [
("status", c_int),
# -1 : fatal error
        # 0: finished init (startup),
# 1: starting new frame,
# 2: finished frame capture (ie. time to move the stage),
# 3: Sync step completed
# 4: Async step completed
# 5: Processing finished (except Async graphs)
# 6: Shutdown finished
("info_code", c_int),
# value indicates which sync or async step completed
("error_string", c_char * 256)
]
STATUSCALLBACKFUNC = CFUNCTYPE(c_int, POINTER(StatusCallbackInfo))  # returns c_int
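# A minimal callback sketch (hedged: the meaning of the returned int is an
# assumption; only the signature is fixed by the CFUNCTYPE declaration above).
def _example_status_callback(info_ptr):
    info = info_ptr.contents
    if info.status == -1:
        logging.error('TemcaGraph fatal error: %s', info.error_string)
    return 1  # assumed "keep going" value; not documented in this excerpt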
class CameraInfo(Structure):
'''
Information about the current camera in use.
'''
_fields_ = [
("width
|
", c_int),
("height", c_int),
("format", c_int),
("pixel_depth", c_int),
("camera_bpp", c_int),
("camera_model", c_char * 256),
("camera_id", c_char * 256)
]
class FocusInfo(Structure):
'''
Information about focus quality.
'''
_fields_ = [
("focus_score", c_float),
("astig_score", c_float),
("astig_angle", c_float),
("astig_profile", c_float * 360)
]
class QCInfo(Structure):
'''
Information about image quality.
'''
_fields_ = [
("min_value", c_int),
("max_value", c_int),
("mean_value", c_int),
("histogram", c_int * 256),
]
class ROIInfo(Structure):
'''
Information about the selected ROI used for stitching.
'''
_fields_ = [
("gridX", c_int),
("gridY", c_int),
]
class MatcherInfo(Structure):
'''
Match parameters from the Matcher.
'''
_fields_ = [
("dX", c_float),
("dY", c_float),
("distance", c_float),
("rotation", c_float),
("good_matches", c_int),
]
class TemcaGraphDLL(object):
"""
Hooks onto the C++ DLL. These are all the foreign functions we are going to be using
from the dll, along with their arguments types and return values.
"""
_TemcaGraphDLL = WinDLL(dll_path)
open = _TemcaGraphDLL.temca_open
open.argtypes = [c_int, c_char_p, STATUSCALLBACKFUNC]
open.restype = c_uint32
close = _TemcaGraphDLL.temca_close
close.argtype = [None]
close.restype = c_uint32
set_mode = _TemcaGraphDLL.setMode
set_mode.argtypes = [c_char_p]
set_mode.restype = c_uint32
get_camera_info = _TemcaGraphDLL.getCameraInfo
get_camera_info.restype = CameraInfo
get_focus_info = _TemcaGraphDLL.getFocusInfo
get_focus_info.restype = FocusInfo
set_fft_size = _TemcaGraphDLL.setFFTSize
set_fft_size.argtypes = [c_int, c_int, c_int]
set_fft_size.restype = None
get_qc_info = _TemcaGraphDLL.getQCInfo
get_qc_info.restype = QCInfo
grab_frame = _TemcaGraphDLL.grabFrame
grab_frame.argtypes = [c_char_p, c_int, c_int]
grab_frame.restype = None
get_last_frame = _TemcaGraphDLL.getLastFrame
get_last_frame.argtypes = [ndpointer(c_uint16, flags="C_CONTIGUOUS")]
get_last_frame.restype = None
get_preview_frame = _TemcaGraphDLL.getPreviewFrame
get_preview_frame.argtypes = [ndpointer(c_uint8, flags="C_CONTIGUOUS")]
get_preview_frame.restype = None
set_parameter = _TemcaGraphDLL.setParameter
set_parameter.argtypes = [c_char_p, c_int]
set_parameter.restype = None
get_parameter = _TemcaGraphDLL.getParameter
get_parameter.argtypes = [c_char_p]
get_parameter.restype = c_uint32
get_status = _TemcaGraphDLL.getStatus
get_status.restype = StatusCallbackInfo
setRoiInfo = _TemcaGraphDLL.setROI
setRoiInfo.restype = None
setRoiInfo.argtypes = [ POINTER( ROIInfo) ]
grab_matcher_template = _TemcaGraphDLL.grabMatcherTemplate
grab_matcher_template.restype = None
grab_matcher_template.argtypes = [c_int, c_int, c_int, c_int]
get_matcher_info = _TemcaGraphDLL.getMatcherInfo
get_matcher_info.restype = MatcherInfo
get_matcher_info.argtypes = None
class TemcaGraph(object):
'''
Python class which wraps the C++ TemcaGraphDLL and provides the linkage between Python and the C++ OpenCVGraph world.
The Python events which are triggered by C++ callbacks are::
eventInitCompleted - all graphs have finished building
eventStartNewFrame - ready for client to issue a frame grab request
eventCaptureCompleted - exposure completed
eventCapturePostProcessingCompleted - xfer to CUDA, upshift, Bright/Dark correction finished
eventSyncProcessingCompleted - Synchronous processing has finished
eventAsyncProcessingCompleted - Asynchronous processing has finished (may overlap next exposure)
eventFiniCompleted - graph has finished shutting down
'''
def __init__(self,):
'''
Many additional class variables are defined in the open() function
'''
self.aborting = False
self.eventInitCompleted = threading.Event() # Event signalling that initialization is complete.
self.eventStartNewFrame = threading.Event()
self.eventCaptureCompleted = threading.Event()
self.eventCapturePostProcessingCompleted = threading.Event()
self.eventSyncProcessingCompleted = threading.Event()
self.eventAsyncProcessingCompleted = threading.Event()
self.eventFiniCompleted = threading.Event()
# all events after eventStartNewFrame, and before eventFiniCompleted
self.eventsAllCaptureLoop = [self.eventCaptureCompleted,
self.eventCapturePostProcessingCompleted,
self.eventSyncProcessingCompleted,
self.eventAsyncProcessingCompleted]
self.threadLock = threading.Lock()
self.preview_decimation_factor = 4
        self.wait_time = 10  # in seconds. If we reach this limit, it's an error
def wait_graph_event (self, event):
'''
Waits for the specified event to signal indicating a change in the graph state,
and then clears the event.
'''
self.threadLock.acquire()
event.wait(self.wait_time)
event.clear()
self.threadLock.release()
def wait_all_capture_events(self):
for e in self.eventsAllCaptureLoop:
self.wait_graph_event(e)
def wait_start_of_frame(self):
'''
Wait for the event which indicates the graph is ready to start a new frame.
'''
self.wait_graph_event(self.eventStartNewFrame)
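    # Typical per-frame sequence (illustrative; the grab trigger itself is
    # issued elsewhere and is not shown in this excerpt):
    #     graph.wait_start_of_frame()
    #     ...request the next exposure via the DLL...
    #     graph.wait_all_capture_events()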
def open(self, dummyCamera = False, dummyPath = None, callback=None):
'''
Open up the Temca C++ DLL.
        If dummyCamera is True, create a dummy TEMCA image source using
        either a real camera, image, directory, or movie according to dummyPath, which MUST
        be specified as no default path is provided. If dummyPath is an integer string, then
        the OpenCV camera corresponding to that index will be used.
'''
        if callback is None:
callback = self.statusCallback
# prevent the callback from being garbage collected !!!
self.callback = STATUSCALLBACKFUNC(callback)
self.dummyPath = dummyPath
t = time.clock()
if not TemcaGraphDLL.open(dummyCamera, self.dummyPath, self.callback):
            raise EnvironmentError('Cannot open TemcaGraphDLL. Possibilities: camera is offline, not installed, or already in use')
logging.info("TemcaGraph DLL initialized in %s seconds" % (time.clock() - t))
self.eventInitCompleted.wait()
# get info about frame dimensions
fi = se
|
kinddevil/aatest
|
python/mik.py
|
Python
|
mit
| 831
| 0.049338
|
#!/usr/bin/env python
import paramiko
from datetime import datetime
#hostname='192.168.0.102'
hostname='172.28.102.250'
username='root'
password='abc'
#port=22
if __name__=='__main__':
paramiko.util.log_to_file('paramiko.log')
s=paramiko.SSHClient()
#s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
        s.connect(hostname=hostname, username=username, password=password)
stdin,stdout,stderr=s.exec_command('ifconfig;free;df -h')
print stdout.read()
except:
print "fuck"
f = file('paramiko.log','w')
f.write(" ".join([str(datetime.now()),
|
"fuck\n"]))
f.close()
else:
print "how"
finally:
print "super fuck"
s.close()
|
AntonSax/plantcv
|
plantcv/roi_objects.py
|
Python
|
mit
| 4,848
| 0.004332
|
# Find Objects Partially Inside Region of Interest or Cut Objects to Region of Interest
import cv2
import numpy as np
from . import print_image
from . import plot_image
from . import fatal_error
def roi_objects(img, roi_type, roi_contour, roi_hierarchy, object_contour, obj_hierarchy, device, debug=None):
"""Find objects partially inside a region of interest or cut objects to the ROI.
Inputs:
img = img to display kept objects
roi_type = 'cutto' or 'partial' (for partially inside)
roi_contour = contour of roi, output from "View and Adjust ROI" function
roi_hierarchy = contour of roi, output from "View and Adjust ROI" function
object_contour = contours of objects, output from "Identifying Objects" function
obj_hierarchy = hierarchy of objects, output from "Identifying Objects" function
device = device number. Used to count steps in the pipeline
debug = None, print, or plot. Print = save to file, Plot = print to screen.
Returns:
device = device number
kept_cnt = kept contours
hierarchy = contour hierarchy list
mask = mask image
    obj_area = total object pixel area
:param img: numpy array
:param roi_type: str
:param roi_contour: list
:param roi_hierarchy: list
:param object_contour: list
:param obj_hierarchy: list
:param device: int
:param debug: str
:return device: int
    :return kept_cnt: list
:return hierarchy: list
:return mask: numpy array
:return obj_area: int
"""
device += 1
if len(np.shape(img)) == 3:
ix, iy, iz = np.shape(img)
else:
ix, iy = np.shape(img)
size = ix, iy, 3
background = np.zeros(size, dtype=np.uint8)
ori_img = np.copy(img)
w_back = background + 255
background1 = np.zeros(size, dtype=np.uint8)
background2 = np.zeros(size, dtype=np.uint8)
# Allows user to find all objects that are completely inside or overlapping with ROI
if roi_type == 'partial':
for c, cnt in enumerate(object_contour):
length = (len(cnt) - 1)
stack = np.vstack(cnt)
test = []
keep = False
for i in range(0, length):
pptest = cv2.pointPolygonTest(roi_contour[0], (stack[i][0], stack[i][1]), False)
if int(pptest) != -1:
keep = True
            if keep:
if obj_hierarchy[0][c][3] > -1:
cv2.drawContours(w_back, object_contour, c, (255, 255, 255), -1, lineType=8,
hierarchy=obj_hierarchy)
else:
cv2.drawContours(w_back, object_contour, c, (0, 0, 0), -1, lineType=8, hierarchy=obj_hierarchy)
else:
cv2.drawContours(w_back, object_contour, c, (255, 255, 255), -1, lineType=8, hierarchy=obj_hierarchy)
kept = cv2.cvtColor(w_back, cv2.COLOR_RGB2GRAY)
kept_obj = cv2.bitwise_not(kept)
mask = np.copy(kept_obj)
obj_area = cv2.countNonZero(kept_obj)
kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(ori_img, kept_cnt, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy)
cv2.drawContours(ori_img, roi_contour, -1, (255, 0, 0), 5, lineType=8, hierarchy=roi_hierarchy)
# Allows user to cut objects to the ROI (all objects completely outside ROI will not be kept)
elif roi_type == 'cutto':
cv2.drawContours(background1, object_contour, -1, (255, 255, 255), -1, lineType=8, hierarchy=obj_hierarchy)
roi_points = np.vstack(roi_contour[0])
cv2.fillPoly(background2, [roi_points], (255, 255, 255))
obj_roi = cv2.multiply(background1, background2)
kept_obj = cv2.cvtColor(obj_roi, cv2.COLOR_RGB2GRAY)
mask = np.copy(kept_obj)
obj_area = cv2.countNonZero(kept_obj)
kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(w_back, kept_cnt, -1, (0, 0, 0), -1)
cv2.drawContours(ori_img, kept_cnt, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy)
cv2.drawContours(ori_img, roi_contour, -1, (255, 0, 0), 5, lineType=8, hierarchy=roi_hierarchy)
else:
        fatal_error('ROI Type ' + str(roi_type) + ' is not "cutto" or "partial"!')
if debug == 'print':
print_image(w_back, (str(device) + '_roi_objects.png'))
print_image(ori_img, (str(device) + '_obj_on_img.png'))
print_image(mask, (str(device) + '_roi_mask.png'))
elif debug == 'plot':
plot_image(w_back)
plot_image(ori_img)
plot_image(mask, cmap='gray')
# print ('Object Area=', obj_area)
return device, kept_cnt, hierarchy, mask, obj_area
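# Hypothetical call (all inputs are placeholders, not from the original):
#     device, kept_cnt, hierarchy, mask, obj_area = roi_objects(
#         img, 'partial', roi_contour, roi_hierarchy,
#         object_contour, obj_hierarchy, device, debug='plot')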
|
abadger/ansible
|
test/lib/ansible_test/_internal/cli/compat.py
|
Python
|
gpl-3.0
| 22,147
| 0.004425
|
"""Provides compatibility with first-generation host delegation options in ansible-test."""
from __future__ import annotations
import argparse
import dataclasses
import enum
import os
import types
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ..util import (
ApplicationError,
display,
filter_args,
sorted_versions,
str_to_version,
)
from ..docker_util import (
docker_available,
)
from ..completion import (
DOCKER_COMPLETION,
REMOTE_COMPLETION,
filter_completion,
)
from ..host_configs import (
ControllerConfig,
ControllerHostConfig,
DockerConfig,
FallbackDetail,
FallbackReason,
HostConfig,
HostContext,
HostSettings,
NativePythonConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixRemoteConfig,
VirtualPythonConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
def filter_python(version, versions): # type: (t.Optional[str], t.Optional[t.List[str]]) -> t.Optional[str]
"""If a Python version is given and is in the given version list, return that Python version, otherwise return None."""
return version if version in versions else None
def controller_python(version): # type: (t.Optional[str]) -> t.Optional[str]
"""If a Python version is given and is supported by the controller, return that Python version, otherwise return None."""
return filter_python(version, CONTROLLER_PYTHON_VERSIONS)
def get_fallback_remote_controller(): # type: () -> str
"""Return the remote fallback platform for the controller."""
platform = 'freebsd' # lower cost than RHEL and macOS
candidates = [item for item in filter_completion(REMOTE_COMPLETION).values() if item.controller_supported and item.platform == platform]
fallback = sorted(candidates, key=lambda value: str_to_version(value.version), reverse=True)[0]
return fallback.name
def get_option_name(name): # type: (str) -> str
"""Return a command-line option name from the given option name."""
if name == 'targets':
name = 'target'
return f'--{name.replace("_", "-")}'
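# For example, get_option_name('docker_privileged') returns
# '--docker-privileged', while get_option_name('targets') returns '--target'
# via the special case above.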
class PythonVersionUnsupportedError(ApplicationError):
"""A Python version was requested for a context which does not support that version."""
def __init__(self, context, version, versions):
super().__init__(f'Python {version} is not supported by environment `{context}`. Supported Python version(s) are: {", ".join(versions)}')
class PythonVersionUnspecifiedError(ApplicationError):
"""A Python version was not specified for a context which is unknown, thus the Python version is unknown."
|
""
def __init__(self, context):
super().__init__(f'A Python version was not specified for environment `{context}`. Use the `--python` option to specify a Python version.')
class ControllerNotSupportedError(ApplicationError):
"""Option(s) were specified which do not provide support for the controller and would be ignored because they are irrelevant for the target."""
def __init__(self, context):
super().__init__(f'Environment `{context}` does not provide a Python version supported by the controller.')
class OptionsConflictError(ApplicationError):
"""Option(s) were specified which conflict with other options."""
def __init__(self, first, second):
super().__init__(f'Options `{" ".join(first)}` cannot be combined with options `{" ".join(second)}`.')
@dataclasses.dataclass(frozen=True)
class LegacyHostOptions:
"""Legacy host options used prior to the availability of separate controller and target host configuration."""
python: t.Optional[str] = None
python_interpreter: t.Optional[str] = None
local: t.Optional[bool] = None
venv: t.Optional[bool] = None
venv_system_site_packages: t.Optional[bool] = None
remote: t.Optional[str] = None
remote_provider: t.Optional[str] = None
docker: t.Optional[str] = None
docker_privileged: t.Optional[bool] = None
docker_seccomp: t.Optional[str] = None
docker_memory: t.Optional[int] = None
windows: t.Optional[t.List[str]] = None
platform: t.Optional[t.List[str]] = None
platform_collection: t.Optional[t.List[t.Tuple[str, str]]] = None
platform_connection: t.Optional[t.List[t.Tuple[str, str]]] = None
inventory: t.Optional[str] = None
@staticmethod
def create(namespace): # type: (t.Union[argparse.Namespace, types.SimpleNamespace]) -> LegacyHostOptions
"""Create legacy host options from the given namespace."""
kwargs = {field.name: getattr(namespace, field.name, None) for field in dataclasses.fields(LegacyHostOptions)}
if kwargs['python'] == 'default':
kwargs['python'] = None
return LegacyHostOptions(**kwargs)
@staticmethod
def purge_namespace(namespace): # type: (t.Union[argparse.Namespace, types.SimpleNamespace]) -> None
"""Purge legacy host options fields from the given namespace."""
for field in dataclasses.fields(LegacyHostOptions): # type: dataclasses.Field
if hasattr(namespace, field.name):
delattr(namespace, field.name)
@staticmethod
def purge_args(args): # type: (t.List[str]) -> t.List[str]
"""Purge legacy host options from the given command line arguments."""
fields = dataclasses.fields(LegacyHostOptions) # type: t.Tuple[dataclasses.Field, ...]
filters = {get_option_name(field.name): 0 if field.type is t.Optional[bool] else 1 for field in fields} # type: t.Dict[str, int]
return filter_args(args, filters)
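    # For example, a boolean field such as docker_privileged maps
    # '--docker-privileged' to 0 trailing values, while a valued field such as
    # docker maps '--docker' to 1, telling filter_args how many tokens to
    # strip for each matched option.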
def get_options_used(self): # type: () -> t.Tuple[str, ...]
"""Return a tuple of the command line options used."""
fields = dataclasses.fields(self) # type: t.Tuple[dataclasses.Field, ...]
options = tuple(sorted(get_option_name(field.name) for field in fields if getattr(self, field.name)))
return options
class TargetMode(enum.Enum):
"""Type of provisioning to use for the targets."""
WINDOWS_INTEGRATION = enum.auto() # windows-integration
NETWORK_INTEGRATION = enum.auto() # network-integration
POSIX_INTEGRATION = enum.auto() # integration
SANITY = enum.auto() # sanity
UNITS = enum.auto() # units
SHELL = enum.auto() # shell
NO_TARGETS = enum.auto() # coverage
@property
def one_host(self):
"""Return True if only one host (the controller) should be used, otherwise return False."""
return self in (TargetMode.SANITY, TargetMode.UNITS, TargetMode.NO_TARGETS)
@property
def no_fallback(self):
"""Return True if no fallback is acceptable for the controller (due to options not applying to the target), otherwise return False."""
return self in (TargetMode.WINDOWS_INTEGRATION, TargetMode.NETWORK_INTEGRATION, TargetMode.NO_TARGETS)
@property
def multiple_pythons(self):
"""Return True if multiple Python versions are allowed, otherwise False."""
return self in (TargetMode.SANITY, TargetMode.UNITS)
@property
def has_python(self):
"""Return True if this mode uses Python, otherwise False."""
return self in (TargetMode.POSIX_INTEGRATION, TargetMode.SANITY, TargetMode.UNITS, TargetMode.SHELL)
def convert_legacy_args(
argv, # type: t.List[str]
args, # type: t.Union[argparse.Namespace, types.SimpleNamespace]
mode, # type: TargetMode
): # type: (...) -> HostSettings
"""Convert pre-split host arguments in the given namespace to their split counterparts."""
old_options = LegacyHostOptions.create(args)
old_options.purge_namespace(args)
new_options = [
'--controller',
'--target',
'--target-python',
]
used_old_options = old_options.get_options_used()
used_new_options = [name for name in new_options if name in argv]
if used_old_options:
if used_new_options:
raise OptionsConflictError(used_old_options, used_new_options)
controller, targets, controller_fallback = get_legacy_host_config(mode, old_options)
if cont
|
bfontecc007/osbs-client
|
osbs/build/__init__.py
|
Python
|
bsd-3-clause
| 314
| 0
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from .build_response import BuildResponse # noqa
from .pod_response import PodResponse  # noqa
|
SureshMatsui/SpeedCoin
|
contrib/linearize/linearize.py
|
Python
|
mit
| 3,357
| 0.034257
|
#!/usr/bin/python
#
# linearize.py: Construct a linear, no-fork, best version of the blockchain.
#
#
# Copyright (c) 2013 The SpeedCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
class SpeedCoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def getblock(rpc, settings, n):
hash = rpc.getblockhash(n)
hexdata = rpc.getblock(hash, False)
data = hexdata.decode('hex')
return data
def get_blocks(settings):
rpc = SpeedCoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
    outf = open(settings['output'], 'ab')
for height in xrange(settings['min_height'], settings['max_height']+1):
data = getblock(rpc, settings, height)
outhdr = settings['netmagic']
outhdr += struct.pack("<i", len(data))
outf.write(outhdr)
outf.write(data)
if (height % 1000) == 0:
sys.stdout.write("Wrote block " + str(height) + "\n")
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
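    # Example CONFIG-FILE contents (illustrative; any key omitted falls back
    # to the defaults applied below):
    #   host=127.0.0.1
    #   port=8332
    #   rpcuser=someuser
    #   rpcpassword=somepassword
    #   min_height=0
    #   max_height=279000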
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'output' not in settings:
settings['output'] = 'bootstrap.dat'
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 279000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_blocks(settings)
|
mcnowinski/various-and-sundry
|
lightcurve/windows/firstlook.py
|
Python
|
mit
| 10,661
| 0.001782
|
# this program requires the 32 bit version of Python!!
import os
import glob
import math
import subprocess
import re
import sys
import string
from decimal import Decimal
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from scipy.ndimage import median_filter
#from pyds9 import DS9
import argparse
import pandas as pd
import ch # custom callHorizons library
import dateutil
from datetime import datetime
from datetime import timedelta
from astropy.coordinates import SkyCoord
from astropy.time import Time
import shutil
#
# START SETTINGS
# MODIFY THESE FIELDS AS NEEDED!
#
# input path *with* ending forward slash
input_path = './'
# output path *with* ending forward slash
sex_output_path = './firstlook/'
# bad path
bad_path = './bad/'
# suffix for output files, if any...
sex_output_suffix = '.sex'
# log file name
log_fname = './log.firstlook.txt'
# path to sextractor executable and config files (incl. the filenames!)
sextractor_bin_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sextractor.exe'
sextractor_cfg_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.sex'
sextractor_param_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.param'
sextractor_filter_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.conv'
# tolerance for object matching
dRa = 0.00062
dDec = 0.00062
# target/comp list
comps_fname = './comps.in.txt'
targets_out_fname = './targets.out.csv'
counts_out_fname = './counts.out.csv'
# mask file that identifies bad pixels
bad_pixels_fname = './bad_pixels.txt'
cleaned_output_path = './cor/'
# observatory code
obs_code = 'G52'
# panstarrs
# panstarrs ref magnitude
pso_ref_mag = 'rPSFMag'
# panstarrs max magnitude
pso_max_mag = 16
# panstarrs min magnitude
pso_min_mag = 0
#
# END SETTINGS
#
# logger
def logme(msg):
    log.write(msg + "\n")
    print msg
    return
def exit():
logme('Program execution halted.')
log.close()
os.sys.exit(1)
# run external process
def runSubprocess(command_array):
# command array is array with command and all required parameters
try:
with open(os.devnull, 'w') as fp:
sp = subprocess.Popen(command_array, stderr=fp, stdout=fp)
# logme('Running subprocess ("%s" %s)...'%(' '.join(command_array), sp.pid))
sp.wait()
output, error = sp.communicate()
return (output, error, sp.pid)
    except:
        logme('Error. Subprocess ("%s") failed.' %
              (' '.join(command_array)))
        return ('', '', 0)
# get current ra/dec of target asteroid
def getAsteroidRaDec(name, dt):
ra = ''
dec = ''
start = dt
end = dt + timedelta(minutes=1)
# get ephemerides for target in JPL Horizons from start to end times
result = ch.query(name.upper(), smallbody=True)
result.set_epochrange(start.isoformat(), end.isoformat(), '1m')
result.get_ephemerides(obs_code)
if result and len(result['EL']):
ra = result['RA'][0]
dec = result['DEC'][0]
else:
logme('Error. Asteroid (%s) not found for %s.' %
(name, start.isoformat()))
exit()
return (ra, dec)
def jdToYYMMDD_HHMMSS(jd):
t = Time(jd, format='mjd', scale='utc')
return t.iso
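# Note: despite the "jd" in the name, the input is treated as MJD; e.g. an
# input of 57000.0 should yield '2014-12-09 00:00:00.000'.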
# open log file
log = open(log_fname, 'a+')
# set up the command line argument parser
parser = argparse.ArgumentParser(
description='Perform lightcurve photometry using sextractor.')
# parser.add_argument('asteroid', metavar='asteroid#', type=int,
# help='Target asteroid number')
args = parser.parse_args()
# make sure input files and folder exist
inputs = [input_path, sextractor_bin_fname, sextractor_cfg_fname,
sextractor_param_fname, sextractor_filter_fname, comps_fname]
for input_file in inputs:
    if not os.path.exists(input_file):
        logme('Error. The file or path (%s) does not exist.' % input_file)
        exit()
# does output directory exist? If not, create it...
outputs = [sex_output_path, cleaned_output_path, bad_path]
for output in outputs:
try:
os.mkdir(output)
except:
pass
image_data = []
# get a list of all FITS files in the input directory
fits_files = glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
# loop through all qualifying files and perform sextraction
for fits_file in sorted(fits_files):
fits_data = fits.open(fits_file)
header = fits_data[0].header
wcs = WCS(header)
airmass = header['AIRMASS']
try:
dt_obs = dateutil.parser.parse(header['DATE-OBS'])
except:
logme('Error. Invalid observation date found in %s.' % fits_file)
exit()
try:
naxis1 = header['NAXIS1']
naxis2 = header['NAXIS2']
except:
logme('Error. Invalid CCD pixel size found in %s.' % fits_file)
exit()
try:
ra = header['CRVAL1']
dec = header['CRVAL2']
except:
logme('Error. Invalid RA/DEC found in %s.' % fits_file)
exit()
try:
JD = header['MJD-OBS']
except KeyError:
JD = header['JD']
# calculate image corners in ra/dec
ra1, dec1 = wcs.all_pix2world(0, 0, 0)
ra2, dec2 = wcs.all_pix2world(naxis1, naxis2, 0)
# calculate search radius in degrees from the center!
c1 = SkyCoord(ra1, dec1, unit="deg")
c2 = SkyCoord(ra2, dec2, unit="deg")
# estimate radius of FOV in arcmin
r_arcmin = '%f' % (c1.separation(c2).deg*60/2)
logme("Sextracting %s" % (fits_file))
output_file = sex_output_path + \
fits_file.replace('\\', '/').rsplit('/', 1)[1]
output_file = '%s%s.txt' % (output_file, sex_output_suffix)
# add input filename, output filename, airmass, and jd to sex_file list
image_data.append(
{'image': fits_file, 'sex': output_file, 'jd': JD, 'airmass': airmass, 'ra': ra, 'dec': dec, 'dt_obs': dt_obs, 'r_arcmin': r_arcmin})
# sextract this file
(output, error, id) = runSubprocess([sextractor_bin_fname, fits_file, '-c', sextractor_cfg_fname, '-catalog_name',
output_file, '-parameters_name', sextractor_param_fname, '-filter_name', sextractor_filter_fname])
if error:
logme('Error. Sextractor failed: %s' % output)
exit()
logme('Sextracted %d files.' % len(image_data))
# build list of comparison stars in comps_fname using
# PanSTARRS Stack Object Catalog Search
logme('Searching for comparison stars in the PANSTARRS catalog (ra=%s deg, dec=%s deg, radius=%s min)...' %
(image_data[0]['ra'], image_data[0]['dec'], image_data[0]['r_arcmin']))
pso_url_base = 'http://archive.stsci.edu/panstarrs/stackobject/search.php'
pso_url_parms = '?resolver=Resolve&radius=%s&ra=%s&dec=%s&equinox=J2000&nDetections=&selectedColumnsCsv=objname%%2Cobjid%%2Cramean%%2Cdecmean%%2Cgpsfmag%%2Crpsfmag%%2Cipsfmag' + \
'&coordformat=dec&outputformat=CSV_file&skipformat=on' + \
'&max_records=50001&action=Search'
url = pso_url_base + \
pso_url_parms % (image_data[0]['r_arcmin'], image_data[0]['ra'], image_data[0]
['dec'])
# get the results of the REST query
comps = pd.read_csv(url)
if len(comps) <= 0:
logme('Error. No comparison stars found!')
exit()
# remove dupes, keep first
comps.drop_duplicates(subset=['objName'], keep='first', inplace=True)
# make sure magnitudes are treated as floats
comps[pso_ref_mag] = pd.to_numeric(comps[pso_ref_mag], errors='coerce')
# remove spaces from obj names
comps['objName'] = comps['objName'].str.replace('PSO ', '')
# filter based on ref (r?) magnitude!
comps = comps.query("%s > %f & %s < %f" %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
if len(comps) <= 0:
logme('Error. No comparison stars meet the criteria (%s > %f & %s < %f)!' %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
exit()
logme('A total of %d comparison star(s) met the criteria (%s > %f & %s < %f)!' %
(len(comps), pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
# output objects to comps_fname in sext
|
MM1nd/worldengine
|
worldengine/simulations/humidity.py
|
Python
|
mit
| 1,502
| 0.000666
|
from worldengine.simulations.basic import find_threshold_f
import numpy
class HumiditySimulation(object):
@staticmethod
def is_applicable(world):
return world.has_precipitations() and world.has_irrigation() and (
not world.has_humidity())
def execute(self, world, seed):
        assert seed is not None
data, quantiles = self._calculate(world)
world.humidity = (data, quantiles)
@staticmethod
def _calculate(world):
humids = world.humids
precipitationWeight = 1.0
irrigationWeight = 3
data = numpy.zeros((world.height, world.width), dtype=float)
        data = (world.layers['precipitation'].data * precipitationWeight -
                world.layers['irrigation'].data * irrigationWeight) / (
                    precipitationWeight + irrigationWeight)
# These were originally evenly spaced at 12.5% each but changing them
# to a bell curve produced better results
ocean = world.layers['ocean'].data
quantiles = {}
quantiles['12'] = find_threshold_f(data, humids[6], ocean)
quantiles['25'] = find_threshold_f(data, humids[5], ocean)
quantiles['37'] = find_threshold_f(data, humids[4], ocean)
quantiles['50'] = find_threshold_f(data, humids[3], ocean)
quantiles['62'] = find_threshold_f(data, humids[2], ocean)
quantiles['75'] = find_threshold_f(data, humids[1], ocean)
quantiles['87'] = find_threshold_f(data, humids[0], ocean)
return data, quantiles
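# Sketch of the intended flow (the world object is assumed):
#     if HumiditySimulation.is_applicable(world):
#         HumiditySimulation().execute(world, seed=1234)
#         data, quantiles = world.humidity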
|
vitmod/dvbapp
|
lib/python/Plugins/SystemPlugins/SoftwareManager/Flash_online.py
|
Python
|
gpl-2.0
| 18,755
| 0.028686
|
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Components.Button import Button
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.FileList import FileList
from Components.Task import Task, Job, job_manager, Condition
from Components.Sources.StaticText import StaticText
from Screens.Console import Console
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Tools.Downloader import downloadWithProgress
from boxbranding import getBoxType
import urllib2
import os
import shutil
#############################################################################################################
urlimage = 'http://downloads.openspa.info/images/2.0'
imagePath = '/hdd/images'
flashPath = '/hdd/images/flash'
flashTmp = '/hdd/images/tmp'
ofgwritePath = '/usr/bin/ofgwrite'
#############################################################################################################
def Freespace(dev):
statdev = os.statvfs(dev)
space = (statdev.f_bavail * statdev.f_frsize) / 1024
print "[Flash Online] Free space on %s = %i kilobytes" %(dev, space)
return space
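# e.g. Freespace('/media/hdd') returning 300000 means roughly 300 MB free,
# which is the minimum later enforced by check_hdd().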
class FlashOnline(Screen):
skin = """
<screen position="center,center" size="560,400" title="Flash On the Fly">
<ePixmap position="0,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap position="280,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap position="420,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-online" position="10,30" zPosition="1" size="450,100" font="Regular;20" halign="left
|
" valign="top" transparent="1" />
<widget name="info-local" position="10,150" zPosition="1" size="450,200" font="Regular;20" halign="left" valign="top" transparent="1" />
</screen>"""
	def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("Flash On the Fly"))
self["key_yellow"] = Button("Local")
self["key_green"] = Button("Online")
self["key_red"] = Button(_("Exit"))
self["key_blue"] = Button("")
self["info-local"] = Label(_("Local = Flash a image from local path /hdd/images"))
self["info-online"] = Label(_("Online = Download a image and flash it"))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"blue": self.blue,
"yellow": self.yellow,
"green": self.green,
"red": self.quit,
"cancel": self.quit,
}, -2)
def check_hdd(self):
if not os.path.exists("/media/hdd"):
self.session.open(MessageBox, _("No /hdd found !!\nPlease make sure you have a HDD mounted.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
return False
if Freespace('/media/hdd') < 300000:
self.session.open(MessageBox, _("Not enough free space on /hdd !!\nYou need at least 300Mb free space.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
return False
if not os.path.exists(ofgwritePath):
self.session.open(MessageBox, _('ofgwrite not found !!\nPlease make sure you have ofgwrite installed in /usr/bin/ofgwrite.\n\nExit plugin.'), type = MessageBox.TYPE_ERROR)
return False
if not os.path.exists(imagePath):
os.mkdir(imagePath)
if os.path.exists(flashPath):
os.system('rm -rf ' + flashPath)
os.mkdir(flashPath)
return True
def quit(self):
self.close()
def blue(self):
pass
def green(self):
if self.check_hdd():
self.session.open(doFlashImage, online = True)
else:
self.close()
def yellow(self):
if self.check_hdd():
self.session.open(doFlashImage, online = False)
else:
self.close()
class doFlashImage(Screen):
skin = """
<screen position="center,center" size="560,500" title="Flash On the fly (select a image)">
<ePixmap position="0,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap position="280,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap position="420,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="imageList" position="10,10" zPosition="1" size="520,450" font="Regular;20" scrollbarMode="showOnDemand" transparent="1" />
</screen>"""
def __init__(self, session, online ):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("Flash On the fly (select a image)"))
self["key_green"] = Button(_("Flash"))
self["key_red"] = Button(_("Exit"))
self["key_blue"] = Button("")
self["key_yellow"] = Button("")
self.filename = None
self.imagelist = []
self.simulate = False
self.Online = online
self.imagePath = imagePath
self.feedurl = urlimage
self["imageList"] = MenuList(self.imagelist)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"green": self.green,
"yellow": self.yellow,
"red": self.quit,
"blue": self.blue,
"cancel": self.quit,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
def quit(self):
self.close()
def blue(self):
if self.Online:
self.layoutFinished()
return
sel = self["imageList"].l.getCurrentSelection()
		if sel is None:
			print "Nothing to select !!"
return
self.filename = sel
self.session.openWithCallback(self.RemoveCB, MessageBox, _("Do you really want to delete\n%s ?") % (sel), MessageBox.TYPE_YESNO)
def RemoveCB(self, ret):
if ret:
if os.path.exists(self.imagePath + "/" + self.filename):
os.remove(self.imagePath + "/" + self.filename)
self.imagelist.remove(self.filename)
self["imageList"].l.setList(self.imagelist)
def green(self):
sel = self["imageList"].l.getCurrentSelection()
		if sel is None:
			print "Nothing to select !!"
return
file_name = self.imagePath + "/" + sel
self.filename = file_name
box = getBoxType()
self.hide()
if self.Online:
url = self.feedurl + "/" + box + "/" + sel
u = urllib2.urlopen(url)
f = open(file_name, 'w
|
privateip/ansible-modules-core
|
database/mysql/mysql_user.py
|
Python
|
gpl-3.0
| 22,945
| 0.005709
|
#!/usr/bin/python
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database.
description:
- Adds or removes a user from a MySQL database.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
password:
description:
- set the user's password. (Required when adding a user)
required: false
default: null
encrypted:
description:
- Indicate that the 'password' field is a `mysql_native_password` hash
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.0"
host:
description:
- the 'host' part of the MySQL username
required: false
default: localhost
host_all:
description:
- override the host option, making ansible apply changes to
all hostnames for a given user. This option cannot be used
when creating users
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.1"
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)"
required: false
default: null
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.4"
sql_log_bin:
description:
- Whether binary logging should be enabled or disabled for the connection.
required: false
choices: ["yes", "no" ]
default: "yes"
version_added: "2.1"
state:
description:
- Whether the user should exist. When C(absent), removes
the user.
required: false
default: present
choices: [ "present", "absent" ]
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.3"
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.0"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
   - Currently, there is only support for the `mysql_native_password` encrypted password hash module.
author: "Jonathan Mainguy (@Jmainguy)"
extends_documentation_fragment: mysql
'''
EXAMPLES = """
# Removes anonymous user account for localhost
- mysql_user: name='' host=localhost state=absent
# Removes all anonymous user accounts
- mysql_user: name='' host_all=yes state=absent
# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user: name=bob password=12345 priv=*.*:ALL state=present
# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges
- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent
# Ensure no user named 'sally' exists at all
- mysql_user: name=sally host_all=yes state=absent
# Specify grants composed of more than one word
- mysql_user: name=replication password=12345 priv="*.*:REPLICATION CLIENT" state=present
# Revoke all privileges for user 'bob' and password '12345'
- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present
# Example privileges string format
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock
# Example of skipping binary logging while adding user 'bob'
- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present sql_log_bin=no
# Example .my.cnf file for setting the root password
[client]
user=root
password=n<_665{vS43y
"""
import getpass
import tempfile
import re
import string
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
# User Authentication Management was changed in MySQL 5.7
# This is a generic check for if the server version is less than version 5.7
def server_version_check(cursor):
cursor.execute("SELECT VERSION()");
result = cursor.fetchone()
version_str = result[0]
version = version_str.split('.')
# Currently we have no facility to handle new-style password update on
# mariadb and the old-style update continues to work
if 'mariadb' in version_str.lower():
return True
if (int(version[0]) <= 5 and int(version[1]) < 7):
return True
else:
return False
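# For instance (version strings are illustrative): '5.6.30-log' returns True
# (old-style password updates apply), '5.7.12' returns False, and any string
# containing 'mariadb' returns True regardless of version.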
def get_mode(cursor):
cursor.execute('SELECT @@GLOBAL.sql_mode')
result = cursor.fetchone()
mode_str = result[0]
if 'ANSI' in mode_str:
mode = 'ANSI'
else:
mode = 'NOTANSI'
return mode
def user_exists(cursor, user, host, host_all):
if host_all:
cursor.execute("SELECT count(*) FROM user WHERE user = %s", user)
else:
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
count = cursor.fetchone()
return count[0] > 0
def user_add(cursor, user, host, host_all, password, encrypted, new_priv, check_mode):
# we cannot create users without a proper hostname
if host_all:
return False
if check_mode:
return True
if password and encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (use
|
XueqingLin/tensorflow
|
tensorflow/python/training/sync_replicas_optimizer.py
|
Python
|
apache-2.0
| 40,433
| 0.003067
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
class SyncReplicasOptimizerV2(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
  The following accumulators/queue are created:

  * N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
  For the Parameter Server job:

  1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch.
  For the replicas:

  1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.SyncReplicasOptimizerV2(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
grads = opt.minimize(total_loss, global_step=self.global_step)
# You can now call get_init_tokens_op() and get_chief_queue_runner().
# Note that get_init_tokens_op() must be called before creating session
# because it modifies the graph by adding new nodes.
init_token_op = opt.get_init_tokens_op()
chief_queue_runner = opt.get_chief_queue_runner()
```
In the training program, every worker will run the train_op as if not
synchronized. But one worker (usually the chief) will need to execute the
chief_queue_runner and get_init_tokens_op from this optimizer.
```python
# When you create the supervisor, you need to add the local_init_op and
# ready_for_local_init_op to make sure the local_step is initialized to the
# global_step. Here is an example:
sv = tf.Supervisor(graph=g,
is_chief=is_chief,
# This initialize local step.
local_init_op=local_init_op,
# This makes sure global step is initialized before using.
ready_for_local_init_op=ready_for_local_init_op,
saver=model.saver)
# After the session is created by the Supervisor and before the main while
# loop:
if is_chief and FLAGS.sync_replicas:
sv.start_queue_runners(sess, [chief_queue_runner])
# Insert initial tokens to the queue.
sess.run(init_token_op)
```
@@__init__
@@compute_gradients
@@apply_gradients
@@get_chief_queue_runner
@@get_init_tokens_op
"""
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizerV2, self).__init__(use_locking, name)
logging.info(
|
arrayfire/arrayfire-python
|
setup.py
|
Python
|
bsd-3-clause
| 4,514
| 0.005981
|
#!/usr/bin/env python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import os
import re
# package can be distributed with arrayfire binaries or
# just with python wrapper files, the AF_BUILD_LOCAL
# environment var determines whether to build the arrayfire
# binaries locally rather than searching in a system install
AF_BUILD_LOCAL_LIBS = os.environ.get('AF_BUILD_LOCAL_LIBS')
print(f'AF_BUILD_LOCAL_LIBS={AF_BUILD_LOCAL_LIBS}')
if AF_BUILD_LOCAL_LIBS:
print('Proceeding to build ArrayFire libraries')
else:
print('Skipping binaries installation, only python files will be installed')
AF_BUILD_CPU = os.environ.get('AF_BUILD_CPU')
AF_BUILD_CPU = 1 if AF_BUILD_CPU is None else int(AF_BUILD_CPU)
AF_BUILD_CPU_CMAKE_STR = '-DAF_BUILD_CPU:BOOL=ON' if (AF_BUILD_CPU == 1) else '-DAF_BUILD_CPU:BOOL=OFF'
AF_BUILD_CUDA = os.environ.get('AF_BUILD_CUDA')
AF_BUILD_CUDA = 1 if AF_BUILD_CUDA is None else int(AF_BUILD_CUDA)
AF_BUILD_CUDA_CMAKE_STR = '-DAF_BUILD_CUDA:BOOL=ON' if (AF_BUILD_CUDA == 1) else '-DAF_BUILD_CUDA:BOOL=OFF'
AF_BUILD_OPENCL = os.environ.get('AF_BUILD_OPENCL')
AF_BUILD_OPENCL = 1 if AF_BUILD_OPENCL is None else int(AF_BUILD_OPENCL)
AF_BUILD_OPENCL_CMAKE_STR = '-DAF_BUILD_OPENCL:BOOL=ON' if (AF_BUILD_OPENCL == 1) else '-DAF_BUILD_OPENCL:BOOL=OFF'
AF_BUILD_UNIFIED = os.environ.get('AF_BUILD_UNIFIED')
AF_BUILD_UNIFIED = 1 if AF_BUILD_UNIFIED is None else int(AF_BUILD_UNIFIED)
AF_BUILD_UNIFIED_CMAKE_STR = '-DAF_BUILD_UNIFIED:BOOL=ON' if (AF_BUILD_UNIFIED == 1) else '-DAF_BUILD_UNIFIED:BOOL=OFF'
if AF_BUILD_LOCAL_LIBS:
# invoke cmake and build arrayfire libraries to install locally in package
from skbuild import setup
def filter_af_files(cmake_manifest):
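# filter_af_files keeps only runtime artifacts in the installed package:
# it drops headers, sources, cmake helper files, images, examples, and the
# bare .so symlinks that would duplicate their versioned targets.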
cmake_manifest = list(filter(lambda name: not (name.endswith('.h')
or name.endswith('.cpp')
or name.endswith('.hpp')
or name.endswith('.cmake')
or name.endswith('jpg')
or name.endswith('png')
or name.endswith('libaf.so') #avoids duplicates due to symlinks
or re.match(r'.*libaf\.so\.3\..*', name) is not None
or name.endswith('libafcpu.so')
or re.match(r'.*libafcpu\.so\.3\..*', name) is not None
or name.endswith('libafcuda.so')
or re.match(r'.*libafcuda\.so\.3\..*', name) is not None
or name.endswith('libafopencl.so')
or re.match(r'.*libafopencl\.so\.3\..*', name) is not None
or name.endswith('libforge.so')
or re.match(r'.*libforge\.so\.1\..*', name) is not None
or 'examples' in name), cmake_manifest))
return cmake_manifest
print('Building CMAKE with following configurable variables: ')
print(AF_BUILD_CPU_CMAKE_STR)
print(AF_BUILD_CUDA_CMAKE_STR)
print(AF_BUILD_OPENCL_CMAKE_STR)
print(AF_BUILD_UNIFIED_CMAKE_STR)
setup(
packages=['arrayfire'],
cmake_install_dir='',
cmake_process_manifest_hook=filter_af_files,
include_package_data=False,
cmake_args=[AF_BUILD_CPU_CMAKE_STR,
AF_BUILD_CUDA_CMAKE_STR,
AF_BUILD_OPENCL_CMAKE_STR,
AF_BUILD_UNIFIED_CMAKE_STR,
# todo: pass additional args from environ
'-DCMAKE_BUILD_TYPE:STRING="RelWithDebInfo"',
'-DFG_USE_STATIC_CPPFLAGS:BOOL=OFF',
'-DFG_WITH_FREEIMAGE:BOOL=OFF',
'-DCUDA_architecture_build_targets:STRING=All',
'-DAF_BUILD_DOCS:BOOL=OFF',
'-DAF_BUILD_EXAMPLES:BOOL=OFF',
'-DAF_INSTALL_STANDALONE:BOOL=ON',
'-DAF_WITH_IMAGEIO:BOOL=ON',
'-DAF_WITH_LOGGING:BOOL=ON',
'-DBUILD_TESTING:BOOL=OFF',
'-DAF_BUILD_FORGE:BOOL=ON',
'-DAF_INSTALL_LIB_DIR:STRING=arrayfire',
'-DAF_INSTALL_BIN_DIR:STRING=arrayfire',
'-DFG_INSTALL_LIB_DIR:STRING=arrayfire',
'-DAF_WITH_STATIC_MKL=ON',
]
)
else:
# ignores local arrayfire libraries, will search system instead
from setuptools import setup
setup()
|
MarsZone/DreamLand
|
evennia/evennia/locks/lockhandler.py
|
Python
|
bsd-3-clause
| 20,024
| 0.002497
|
"""
A *lock* defines access to a particular subsystem or property of
Evennia. For example, the "owner" property can be implemented as a
lock. Or the inability to lift an object or to ban users.
A lock consists of three parts:
 - access_type - this defines what kind of access this lock regulates. This
   is just a string.
- function call - this is one or many calls to functions that will determine
if the lock is passed or not.
- lock function(s). These are regular python functions with a special
set of allowed arguments. They should always return a boolean depending
on if they allow access or not.
A lock function is defined by existing in one of the modules
listed by settings.LOCK_FUNC_MODULES. It should also always
take four arguments looking like this:
funcname(accessing_obj, accessed_obj, *args, **kwargs):
[...]
The accessing object is the object wanting to gain access.
The accessed object is the object this lock resides on
args and kwargs will hold optional arguments and/or keyword arguments
to the function as a list and a dictionary respectively.
Example:
def perm(accessing_obj, accessed_obj, *args, **kwargs):
"Checking if the object has a particular, desired permission"
if args:
desired_perm = args[0]
return desired_perm in accessing_obj.permissions.all()
return False
Lock functions should most often be pretty general and ideally possible to
re-use and combine in various ways to build clever locks.
Lock definition ("Lock
|
string")
A lock definition is a string with a special syntax. It is added to
each object's lockhandler, making that lock available from then on.
The lock definition looks like this:
'access_type:[NOT] func1(args)[ AND|OR][NOT] func2() ...'
That is, the access_type, a colon followed by calls to lock functions
combined with AND or OR. NOT negates the result of the following call.
Example:
We want to limit who may edit a particular object (let's call this access_type
for 'edit', it depends on what the command is looking for). We want this to
only work for those with the Permission 'Builders'. So we use our lock
function above and define it like this:
'edit:perm(Builders)'
Here, the lock-function perm() will be called with the string
'Builders' (accessing_obj and accessed_obj are added automatically,
you only need to add the args/kwargs, if any).
If we wanted to make sure the accessing object was BOTH a Builders and a
GoodGuy, we could use AND:
'edit:perm(Builders) AND perm(GoodGuy)'
To allow EITHER Builders OR GoodGuys, we replace AND with OR. perm() is just
one example, the lock function can do anything and compare any properties of
the calling object to decide if the lock is passed or not.
'lift:attrib(very_strong) AND NOT attrib(bad_back)'
To make these work, add the string to the lockhandler of the object you want
to apply the lock to:
obj.lockhandler.add('edit:perm(Builders)')
From then on, a command that wants to check for 'edit' access on this
object would do something like this:
if not target_obj.lockhandler.has_perm(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
All objects also have a shortcut called 'access' that is recommended to
use instead:
if not target_obj.access(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
Permissions
Permissions are just text strings stored in a comma-separated list on
typeclassed objects. The default perm() lock function uses them,
taking into account settings.PERMISSION_HIERARCHY. Also, the
restricted @perm command sets them, but otherwise they are identical
to any other identifier you can use.
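Example (a sketch, assuming the default permission handler API):
obj.permissions.add("Builders")
obj.access(caller, 'edit')  # True once 'edit:perm(Builders)' is added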
"""
from __future__ import print_function
from builtins import object
import re
import inspect
from django.conf import settings
from evennia.utils import logger, utils
from django.utils.translation import ugettext as _
__all__ = ("LockHandler", "LockException")
WARNING_LOG = settings.LOCKWARNING_LOG_FILE
#
# Exception class. This will be raised
# by errors in lock definitions.
#
class LockException(Exception):
"""
Raised during an error in a lock.
"""
pass
#
# Cached lock functions
#
_LOCKFUNCS = {}
def _cache_lockfuncs():
"""
Updates the cache.
"""
global _LOCKFUNCS
_LOCKFUNCS = {}
for modulepath in settings.LOCK_FUNC_MODULES:
_LOCKFUNCS.update(utils.callables_from_module(modulepath))
#
# pre-compiled regular expressions
#
_RE_FUNCS = re.compile(r"\w+\([^)]*\)")
_RE_SEPS = re.compile(r"(?<=[ )])AND(?=\s)|(?<=[ )])OR(?=\s)|(?<=[ )])NOT(?=\s)")
_RE_OK = re.compile(r"%s|and|or|not")
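# _RE_FUNCS grabs individual lock-function calls such as "perm(Builders)";
# _RE_SEPS finds the AND/OR/NOT separators between them; _RE_OK (with its
# literal "%s" alternative) validates the placeholder string rebuilt in
# _parse_lockstring before it is eval'd.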
#
#
# Lock handler
#
#
class LockHandler(object):
"""
This handler should be attached to all objects implementing
permission checks, under the property 'lockhandler'.
"""
def __init__(self, obj):
"""
Loads and pre-caches all relevant locks and their functions.
Args:
obj (object): The object on which the lockhandler is
defined.
"""
if not _LOCKFUNCS:
_cache_lockfuncs()
self.obj = obj
self.locks = {}
try:
self.reset()
except LockException as err:
logger.log_trace(err)
def __str__(self):
return ";".join(self.locks[key][2] for key in sorted(self.locks))
def _log_error(self, message):
"Try to log errors back to object"
raise LockException(message)
def _parse_lockstring(self, storage_lockstring):
"""
Helper function. This is normally only called when the
lockstring is cached and does preliminary checking. locks are
stored as a string
atype:[NOT] lock()[[ AND|OR [NOT] lock()[...]];atype...
Args:
storage_lockstring (str): The lockstring to parse.
"""
locks = {}
if not storage_lockstring:
return locks
duplicates = 0
elist = [] # errors
wlist = [] # warnings
for raw_lockstring in storage_lockstring.split(';'):
if not raw_lockstring:
continue
lock_funcs = []
try:
access_type, rhs = (part.strip() for part in raw_lockstring.split(':', 1))
except ValueError:
logger.log_trace()
return locks
# parse the lock functions and separators
funclist = _RE_FUNCS.findall(rhs)
evalstring = rhs
for pattern in ('AND', 'OR', 'NOT'):
evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(), evalstring)
nfuncs = len(funclist)
for funcstring in funclist:
funcname, rest = (part.strip().strip(')') for part in funcstring.split('(', 1))
func = _LOCKFUNCS.get(funcname, None)
if not callable(func):
elist.append(_("Lock: lock-function '%s' is not available.") % funcstring)
continue
args = list(arg.strip() for arg in rest.split(',') if arg and '=' not in arg)
kwargs = dict([arg.split('=', 1) for arg in rest.split(',') if arg and '=' in arg])
lock_funcs.append((func, args, kwargs))
evalstring = evalstring.replace(funcstring, '%s')
if len(lock_funcs) < nfuncs:
continue
try:
# purge the eval string of any superfluous items, then test it
evalstring = " ".join(_RE_OK.findall(evalstring))
eval(evalstring % tuple(True for func in funclist), {}, {})
except Exception:
elist.append(_("Lock: definition '%s' has syntax errors.") % raw_lockstring)
continue
if access_type in locks:
duplicates += 1
wlist.append(_("LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' " % \
{"obj":self.obj, "access_type":access_type, "source":locks[access_type][2], "goal":raw_lockstring}))
locks[access_type] = (evalstring, tuple(lock_funcs), raw_lockstring)
|
NoyaInRain/tornado
|
tornado/web.py
|
Python
|
apache-2.0
| 138,158
| 0.000637
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request, or to limit your use of other threads to
`.IOLoop.run_in_executor` and ensure that your callbacks running in
the executor do not refer to Tornado objects.
"""
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import http.cookies
from inspect import isclass
from io import BytesIO
import mimetypes
import numbers
import os.path
import re
import sys
import threading
import time
import tornado
import traceback
import types
import urllib.parse
from urllib.parse import urlencode
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import escape
from tornado import gen
from tornado.httpserver import HTTPServer
from tornado import httputil
from tornado import iostream
import tornado.locale
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.routing import (
AnyMatches,
DefaultHostMatches,
HostMatches,
ReversibleRouter,
Rule,
ReversibleRuleRouter,
URLSpec,
_RuleList,
)
from tornado.util import ObjectDict, unicode_type, _websocket_mask
url = URLSpec
from typing import (
Dict,
Any,
Union,
Optional,
Awaitable,
Tuple,
List,
Callable,
Iterable,
Generator,
Type,
cast,
overload,
)
from types import TracebackType
import typing
if typing.TYPE_CHECKING:
from typing import Set # noqa: F401
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = Union[bytes, unicode_type, int, numbers.Integral, datetime.datetime]
_CookieSecretTypes = Union[str, bytes, Dict[int, str], Dict[int, bytes]]
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value
|
version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class _ArgDefaultMarker:
pass
_ARG_DEFAULT = _ArgDefaultMarker()
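# Sentinel default for RequestHandler.get_argument() and friends; it lets a
# handler distinguish "no default supplied" (raise on a missing argument)
# from an explicit default of None.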
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
Applications should not construct `RequestHandler` objects
directly and subclasses should not override ``__init__`` (override
`~RequestHandler.initialize` instead).
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT", "OPTIONS")
_template_loaders = {} # type: Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
_stream_request_body = False
# Will be set in _execute.
_transforms = None # type: List[OutputTransform]
path_args = None # type: List[str]
path_kwargs = None # type: Dict[str, str]
def __init__(
self,
application: "Application",
request: httputil.HTTPServerRequest,
**kwargs: Any
) -> None:
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._prepared_future = None
self.ui = ObjectDict(
(n, self._ui_method(m)) for n, m in application.ui_methods.items()
)
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self, application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
assert self.request.connection is not None
# TODO: need to add set_close_callback to HTTPConnection interface
self.request.connection.set_close_callback( # type: ignore
self.on_connection_close
)
self.initialize(**kwargs) # type: ignore
def _initialize(self) -> None:
pass
initialize = _initialize # type: Callable[..., None]
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a ``URLSpec`` will be
supplied as keyword arguments to ``initialize()``.
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
@property
def settings(self) -> Dict[str, Any]:
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def _unimplemented_method(self, *args: str, **kwargs: str) -> None:
raise HTTPError(405)
head = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
get = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
post = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
delete = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
patch = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
put = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
options = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
def prepare(self) -> Optional[Awaitable[None]]:
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless of the request method.
|
adviti/melange
|
thirdparty/google_appengine/google/appengine/api/prospective_search/prospective_search_pb.py
|
Python
|
apache-2.0
| 60,260
| 0.020179
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.entity_pb import EntityProto
class SchemaEntry(ProtocolBuffer.ProtocolMessage):
STRING = 1
INT32 = 2
BOOLEAN = 3
DOUBLE = 4
POINT = 5
USER = 6
REFERENCE = 7
_Type_NAMES = {
1: "STRING",
2: "INT32",
3: "BOOLEAN",
4: "DOUBLE",
5: "POINT",
6: "USER",
7: "REFERENCE",
}
def Type_Name(cls, x): return cls._Type_NAMES.get(x, "")
Type_Name = classmethod(Type_Name)
has_name_ = 0
name_ = ""
has_type_ = 0
type_ = 0
has_meaning_ = 0
meaning_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = 0
def has_type(self): return self.has_type_
def meaning(self): return self.meaning_
def set_meaning(self, x):
self.has_meaning_ = 1
self.meaning_ = x
def clear_meaning(self):
if self.has_meaning_:
self.has_meaning_ = 0
self.meaning_ = 0
def has_meaning(self): return self.has_meaning_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_type()): self.set_type(x.type())
if (x.has_meaning()): self.set_meaning(x.meaning())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_meaning_ != x.has_meaning_: return 0
if self.has_meaning_ and self.meaning_ != x.meaning_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += self.lengthVarInt64(self.type_)
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_type_):
n += 1
n += self.lengthVarInt64(self.type_)
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
return n
def Clear(self):
self.clear_name()
self.clear_type()
self.clear_meaning()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
out.putVarInt32(16)
out.putVarInt32(self.type_)
if (self.has_meaning_):
out.putVarInt32(24)
out.putVarInt32(self.meaning_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_type_):
out.putVarInt32(16)
out.putVarInt32(self.type_)
if (self.has_meaning_):
out.putVarInt32(24)
out.putVarInt32(self.meaning_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 16:
self.set_type(d.getVarInt32())
continue
if tt == 24:
self.set_meaning(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
ktype = 2
kmeaning = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "type",
3: "meaning",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SchemaEntry'
class SubscribeRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_sub_id_ = 0
sub_id_ = ""
has_lease_duration_sec_ = 0
lease_duration_sec_ = 0.0
has_vanilla_query_ = 0
vanilla_query_ = ""
def __init__(self, contents=None):
self.schema_entry_ = []
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def sub_id(self): return self.sub_id_
def set_sub_id(self, x):
self.has_sub_id_ = 1
self.sub_id_ = x
def clear_sub_id(self):
if self.has_sub_id_:
self.has_sub_id_ = 0
self.sub_id_ = ""
def has_sub_id(self): return self.has_sub_id_
def lease_duration_sec(self): return self.lease_duration_sec_
def set_lease_duration_sec(self, x):
self.has_lease_duration_sec_ = 1
self.lease_duration_sec_ = x
def clear_lease_duration_sec(self):
if self.has_lease_duration_sec_:
self.has_lease_duration_sec_ = 0
self.lease_duration_sec_ = 0.0
def has_lease_duration_sec(self): return self.has_lease_duration_sec_
def vanilla_query(self): return self.vanilla_query_
def set_vanilla_query(self, x):
self.has_vanilla_query_ = 1
self.vanilla_query_ = x
def clear_vanilla_query(self):
if self.has_vanilla_query_:
self.has_vanilla_query_ = 0
self.vanilla_query_ = ""
def has_vanilla_query(self): return self.has_vanilla_query_
def schema_entry_size(self): return len(self.schema_entry_)
def schema_entry_list(self): return self.schema_entry_
def schema_entry(self, i):
return self.schema_entry_[i]
def mutable_schema_entry(self, i):
return self.schema_entry_[i]
def add_schema_entry(self):
x = SchemaEntry()
self.schema_entry_.append(x)
return x
def clear_schema_entry(self):
self.schema_entry_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_sub_id()): self.set_sub_id(x.sub_id())
|
haijieg/SFrame
|
oss_src/unity/python/sframe/data_structures/__init__.py
|
Python
|
bsd-3-clause
| 841
| 0.005945
|
"""
GraphLab Create offers several data structures for data analysis.
Concise descriptions of the data structures and their methods are contained in
the API documentation, along with a small number of simple examples. For more
detailed descriptions and examples, please see the `User Guide
<https://dato.com/learn/userguide/>`_, `API Translator
<https://dato.com/learn/translator/>`_, `How-Tos
<https://dato.com/learn/how-to/>`_, and data science `Gallery
<https://dato.com/learn/gallery/>`_.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
__all__ = ['sframe', 'sarray', 'sgraph', 'sketch', 'image']
from . import image
from . import sframe
from . import sarray
from . import sgraph
from . import sketch
|
b-jesch/service.fritzbox.callmonitor
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/keyrings/alt/_win_crypto.py
|
Python
|
gpl-2.0
| 3,477
| 0.000575
|
from ctypes import Structure, POINTER, c_void_p, cast, create_string_buffer, \
c_char_p, byref, memmove
from ctypes import windll, WinDLL, WINFUNCTYPE
try:
from ctypes import wintypes
except ValueError:
# see http://bugs.python.org/issue16396
raise ImportError("wintypes")
from keyring.util.escape import u
# Crypto API ctypes bindings
class DATA_BLOB(Structure):
_fields_ = [('cbData', wintypes.DWORD),
('pbData', POINTER(wintypes.BYTE))]
class CRYPTPROTECT_PROMPTSTRUCT(Structure):
_fields_ = [('cbSize', wintypes.DWORD),
('dwPromptFlags', wintypes.DWORD),
('hwndApp', wintypes.HWND),
('szPrompt', POINTER(wintypes.WCHAR))]
# Flags for CRYPTPROTECT_PROMPTSTRUCT
CRYPTPROTECT_PROMPT_ON_UNPROTECT = 1
CRYPTPROTECT_PROMPT_ON_PROTECT = 2
# Flags for CryptProtectData/CryptUnprotectData
CRYPTPROTECT_UI_FORBIDDEN = 0x01
CRYPTPROTECT_LOCAL_MACHINE = 0x04
CRYPTPROTECT_CRED_SYNC = 0x08
CRYPTPROTECT_AUDIT = 0x10
CRYPTPROTECT_NO_RECOVERY = 0x20
CRYPTPROTECT_VERIFY_PROTECTION = 0x40
CRYPTPROTECT_CRED_REGENERATE = 0x80
# Crypto API Functions
_dll = WinDLL('CRYPT32.DLL')
CryptProtectData = WINFUNCTYPE(wintypes.BOOL,
POINTER(DATA_BLOB),
POINTER(wintypes.WCHAR),
POINTER(DATA_BLOB),
c_void_p,
POINTER(CRYPTPROTECT_PROMPTSTRUCT),
wintypes.DWORD,
POINTER(DATA_BLOB))(('CryptProtectData', _dll))
CryptUnprotectData = WINFUNCTYPE(wintypes.BOOL,
POINTER(DATA_BLOB),
POINTER(wintypes.WCHAR),
POINTER(DATA_BLOB),
c_void_p,
POINTER(CRYPTPROTECT_PROMPTSTRUCT),
wintypes.DWORD, POINTER(DATA_BLOB))(
('CryptUnprotectData', _dll))
# Functions
def encrypt(data, non_interactive=0):
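# Wrap the plaintext in a DATA_BLOB and hand it to DPAPI's CryptProtectData;
# the resulting blob is bound to the current Windows user account.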
blobin = DATA_BLOB(cbData=len(data),
pbData=cast(c_char_p(data),
POINTER(wintypes.BYTE)))
blobout = DATA_BLOB()
if not CryptProtectData(byref(blobin),
u('python-keyring-lib.win32crypto'),
None, None, None,
CRYPTPROTECT_UI_FORBIDDEN,
byref(blobout)):
raise OSError("Can't encrypt")
encrypted = create_string_buffer(blobout.cbData)
memmove(encrypted, blobout.pbData, blobout.cbData)
windll.kernel32.LocalFree(blobout.pbData)
return encrypted.raw
def decrypt(encrypted, non_interactive=0):
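# Reverse of encrypt(): CryptUnprotectData only succeeds for the same
# Windows account that produced the blob.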
blobin = DATA_BLOB(cbData=len(encrypted),
pbData=cast(c_char_p(encrypted),
POINTER(wintypes.BYTE)))
blobout = DATA_BLOB()
if not CryptUnprotectData(byref(blobin),
u('python-keyring-lib.win32crypto'),
None, None, None,
CRYPTPROTECT_UI_FORBIDDEN,
byref(blobout)):
raise OSError("Can't decrypt")
data = create_string_buffer(blobout.cbData)
memmove(data, blobout.pbData, blobout.cbData)
windll.kernel32.LocalFree(blobout.pbData)
return data.raw
|
laszewsm/backuper
|
backup.py
|
Python
|
gpl-2.0
| 1,456
| 0.013049
|
#!/usr/bin/env python
''' For the script below to work correctly, you need to configure
RSA / DSA key login on the machine to which the backup will be transferred.
The configuration procedure can be found here
http://www.nerdokracja.pl/linux-logowanie-ssh-klucze-rsa/
'''
import os
import os.path
print ('Creating a temporary folder')
cmd = "mkdir ~/tmp" # create the temporary folder
print ('Creating a backup of the database')
# -u user << enter your login here
# -phaslo << the password goes directly after -p, with no space
# moja_baza_danych << change to the name of the database to back up
# moja.sql << name of the .sql file holding the database dump
cmd1 = "mysqldump -u user -phaslo moja_baza_danych > ~/tmp/moja.sql"
print ('Creating a backup of the folders')
# /home/ /root/ /var/www/ /etc/ << folders to back up, change them as you see fit
cmd2 = "zip -r ~/tmp/backup.zip ~/tmp/moja.sql /home/ /root/ /var/www/ /etc/"
# Login via scp
# konto << name of the account on the machine backup.zip will be sent to
# jakis_adres_ip << address of the machine the backup will be sent to
# /home/backup/ << location where backup.zip will be stored
print ('Sending the backup ...')
cmd3 = "scp ~/tmp/backup.zip konto@jakis_adres_ip:/home/backup"
print('Removing the temporary folder')
cmd4 = "rm -R ~/tmp"
# run the commands defined above
os.system(cmd)
os.system(cmd1)
os.system(cmd2)
os.system(cmd3)
os.system(cmd4)
|
g10k/sw_tts
|
sw_tts/settings.py
|
Python
|
mit
| 5,058
| 0.000989
|
"""
Django settings for sw_tts project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1^xn16c_l*5+ko)_#nrue-+as1@jowgr1+e%0y4fk@#rd%*j)3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework_swagger',
'rest_framework',
'tts',
'core',
'social.apps.django_app.default',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sw_tts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'sw_tts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('frontend', os.path.join(BASE_DIR, 'frontend')),
)
LANGUAGE_CODE = 'ru-Ru'
OUTPUT_DIR = os.path.join(BASE_DIR, 'generated')
MAX_SOUND_LIFE = 60*60*12 # seconds of sound file storing
SOCIAL_AUTH_VK_OPENAPI_ID = '5596606'
SOCIAL_AUTH_VK_APP_SECRET = 'jBx8nnH7pzevq7UA3hH0'
SOCIAL_AUTH_VK_APP_USER_MODE = 2
VK_APP_ID = '5596606'
VKONTAKTE_APP_ID = VK_APP_ID
VK_API_SECRET = 'jBx8nnH7pzevq7UA3hH0'
VKONTAKTE_APP_SECRET = VK_API_SECRET
SOCIAL_AUTH_VK_OAUTH2_KEY = '5596606'
SOCIAL_AUTH_VK_OAUTH2_SECRET = 'jBx8nnH7pzevq7UA3hH0'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.vk.VKOAuth2',
'social.backends.vk.VKontakteOpenAPI',
'social.backends.yandex.YaruOAuth2',
'social.backends.yandex.YandexOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_URL_NAMESPACE = 'social'
# SOCIAL_AUTH_STORAGE = 'social.apps.django_app.me.models.DjangoStorage'
REST_FRAMEWORK = {
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '25/minute',
'user': '25/minute'
}
}
try:
from sw_tts.local_settings import *
except ImportError:
print("Warning: no local_settings.py")
|
mfrey/baltimore
|
persistence/__init__.py
|
Python
|
gpl-3.0
| 74
| 0
|
__all__ = ['persistence', 'baltimorejsondecoder', 'baltimorejsonencoder']
|
cchristelis/inasafe
|
safe/report/test/test_template_composition.py
|
Python
|
gpl-3.0
| 4,573
| 0.000219
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Template Composition Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'akbargumbira@gmail.com'
__date__ = '06/01/2015'
__copyright__ = ('Copyright 2013, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import unittest
import shutil
import logging
from safe.report.template_composition import TemplateComposition
from safe.utilities.resources import resources_path
from safe.test.utilities import get_qgis_app, temp_dir
LOGGER = logging.getLogger('InaSAFE')
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
INASAFE_TEMPLATE_PATH = resources_path(
'qgis-composer-templates', 'a4-portrait-blue.qpt')
class TemplateCompositionTest(unittest.TestCase):
"""Test Impact Merge Dialog widget."""
# noinspection PyPep8Naming
def setUp(self):
"""Runs before each test."""
pass
# noinspection PyPep8Naming
def tearDown(self):
"""Runs after each test."""
pass
def test_constructor(self):
"""Test constructor."""
# If we give param map_settings, composition instance must not be none
map_settings = CANVAS.mapSettings()
template_composition = TemplateComposition(map_settings=map_settings)
message = 'The composition instance variable must not be none.'
self.assertIsNotNone(template_composition.composition, message)
def test_missing_elements(self):
"""Test if we can get missing elements correctly."""
# Copy the inasafe template to temp dir
template_path = os.path.join(
temp_dir('test'), 'a4-portrait-blue.qpt')
shutil.copy2(INASAFE_TEMPLATE_PATH, template_path)
template_composition = TemplateComposition(template_path=template_path)
# No missing elements here
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map',
'impact-legend']
template_composition.component_ids = component_ids
message = 'There should be no missing elements, but it gets: %s' % (
template_composition.missing_elements)
expected_result = []
self.assertEqual(
template_composition.missing_elements, expected_result, message)
# There are missing elements
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map', 'impact-legend',
'i-added-element-id-here-nooo']
template_composition.component_ids = component_ids
message = 'There should be one missing element, but it gets: %s' % (
template_composition.missing_elements)
expected_result = ['i-added-element-id-here-nooo']
self.assertEqual(
template_composition.missing_elements, expected_result, message)
# Remove test dir
shutil.rmtree(temp_dir('test'))
def test_load_template(self):
"""Test we can load template correctly."""
# Copy the inasafe template to temp dir
template_path = os.path.join(
temp_dir('test'), 'a4-portrait-blue.qpt')
shutil.copy2(INASAFE_TEMPLATE_PATH, template_path)
template_composition = TemplateComposition(
template_path=template_path,
map_settings=CANVAS.mapSettings())
template_composition.load_template()
# Check the element of the composition
# In that template, there should be these components:
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map',
'impact-legend']
for component_id in component_ids:
component = template_composition.composition.getComposerItemById(
component_id)
message = ('In this template: %s, there should be this component '
'%s') % (INASAFE_TEMPLATE_PATH, component_id)
self.assertIsNotNone(component, message)
if __name__ == '__main__':
suite = unittest.makeSuite(TemplateCompositionTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
qvazzler/Flexget
|
flexget/plugins/plugin_aria2.py
|
Python
|
mit
| 22,207
| 0.003017
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import re
import xmlrpc.client
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.plugin import get_plugin_by_name
from socket import error as socket_error
log = logging.getLogger('aria2')
# TODO: stop using torrent_info_hash[0:16] as the GID
# for RENAME_CONTENT_FILES:
# to rename TV episodes, content_is_episodes must be set to yes
class OutputAria2(object):
"""
aria2 output plugin
Version 1.0.0
Configuration:
server: Where aria2 daemon is running. default 'localhost'
port: Port of that server. default '6800'
username: XML-RPC username set in aria2. default ''
password: XML-RPC password set in aria2. default ''
do: [add-new|remove-completed] What action to take with incoming
entries.
uri: URI of file to download. Can include inline Basic Auth para-
meters and use jinja2 templating with any fields available
in the entry. If you are using any of the dynamic renaming
options below, the filename can be included in this setting
using {{filename}}.
exclude_samples:
[yes|no] Exclude any files that include the word 'sample' in
their name. default 'no'
exclude_non_content:
[yes|no] Exclude any non-content files, as defined by filename
extensions not listed in file_exts. (See below.) default 'yes'
rename_content_files:
[yes|no] If set, rename all content files (as defined by
extensions listed in file_exts). default 'no'
rename_template:
If set, and rename_content_files is yes, all content files
will be renamed using the value of this field as a template.
Will be parsed with jinja2 and can include any fields
available in the entry. default ''
parse_filename:
[yes|no] If yes, filenames will be parsed with either the
series parser (if content_is_episodes is set to yes) or the
movie parser. default: 'no'
content_is_episodes:
[yes|no] If yes, files will be parsed by the series plugin
parser to attempt to determine series name and series_id. If
no, files will be treated as movies. Note this has no effect
unless parse_filename is set to yes. default 'no'
keep_parent_folders:
[yes|no] If yes, any parent folders within the torrent itself
will be kept and created within the download directory.
For example, if a torrent has this structure:
MyTorrent/
MyFile.mkv
If this is set to yes, the MyTorrent folder will be created in
the download directory. If set to no, the folder will be
ignored and the file will be downloaded directly into the
download directory. default: 'no'
fix_year: [yes|no] If yes, and the last four characters of the series
name are numbers, enclose them in parentheses as they are
likely a year. Example: Show Name 1995 S01E01.mkv would become
Show Name (1995) S01E01.mkv. default 'yes'
file_exts: [list] File extensions of all files considered to be content
files. Used to determine which files to rename or which files
to exclude from download, with appropriate options set. (See
above.)
default: ['.mkv', '.avi', '.mp4', '.wmv', '.asf', '.divx',
'.mov', '.mpg', '.rm']
aria_config:
"Parent folder" for any options to be passed directly to aria.
Any command line option listed at
http://aria2.sourceforge.net/manual/en/html/aria2c.html#options
can be used by removing the two dashes (--) in front of the
command name, and changing key=value to key: value. All
options will be treated as jinja2 templates and rendered prior
to passing to aria2. default ''
Sample configuration:
aria2:
server: myserver
port: 6802
do: add-new
exclude_samples: yes
exclude_non_content: yes
parse_filename: yes
content_is_episodes: yes
rename_content_files: yes
rename_template: '{{series_name}} - {{series_id|lower}}'
aria_config:
max-connection-per-server: 4
max-concurrent-downloads: 4
split: 4
file-allocation: none
dir: "/Volumes/all_my_tv/{{series_name}}"
"""
schema = {
'type': 'object',
'properties': {
'server': {'type': 'string', 'default': 'localhost'},
'port': {'type': 'integer', 'default': 6800},
'username': {'type': 'string', 'default': ''},
'password': {'type': 'string', 'default': ''},
'do': {'type': 'string', 'enum': ['add-new', 'remove-completed']},
'uri': {'type': 'string'},
'exclude_samples': {'type': 'boolean', 'default': False},
'exclude_non_content': {'type': 'boolean', 'default': True},
'rename_content_files': {'type': 'boolean', 'default': False},
'content_is_episodes': {'type': 'boolean', 'default': False},
'keep_parent_folders': {'type': 'boolean', 'default': False},
'parse_filename': {'type': 'boolean', 'default': False},
'fix_year': {'type': 'boolean', 'default': True},
'rename_template': {'type': 'string', 'default': ''},
'file_exts': {
'type': 'array',
'items': {'type': 'string'},
'default': ['.mkv', '.avi', '.mp4', '.wmv', '.asf', '.divx', '.mov', '.mpg', '.rm']
},
'aria_config': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'string'}, {'type': 'integer'}]}
}
},
'required': ['do'],
'additionalProperties': False
}
def on_task_output(self, task, config):
if 'aria_config' not in config:
config['aria_config'] = {}
if 'uri' not in config and config['do'] == 'add-new':
raise plugin.PluginError('uri (path to folder containing file(s) on server) is required when adding new '
'downloads.', log)
if 'dir' not in config['aria_config']:
if config['do'] == 'add-new':
raise plugin.PluginError('dir (destination directory) is required.', log)
else:
config['aria_config']['dir'] = ''
if config['keep_parent_folders'] and config['aria_config']['dir'].find('{{parent_folders}}') == -1:
raise plugin.PluginError('When using keep_parent_folders, you must specify {{parent_folders}} in the dir '
'option to show where it goes.', log)
if config['rename_content_files'] and not config['rename_template']:
raise plugin.PluginError('When using rename_content_files, you must specify a rename_template.', log)
if config['username'] and not config['password']:
raise plugin.PluginError('If you specify an aria2 username, you must specify a password.')
try:
userpass = ''
if config['username']:
userpass = '%s:%s@' % (config['username'], config['password'])
baseurl = 'http://%s%s:%s/rpc' % (userpass, config['server'], config['port'])
log.debug('base url: %s' % baseurl)
s = xmlrpc.client.ServerProxy(baseurl)
log.info('Connected to daemon at ' + baseurl + '.')
except xmlrpc.client.ProtocolError as err:
raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
% (baseurl, err.errcode, err.errmsg), log)
|
jayanthkoushik/torch-gel
|
gel/__init__.py
|
Python
|
mit
| 160
| 0
|
import gel.gelcd
import gel.gelfista
import gel.gelpaths
import gel.ridgepaths
__version__ = "2.0.0"
__all__ = ["gelcd", "gelfista", "gelpaths", "ridg
|
epaths"]
|
jrziviani/ziviani.net
|
build.py
|
Python
|
gpl-3.0
| 1,772
| 0.004515
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# blog.py - maps requests to methods and handles them accordingly.
# Copyright (C) 2017 Jose Ricardo Ziviani
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --
# IMPORTS
# --
from src.templates import templates
import os
import sys
import subprocess
# --
# CONSTANTS
# --
DEFAULT_DIR = os.path.dirname(os.path.realpath(__file__))
# --
# IMPLEMENTATION
# --
def run_command(cmd):
'''
Runs an arbitrary command in the shell
'''
proc = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
if err:
print err
#sys.exit(1)
return out
def create_feeds():
'''
Creates the feed.xml file based on the published posts available
'''
print 'Creating feeds'
tmpls = templates()
if not tmpls.generate_metadata():
print 'ERROR: cannot create feed.xml'
sys.exit(1)
def update_folder():
'''
Updates local repository
'''
print 'Updating folders'
run_command('git pull')
# --
# ENTRY POINT
# --
if __name__ == '__main__':
update_folder()
create_feeds()
|
null-none/OpenGain
|
default_set/staticpages/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,523
| 0.005909
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='StaticPage',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('url', models.CharField(verbose_name='URL', db_index=True, max_length=100)),
('title', models.CharField(verbose_name='title', max_length=200)),
('title_ru', models.CharField(null=True, verbose_name='title', max_length=200)),
('title_en', models.CharField(null=True, verbose_name='title', max_length=200)),
('content', models.TextField(blank=True, verbose_name='content')),
('content_ru', models.TextField(null=True, blank=True, verbose_name='content')),
('content_en', models.TextField(null=True, blank=True, verbose_name='content')),
('template_name', models.CharField(help_text="Example: 'staticpages/contact_page.html'. If this isn't provided, the system will use 'staticpages/default.html'.", verbose_name='template name', blank=True, max_length=70)),
],
options={
'verbose_name_plural': 'static pages',
'ordering': ('url',),
'verbose_name': 'static page',
},
bases=(models.Model,),
),
]
|
iwoca/django-deep-collector
|
deep_collector/compat/serializers/django_1_9.py
|
Python
|
bsd-3-clause
| 2,672
| 0.004117
|
from django.core.serializers.json import Serializer
from ..builtins import StringIO
class CustomizableLocalFieldsSerializer(Serializer):
"""
This is a not so elegant copy/paste from django.core.serializer.base.Serializer serialize method.
We wanted to add parent fields of current serialized object because they are lacking when we want to import them
again.
We had to redefine serialize() method to add the possibility to subclass methods that are getting local
fields to serialize (get_local_fields and get_local_m2m_fields)
"""
internal_use_only = False
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
progress_bar = self.progress_class(
options.pop('progress_output', None), options.pop('object_count', 0)
)
self.start_serialization()
self.first = True
for count, obj in enumerate(queryset, start=1):
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's _meta
# This is to avoid local_fields problems for proxy models. Refs #17717.
concrete_model = obj._meta.concrete_model
for field in self.get_local_fields(concrete_model):
if field.serialize:
if field.remote_field is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in self.get_local_m2m_fields(concrete_model):
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
progress_bar.update(count)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue()
def get_local_fields(self, concrete_model):
return concrete_model._meta.local_fields
def get_local_m2m_fields(self, concrete_model):
return concrete_model._meta.many_to_many
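# A minimal sketch (hypothetical subclass, not part of the original module)
# showing what the two hooks above are for: narrowing which fields get
# serialized without copying serialize() itself.
class LocalFieldsOnlySerializer(CustomizableLocalFieldsSerializer):
def get_local_m2m_fields(self, concrete_model):
# Skip all many-to-many fields during serialization.
return []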
|
hfp/tensorflow-xsmm
|
tensorflow/python/keras/backend.py
|
Python
|
apache-2.0
| 160,654
| 0.006648
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES. We use a dummy class instead of something like a
# string because strings are not weakly-referencable.
class _DummyEagerGraph(object):
pass
_DUMMY_EAGER_GRAPH = _DummyEagerGraph()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.epsilon')
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
```python
>>> keras.backend.epsilon()
1e-07
```
"""
return _EPSILON
@keras_export('keras.backend.set_epsilon')
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Arguments:
value: float. New value of epsilon.
Example:
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
"""
global _EPSILON
_EPSILON = value
@keras_export('keras.backend.floatx')
def floatx():
"""Returns the default float type, as a string.
E.g. 'float16', 'float32', 'float64'.
Returns:
String, the current default float type.
Example:
```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
@keras_export('keras.backend.set_floatx')
def set_floatx(value):
"""Sets the default float type.
Arguments:
value: String; 'float16', 'float32', or 'float64'.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=_FLOATX)
@keras_export('keras.backend.image_data_format')
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
```python
>>> keras.backend.image_data_format()
  'channels_last'
```
"""
return _IMAGE_DATA_FORMAT
@keras_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Arguments:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
# A global dictionary mapping graph objects to an index of counters used
|
DreamSourceLab/DSView
|
libsigrokdecode4DSL/decoders/ir_nec/lists.py
|
Python
|
gpl-3.0
| 1,471
| 0.008158
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
# Addresses/devices. Items that are not listed are reserved/unknown.
address = {
0x40: 'Matsui TV',
}
digits = {
0: ['0', '0'],
1: ['1', '1'],
2: ['2', '2'],
3: ['3', '3'],
4: ['4', '4'],
5: ['5', '5'],
6: ['6', '6'],
7: ['7', '7'],
8: ['8', '8'],
9: ['9', '9'],
}
# Commands. Items that are not listed are reserved/unknown.
command = {
0x40: dict(list(digits.items()) + list({
11: ['-/--', '-/--'],
16: ['Mute', 'M'],
18: ['Standby', 'StBy'],
26: ['Volume up', 'Vol+'],
27: ['Program up', 'P+'],
30: ['Volume down', 'Vol-'],
31: ['Program down', 'P-'],
68: ['AV', 'AV'],
}.items())),
}
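# Editor's sketch (not part of the decoder): turning a decoded
# (address, command) pair into the human-readable names above.
_addr, _cmd = 0x40, 16
_device = address.get(_addr, 'Unknown')
_long, _short = command.get(_addr, {}).get(_cmd, ['Unknown', '?'])
# _device == 'Matsui TV', _long == 'Mute', _short == 'M'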
|
nicovillanueva/obsidian-core
|
src/screenshots/Capture.py
|
Python
|
gpl-2.0
| 836
| 0
|
class Capture(object):
"""
Generic Capture entity. Base class
that both Screenshot and Pdiff can inherit from.
"""
def __init__(self, img_path):
if img_path is not None:
try:
self.path = img_path
self.hashvalue = self._set_hash()
except IOError:
self.path = None
            self.hashvalue = None
def _set_hash(self):
from hashlib import md5
md5hasher = md5()
        with open(self.path, 'rb') as f:
            data = f.read()
md5hasher.update(data)
return str(md5hasher.hexdigest())
def to_string(self):
entity = ""
for each in self.__dict__:
if each[0] != "_":
entity += "%s: %s \n" % (each, self.__dict__.get(each))
return entity
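# Editor's sketch (hypothetical image path): hashing happens once in the
# constructor, and to_string() skips attributes starting with an underscore.
cap = Capture('/tmp/example.png')
if cap.path is not None:
    print(cap.hashvalue)  # md5 hex digest of the file contents
print(cap.to_string())    # e.g. "path: /tmp/example.png \nhashvalue: ...\n"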
|
mrkm4ntr/incubator-airflow
|
tests/dags/test_on_kill.py
|
Python
|
apache-2.0
| 1,647
| 0
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.timezone import datetime
class DummyWithOnKill(DummyOperator):
def execute(self, context):
import os
# This runs extra processes, so that we can be sure that we correctly
# tidy up all processes launched by a task when killing
if not os.fork():
os.system('sleep 10')
time.sleep(10)
def on_kill(self):
self.log.info("Executing on_kill")
with open("/tmp/airflow_on_kill", "w") as f:
f.write("ON_KILL_TEST")
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(dag_id='test_on_kill', start_date=datetime(2015, 1, 1))
dag1_task1 = DummyWithOnKill(task_id='task1', dag=dag1, owner='airflow')
|
california-civic-data-coalition/django-calaccess-processed-data
|
docs/conf.py
|
Python
|
mit
| 8,077
| 0.007305
|
# -*- coding: utf-8 -*-
import sys
import os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-calaccess-processed-data'
copyright = u'%s, California Civic Data Coalition' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-calaccess-processed-datadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-calaccess-processed-data.tex', u'django-calaccess-processed-data Documentation',
u'California Civic Data Coalition', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-calaccess-processed-data', u'django-calaccess-processed-data Documentation',
[u'California Civic Data Coalition'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-calaccess-processed-data', u'django-calaccess-processed-data Documentation',
u'California Civic Data Coalition', 'django-calaccess-processed-data', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
atzengin/OCC
|
occ/gui/Block.py
|
Python
|
gpl-3.0
| 8,149
| 0.006381
|
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
OpenCV Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
OpenCV Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
import Utils
import Colors
from .. base import odict
from Constants import BORDER_PROXIMITY_SENSITIVITY
from Constants import \
BLOCK_LABEL_PADDING, \
PORT_SEPARATION, LABEL_SEPARATION, \
PORT_BORDER_SEPARATION, POSSIBLE_ROTATIONS
import pygtk
pygtk.require('2.0')
import gtk
import pango
BLOCK_MARKUP_TMPL="""\
#set $foreground = $block.is_valid() and 'black' or 'red'
<span foreground="$foreground" font_desc="Sans 8"><b>$encode($block.get_name())</b></span>"""
class Block(Element):
"""The graphical signal block."""
def __init__(self):
"""
        Block constructor.
Add graphics related params to the block.
"""
#add the position param
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'GUI Coordinate',
'key': '_coordinate',
'type': 'raw',
'value': '(0, 0)',
'hide': 'all',
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'GUI Rotation',
'key': '_rotation',
'type': 'raw',
'value': '0',
'hide': 'all',
})
))
Element.__init__(self)
def get_coordinate(self):
"""
Get the coordinate from the position param.
Returns:
the coordinate tuple (x, y) or (0, 0) if failure
"""
try: #should evaluate to tuple
coor = eval(self.get_param('_coordinate').get_value())
x, y = map(int, coor)
fgW,fgH = self.get_parent().get_size()
if x <= 0:
x = 0
elif x >= fgW - BORDER_PROXIMITY_SENSITIVITY:
x = fgW - BORDER_PROXIMITY_SENSITIVITY
if y <= 0:
y = 0
elif y >= fgH - BORDER_PROXIMITY_SENSITIVITY:
y = fgH - BORDER_PROXIMITY_SENSITIVITY
return (x, y)
except:
self.set_coordinate((0, 0))
return (0, 0)
def set_coordinate(self, coor):
"""
Set the coordinate into the position param.
Args:
coor: the coordinate tuple (x, y)
"""
self.get_param('_coordinate').set_value(str(coor))
def get_rotation(self):
"""
Get the rotation from the position param.
Returns:
the rotation in degrees or 0 if failure
"""
        try: #should evaluate to int
rotation = eval(self.get_param('_rotation').get_value())
return int(rotation)
except:
self.set_rotation(POSSIBLE_ROTATIONS[0])
return POSSIBLE_ROTATIONS[0]
def set_rotation(self, rot):
"""
Set the rotation into the position param.
Args:
rot: the rotation in degrees
"""
self.get_param('_rotation').set_value(str(rot))
def create_shapes(self):
"""Update the block, parameters, and ports when a change occurs."""
Element.create_shapes(self)
if self.is_horizontal(): self.add_area((0, 0), (self.W, self.H))
elif self.is_vertical(): self.add_area((0, 0), (self.H, self.W))
def create_labels(self):
"""Create the labels for the signal block."""
Element.create_labels(self)
self._bg_color = self.get_enabled() and Colors.BLOCK_ENABLED_COLOR or Colors.BLOCK_DISABLED_COLOR
layouts = list()
#create the main layout
layout = gtk.DrawingArea().create_pango_layout('')
layouts.append(layout)
layout.set_markup(Utils.parse_template(BLOCK_MARKUP_TMPL, block=self))
self.label_width, self.label_height = layout.get_pixel_size()
#display the params
markups = [param.get_markup() for param in self.get_params() if param.get_hide() not in ('all', 'part')]
if markups:
layout = gtk.DrawingArea().create_pango_layout('')
layout.set_spacing(LABEL_SEPARATION*pango.SCALE)
layout.set_markup('\n'.join(markups))
layouts.append(layout)
w,h = layout.get_pixel_size()
self.label_width = max(w, self.label_width)
self.label_height += h + LABEL_SEPARATION
width = self.label_width
height = self.label_height
#setup the pixmap
pixmap = self.get_parent().new_pixmap(width, height)
gc = pixmap.new_gc()
gc.set_foreground(self._bg_color)
pixmap.draw_rectangle(gc, True, 0, 0, width, height)
#draw the layouts
h_off = 0
for i,layout in enumerate(layouts):
w,h = layout.get_pixel_size()
if i == 0: w_off = (width-w)/2
else: w_off = 0
pixmap.draw_layout(gc, w_off, h_off, layout)
h_off = h + h_off + LABEL_SEPARATION
#create vertical and horizontal pixmaps
self.horizontal_label = pixmap
if self.is_vertical():
self.vertical_label = self.get_parent().new_pixmap(height, width)
Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
#calculate width and height needed
self.W = self.label_width + 2*BLOCK_LABEL_PADDING
self.H = max(*(
[self.label_height+2*BLOCK_LABEL_PADDING] + [2*PORT_BORDER_SEPARATION + \
sum([port.H + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
for ports in (self.get_sources_gui(), self.get_sinks_gui())] +
[4*PORT_BORDER_SEPARATION + \
sum([(port.H) + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
for ports in ([i for i in self.get_sources_gui() if i.get_type() == 'bus'], [i for i in self.get_sinks_gui() if i.get_type() == 'bus'])]
))
def draw(self, gc, window):
"""
Draw the signal block with label and inputs/outputs.
Args:
gc: the graphics context
window: the gtk window to draw on
"""
x, y = self.get_coordinate()
#draw main block
Element.draw(
self, gc, window, bg_color=self._bg_color,
border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR,
)
#draw label image
if self.is_horizontal():
window.draw_drawable(gc, self.horizontal_label, 0, 0, x+BLOCK_LABEL_PADDING, y+(self.H-self.label_height)/2, -1, -1)
elif self.is_vertical():
window.draw_drawable(gc, self.vertical_label, 0, 0, x+(self.H-self.label_height)/2, y+BLOCK_LABEL_PADDING, -1, -1)
#draw ports
for port in self.get_ports_gui():
port.draw(gc, window)
def what_is_selected(self, coor, coor_m=None):
"""
Get the element that is selected.
Args:
coor: the (x,y) tuple
coor_m: the (x_m, y_m) tuple
Returns:
this block, a port, or None
"""
for port in self.get_ports_gui():
port_selected = port.what_is_selected(coor, coor_m)
if port_selected: return port_selected
return Element.what_is_selected(self, coor, coor_m)
|
mesbahamin/chronophore
|
tests/conftest.py
|
Python
|
mit
| 4,885
| 0
|
import logging
import pathlib
import pytest
from datetime import date, time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from chronophore.models import Base, Entry, User
logging.disable(logging.CRITICAL)
@pytest.fixture()
def nonexistent_file(tmpdir, request):
"""Return a path to an empty config file.
Remove the file when a test is finished with it.
"""
data_dir = pathlib.Path(str(tmpdir))
nonexistent = data_dir.joinpath('nonexistent')
if nonexistent.exists():
nonexistent.unlink()
def tearDown():
if nonexistent.exists():
nonexistent.unlink()
request.addfinalizer(tearDown)
return nonexistent
@pytest.fixture()
def invalid_file(tmpdir, request):
"""Return a path to an invalid config file.
Remove the file when a test is finished with it.
"""
data_dir = pathlib.Path(str(tmpdir))
invalid_file = data_dir.joinpath('invalid')
with invalid_file.open('w') as f:
f.write('this is invalid')
def tearDown():
if invalid_file.exists():
invalid_file.unlink()
request.addfinalizer(tearDown)
return invalid_file
@pytest.fixture()
def db_session(request, test_users, test_entries):
"""Create an in-memory sqlite database, add
some test users and entries, and return an
sqlalchemy session to it.
Close the session when the test is finished with it.
"""
engine = create_engine('sqlite:///:memory:')
Session = sessionmaker(bind=engine)
Base.metadata.create_all(engine)
session = Session()
session.add_all([user for user in test_users.values()])
session.add_all(test_entries)
def tearDown():
session.close()
request.addfinalizer(tearDown)
return session
@pytest.fixture()
def test_users():
test_users = dict(
frodo=User(
user_id='888000000',
date_joined=date(2014, 12, 11),
date_left=None,
education_plan=False,
personal_email='baggins.frodo@gmail.com',
first_name='Frodo',
last_name='Baggins',
major='Medicine',
is_student=True,
is_tutor=True,
),
sam=User(
user_id='888111111',
date_joined=date(2015, 2, 16),
date_left=None,
education_plan=True,
personal_email='gamgee.samwise@gmail.com',
first_name='Sam',
last_name='Gamgee',
major='Agriculture',
is_student=True,
is_tutor=False,
),
merry=User(
user_id='888222222',
date_joined=date(2015, 4, 12),
date_left=date(2016, 3, 24),
education_plan=True,
personal_email='brandybuck.merriadoc@gmail.com',
first_name='Merry',
last_name='Brandybuck',
major='Physics',
is_student=False,
is_tutor=True,
),
pippin=User(
user_id='888333333',
date_joined=date(2015, 2, 16),
date_left=None,
education_plan=False,
personal_email='took.peregrin@gmail.com',
first_name='Pippin',
last_name='Took',
major='Botany',
is_student=True,
is_tutor=False,
),
gandalf=User(
user_id='888444444',
date_joined=date(2010, 10, 10),
date_left=None,
education_plan=False,
personal_email='mithrandir@gmail.com',
first_name='Gandalf',
last_name='the Grey',
major='Computer Science',
is_student=False,
is_tutor=True,
),
)
return test_users
@pytest.fixture()
def test_entries():
test_entries = [
Entry(
uuid='4407d790-a05f-45cb-bcd5-6023ce9500bf',
date=date(2016, 2, 17),
time_in=time(10, 45, 23),
time_out=None,
user_id='888333333',
user_type='student',
),
Entry(
uuid='1f4f10a4-b0c6-43bf-94f4-9ce6e3e204d2',
date=date(2016, 2, 17),
time_in=time(10, 45, 48),
time_out=time(13, 30, 18),
user_id='888222222',
user_type='tutor',
),
Entry(
uuid='7b4ae0fc-3801-4412-998f-ace14829d150',
date=date(2016, 2, 17),
time_in=time(12, 45, 9),
time_out=time(16, 44, 56),
user_id='888111111',
user_type='student',
),
Entry(
uuid='42a1eab2-cb94-4d05-9bab-e1a021f7f949',
            date=date(2016, 2, 17),
time_in=time(10, 45, 48),
time_out=None,
user_id='888222222',
user_type='tutor',
),
]
return test_entries
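# Editor's sketch of a test consuming these fixtures (hypothetical; pytest
# injects db_session, which pulls in test_users and test_entries above).
def test_open_entries_sketch(db_session):
    # Entries whose time_out is None are still signed in; the sample
    # data above contains exactly two of them.
    open_entries = db_session.query(Entry).filter(
        Entry.time_out.is_(None)).all()
    assert len(open_entries) == 2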
|
bbiped-platform/BBIPED
|
src/BBIPED-GUI/src/Automatization/templates/BackwardBlade3D_template/BackwardBlade3D_template_Calculations.py
|
Python
|
lgpl-3.0
| 12,276
| 0.039915
|
from __future__ import division
from math import *
import numpy as np
import scipy
import matplotlib
# USER INPUT
def UserValues(VariablesToModify):
# Blade Geometry Definition
_DefinedValues_dict = dict()
_DefinedValues_dict["Blade_gamma"]=45 # Blade stagger angle
_DefinedValues_dict["Blade_B1"]=15 # angle of attack (negative value is forward blade)
_DefinedValues_dict["Blade_B2"]=15 # exit angle respect to Radiar vector
_DefinedValues_dict["Blade_Ri"]=300 # blade leading edge radius
_DefinedValues_dict["Blade_Ro"]=500 # blade trailing edge radius
_DefinedValues_dict["Blade_Thickness"]=5 #
_DefinedValues_dict["Blade_Number"]=3 #
_DefinedValues_dict["Blade_Height"]=148 #
_DefinedValues_dict["Blade_LeadingEdge_Cut_Height_frac"]=0.4 #
_DefinedValues_dict["Blade_TrailingEdge_Cut_Height_fac"]=0.9 #
_DefinedValues_dict["Blade_Cut_RotorSpanfract"]=0.2 #
# Volute Geometry Definition
_DefinedValues_dict["Vol_beta"] = 8 # This is the angle at which the volute tongue starts to create the seashell respect to the tangent of the origin centered circle crossing that point.
_DefinedValues_dict["Vol_omega"] = 40 # The user will introduce an angle like 40, but we work with 40+180
_DefinedValues_dict["Vol_gamma"] = 63.8 # The user will introduce an angle like 40, but we work with 40+180
_DefinedValues_dict["Vol_F_adim"] = 0.18 # This is the free space distance from the rotor outlet to the N0 vertex, in fraction of Rotor outlet Radius (Vol_F_adim = 1 = Rot_out distance)
_DefinedValues_dict["Vol_Rc_adim"] = 0.045 # Radius of the volute tonge / Rot_out length
_DefinedValues_dict["Vol_W_adim"] = 0.65 # Distance from Rot_out radius until the outest volute x coordinate (N4x), in fraction of Rot_out length (Vol_W = 1 = Rot_out)
_DefinedValues_dict["Vol_N1x_adim"] = 0.0 # in fraction of Rot_out length (Vol_N1x = 1 = Rot_out)
_DefinedValues_dict["Vol_N2y_adim"] = 0.0 # in fraction of Rot_out length (Vol_N2y = 1 = Rot_out)
_DefinedValues_dict["Vol_N3x_adim"] = 0.0 # in fraction of Rot_out length (Vol_N3x = 1 = Rot_out)
_DefinedValues_dict["Vol_N4y_adim"] = 0.0 # in fraction of Rot_out length (Vol_N4y = 1 = Rot_out)
_DefinedValues_dict["Vol_difussor_length_adim"] = 1
_DefinedValues_dict["Vol_Height"] = 444
# Rotor definition
_DefinedValues_dict["Rot_in"] = 270 #
_DefinedValues_dict["Rot_out"] = 580 #
_DefinedValues_dict["Rot_out_unit_step_fraction"] = 0.05 # Relative to Rot_out
_DefinedValues_dict["Rot_Height"] = 190
_DefinedValues_dict["Rot_out_ceiling_fraction"] = 0.05 # Relative to (Blade outlet - Blade inlet)
_DefinedValues_dict["Rot_Internal_tongeangle"] = 45
_DefinedValues_dict["Rot_Internal_and_exit_tongue_separation_adim"] = 3 # Relative to Blade thickness
_DefinedValues_dict["Rot_Internal_tongLength_adim"] = 0.6 # Relative to Blade inlet Radius
_DefinedValues_dict["InletRadius_adim"] = 1.35 # Relative to Blade inlet Radius
# only for 2D
#_DefinedValues_dict["TwoD_inlet_adim"] = 0.7 # This is inlet circle * Rot_in
# Mesh fineness definition
_DefinedValues_dict["Mesh_scale_factor"] = 1
_DefinedValues_dict["Mesh_VOLUTE_max_area"] = 50
_DefinedValues_dict["Mesh_VOLUTE_min_area"] = 10
_DefinedValues_dict["Mesh_ROTOR_max_area"] = 30
_DefinedValues_dict["Mesh_ROTOR_min_area"] = 2
    _DefinedValues_dict["Mesh_INLET_max_area"] = 15
    _DefinedValues_dict["Mesh_INLET_min_area"] = 10
for i in range(len(VariablesToModify)):
if VariablesToModify[i] in _DefinedValues_dict:
if type(VariablesToModify[i+1]) is not str:
_DefinedValues_dict[VariablesToModify[i]]=VariablesToModify[i+1]
        else: raise RuntimeError, "After variable %s there isn't a number" % VariablesToModify[i]
return _DefinedValues_dict
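# Editor's sketch (hypothetical call): VariablesToModify is a flat,
# alternating list of names and values; names not present in the defaults
# dictionary are silently ignored.
_example = UserValues(["Blade_gamma", 50, "Rot_out", 600])
assert _example["Blade_gamma"] == 50 and _example["Rot_out"] == 600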
def ComputeGeometry(ModifiedVariables):
# For modifying a variable in the template from the user defined values (function UserValues). First import the definition with "Var = ModifiedVariables[...]" and then export it with "_returnDicc[...] = nbr "
# Definitions
_returnDicc = dict()
Ri = ModifiedVariables["Blade_Ri"]
Ro = ModifiedVariables["Blade_Ro"]
gamma = ModifiedVariables["Blade_gamma"]
B1 = ModifiedVariables["Blade_B1"]
B2 = ModifiedVariables["Blade_B2"]
Blade_Cut_RotorSpanfract = ModifiedVariables["Blade_Cut_RotorSpanfract"]
Blade_Thickness =ModifiedVariables["Blade_Thickness"]
Blade_LeadingEdge_Cut_Height_frac =ModifiedVariables["Blade_LeadingEdge_Cut_Height_frac"]
Blade_Height = ModifiedVariables["Blade_Height"]
Blade_TrailingEdge_Cut_Height =ModifiedVariables["Blade_TrailingEdge_Cut_Height_fac"]*Blade_Height
Blade_Number =ModifiedVariables["Blade_Number"]
Rot_in = ModifiedVariables["Rot_in"]
Rot_out = ModifiedVariables["Rot_out"]
Rot_out_unit_step_fraction = ModifiedVariables["Rot_out_unit_step_fraction"]
Rot_Height = ModifiedVariables["Rot_Height"]
Rot_out_ceiling_fraction = ModifiedVariables["Rot_out_ceiling_fraction"]
Rot_Internal_tongeangle = ModifiedVariables["Rot_Internal_tongeangle"]
Rot_Internal_and_exit_tongue_separation = ModifiedVariables["Rot_Internal_and_exit_tongue_separation_adim"] * Blade_Thickness
Rot_Internal_tongLength = ModifiedVariables["Rot_Internal_tongLength_adim"] * Ri
InletRadius_adim = ModifiedVariables["InletRadius_adim"]
InletRadius = InletRadius_adim * Ri
Vol_beta = ModifiedVariables["Vol_beta"] # This is the angle at which the volute tongue starts to create the seashell respect to the tangent of the origin centered circle crossing that point.
Vol_omega = ModifiedVariables["Vol_omega"] #
Vol_gamma = ModifiedVariables["Vol_gamma"] # The user will introduce an angle like 40, but we work with 40+180
Vol_F = ModifiedVariables["Vol_F_adim"]*Rot_out # Distance between Rotor outlet and Vol_n0 divided by Rot_out
Vol_Rc = ModifiedVariables["Vol_Rc_adim"]*Rot_out # Radius of the volute tonge
Vol_W = ModifiedVariables["Vol_W_adim"] *Rot_out
Vol_n1x = ModifiedVariables["Vol_N1x_adim"] *Rot_out
Vol_n2y = ModifiedVariables["Vol_N2y_adim"] *Rot_out
Vol_n3x = ModifiedVariables["Vol_N3x_adim"] *Rot_out
Vol_n4y = ModifiedVariables["Vol_N4y_adim"]*Rot_out
Vol_Height = ModifiedVariables["Vol_Height"]
Vol_difussor_length = ModifiedVariables["Vol_difussor_length_adim"]*Rot_out
Mesh_VOLUTE_max_area = ModifiedVariables["Mesh_VOLUTE_max_area"]
Mesh_VOLUTE_min_area = ModifiedVariables["Mesh_VOLUTE_min_area"]
Mesh_ROTOR_max_area = ModifiedVariables["Mesh_ROTOR_max_area"]
Mesh_ROTOR_min_area = ModifiedVariables["Mesh_ROTOR_min_area"]
Mesh_INLET_max_area = ModifiedVariables["Mesh_INLET_max_area"]
Mesh_INLET_min_area = ModifiedVariables["Mesh_INLET_min_area"]
HugeValue = 20 * Rot_out
# CALCULATIONS OF BLADE
gamma = gamma/360*2*pi
# theta = theta/360*2*pi
B1 = B1/360*2*pi
B2 = B2/360*2*pi
"""
# Angle of attack
uy=sin(pi/2+gamma)
ux=cos(pi/2+gamma)
N1px = ux
N1py = uy
"""
N1x = 0
N1y = Ri
Ux = -sin(B1)
Uy = cos(B1)
Vx = -Uy
Vy = Ux
# Converting from gamma to omega
T = (-2*cos(gamma)*Ri+sqrt(4*cos(gamma)**2*Ri**2 + 4* (Ro**2-Ri**2))) / (2)
N2x = -sin(gamma)*T
N2y = Ri + cos(gamma) *T
omega = acos(N2y/Ro)
Theta = gamma + B2
Sx = - cos(pi/2-Theta)
Sy = sin(pi/2-Theta)
CLx = -Sy
CLy = Sx
# Solve location geometry constrains
Crod=(N1y*Sx-N2y*Sx-N1x*Sy+N2x*Sy)/(CLy*Sx-CLx*Sy+Sy*Vx-Sx*Vy)
H=-(CLy*N1x-CLx*N1y-CLy*N2x+CLx*N2y+N1y*Vx-N2y*Vx-N1x*Vy+N2x*Vy)/(CLy*Sx-CLx*Sy+Sy*Vx-Sx*Vy)
Cx=N1x+Vx*Crod
Cy=N1y+Vy*Crod
Lx=Cx-CLx*Crod
Ly=Cy-CLy*Crod
N3x=N2x-Sy*Blade_Thickness
N3y=N2y+Sx*Blade_Thickness
N4x=Lx+CLx*Blade_Thickness
N4y=Ly+CLy*Blade_Thickness
N5x=N1x+Vx*Blade_Thickness
N5y=N1y+Vy*Blade_Thickness
# For 3D, calculate Blade N1yup, and the box (face perpendicular to [N1px,N1py] is [N1py, -N1px]) that will cut the Blade.
Blade_N1zup = N1y + Blade_Height
Blade_N5zup = N5y + Blade_Height
_BladeChord = sqrt((N2y-N1y)**2+(N2x-N1x)**2)
Blade_CutNodeY = N1y + _BladeChord * Blade_Cut_RotorSpanfract * cos(gamma)
Blade_CutNodeX = N1x - _BladeChord * Blade_Cut_RotorSpanfract * sin(gamma)
# RETURN BLADES
_returnDicc["Blade_N1x"] = N1x
_returnDicc["Blade_N1y"] = N1y
_returnDicc["Blade_N2x"] = N2x
_returnDicc["Blade_N2y"] = N2y
|
Formlabs/djangocms-page-meta
|
djangocms_page_meta/admin.py
|
Python
|
bsd-3-clause
| 1,842
| 0.001086
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.extensions import PageExtensionAdmin, TitleExtensionAdmin
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .forms import TitleMetaAdminForm
from .models import PageMeta, TitleMeta
class PageMetaAdmin(PageExtensionAdmin):
raw_id_fields = ('og_author',)
fieldsets = (
(None, {'fields': ('image',)}),
(_('OpenGraph'), {
'fields': (
'og_type', ('og_author', 'og_author_url', 'og_author_fbid'),
('og_publisher', 'og_app_id')
),
'classes': ('collapse',)
}),
(_('Twitter Cards'), {
'fields': ('twitter_type', 'twitter_author'),
'classes': ('collapse',)
}),
(_('Google+ Snippets'), {
'fields': ('gplus_type', 'gplus_author'),
            'classes': ('collapse',)
}),
)
class Media:
css = {
'all': ('%sdjangocms_page_meta/css/%s' % (
settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
}
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
admin.site.register(PageMeta, PageMetaAdmin)
class TitleMetaAdmin(TitleExtensionAdmin):
form = TitleMetaAdminForm
class Media:
css = {
'all': ('%sdjangocms_page_meta/css/%s' % (
settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
}
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
admin.site.register(TitleMeta, TitleMetaAdmin)
|
beaker-project/beaker
|
Server/bkr/server/alembic/versions/5ab66e956c6b_osversion_osmajor_id_non_nullable.py
|
Python
|
gpl-2.0
| 743
| 0.008075
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Make osversion.osmajor_id non-NULLable
Revision ID: 5ab66e956c6b
Revises: 286ed23a5c1b
Create Date: 2017-12-20 15:54:38.825703
"""
# revision identifiers, used by Alembic.
revision = '5ab66e956c6b'
down_revision = '286ed23a5c1b'
from alembic import op
from sqlalchemy import Integer
def upgrade():
op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=False)
def downgrade():
op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=True)
|
Applied-GeoSolutions/geokit
|
geokit/tests/util.py
|
Python
|
gpl-2.0
| 1,699
| 0.000589
|
import logging
from django.db import connection
from django.contrib.auth.models import User
from tenant_schemas.utils import get_tenant_model
from tenant_schemas.test.cases import TenantTestCase
import pytest
logger = logging.getLogger('tests.util')
def make_tenant(schema='test', domain='tenant.test.com', username='tester'):
"""Returns a tuple: (a tenant schema, an administrative user for it).
`schema`: Schema name
`domain`: Domain for the tenant site
`username`: Username to be admin of the site
    Both user and tenant are created if they don't already exist, and the db
connection is set to that tenant. Logs to tests.util, level INFO.
Tenant creation is conditional because it requires significant time.
"""
TenantTestCase.sync_shared()
# create or get the user
user, created = User.objects.get_or_create(username=username)
if created:
logger.info("Created user '{}'.".format(user))
else:
logger.info("User '{}' exists, not creating it.".format(user))
# create or get the tenant
goc = get_tenant_model().objects.get_or_create
d = {'domain_url': domain, 'schema_name': schema, 'user': user}
tenant, created = goc(schema_name=schema, defaults=d)
if created:
msg = "No schema named '{}' detected; creating one"
logger.info(msg.format(schema))
tenant.create_schema(check_if_exists=True)
else:
logger.info("Tenant with schema name '{}' found".format(schema))
connection.set_tenant(tenant)
return (user, tenant)
@pytest.fixture
def set_tenant(request):
tenant = get_tenant_model().objects.get(schema_name='test')
connection.set_tenant(tenant)
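# Editor's sketch (hypothetical test, assuming django-tenant-schemas exposes
# the active tenant as connection.tenant): make_tenant is the one-time setup,
# after which set_tenant can switch the connection back to that schema.
def test_connection_is_tenant_aware_sketch(set_tenant):
    assert connection.tenant.schema_name == 'test'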
|
nickhills81/tado_heating_for_splunk
|
bin/zone0.py
|
Python
|
mit
| 2,916
| 0.03155
|
#!/usr/bin/python
import urllib, urllib2, json, sys
import splunk.entity as entity
# access the credentials in /servicesNS/nobody/app_name/admin/passwords
def getCredentials(sessionKey):
myapp = 'tado'
try:
# list all credentials
entities = entity.getEntities(['admin', 'passwords'], namespace=myapp, owner='nobody', sessionKey=sessionKey)
except Exception, e:
raise Exception("Could not get %s credentials from splunk. Error: %s" % (myapp, str(e)))
# return first set of credentials
for i, c in entities.items():
return c['username'], c['clear_password']
raise Exception("No credentials have been found")
def main():
# read session key sent from splunkd
sessionKey = sys.stdin.readline().strip()
if len(sessionKey) == 0:
sys.stderr.write("Did not receive a session key from splunkd. " +
"Please enable passAuth in inputs.conf for this " +
"script\n")
exit(2)
    username, password = getCredentials(sessionKey)
token = getAuth(username, password)
homeId = getHomeId(token)
doRequest(token,homeId)
def getAuth(email, password):
data = dict(client_id="tado-webapp",grant_type="password",password=password,scope="home.user", username=email )
    authUrl = "https://my.tado.com/oauth/token"
method = "POST"
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
data = urllib.urlencode(data)
request = urllib2.Request(authUrl, data=data)
request.get_method = lambda: method
try:
connection = opener.open(request)
except urllib2.HTTPError,e:
connection = e
if connection.code == 200:
responseData = str(connection.read())
jsonList = json.loads(responseData)
return jsonList['access_token']
else:
print "errorCode="+str(connection.code)
def getHomeId(token):
url = "https://my.tado.com/api/v2/me"
req = urllib2.Request(url)
req.add_header("Authorization","Bearer "+token)
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
try:
connection = opener.open(req)
except urllib2.HTTPError,e:
connection = e
if 200 <= connection.code <= 207:
responseData = str(connection.read())
jsonList = json.loads(responseData)
return jsonList['homes'][0]['id']
else:
print "errorCode="+str(connection.code)
def doRequest(token,homeId):
url = "https://my.tado.com/api/v2/homes/"+str(homeId)+"/zones/0/state"
req = urllib2.Request(url)
req.add_header("Authorization","Bearer "+token)
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
try:
connection = opener.open(req)
except urllib2.HTTPError,e:
connection = e
if 200 <= connection.code <= 207:
print connection.read()
else:
print "errorCode="+str(connection.code)
main()
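# Editor's sketch (hypothetical credentials, left commented out because
# main() above already runs on import): the same flow outside Splunk,
# bypassing getCredentials and the stdin session key.
#
# token = getAuth('user@example.com', 'secret')
# homeId = getHomeId(token)
# doRequest(token, homeId)   # prints the zone 0 state JSON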
|
cuongnv23/curl2share
|
curl2share/__init__.py
|
Python
|
mit
| 691
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from curl2share.config import log_file, log_level
loglevel = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARN,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET}
logger = logging.getLogger(__name__)
logger.setLevel(loglevel[log_level])
fh = logging.FileHandler(log_file)
fh.setLevel(loglevel[log_level])
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s \
- %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
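# Editor's sketch (commented out): modules in the package are expected to
# log through this shared logger, e.g.
#
# from curl2share import logger
# logger.info('upload started')   # written to log_file in the format above
# logger.debug('chunk received')  # emitted only when log_level is 'DEBUG'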
|
berkeleybop/behave_core
|
tests/environment.py
|
Python
|
bsd-3-clause
| 411
| 0.024331
|
####
#### Setup gross testing environment.
####
#import time
from behave_core.environment import start_browser, define_target, quit_browser
from behave import *
## Run this before anything else.
def before_all(context):
#pass
start_browser(context)
define_target(context)
#time.sleep(10)
## Do this after completing everything.
def after_all(context):
#pass
quit_browser(context)
|
ckprice/bedrock
|
bedrock/mozorg/helpers/social_widgets.py
|
Python
|
mpl-2.0
| 3,148
| 0.001271
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
from datetime import datetime
import urllib
import jingo
from lib.l10n_utils.dotlang import _
@jingo.register.function
def format_tweet_body(tweet):
"""
Return a tweet in an HTML format.
@param tweet: A Tweepy Status object retrieved with the Twitter REST API.
See the developer document for details:
https://dev.twitter.com/docs/platform-objects/tweets
"""
text = tweet.text
entities = tweet.entities
# Hashtags (#something)
for hashtags in entities['hashtags']:
hash = hashtags['text']
text = text.replace('#' + hash,
('<a href="https://twitter.com/search?q=%s&src=hash"'
' class="hash">#%s</a>' % ('%23' + urllib.quote(hash.encode('utf8')),
hash)))
# Mentions (@someone)
for user in entities['user_mentions']:
name = user['screen_name']
text = text.replace('@' + name,
('<a href="https://twitter.com/%s" class="mention">@%s</a>'
                             % (urllib.quote(name.encode('utf8')), name)))
# URLs
for url in entities['urls']:
text = text.replace(url['url'],
('<a href="%s" title="%s">%s</a>'
% (url['url'], url['expanded_url'], url['display_url'])))
# Media
if entities.get('media'):
for medium in entities['media']:
text = text.replace(medium['url'],
                                ('<a href="%s" title="%s" class="media">%s</a>'
% (medium['url'], medium['expanded_url'],
medium['display_url'])))
return text
@jingo.register.function
def format_tweet_timestamp(tweet):
"""
Return an HTML time element filled with a tweet timestamp.
@param tweet: A Tweepy Status object retrieved with the Twitter REST API.
For a tweet posted within the last 24 hours, the timestamp label should be
    a relative format like "20s", "3m" or "5h", otherwise it will be a simple
date like "6 Jun". See the Display Requirements for details:
https://dev.twitter.com/terms/display-requirements
"""
now = datetime.utcnow()
created = tweet.created_at # A datetime object
diff = now - created # A timedelta Object
if diff.days == 0:
if diff.seconds < 60:
label = _('%ds') % diff.seconds
elif diff.seconds < 60 * 60:
label = _('%dm') % round(diff.seconds / 60)
else:
label = _('%dh') % round(diff.seconds / 60 / 60)
else:
label = created.strftime("%-d %b")
full = created.strftime("%Y-%m-%d %H:%M")
return ('<time datetime="%s" title="%s" itemprop="dateCreated">%s '
'<span class="full">(%s)</span></time>'
% (created.isoformat(), full, label, full))
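# Editor's sketch: the relative-label rules above, checked in isolation
# with fabricated timedeltas (no Tweepy object needed).
from datetime import timedelta
def _label_for(diff):
    # Same branching as format_tweet_timestamp, minus the HTML wrapper.
    if diff.days == 0:
        if diff.seconds < 60:
            return '%ds' % diff.seconds
        elif diff.seconds < 60 * 60:
            return '%dm' % round(diff.seconds / 60)
        return '%dh' % round(diff.seconds / 60 / 60)
    return 'date'
assert _label_for(timedelta(seconds=20)) == '20s'
assert _label_for(timedelta(seconds=3000)) == '50m'
assert _label_for(timedelta(hours=5)) == '5h'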
|
TheProjecter/kassie
|
src/bases/importeur.py
|
Python
|
bsd-3-clause
| 10,203
| 0.00337
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit un objet 'importeur', chargé de contrôler le mécanisme
d'importation, initialisation, configuration, déroulement et arrêt
des modules primaires et secondaires.
On parcourt les sous-dossiers définis dans les variables :
- REP_PRIMAIRES : répertoire des modules primaires
- REP_SECONDAIRES : répertoire des modules secondaires
Il est possible de changer ces variables mais dans ce cas, une réorganisation
du projet s'impose.
Dans chaque module, on s'occupera de charger l'objet le représentant.
Par exemple, le module anaconf se définit comme suit :
* un package anaconf contenu dans REP_PRIMAIRES
* un fichier __init__.py
* une classe Anaconf
On créée un objet chargé de représenter le module. C'est cet objet qui
possède les méthodes génériques chargées d'initialiser, configurer, lancer
et arrêter un module. Les autres fichiers du module sont une boîte noir
inconnu pour l'importeur.
"""
import os
import sys
from abstraits.module import *
REP_PRIMAIRES = "primaires"
REP_SECONDAIRES = "secondaires"
class Importeur:
"""Classe chargée de créer un objet Importeur. Il contient sous la forme
d'attributs les modules primaires et secondaires chargés. Les modules
primaires et secondaires ne sont pas distingués.
On ne doit créer qu'un seul objet Importeur.
"""
nb_importeurs = 0
def __init__(self):
"""Constructeur de l'importeur. Il vérifie surtout
qu'un seul est créé.
Il prend en paramètre le parser de commande qu'il doit transmettre
à chaque module.
"""
Importeur.nb_importeurs += 1
if Importeur.nb_importeurs > 1:
raise RuntimeError("{0} importeurs ont été créés".format( \
Importeur.nb_importeurs))
def __str__(self):
"""Retourne sous ue forme un peu plus lisible les modules importés."""
ret = []
for nom_module in self.__dict__.keys():
ret.append("{0}: {1}".format(nom_module, getattr(self, \
nom_module)))
ret.sort()
return "\n".join(ret)
def tout_charger(self):
"""Méthode appelée pour charger les modules primaires et secondaires.
Par défaut, on importe tout mais on ne créée rien.
"""
        # Start by walking through the primary modules
for nom_package in os.listdir(os.getcwd() + "/" + REP_PRIMAIRES):
if not nom_package.startswith("__"):
package = __import__(REP_PRIMAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
        # Do the same with the secondary modules
for nom_package in os.listdir(os.getcwd() + "/" + REP_SECONDAIRES):
if not nom_package.startswith("__"):
package = __import__(REP_SECONDAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
def tout_instancier(self, parser_cmd):
"""Cette méthode permet d'instancier les modules chargés auparavant.
On se base sur le type du module (classe ou objet)
pour le créer ou non.
En effet, cette méthode doit pouvoir être appelée quand certains
modules sont instanciés, et d'autres non.
NOTE IMPORTANTE: on passe au constructeur de chaque module
self, c'est-à-dire l'importeur. Les modules en ont en effet
besoin pour interragir entre eux.
"""
for nom_module, module in self.__dict__.items():
            if type(module) is type: # it must be instantiated
setattr(self, nom_module, module(self, parser_cmd))
def tout_configurer(self):
"""Méthode permettant de configurer tous les modules qui en ont besoin.
Les modules qui doivent être configuré sont ceux instanciés.
Attention: les modules non encore instanciés sont à l'état de classe.
Tous les modules doivent donc être instanciés au minimum avant
que cette méthode ne soit appelée. Autrement dit, la méthode
tout_instancier doit être appelée auparavant.
"""
for module in self.__dict__.values():
if module.statut == INSTANCIE:
module.config()
def tout_initialiser(self):
"""Méthode permettant d'initialiser tous les modules qui en ont besoin.
Les modules à initialiser sont ceux configuré.
"""
for module in self.__dict__.values():
if module.statut == CONFIGURE:
module.init()
def tout_detruire(self):
"""Méthode permettant de détruire tous les modules qui en ont besoin.
Les modules à détruire sont ceux initialisés.
"""
for module in self.__dict__.values():
if module.statut == INITIALISE:
module.detruire()
def boucle(self):
"""Méthode appelée à chaque tour de boucle synchro.
Elle doit faire appel à la méthode boucle de chaque module primaire
ou secondaire.
"""
for module in self.__dict__.values():
module.boucle()
def module_est_charge(self, nom):
"""Retourne True si le module est déjà chargé, False sinon.
On
|
n'a pas besoin du type du module, les modules primaires
et secondaires étant stockés de la même façon.
Attention: un module peut être chargé sans être instancié,
configuré ou initialisé.
"""
return nom in self.__dict__.keys()
def charger_module(self, parser_cmd, m_type, nom):
"""Méthode permettant de charger un module en fonction de son type et
de son nom.
Si le module est déjà chargé, on ne fait rien.
Note: à la différence de tout_charger, cette méthode créée directement
l'objet gérant le module.
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
.format(type))
if self.module_est_charge(nom):
print("Le module {0} est déjà chargé.".format(nom))
else:
package = __import__(rep + "." + nom)
module = getattr(getattr(package, nom), \
nom.capitalize())
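# Editor's sketch (hypothetical boot sequence, pieced together from the
# docstrings above; parser_cmd stands for the host program's command parser):
#
# importeur = Importeur()
# importeur.tout_charger()               # import every module package
# importeur.tout_instancier(parser_cmd)  # turn module classes into objects
# importeur.tout_configurer()
# importeur.tout_initialiser()
# while True:
#     importeur.boucle()                 # one turn of the synchro loop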
|
bwasti/caffe2
|
caffe2/python/operator_test/cross_entropy_ops_test.py
|
Python
|
apache-2.0
| 3,643
| 0.000274
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def sigmoid_cross_entropy_with_logits(x, z):
return np.maximum(x, 0) - x * z + np.log(1 + np.exp(-np.abs(x)))
def sigmoid_cross_entropy_with_logits_grad(x, z):
return z - sigmoid(x)
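# Editor's note: this is the standard numerically stable form of binary
# cross-entropy on logits. For z in {0, 1},
#     max(x, 0) - x * z + log(1 + exp(-|x|))
# equals -z * log(sigmoid(x)) - (1 - z) * log(1 - sigmoid(x)) while avoiding
# overflow for large |x|. A standalone check away from saturation:
_x = np.array([-2.0, 0.0, 2.0])
_z = np.array([0.0, 1.0, 1.0])
_naive = -_z * np.log(sigmoid(_x)) - (1 - _z) * np.log(1 - sigmoid(_x))
assert np.allclose(_naive, sigmoid_cross_entropy_with_logits(_x, _z))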
class TestCrossEntropyOps(hu.HypothesisTestCase):
@given(
inputs=st.lists(
elements=st.integers(min_value=1, max_value=5),
min_size=1,
max_size=2,
average_size=2,
).flatmap(
lambda shape: st.tuples(
hu.arrays(
dims=shape,
elements=st.one_of(
st.floats(min_value=-1.0, max_value=-0.1),
st.floats(min_value=0.1, max_value=1.0),
)),
hu.arrays(
dims=shape,
elements=st.sampled_from([0.0, 1.0]),
),
)
),
**hu.gcs
)
def test_sigmoid_cross_entropy_with_logits(self, inputs, gc, dc):
logits, targets = inputs
def sigmoid_xentr_logit_ref(logits, targets):
s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
return (m, )
def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
fwd_logits, fwd_targets = fwd_inputs
inner_size = fwd_logits.shape[-1]
m = fwd_targets - sigmoid(fwd_logits)
g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
return (g_in, None)
op = core.CreateOperator(
'SigmoidCrossEntropyWithLogits',
['logits', 'targets'],
['xentropy'])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[logits, targets],
reference=sigmoid_xentr_logit_ref,
output_to_grad='xentropy',
grad_reference=sigmoid_xentr_logit_grad_ref)
@given(n=st.integers(2, 10),
b=st.integers(1, 5),
**hu.gcs_cpu_only)
def test_soft_label_cross_entropy(self, n, b, gc, dc):
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(b, n).astype(np.float32)
X = X + 1e-2
for i in range(b):
X[i] = X[i] / np.sum(X[i])
# Initialize label
label = np.random.rand(b, n).astype(np.float32)
for i in range(b):
label[i] = label[i] / np.sum(label[i])
# Reference implementation of cross entropy with soft labels
def soft_label_xentr_ref(X, label):
xent = [np.sum((-label[j][i] * np.log(max(X[j][i], 1e-20))
for i in range(len(X[0])))) for j in range(b)]
return (xent,)
op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])
# TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
# tests to this unit test.
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=soft_label_xentr_ref,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [0], stepsize=1e-4, threshold=1e-2)
if __name__ == "__main__":
import unittest
unittest.main()
|
jmosky12/huxley
|
huxley/api/validators.py
|
Python
|
bsd-3-clause
| 1,610
| 0.001882
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import re
from rest_framework.serializers import ValidationError
def name(value):
    '''Matches names of people, countries and other things.'''
if re.match(r'^[A-Za-z\s\.\-\'àèéìòóôù]+$', value) is None:
raise ValidationError('This field contains invalid characters.')
def address(value):
'''Matches street addresses.'''
if re.match(r'^[\w\s\.\-\'àèéìòóôù]+$', value) is None:
raise ValidationError('This field contains invalid characters.')
def numeric(value):
'''Matches numbers and spaces.'''
if re.match(r'^[\d\s]+$', value) is None:
raise ValidationError('This field can only contain numbers and spaces.')
def email(value):
'''Loosely matches email addresses.'''
if re.match(r'^[\w_.+-]+@[\w-]+\.[\w\-.]+$', value) is None:
        raise ValidationError('This is an invalid email address.')
def phone_international(value):
'''Loosely matches phone numbers.'''
    if re.match(r'^[\d\-x\s\+\(\)]+$', value) is None:
raise ValidationError('This is an invalid phone number.')
def phone_domestic(value):
'''Matches domestic phone numbers.'''
if re.match(r'^\(?(\d{3})\)?\s(\d{3})-(\d{4})(\sx\d{1,5})?$', value) is None:
raise ValidationError('This is an invalid phone number.')
def nonempty(value):
'''Requires that a field be non-empty.'''
if not value:
raise ValidationError('This field is required.')
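# Editor's sketch (commented out, since failures raise): these validators
# follow the DRF convention of raising rather than returning.
#
# name('Berkeley Model United Nations')  # passes silently
# numeric('12-34')                       # raises ValidationError (hyphen)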
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tornado/escape.py
|
Python
|
bsd-2-clause
| 14,393
| 0.000208
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function
import json
import re
from tornado.util import PY3, unicode_type, basestring_type
if PY3:
from urllib.parse import parse_qs as _parse_qs
import html.entities as htmlentitydefs
import urllib.parse as urllib_parse
unichr = chr
else:
from urlparse import parse_qs as _parse_qs
import htmlentitydefs
import urllib as urllib_parse
try:
import typing # noqa
except ImportError:
pass
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
                      '\'': '&#39;'}
def xhtml_escape(value):
"""Escapes a string so it is valid within HTML or XML.
Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
When used in attribute values the escaped strings must be enclosed
in quotes.
.. versionchanged:: 3.2
Added the single quote to the list of escaped characters.
"""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(value).replace("</", "<\\/")
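# Illustration (addition, not part of tornado): why the "</" replacement
# matters when the result is dropped into an HTML <script> block, e.g.
#
#     json_encode("</script>")   # -> '"<\\/script>"' -- the emitted text is
#     # "<\/script>", so a literal </script> never appears in the output.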
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value, plus=True):
"""Returns a URL-encoded version of the given value.
If ``plus`` is true (the default), spaces will be represented
as "+" instead of "%20". This is appropriate for query strings
but not for the path component of a URL. Note that this default
is the reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
quote = urllib_parse.quote_plus if plus else urllib_parse.quote
return quote(utf8(value))
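# Illustration (addition, not part of tornado): the `plus` flag in practice.
#
#     url_escape("a b&c")               # -> 'a+b%26c'  (query-string style)
#     url_escape("a b&c", plus=False)   # -> 'a%20b%26c' (path-segment style)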
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if not PY3:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
if encoding is None:
return unquote(utf8(value))
else:
return unicode_type(unquote(utf8(value)), encoding)
parse_qs_bytes = _parse_qs
else:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace('+', ' ')
return urllib_parse.unquote_to_bytes(value)
else:
unquote = (urllib_parse.unquote_plus if plus
else urllib_parse.unquote)
return unquote(to_basestring(value), encoding=encoding)
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
        # Latin1 is the universal donor of character encodings.
result = _parse_qs(qs, keep_blank_values, strict_parsing,
encoding='latin1', errors='strict')
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode('latin1') for i in v]
return encoded
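# Illustration (addition, not part of tornado): the latin1 round-trip keeps
# arbitrary percent-encoded bytes intact, e.g.
#
#     parse_qs_bytes('a=%e9')   # -> {'a': [b'\xe9']}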
_UTF8_TYPES = (bytes, type(None))
def utf8(value):
    # type: (typing.Union[bytes, unicode_type, None]) -> typing.Union[bytes, None]
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
    native_str = to_unicode
else:
    native_str = utf8
|
awsdocs/aws-doc-sdk-examples
|
python/example_code/comprehend/test/test_comprehend_demo_resources.py
|
Python
|
apache-2.0
| 4,823
| 0.000622
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for comprehend_demo_resources.py
"""
from io import BytesIO
import json
import tarfile
import time
from unittest.mock import MagicMock
import uuid
import boto3
from botocore.exceptions import ClientError
import pytest
from comprehend_demo_resources import ComprehendDemoResources
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_create_bucket')])
def test_setup(make_stubber, stub_runner, monkeypatch, error_code, stop_on_method):
s3_resource = boto3.resource('s3')
s3_stubber = make_stubber(s3_resource.meta.client)
iam_resource = boto3.resource('iam')
iam_stubber = make_stubber(iam_resource.meta.client)
demo_resources = ComprehendDemoResources(s3_resource, iam_resource)
demo_name = 'test-name'
bucket_name = 'doc-example-bucket-test-uuid'
role_name = f'{demo_name}-role'
policy_name = f'{demo_name}-policy'
policy_arn = f'arn:aws:iam:REGION:123456789012:policy/{policy_name}'
monkeypatch.setattr(uuid, 'uuid4', lambda: 'test-uuid')
monkeypatch.setattr(time, 'sleep', lambda x: None)
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
s3_stubber.stub_create_bucket, bucket_name,
s3_resource.meta.client.meta.region_name)
runner.add(iam_stubber.stub_create_role, role_name)
runner.add(iam_stubber.stub_get_role, role_name)
runner.add(iam_stubber.stub_create_policy, policy_name, policy_arn)
        runner.add(iam_stubber.stub_get_policy, policy_arn)
        runner.add(iam_stubber.stub_attach_role_policy, role_name, policy_arn)
if error_code is None:
demo_resources.setup(demo_name)
assert demo_resources.bucket.name == bucket_name
assert demo_resources.data_access_role.name == role_name
else:
with pytest.raises(ClientError) as exc_info:
demo_resources.setup(demo_name)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,file_name,file_contents,output', [
(None, 'name1.jsonl',
[json.dumps('content1'), json.dumps('content2')],
['content1', 'content2']),
(None, 'name1.csv',
['field1,field2', 'value1-1,value1-2', 'value2-1,value2-2'],
[{'field1': 'value1-1', 'field2': 'value1-2'},
{'field1': 'value2-1', 'field2': 'value2-2'}]),
('TestException', 'name1.jsonl', [], [])])
def test_extract_job_output(monkeypatch, error_code, file_name, file_contents, output):
demo_resources = ComprehendDemoResources(None, None)
demo_resources.bucket = MagicMock()
demo_resources.bucket.name = 'test-bucket'
job = {'OutputDataConfig': {
'S3Uri': f's3://{demo_resources.bucket.name}/test-key'}}
def mock_output(output_key, output_bytes):
assert output_key == 'test-key'
output_bytes.write(b'test-content')
demo_resources.bucket.download_fileobj = mock_output
if error_code is not None:
demo_resources.bucket.download_fileobj.side_effect = ClientError(
{'Error': {'Code': error_code}}, 'test-op')
def mock_extract_file(name):
return BytesIO('\n'.join(file_contents).encode())
monkeypatch.setattr(
tarfile, 'open', lambda fileobj, mode: MagicMock(
extractfile=mock_extract_file, getnames=lambda: [file_name]))
got_output = demo_resources.extract_job_output(job)
if error_code is None:
assert got_output[file_name]['data'] == output
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_cleanup(make_stubber, monkeypatch, error_code):
s3_resource = boto3.resource('s3')
s3_stubber = make_stubber(s3_resource.meta.client)
iam_resource = boto3.resource('iam')
iam_stubber = make_stubber(iam_resource.meta.client)
demo_resources = ComprehendDemoResources(s3_resource, iam_resource)
bucket_name = 'doc-example-bucket-test-uuid'
role_name = 'comprehend-classifier-demo-role'
policy_name = 'comprehend-classifier-demo-policy'
policy_arn = 'arn:aws:iam:REGION:123456789012:policy/test-policy'
demo_resources.data_access_role = iam_resource.Role(role_name)
demo_resources.bucket = s3_resource.Bucket(bucket_name)
iam_stubber.stub_list_attached_role_policies(role_name, {policy_name: policy_arn})
iam_stubber.stub_detach_role_policy(role_name, policy_arn)
iam_stubber.stub_delete_policy(policy_arn)
iam_stubber.stub_delete_role(role_name, error_code=error_code)
s3_stubber.stub_list_objects(bucket_name, ['key1'])
s3_stubber.stub_delete_objects(bucket_name, ['key1'])
s3_stubber.stub_delete_bucket(bucket_name, error_code=error_code)
demo_resources.cleanup()
|
generica/euler
|
18.py
|
Python
|
gpl-2.0
| 782
| 0.001279
|
#!/usr/bin/python
y = '''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''
i = 0
a = {}
for line in y.split('\n'):
l = line.split()
a[i] = l
i += 1
x_pos = 0
tally = 0
for y_pos in range(0, 14):
tally += int(a[y_pos][x_pos])
print int(a[y_pos][x_pos])
next_l = int(a[y_pos+1][x_pos])
next_r = int(a[y_pos+1][x_pos+1])
if next_l < next_r:
x_pos += 1
print int(a[y_pos+1][x_pos])
tally += int(a[y_pos+1][x_pos])
print tally
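# Note (addition, not the original author's code): the greedy walk above picks
# the locally larger child at each step, which is not guaranteed to maximize
# the total for Project Euler 18. A bottom-up dynamic-programming pass over
# the same triangle finds the true maximum path sum:
rows = [[int(n) for n in line.split()] for line in y.split('\n')]
for row in range(len(rows) - 2, -1, -1):   # fold each row into the one above
    for col in range(len(rows[row])):
        rows[row][col] += max(rows[row + 1][col], rows[row + 1][col + 1])
print(rows[0][0])                          # maximum top-to-bottom path sum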
|
tseaver/google-cloud-python
|
pubsub/google/cloud/pubsub_v1/_gapic.py
|
Python
|
apache-2.0
| 2,637
| 0.000758
|
# Copyright 2019, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import functools
def add_methods(source_class, blacklist=()):
"""Add wrapped versions of the `api` member's methods to the class.
Any methods passed in `blacklist` are not added.
Additionally, any methods explicitly defined on the wrapped class are
not added.
"""
def wrap(wrapped_fx, lookup_fx):
"""Wrap a GAPIC method; preserve its name and docstring."""
# If this is a static or class method, then we do *not*
# send self as the first argument.
#
# For instance methods, we need to send self.api rather
# than self, since that is where the actual methods were declared.
if isinstance(lookup_fx, (classmethod, staticmethod)):
fx = lambda *a, **kw: wrapped_fx(*a, **kw) # noqa
return staticmethod(functools.wraps(wrapped_fx)(fx))
else:
fx = lambda self, *a, **kw: wrapped_fx(self.api, *a, **kw) # noqa
            return functools.wraps(wrapped_fx)(fx)
def actual_decorator(cls):
        # Reflectively iterate over most of the methods on the source class
# (the GAPIC) and make wrapped versions available on this client.
for name in dir(source_class):
# Ignore all private and magic methods.
if name.startswith("_"):
continue
# Ignore anything on our blacklist.
if name in blacklist:
continue
# Retrieve the attribute, and ignore it if it is not callable.
attr = getattr(source_class, name)
if not callable(attr):
continue
# Add a wrapper method to this object.
lookup_fx = source_class.__dict__[name]
fx = wrap(attr, lookup_fx)
setattr(cls, name, fx)
# Return the augmented class.
return cls
# Simply return the actual decorator; this is returned from this method
# and actually used to decorate the class.
return actual_decorator
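# Illustration (addition, not part of this module): a hypothetical GAPIC-style
# class and a thin client decorated with add_methods. Instance methods are
# re-homed so `client.create_topic(...)` dispatches to `client.api`.
#
#     class _FakeGapic(object):
#         def create_topic(self, name):
#             return 'created %s' % name
#
#     @add_methods(_FakeGapic, blacklist=('delete_topic',))
#     class Client(object):
#         def __init__(self):
#             self.api = _FakeGapic()
#
#     Client().create_topic('t')   # -> 'created t'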
|
marcelor/django_info
|
django_info/views.py
|
Python
|
mit
| 1,879
| 0.001064
|
# -*- coding: utf-8 -*-
import os
import sys
from django.contrib.auth.decorators import login_required
from django import get_version as get_django_version
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from django_info import __version__
def django_info_version():
return __version__
def get_platform():
return ' '.join(os.uname())
def get_python_version():
    return sys.version
def get_database_engine():
return settings.DATABASES['default']['ENGINE'].split('.')[-1]
def get_installed_apps():
return settings.INSTALLED_APPS
def get_debug_mode():
return settings.DEBUG
def get_template_debug_mode():
return settings.TEMPLATE_DEBUG
def is_dev_server(request):
"""
http://stackoverflow.com/a/1291858/1503
"""
server_software = request.META.get('SERVER_SOFTWARE', None)
return server_software is not None and ('WSGIServer' in server_software or 'Python' in server_software)
def get_path(request):
path = request.META.get('PATH', None)
if path:
return [p for p in path.split(":")]
return None
@login_required(login_url="/admin/")
def info(request):
context = {
'django_info_version': django_info_version(),
'django_version': get_django_version(),
'database_engine': get_database_engine(),
'python_version': get_python_version(),
'platform': get_platform(),
# settings
'settings_debug_mode': get_debug_mode(),
'settings_template_debug_mode': get_template_debug_mode(),
'settings_installed_apps': get_installed_apps(),
'is_dev_server': is_dev_server(request),
'paths': get_path(request),
}
return render_to_response('django_info/info.html', context,
context_instance=RequestContext(request))
|
linkian209/Destiny_RNN
|
create.py
|
Python
|
mit
| 3,091
| 0.019088
|
# encoding=utf8
import utils
import pickle
import zipfile
import os
from tqdm import tqdm
from pprint import pprint
# Globals
#[Special, Heavy, Primary]
bucketHashes = [2465295065,953998645,1498876634]
# Load in Manifest
print 'Loading Manifest...'
with open('manifest.pickle','rb') as f:
data = pickle.loads(f.read())
# Convert strings to Unicode
print 'Converting Manifest...'
data = utils.convert(data)
# Get the Items, Grid, Stats, and Perks tables from the Manifest
items = data['DestinyInventoryItemDefinition']
grids = data['DestinyTalentGridDefinition']
stats = data['DestinyStatDefinition']
perks = data['DestinySandboxPerkDefinition']
# Get all named items from the database
all_items = {}
print 'Creating items....\n'
for i in tqdm(items, desc='Item Gathering'):
# Get Weapons
if items[i]['bucketTypeHash'] in bucketHashes:
if 'itemName' in items[i].viewkeys():
all_items[items[i]['itemName']] = {'grid':items[i]['talentGridHash'],'hash': i}
# Loop through items and create training data
cur_arch = 0
num_guns = 0
hash_list = []
bad_hashes = []
print '\nLooping through Guns to create training data...\n'
for item in tqdm(all_items, desc='Guns'):
gun = all_items[item]
cur_archive = 'archive_%d.zip' % cur_arch
# First check to see if this archive exists, if not make it
if not os.path.exists(cur_archive):
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.close()
# Make sure this archive can handle another file
    if os.stat(cur_archive).st_size > 3900000000:  # archive is full; roll over
# Create a contents file for the archive
with open('contents.txt','w') as f:
            for i in hash_list:
                f.write('%d.txt\n' % i)
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.write('contents.txt')
zf.close()
os.remove('contents.txt')
cur_arch += 1
hash_list = []
# Open zipfile
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
# Create grid for gun
# If it is no good, just continue onto the next
try:
grid = utils.makeGrid(grids[gun['grid']])
    except Exception:  # malformed grid data; skip this gun
bad_hashes.append(gun['hash'])
continue
# Create the training data!
    utils.makeTrainingDataJSON(items, stats, perks, grid, gun['hash'])
# Add this to the zipfile
zf.write('%d.txt' % gun['hash'])
zf.close()
# Remove the file and add the hash to the list
os.remove('%d.txt' % gun['hash'])
hash_list.append(gun['hash'])
num_guns += 1
# Done! Add contents to the last archive
with open('contents.txt','w') as f:
for i in hash_list:
f.write('%d.txt\n' % i)
zf = zipfile.ZipFile('archive_%d.zip' % cur_arch, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.write('contents.txt')
zf.close()
os.remove('contents.txt')
# Show completion and print end stats!
print '\nComplete!'
print 'Created training data for %d guns across %d %s!' % (num_guns, cur_arch+1, 'archives' if cur_arch > 0 else 'archive')
print 'Skipped %d hashes!' % len(bad_hashes)
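# Note (addition, not part of the original script): the pattern above -- cap
# each zip near a byte limit, write a contents.txt manifest, then roll to a
# fresh archive -- sketched as a reusable helper. `flush_manifest` is a
# hypothetical name for the contents.txt step.
#
#     def write_rotated(paths, prefix='archive', limit=3900000000):
#         arch, manifest = 0, []
#         for path in paths:
#             name = '%s_%d.zip' % (prefix, arch)
#             if os.path.exists(name) and os.stat(name).st_size > limit:
#                 flush_manifest(name, manifest)
#                 arch, manifest = arch + 1, []
#                 name = '%s_%d.zip' % (prefix, arch)
#             zf = zipfile.ZipFile(name, 'a', zipfile.ZIP_DEFLATED,
#                                  allowZip64=True)
#             zf.write(path)
#             zf.close()
#             manifest.append(path)
#         flush_manifest('%s_%d.zip' % (prefix, arch), manifest)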
|