repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)

brouberol/pynlg | pynlg/lexicon/lang.py | Python | mit | 118 | 0 |
# encoding: utf-8
"""Definition of supported languages"""
ENGLISH = 'english'
FRENCH = 'french'
DEFAULT = 'english'
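
The constants above are simple string tags. A hypothetical usage sketch (the `greet` helper and its greeting table are illustrative, not part of pynlg), showing dispatch on a language constant with a fallback to DEFAULT:

```python
from pynlg.lexicon.lang import ENGLISH, FRENCH, DEFAULT

# Illustrative lookup table keyed by the language constants.
GREETINGS = {ENGLISH: 'hello', FRENCH: 'bonjour'}

def greet(language=DEFAULT):
    # Unknown languages fall back to the default language's entry.
    return GREETINGS.get(language, GREETINGS[DEFAULT])

print(greet(FRENCH))     # bonjour
print(greet('klingon'))  # hello (falls back to DEFAULT)
```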

olivetree123/memory_profiler | test/test_import.py | Python | bsd-3-clause | 179 | 0.005587 |
from memory_profiler import profile
@profile
def my_func():
a = [1] * (10 ** 6)
b = [2] * (2 * 10 ** 7)
del b
return a
if __name__ == '__main__':
my_func()
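
Running a script whose functions carry the @profile decorator under `python -m memory_profiler` prints a line-by-line memory report. As a lighter-weight alternative, memory_profiler's memory_usage() helper samples a callable's memory while it runs; a minimal sketch (the figures will vary by platform):

```python
from memory_profiler import memory_usage

def my_func():
    a = [1] * (10 ** 6)
    b = [2] * (2 * 10 ** 7)
    del b
    return a

# memory_usage accepts a (callable, args, kwargs) tuple and returns a
# list of MiB samples taken every `interval` seconds.
samples = memory_usage((my_func, (), {}), interval=0.1)
print('peak memory: %.1f MiB' % max(samples))
```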

Eric89GXL/vispy | vispy/app/backends/_osmesa.py | Python | bsd-3-clause | 7,645 | 0 |
# -*- coding: utf-8 -*-
# vispy: testskip
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
OSMesa backend for offscreen rendering on Linux/Unix
"""
from __future__ import division
from ...util.ptime import time
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...gloo import gl
from time import sleep
try:
from ...ext import osmesa
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
else:
available, testable, why_not, which = True, True, None, 'OSMesa'
# -------------------------------------------------------------- capability ---
capability = dict(
# if True they mean:
title=True, # can set title on the fly
size=True, # can set size on the fly
position=False, # can set position on the fly
show=True, # can show/hide window XXX ?
vsync=False, # can set window to sync to blank
resizable=False, # can toggle resizability (e.g., no user resizing)
decorate=True, # can toggle decorations
fullscreen=False, # fullscreen window support
context=True, # can share contexts between windows
multi_window=True, # can use multiple windows at once
scroll=False, # scroll-wheel events are supported
parent=False, # can pass native widget backend parent
always_on_top=False, # can be made always-on-top
)
_VP_OSMESA_ALL_WINDOWS = []
def _get_osmesa_windows():
return [win for win in _VP_OSMESA_ALL_WINDOWS
if isinstance(win, CanvasBackend)]
# ------------------------------------------------------------- application ---
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._timers = list()
def _add_timer(self, timer):
if timer not in self._timers:
self._timers.append(timer)
def _vispy_get_backend_name(self):
return 'osmesa'
def _vispy_process_events(self):
for timer in self._timers:
timer._tick()
wins = _get_osmesa_windows()
for win in wins:
if win._needs_draw:
win._needs_draw = False
win._on_draw()
def _vispy_run(self):
wins = _get_osmesa_windows()
while not all(w.closed for w in wins):
self._vispy_process_events()
self._vispy_quit()
def _vispy_quit(self):
wins = _get_osmesa_windows()
for win in wins:
win._vispy_close()
for timer in self._timers:
timer._vispy_stop()
self._timers = []
def _vispy_get_native_app(self):
return osmesa
class OSMesaContext(object):
"""
A wrapper around an OSMesa context that destroy the context when
garbage collected
"""
def __init__(self):
self.context = osmesa.OSMesaCreateContext()
def make_current(self, pixels, width, height):
return osmesa.OSMesaMakeCurrent(self.context, pixels, width, height)
def __del__(self):
osmesa.OSMesaDestroyContext(self.context)
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
"""OSMesa backend for Canvas"""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
# We use _process_backend_kwargs() to "serialize" the kwargs
# and to check whether they match this backend's capability
p = self._process_backend_kwargs(kwargs)
# Deal with config
# TODO: We do not support setting config
# ... use context.config
# Deal with context
p.context.shared.add_ref('osmesa', self)
if p.context.shared.ref is self:
self._native_context = OSMesaContext()
else:
self._native_context = p.context.shared.ref._native_context
self._closed = False
self._pixels = None
self._vispy_set_size(*p.size)
_VP_OSMESA_ALL_WINDOWS.append(self)
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
def _vispy_set_current(self):
if self._native_context is None:
raise RuntimeError('Native context is None')
if self._pixels is None:
raise RuntimeError('Pixel buffer has already been deleted')
ok = self._native_context.make_current(self._pixels, self._size[0],
self._size[1])
if not ok:
raise RuntimeError('Failed attaching OSMesa rendering buffer')
def _vispy_swap_buffers(self):
if self._pixels is None:
raise RuntimeError('No pixel buffer')
gl.glFinish()
def _vispy_set_title(self, title):
pass
def _vispy_set_size(self, w, h):
self._pixels = osmesa.allocate_pixels_buffer(w, h)
self._size = (w, h)
self._vispy_canvas.events.resize(size=(w, h))
self._vispy_set_current()
self._vispy_update()
def _vispy_set_position(self, x, y):
pass
    def _vispy_set_visible(self, visible):
if visible:
self._vispy_set_current()
self._vispy_update()
def _vispy_set_fullscreen(self, fullscreen):
pass
def _vispy_update(self):
# This is checked by osmesa ApplicationBackend in process_events
self._needs_draw = True
def _vispy_close(self):
if self.closed:
return
# We do not set self._native_context = None here because this causes
# trouble in case a canvas is closed multiple times (as in
# app.test_run()). The problem occurs in gloo's glir._gl_initialize
# when it tries to call glGetString(GL_VERSION).
# But OSMesa requires a context to be attached when calling
# glGetString otherwise it returns an empty string, which gloo doesn't
# like
self._closed = True
return
def _vispy_warmup(self):
etime = time() + 0.1
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_get_size(self):
if self._pixels is None:
return
return self._size
@property
def closed(self):
return self._closed
def _vispy_get_position(self):
return 0, 0
def _vispy_get_fullscreen(self):
return False
def _on_draw(self):
# This is called by the osmesa ApplicationBackend
if self._vispy_canvas is None or self._pixels is None:
            raise RuntimeError('draw with no canvas or pixels attached')
self._vispy_set_current()
self._vispy_canvas.events.draw(region=None) # (0, 0, w, h)
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
def __init__(self, vispy_timer):
BaseTimerBackend.__init__(self, vispy_timer)
vispy_timer._app._backend._add_timer(self)
self._vispy_stop()
def _vispy_start(self, interval):
self._interval = interval
self._next_time = time() + self._interval
def _vispy_stop(self):
self._next_time = float('inf')
def _tick(self):
if time() > self._next_time:
self._vispy_timer._timeout()
self._next_time = time() + self._interval
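
The heart of this backend is the cooperative loop in ApplicationBackend._vispy_run(): tick every timer, redraw any window flagged by _vispy_update(), and exit once every window reports closed. A standalone sketch of that polling pattern with stand-in window objects (nothing below is vispy API):

```python
import time

class FakeWindow(object):
    """Stand-in for CanvasBackend: tracks dirty/closed flags only."""
    def __init__(self, frames=3):
        self.closed = False
        self._needs_draw = True
        self._frames_left = frames

    def _on_draw(self):
        self._frames_left -= 1
        if self._frames_left <= 0:
            self.closed = True        # close after a few frames
        else:
            self._needs_draw = True   # request another frame

def run(windows):
    # Mirror _vispy_run: process events until all windows are closed.
    while not all(w.closed for w in windows):
        for w in windows:
            if w._needs_draw:
                w._needs_draw = False
                w._on_draw()
        time.sleep(0.01)

run([FakeWindow(), FakeWindow(5)])
print('all windows closed')
```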

gvnn3/PCS | pcs/packets/igmp.py | Python | bsd-3-clause | 3,526 | 0.011344 |
import pcs
from socket import AF_INET, inet_ntop
import struct
import time
import pcs.packets.ipv4
import pcs.packets.igmpv2 as igmpv2
import pcs.packets.igmpv3 as igmpv3
#import pcs.packets.dvmrp
#import pcs.packets.mtrace
IGMP_HOST_MEMBERSHIP_QUERY = 0x11
IGMP_v1_HOST_MEMBERSHIP_REPORT = 0x12
IGMP_DVMRP = 0x13
IGMP_v2_HOST_MEMBERSHIP_REPORT = 0x16
IGMP_HOST_LEAVE_MESSAGE = 0x17
IGMP_v3_HOST_MEMBERSHIP_REPORT = 0x22
IGMP_MTRACE_REPLY = 0x1e
IGMP_MTRACE_QUERY = 0x1f
igmp_map = {
    IGMP_HOST_MEMBERSHIP_QUERY: igmpv2.igmpv2,
IGMP_v1_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
#IGMP_DVMRP: dvmrp.dvmrp,
IGMP_v2_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
IGMP_HOST_LEAVE_MESSAGE: igmpv2.igmpv2,
#IGMP_MTRACE_REPLY: mtrace.reply,
#IGMP_MTRACE_QUERY: mtrace.query,
IGMP_v3_HOST_MEMBERSHIP_REPORT: igmpv3.report
}
descr = {
IGMP_HOST_MEMBERSHIP_QUERY: "IGMPv2 Query",
    IGMP_v1_HOST_MEMBERSHIP_REPORT: "IGMPv1 Report",
IGMP_DVMRP: "DVMRP",
IGMP_v2_HOST_MEMBERSHIP_REPORT: "IGMPv2 Report",
IGMP_HOST_LEAVE_MESSAGE: "IGMPv2 Leave",
IGMP_MTRACE_REPLY: "MTRACE Reply",
IGMP_MTRACE_QUERY: "MTRACE Query",
IGMP_v3_HOST_MEMBERSHIP_REPORT: "IGMPv3 Report"
}
class igmp(pcs.Packet):
"""IGMP"""
_layout = pcs.Layout()
_map = igmp_map
_descr = descr
def __init__(self, bytes = None, timestamp = None, **kv):
""" Define the common IGMP encapsulation; see RFC 2236. """
type = pcs.Field("type", 8, discriminator=True)
code = pcs.Field("code", 8)
checksum = pcs.Field("checksum", 16)
pcs.Packet.__init__(self, [type, code, checksum], bytes = bytes, **kv)
self.description = "IGMP"
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if bytes is not None:
offset = self.sizeof()
if self.type == IGMP_HOST_MEMBERSHIP_QUERY and \
len(bytes) >= igmpv3.IGMP_V3_QUERY_MINLEN:
self.data = igmpv3.query(bytes[offset:len(bytes)],
timestamp = timestamp)
else:
# XXX Workaround Packet.next() -- it only returns something
# if it can discriminate.
self.data = self.next(bytes[offset:len(bytes)],
timestamp = timestamp)
if self.data is None:
self.data = payload.payload(bytes[offset:len(bytes)])
else:
self.data = None
def rdiscriminate(self, packet, discfieldname = None, map = igmp_map):
"""Reverse-map an encapsulated packet back to a discriminator
field value. Like next() only the first match is used."""
#print "reverse discriminating %s" % type(packet)
return pcs.Packet.rdiscriminate(self, packet, "type", map)
def calc_checksum(self):
"""Calculate and store the checksum for this IGMP header.
IGMP checksums are computed over payloads too."""
from pcs.packets.ipv4 import ipv4
self.checksum = 0
tmpbytes = self.bytes
if not self._head is None:
tmpbytes += self._head.collate_following(self)
self.checksum = ipv4.ipv4_cksum(tmpbytes)
def __str__(self):
"""Walk the entire packet and pretty print the values of the fields."""
retval = self._descr[self.type] + "\n"
for field in self._layout:
retval += "%s %s\n" % (field.name, field.value)
return retval
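
calc_checksum() above defers to ipv4.ipv4_cksum, which computes the standard one's-complement Internet checksum (RFC 1071) over the header plus payload. For reference, a minimal standalone sketch of that algorithm (not PCS's actual implementation):

```python
import struct

def internet_checksum(data):
    """RFC 1071 checksum: one's-complement sum of 16-bit words."""
    if len(data) % 2:
        data += b'\x00'              # pad odd-length input
    total = 0
    for i in range(0, len(data), 2):
        total += struct.unpack_from('!H', data, i)[0]
        total = (total & 0xffff) + (total >> 16)   # fold the carry
    return (~total) & 0xffff

# IGMPv2 query for 224.0.0.1 with a zeroed checksum field.
print(hex(internet_checksum(b'\x11\x64\x00\x00\xe0\x00\x00\x01')))
```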

dingmingliu/quanttrade | quanttrade/core/data.py | Python | apache-2.0 | 350 | 0.025714 |
__author__ = 'bett'
import MySQLdb as db
import pandas.io.sql as psql
from config import db_config
def getData(symbols, start, end):
    database = db.connect(**db_config)
    data = psql.frame_query("SELECT * FROM tbl_historical where start", database)
    return data
if __name__ == '__main__':
    getData('000009.sz', '2013-1-1', '2015-4-8')
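
Note that getData() above never uses its symbols/start/end arguments: the WHERE clause is a fixed string. A hedged sketch of a parameterized variant, assuming the installed pandas version's frame_query accepts a params argument (the symbol and trade_date column names are assumptions, not quanttrade's actual schema):

```python
def getDataFiltered(symbol, start, end):
    database = db.connect(**db_config)
    try:
        query = ("SELECT * FROM tbl_historical "
                 "WHERE symbol = %s AND trade_date BETWEEN %s AND %s")
        # Passing params lets the DB-API driver escape the values,
        # instead of interpolating user input into the SQL string.
        return psql.frame_query(query, database, params=(symbol, start, end))
    finally:
        database.close()
```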

calamityman/ansible-modules-extras | network/openvswitch_bridge.py | Python | gpl-3.0 | 8,748 | 0.000572 |
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=C0111
DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch bridges
options:
bridge:
required: true
description:
- Name of bridge to manage
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the bridge should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: None
description:
- A dictionary of external-ids. Omitting this parameter is a No-op.
To clear all external-ids pass an empty value.
fail_mode:
version_added: 2.0
default: None
required: false
choices : [secure, standalone]
description:
- Set bridge fail-mode. The default value (None) is a No-op.
'''
EXAMPLES = '''
# Create a bridge named br-int
- openvswitch_bridge: bridge=br-int state=present
# Create an integration bridge
- openvswitch_bridge: bridge=br-int state=present fail_mode=secure
args:
external_ids:
bridge-id: "br-int"
'''
class OVSBridge(object):
""" Interface to ovs-vsctl. """
def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.fail_mode = module.params['fail_mode']
def _vsctl(self, command):
'''Run ovs-vsctl command'''
return self.module.run_command(['ovs-vsctl', '-t',
str(self.timeout)] + command)
def exists(self):
'''Check if the bridge already exists'''
rtc, _, err = self._vsctl(['br-exists', self.bridge])
if rtc == 0: # See ovs-vsctl(8) for status codes
return True
if rtc == 2:
return False
self.module.fail_json(msg=err)
def add(self):
'''Create the bridge'''
rtc, _, err = self._vsctl(['add-br', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
if self.fail_mode:
self.set_fail_mode()
def delete(self):
'''Delete the bridge'''
rtc, _, err = self._vsctl(['del-br', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
def check(self):
'''Run check mode'''
changed = False
# pylint: disable=W0703
try:
if self.state == 'present' and self.exists():
if (self.fail_mode and
(self.fail_mode != self.get_fail_mode())):
changed = True
##
# Check if external ids would change.
current_external_ids = self.get_external_ids()
exp_external_ids = self.module.params['external_ids']
if exp_external_ids is not None:
                    for (key, value) in exp_external_ids.items():
if ((key in current_external_ids) and
(value != current_external_ids[key])):
changed = True
##
# Check if external ids would be removed.
for (key, value) in current_external_ids.items():
if key not in exp_external_ids:
changed = True
elif self.state == 'absent' and self.exists():
changed = True
elif self.state == 'present' and not self.exists():
changed = True
except Exception, earg:
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
        self.module.exit_json(changed=changed)
def run(self):
'''Make the necessary changes'''
changed = False
# pylint: disable=W0703
try:
            if self.state == 'absent':
if self.exists():
self.delete()
changed = True
elif self.state == 'present':
if not self.exists():
self.add()
changed = True
current_fail_mode = self.get_fail_mode()
if self.fail_mode and (self.fail_mode != current_fail_mode):
self.module.log( "changing fail mode %s to %s" % (current_fail_mode, self.fail_mode))
self.set_fail_mode()
changed = True
current_external_ids = self.get_external_ids()
##
# Change and add existing external ids.
exp_external_ids = self.module.params['external_ids']
if exp_external_ids is not None:
for (key, value) in exp_external_ids.items():
if ((value != current_external_ids.get(key, None)) and
self.set_external_id(key, value)):
changed = True
##
# Remove current external ids that are not passed in.
for (key, value) in current_external_ids.items():
if ((key not in exp_external_ids) and
self.set_external_id(key, None)):
changed = True
except Exception, earg:
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
self.module.exit_json(changed=changed)
def get_external_ids(self):
""" Return the bridge's external ids as a dict. """
results = {}
if self.exists():
rtc, out, err = self._vsctl(['br-get-external-id', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
lines = out.split("\n")
lines = [item.split("=") for item in lines if len(item) > 0]
for item in lines:
results[item[0]] = item[1]
return results
def set_external_id(self, key, value):
""" Set external id. """
if self.exists():
cmd = ['br-set-external-id', self.bridge, key]
if value:
cmd += [value]
(rtc, _, err) = self._vsctl(cmd)
if rtc != 0:
self.module.fail_json(msg=err)
return True
return False
def get_fail_mode(self):
""" Get failure mode. """
value = ''
if self.exists():
rtc, out, err = self._vsctl(['get-fail-mode', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
value = out.strip("\n")
return value
def set_fail_mode(self):
""" Set failure mode. """
if self.exists():
(rtc, _, err) = self._vsctl(['set-fail-mode', self.bridge,
self.fail_mode])
if rtc != 0:
self.module.fail_json(msg=err)
# pylint: disable=E0602
def main():
""" Entry point. """
module = AnsibleModule(
argument_spec={
'bridge': {'
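
The sample cuts off here, inside main()'s argument_spec. For orientation only, a hedged sketch of what an argument_spec covering the options documented above might look like (reconstructed from the DOCUMENTATION block, not the module's actual code):

```python
module = AnsibleModule(
    argument_spec={
        'bridge': {'required': True},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'timeout': {'default': 5, 'type': 'int'},
        'external_ids': {'default': None, 'type': 'dict'},
        'fail_mode': {'default': None, 'choices': ['secure', 'standalone']},
    },
    supports_check_mode=True,
)
```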

delicb/SublimeConfig | tailf.py | Python | mit | 3,015 | 0 |
import os
import io
import stat
import time
import threading
import sublime
import sublime_plugin
# Set of IDs of view that are being monitored.
TAILF_VIEWS = set()
STATUS_KEY = 'tailf'
class TailF(sublime_plugin.TextCommand):
'''
Start monitoring file in `tail -f` line style.
'''
def __init__(self, *args, **kwargs):
super(TailF, self).__init__(*args, **kwargs)
self.prev_file_size = -1
self.prev_mod_time = -1
def run(self, edit):
self.view.set_read_only(True)
t = threading.Thread(target=self.thread_handler)
TAILF_VIEWS.add(self.view.id())
self.view.set_status(STATUS_KEY, 'TailF mode')
t.start()
def thread_handler(self):
while True:
if self.view.id() in TAILF_VIEWS:
if self.view.file_name() is None:
                    sublime.error_message('File not saved on disk')
return
else:
file_stat = os.stat(self.view.file_name())
new_size = file_stat[stat.ST_SIZE]
new_mod_time = file_stat[stat.ST_MTIME]
if (
new_mod_time > self.prev_mod_time or
new_size != self.prev_file_size):
self.view.run_command('update_file')
self.view.run_command('move_to',
args={'to': 'eof', 'extend': False})
self.prev_file_size = new_size
self.prev_mod_time = new_mod_time
time.sleep(self.view.settings().get('tailf_pull_rate'))
else:
return
def description(self):
return 'Starts monitoring file on disk'
class StopTailF(sublime_plugin.TextCommand):
'''
Stop monitoring file command.
'''
def run(self, edit):
TAILF_VIEWS.remove(self.view.id())
# restore view to previous state
self.view.set_read_only(False)
self.view.set_scratch(False)
self.view.erase_status(STATUS_KEY)
def description(self):
return 'Stops monitoring file on disk'
class UpdateFile(sublime_plugin.TextCommand):
'''
Reloads content of the file and replaces view content with it.
'''
def run(self, edit):
read_only = self.view.is_read_only()
self.view.set_read_only(False)
with io.open(self.view.file_name(), 'r', encoding='utf-8-sig') as f:
content = f.read()
whole_file = sublime.Region(0, self.view.size())
self.view.replace(edit, whole_file, content)
self.view.set_read_only(read_only)
        # don't ask the user if they want to save changes to disk
self.view.set_scratch(True)
class TailFEventListener(sublime_plugin.EventListener):
'''
Listener that removes files from monitored files once file is
about to be closed.
'''
def on_pre_close(self, view):
if view.id() in TAILF_VIEWS:
TAILF_VIEWS.remove(view.id())
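
Outside of Sublime Text, the same stat-polling strategy TailF uses above (compare size and mtime, reload on change) can be sketched as a standalone follower; the path and poll rate below are placeholders:

```python
import os
import stat
import time

def tail_f(path, poll_rate=1.0):
    prev_size, prev_mtime = -1, -1
    while True:
        st = os.stat(path)
        size, mtime = st[stat.ST_SIZE], st[stat.ST_MTIME]
        if mtime > prev_mtime or size != prev_size:
            with open(path) as f:
                f.seek(max(prev_size, 0))   # emit only the appended bytes
                print(f.read(), end='')
            prev_size, prev_mtime = size, mtime
        time.sleep(poll_rate)

# tail_f('/var/log/syslog')  # runs until interrupted
```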

chris-martin/chain-bitcoin-python | chain_bitcoin/tests/test_get_block_op_returns_by_height.py | Python | mit | 1,761 | 0 |
from __future__ import absolute_import
import sure
from .. import Chain, NoApiKeyId, OpReturn, get_block_op_returns_by_height
from .mock_http_adapter import *
def test_get_block_op_returns_by_height():
get_block_op_returns_by_height(block_height, api_key_id=api_key_id,
http_adapter=http_adapter) \
.should.equal(op_returns)
def test_get_block_op_returns_by_height_using_class():
Chain(api_key_id=api_key_id, http_adapter=http_adapter) \
.get_block_op_returns_by_height(block_height).should.equal(op_returns)
def test_get_block_op_returns_by_height_without_api_key_id():
(lambda: get_block_op_returns_by_height(block_height,
http_adapter=no_http())) \
.should.throw(NoApiKeyId)
block_height = 308920
api_key_id = 'DEMO-4a5e1e4'
url = 'https://api.chain.com/v1/bitcoin/blocks/308920/op-returns' \
'?api-key-id=DEMO-4a5e1e4'
response_body = """
[
{
"transaction_hash":"ac88...",
"hex":"4067...",
"text":"Yo Adam!",
"sender_addresses": ["1Bj5..."],
"receiver_addresses": ["1def..."]
},
{
"transaction_hash":"5d7...",
"hex":"4052...",
"text":"Hey Devon, what's up?",
"sender_addresses": ["1def..."],
"receiver_addresses": ["1Bj5..."]
}
]
"""
op_returns = [
OpReturn(
transaction_hash='ac88...',
hex='4067...',
text='Yo Adam!',
sender_addresses=['1Bj5...'],
receiver_addresses=['1def...'],
),
OpReturn(
transaction_hash='5d7...',
hex='4052...',
text='Hey Devon, what\'s up?',
sender_addresses=['1def...'],
receiver_addresses=['1Bj5...'],
),
]
http_adapter = mock_get(url, response_body)

mlds-lab/egk | demo.py | Python | mit | 1,643 | 0 |
import numpy as np
import cPickle as pickle
from sklearn.svm import LinearSVC
import gp
from full_marginal import compute_means_covs
from fastfood import FastfoodEGK
def main():
np.random.seed(111)
with open('data/ECG200-50.pkl', 'rb') as f:
ts_train, ts_test, l_train, l_test = pickle.load(f)
# Estimate GP hyperparameters and the noise parameter by maximizing
# the marginal likelihood.
gp_parms = gp.learn_hyperparms(ts_train)
# All time series are defined over a common time interval [0, 1].
# We use 300 evenly-spaced reference time points between [0, 1]
# to represent each time series.
t_ref = np.linspace(0, 1, 300)
# Compute the marginal posterior mean and covariance matrix for
# both training and test time series
train_means, train_covs = compute_means_covs(ts_train, t_ref, gp_parms)
test_means, test_covs = compute_means_covs(ts_test, t_ref, gp_parms)
# We use 500 random features with low-rank approximation, rank 10 in this
# case, and normalize the random feature vector to have unit length.
# By dropping the rank argument or set rank to 0 turns off the low rank
# approximation.
# The parameters gamma and C can be chosen using cross validation.
rp = FastfoodEGK(gamma=20, n_sample=500, rank=10,
normalize=True)
clf = LinearSVC(C=100)
X_train = rp.fit_transform(train_means, train_covs)
clf.fit(X_train, l_train)
X_test = rp.transform(test_means, test_covs)
l_predict = clf.predict(X_test)
    accuracy = np.mean(l_predict == l_test)
print accuracy
if __name__ == '__main__':
main()
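
The comments above leave gamma and C to cross validation. A hedged sketch of selecting C with scikit-learn's grid search on the already-transformed features (the grid values are arbitrary; choosing gamma would wrap this in an outer loop that refits FastfoodEGK per candidate):

```python
from sklearn.grid_search import GridSearchCV  # sklearn.model_selection in newer releases

def select_C(X_train, l_train):
    # 5-fold cross validation over a coarse grid of C values.
    grid = GridSearchCV(LinearSVC(), {'C': [1, 10, 100, 1000]}, cv=5)
    grid.fit(X_train, l_train)
    return grid.best_params_['C']
```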

LegNeato/bztools | bugzilla/models.py | Python | bsd-3-clause | 6,168 | 0.000811 |
from remoteobjects import RemoteObject as RemoteObject_, fields
from .fields import StringBoolean, Datetime
# The datetime format is inconsistent.
DATETIME_FORMAT_WITH_SECONDS = '%Y-%m-%d %H:%M:%S %z'
DATETIME_FORMAT = '%Y-%m-%d %H:%M %Z'
class RemoteObject(RemoteObject_):
def post_to(self, url):
self._location = url
self.post(self)
return self.api_data['ref']
def put_to(self, url):
self._location = url
self.put()
def _get_location(self):
if self.__location is not None:
return self.__location
else:
return self.api_data.get('ref', None)
def _set_location(self, url):
self.__location = url
    _location = property(_get_location, _set_location)
class Bug(RemoteObject):
id = fields.Field()
summary = fields.Field()
assigned_to = fields.Object('User')
reporter = fields.Object('User')
target_milestone = fields.Field()
attachments = fields.List(fields.Object('Attachment'))
comments = fields.List(fields.Object('Comment'))
history = fields.List(fields.Object('Changeset'))
keywords = fields.List(fields.Object('Keyword'))
status = fields.Field()
    resolution = fields.Field()
# TODO: These are Mozilla specific and should be generalized
cf_blocking_20 = fields.Field()
cf_blocking_fennec = fields.Field()
cf_crash_signature = fields.Field()
creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
flags = fields.List(fields.Object('Flag'))
blocks = fields.List(fields.Field())
#depends_on = CommaSeparatedBugs(FooLink(fields.Object('Bug')))
#depends_on = fields.List(BugLink(fields.Object('Bug')))
#depends_on = BugLink(fields.List(fields.Object('Bug')))
url = fields.Field()
cc = fields.List(fields.Object('User'))
keywords = fields.List(fields.Field())
whiteboard = fields.Field()
op_sys = fields.Field()
platform = fields.Field()
priority = fields.Field()
product = fields.Field()
qa_contact = fields.Object('User')
severity = fields.Field()
see_also = fields.List(fields.Field())
version = fields.Field()
alias = fields.Field()
classification = fields.Field()
component = fields.Field()
is_cc_accessible = StringBoolean()
is_everconfirmed = StringBoolean()
is_reporter_accessible = StringBoolean()
last_change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
ref = fields.Field()
# Needed for submitting changes.
token = fields.Field(api_name='update_token')
# Time tracking.
actual_time = fields.Field()
deadline = Datetime(DATETIME_FORMAT_WITH_SECONDS)
estimated_time = fields.Field()
# groups = fields.Field() # unimplemented
percentage_complete = fields.Field()
remaining_time = fields.Field()
work_time = fields.Field()
def __repr__(self):
return '<Bug %s: "%s">' % (self.id, self.summary)
def __str__(self):
return "[%s] - %s" % (self.id, self.summary)
def __hash__(self):
return self.id
class User(RemoteObject):
name = fields.Field()
real_name = fields.Field()
ref = fields.Field()
def __repr__(self):
return '<User "%s">' % self.real_name
def __str__(self):
return self.real_name or self.name
def __hash__(self):
if not self or not self.name:
return 0
return self.name.__hash__()
class Attachment(RemoteObject):
# Attachment data.
id = fields.Field()
attacher = fields.Object('User')
creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
last_change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
description = fields.Field()
bug_id = fields.Field()
bug_ref = fields.Field()
# File data.
file_name = fields.Field()
size = fields.Field()
content_type = fields.Field()
# Attachment metadata.
flags = fields.List(fields.Object('Flag'))
is_obsolete = StringBoolean()
is_private = StringBoolean()
is_patch = StringBoolean()
# Used for submitting changes.
token = fields.Field()
ref = fields.Field()
# Only with attachmentdata=1
data = fields.Field()
encoding = fields.Field()
def __repr__(self):
return '<Attachment %s: "%s">' % (self.id, self.description)
def __hash__(self):
return self.id
class Comment(RemoteObject):
id = fields.Field()
author = creator = fields.Object('User')
creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
text = fields.Field()
is_private = StringBoolean()
def __repr__(self):
return '<Comment by %s on %s>' % (
self.author, self.creation_time.strftime(DATETIME_FORMAT))
def __str__(self):
return self.text
def __hash__(self):
return self.id
class Change(RemoteObject):
field_name = fields.Field()
added = fields.Field()
removed = fields.Field()
def __repr__(self):
return '<Change "%s": "%s" -> "%s">' % (self.field_name, self.removed,
self.added)
class Changeset(RemoteObject):
changer = fields.Object('User')
changes = fields.List(fields.Object('Change'))
change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
def __repr__(self):
return '<Changeset by %s on %s>' % (
self.changer, self.change_time.strftime(DATETIME_FORMAT))
class Flag(RemoteObject):
id = fields.Field()
name = fields.Field()
setter = fields.Object('User')
status = fields.Field()
requestee = fields.Object('User')
type_id = fields.Field()
def __repr__(self):
return '<Flag "%s">' % self.name
def __str__(self):
return self.name
def __hash__(self):
return self.id
class Keyword(RemoteObject):
name = fields.Field()
def __repr__(self):
return '<Keyword "%s">' % self.name
def __str__(self):
return self.name
def __hash__(self):
if not self or not self.name:
return 0
return self.name.__hash__()
class BugSearch(RemoteObject):
bugs = fields.List(fields.Object('Bug'))

keisuke-umezawa/chainer | chainer/link_hooks/spectral_normalization.py | Python | mit | 11,583 | 0 |
import numpy
import chainer
from chainer import backend
from chainer import configuration
import chainer.functions as F
from chainer import link_hook
import chainer.links as L
from chainer import variable
import chainerx
from chainerx import _fallback_workarounds as fallback
def l2normalize(xp, v, eps):
"""Normalize a vector by its L2 norm.
Args:
xp (numpy or cupy):
v (numpy.ndarray or cupy.ndarray)
eps (float): Epsilon value for numerical stability.
Returns:
:class:`numpy.ndarray` or :class:`cupy.ndarray`
"""
# TODO(crcrpar): Remove this when chainerx.linalg.norm becomes available.
if xp is chainerx:
# NOTE(crcrpar): `chainerx.power` is not available as of 2019/03/27.
# See https://github.com/chainer/chainer/pull/6522
norm = chainerx.sqrt(chainerx.sum(v * v))
else:
norm = xp.linalg.norm(v)
return v / (norm + eps)
def update_approximate_vectors(
weight_matrix, u, n_power_iteration, eps):
"""Update the first left and right singular vectors.
This function updates the first left singular vector `u` and
the first right singular vector `v`.
Args:
weight_matrix (~chainer.Variable): 2D weight.
u (numpy.ndarray, cupy.ndarray, or None):
Vector that approximates the first left singular vector and
has the shape of (out_size,).
n_power_iteration (int): Number of iterations to approximate
the first right and left singular vectors.
Returns:
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first left singular vector.
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first right singular vector.
"""
weight_matrix = weight_matrix.array
xp = backend.get_array_module(weight_matrix)
for _ in range(n_power_iteration):
v = l2normalize(xp, xp.dot(u, weight_matrix), eps)
u = l2normalize(xp, xp.dot(weight_matrix, v), eps)
return u, v
def calculate_max_singular_value(weight_matrix, u, v):
"""Calculate max singular value by power iteration method.
Args:
weight_matrix (~chainer.Variable)
u (numpy.ndarray or cupy.ndarray)
v (numpy.ndarray or cupy.ndarray)
Returns:
~chainer.Variable: Max singular value via power iteration method.
"""
sigma = F.matmul(F.matmul(u, weight_matrix), v)
return sigma
class SpectralNormalization(link_hook.LinkHook):
"""Spectral Normalization link hook implementation.
    This hook normalizes a weight using its max singular value, which is
    computed via the power iteration method. Currently, this hook is supposed to
be added to :class:`chainer.links.Linear`, :class:`chainer.links.EmbedID`,
:class:`chainer.links.Convolution2D`, :class:`chainer.links.ConvolutionND`,
:class:`chainer.links.Deconvolution2D`,
and :class:`chainer.links.DeconvolutionND`. However, you can use this to
other links like RNNs by specifying ``weight_name``.
It is highly recommended to add this hook before optimizer setup because
    this hook adds a scaling parameter ``gamma`` if ``use_gamma`` is True.
Otherwise, the registered ``gamma`` will not be updated.
.. math::
\\bar{\\mathbf{W}} &=& \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\
\\text{, where} \\ \\sigma(\\mathbf{W}) &:=&
\\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}
\\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}
= \\max_{\\|\\mathbf{h}\\|_2 \\le 1} \\|\\mathbf{W}\\mathbf{h}\\|_2
See: T. Miyato et. al., `Spectral Normalization for Generative Adversarial
Networks <https://arxiv.org/abs/1802.05957>`_
Args:
n_power_iteration (int): Number of power iteration.
The default value is 1.
eps (float): Numerical stability in norm calculation.
The default value is 1e-6 for the compatibility with
mixed precision training. The value used in the author's
implementation is 1e-12.
use_gamma (bool): If ``True``, weight scaling parameter gamma which is
initialized by initial weight's max singular value is introduced.
factor (float, None): Scaling parameter to divide maximum singular
value. The default value is 1.0.
weight_name (str): Link's weight name to apply this hook. The default
value is ``'W'``.
name (str or None): Name of this hook. The default value is
``'SpectralNormalization'``.
Attributes:
vector_name (str): Name of the approximate first left singular vector
registered in the target link.
axis (int): Axis of weight represents the number of output
feature maps or output units (``out_channels`` and
``out_size``, respectively).
.. admonition:: Example
        There are two almost identical ways to apply the spectral
        normalization (SN) hook to links.
1. Initialize link and SN separately. This makes it easy to handle
buffer and parameter of links registered by SN hook.
>>> l = L.Convolution2D(3, 5, 3)
>>> hook = chainer.link_hooks.SpectralNormalization()
>>> _ = l.add_hook(hook)
>>> # Check the shape of the first left singular vector.
>>> getattr(l, hook.vector_name).shape
(5,)
>>> # Delete SN hook from this link.
>>> l.delete_hook(hook.name)
2. Initialize both link and SN hook at one time. This makes it easy to
define your original :class:`~chainer.Chain`.
>>> # SN hook handles lazy initialization!
>>> layer = L.Convolution2D(
... 5, 3, stride=1, pad=1).add_hook(
... chainer.link_hooks.SpectralNormalization())
"""
name = 'SpectralNormalization'
def __init__(self, n_power_iteration=1, eps=1e-6, use_gamma=False,
factor=None, weight_name='W', name=None):
assert n_power_iteration > 0
self.n_power_iteration = n_power_iteration
self.eps = eps
self.use_gamma = use_gamma
self.factor = factor
self.weight_name = weight_name
self.vector_name = weight_name + '_u'
self._initialized = False
self.axis = 0
if name is not None:
self.name = name
def __enter__(self):
raise NotImplementedError(
'This hook is not supposed to be used as context manager.')
def __exit__(self):
raise NotImplementedError
def added(self, link):
# Define axis and register ``u`` if the weight is initialized.
if not hasattr(link, self.weight_name):
raise ValueError(
'Weight \'{}\' does not exist!'.format(self.weight_name))
if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
self.axis = 1
if getattr(link, self.weight_name).array is not None:
self._prepare_parameters(link)
def deleted(self, link):
        # Remove the approximate vector ``u`` and the parameter ``gamma`` if it exists.
delattr(link, self.vector_name)
        if self.use_gamma:
del link.gamma
def forward_preprocess(self, cb_args):
# This method normalizes target link's weight spectrally
# using power iteration method
        link = cb_args.link
input_variable = cb_args.args[0]
if not self._initialized:
self._prepare_parameters(link, input_variable)
weight = getattr(link, self.weight_name)
# For link.W or equivalents to be chainer.Parameter
# consistently to users, this hook maintains a reference to
# the unnormalized weight.
self.original_weight = weight
# note: `normalized_weight` is ~chainer.Variable
normalized_weight = self.normalize_weight(link)
setattr(link, self.weight_name, normalized_weight)
def forward_postprocess(self, cb_args):
# Here, the computational graph is already created,
        # we can reset link.W or equivalents to be Parameter objects.
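
update_approximate_vectors() and calculate_max_singular_value() above implement power iteration on the reshaped weight. A standalone NumPy sketch of the same estimate, checked against a full SVD:

```python
import numpy as np

def max_singular_value(W, n_iter=5, eps=1e-12):
    # Alternately normalize W^T u and W v, then sigma = u^T W v.
    u = np.random.randn(W.shape[0])
    v = None
    for _ in range(n_iter):
        v = np.dot(u, W)
        v /= (np.linalg.norm(v) + eps)
        u = np.dot(W, v)
        u /= (np.linalg.norm(u) + eps)
    return np.dot(u, np.dot(W, v))

W = np.random.randn(5, 3)
# The power-iteration estimate should approach the largest singular value.
print(max_singular_value(W), np.linalg.svd(W, compute_uv=False)[0])
```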

ruibarreira/linuxtrail | usr/lib/python2.7/unittest/loader.py | Python | gpl-3.0 | 13,465 | 0.002005 |
"""Loading unittests."""
import os
import re
import sys
import traceback
import types
from functools import cmp_to_key as _CmpToKey
from fnmatch import fnmatch
from . import case, suite
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, case.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception, e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, case.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
name = parts[-1]
inst = parent(name)
return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
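
In practice this loader is driven through unittest's public API. A short usage sketch of the discovery behaviour documented above (the tests directory is hypothetical):

```python
import unittest

loader = unittest.TestLoader()
# Recursively collect every module matching test*.py under ./tests;
# top_level_dir must be the importable project root.
suite = loader.discover('tests', pattern='test*.py', top_level_dir='.')
unittest.TextTestRunner(verbosity=2).run(suite)
```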

opennode/nodeconductor-assembly-waldur | src/waldur_jira/migrations/0003_project_template.py | Python | mit | 1,946 | 0.001028 |
# Generated by Django 1.11.7 on 2017-12-13 09:18
import django.db.models.deletion
from django.db import migrations, models
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.validators
class Migration(migrations.Migration):
dependencies = [
('waldur_jira', '0002_resource'),
]
operations = [
migrations.CreateModel(
name='ProjectTemplate',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('icon_url', models.URLField(blank=True, verbose_name='icon url')),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(max_length=255, unique=True)),
],
            options={'abstract': False,},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.AddField(
model_name='project',
name='template',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_jira.ProjectTemplate',
),
preserve_default=False,
),
]

Shopify/shopify_python_api | shopify/resources/gift_card_adjustment.py | Python | mit | 193 | 0 |
from ..base import ShopifyResource
class GiftCardAdjustment(ShopifyResource):
_prefix_source = "/admin/gift_cards/$gift_card_id/"
_plural = "adjustments"
_singular = "adjustment"

danakj/chromium | third_party/WebKit/Tools/Scripts/webkitpy/performance_tests/perftest.py | Python | bsd-3-clause | 12,942 | 0.002859 |
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import math
import re
import signal
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.port.driver import DriverOutput
DEFAULT_TEST_RUNNER_COUNT = 4
_log = logging.getLogger(__name__)
class PerfTestMetric(object):
def __init__(self, metric, unit=None, iterations=None):
# FIXME: Fix runner.js to report correct metric names
self._iterations = iterations or []
self._unit = unit or self.metric_to_unit(metric)
self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
def name(self):
return self._metric
def has_values(self):
return bool(self._iterations)
def append_group(self, group_values):
assert isinstance(group_values, list)
self._iterations.append(group_values)
def grouped_iteration_values(self):
return self._iterations
def flattened_iteration_values(self):
return [value for group_values in self._iterations for value in group_values]
def unit(self):
return self._unit
@staticmethod
def metric_to_unit(metric):
assert metric in ('Time', 'Malloc', 'JSHeap')
return 'ms' if metric == 'Time' else 'bytes'
@staticmethod
def time_unit_to_metric(unit):
return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]
class PerfTest(object):
def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
self._port = port
self._test_name = test_name
self._test_path = test_path
self._description = None
self._metrics = {}
self._ordered_metrics_name = []
self._test_runner_count = test_runner_count
def test_name(self):
return self._test_name
def test_name_without_file_extension(self):
return re.sub(r'\.\w+$', '', self.test_name())
def test_path(self):
return self._test_path
def description(self):
return self._description
def _create_driver(self):
return self._port.create_driver(worker_number=0, no_timeout=True)
def run(self, time_out_ms):
for _ in xrange(self._test_runner_count):
driver = self._create_driver()
try:
if not self._run_with_driver(driver, time_out_ms):
return None
finally:
driver.stop()
should_log = not self._port.get_option('profile')
if should_log and self._description:
_log.info('DESCRIPTION: %s', self._description)
results = {}
for metric_name in self._ordered_metrics_name:
metric = self._metrics[metric_name]
results[metric.name()] = metric.grouped_iteration_values()
if should_log:
legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
metric.flattened_iteration_values(), metric.unit())
return results
@staticmethod
def log_statistics(test_name, values, unit):
sorted_values = sorted(values)
# Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
square_sum = 0
mean = 0
for i, time in enumerate(sorted_values):
delta = time - mean
sweep = i + 1.0
mean += delta / sweep
square_sum += delta * (time - mean)
middle = int(len(sorted_values) / 2)
mean = sum(sorted_values) / len(values)
median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2
stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0
_log.info('RESULT %s= %s %s', test_name, mean, unit)
_log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s',
median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit)
_description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
_metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
_statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
_score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
_console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):')
def _run_with_driver(self, driver, time_out_ms):
output = self.run_single(driver, self.test_path(), time_out_ms)
self._filter_output(output)
if self.run_failed(output):
return False
current_metric = None
for line in re.split('\n', output.text):
description_match = self._description_regex.match(line)
metric_match = self._metrics_regex.match(line)
score = self._score_regex.match(line)
console_match = self._console_regex.match(line)
if description_match:
self._description = description_match.group('description')
elif metric_match:
current_metric = metric_match.group('metric').replace(' ', '')
elif score:
if score.group('key') != 'values':
continue
metric = self._ensure_metrics(current_metric, score.group('unit'))
metric.append_group(map(lambda value: float(value), score.group('value').split(', ')))
elif console_match:
# Ignore console messages such as deprecation warnings.
continue
else:
_log.error('ERROR: ' + line)
return False
return True
def _ensure_metrics(self, metric_name, unit=None):
if metric_name not in self._metrics:
self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
self._ordered_metrics_name.append(metric_name)
return self._metrics[metric_name]
def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
return driver.run_test(
DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test, args=[]), stop_when_done=False)
def run_failed(self, output):
if output.error:
_log.error('e
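
The sample truncates inside run_failed(). The statistics above use Knuth's online algorithm, which updates the mean and the sum of squared deviations in a single pass for numerical stability; a standalone sketch with a small worked check:

```python
def online_mean_variance(values):
    mean, square_sum = 0.0, 0.0
    for i, x in enumerate(values):
        delta = x - mean
        mean += delta / (i + 1.0)          # running mean
        square_sum += delta * (x - mean)   # running sum of squared deviations
    variance = square_sum / (len(values) - 1) if len(values) > 1 else 0.0
    return mean, variance

# (10 + 12 + 9 + 11) / 4 = 10.5; sample variance = 5 / 3 ~= 1.667
print(online_mean_variance([10.0, 12.0, 9.0, 11.0]))
```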

sacharya/nova | nova/tests/api/openstack/compute/contrib/test_server_start_stop.py | Python | apache-2.0 | 4,710 | 0.001274 |
# Copyright (c) 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import webob
from nova.api.openstack.compute.contrib import server_start_stop
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def fake_instance_get(context, instance_id,
columns_to_join=None, use_slave=False):
result = fakes.stub_instance(id=1, uuid=instance_id)
result['created_at'] = None
result['deleted_at'] = None
result['updated_at'] = None
result['deleted'] = 0
result['info_cache'] = {'network_info': '[]',
'instance_uuid': result['uuid']}
return result
def fake_start_stop_not_ready(self, context, instance):
raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_start_stop_locked_server(self, context, instance):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
class ServerStartStopTest(test.TestCase):
def setUp(self):
super(ServerStartStopTest, self).setUp()
self.controller = server_start_stop.ServerStartStopActionController()
def test_start(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.mox.StubOutWithMock(compute_api.API, 'start')
compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.controller._start_server(req, 'test_inst', body)
def test_start_not_ready(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, 'test_inst', body)
def test_start_locked_server(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, 'test_inst', body)
def test_stop(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.mox.StubOutWithMock(compute_api.API, 'stop')
compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(stop="")
self.controller._stop_server(req, 'test_inst', body)
def test_stop_not_ready(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, 'test_inst', body)
def test_stop_locked_server(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, 'test_inst', body)
def test_start_with_bogus_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
def test_stop_with_bogus_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)

idaholab/raven | framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLarsIC.py | Python | apache-2.0 | 6,386 | 0.008926 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Lasso model fit with Lars using BIC or AIC for model selection.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
from numpy import finfo
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LassoLarsIC(ScikitLearnBase):
"""
Lasso model fit with Lars using BIC or AIC for model selection
"""
info = {'problemtype':'regression', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.LassoLarsIC
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(LassoLarsIC, cls).getInputSpecification()
specs.description = r"""The \xmlNode{LassoLarsIC} (\textit{Lasso model fit with Lars using BIC or AIC for model selection})
is a Lasso model fit with Lars using BIC or AIC for model selection.
The optimization objective for Lasso is:
$(1 / (2 * n\_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1$
AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria
are useful to select the value of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should explain well the data
while being simple.
\zNormalizationNotPerformed{LassoLarsIC}
"""
specs.addSub(InputData.parameterInputFactory("criterion", contentType=InputTypes.makeEnumType("criterion", "criterionType",['bic', 'aic']),
descr=r"""The type of criterion to use.""", default='aic'))
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting the mean and
dividing by the l2-norm.""", default=True))
specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of iterations.""", default=500))
specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
For sparse input this option is always True to preserve sparsity.""", default='auto'))
specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
descr=r"""The machine-precision regularization in the computation of the Cholesky
diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
parameter in some iterative optimization-based algorithms, this parameter does not
control the tolerance of the optimization.""", default=finfo(float).eps))
specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
descr=r"""Amount of verbosity.""", default=False))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute',
'eps','positive','criterion', 'verbose'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
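# Hedged usage sketch (comments only; data and seed are made up): the options parsed
# in _handleInput above map one-to-one onto scikit-learn's LassoLarsIC constructor,
# so the wrapped estimator behaves like this minimal standalone example:
#
#   import numpy as np
#   from sklearn.linear_model import LassoLarsIC
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 5)
#   y = X[:, 0] - 2.0 * X[:, 2] + 0.1 * rng.randn(100)
#   reg = LassoLarsIC(criterion='bic', fit_intercept=True, max_iter=500).fit(X, y)
#   reg.alpha_   # regularization strength selected by the BIC criterion
#   reg.coef_    # sparse coefficient vector (many entries driven to zero)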
|
khushboo9293/postorius
|
src/postorius/views/settings.py
|
Python
|
gpl-3.0
| 4,486
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 1998-2015 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
import json
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import (login_required,
permission_required,
user_passes_test)
from django.contrib.auth.forms import (AuthenticationForm, PasswordResetForm,
SetPasswordForm, PasswordChangeForm)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import Context, loader, RequestContext
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from urllib2 import HTTPError
from postorius import utils
from postorius.models import (Domain, List, Member, MailmanUser,
MailmanApiError, Mailman404Error)
from postorius.forms import *
from postorius.auth.decorators import *
@login_required
@user_passes_test(lambda u: u.is_superuser)
def site_settings(request):
return render_to_response('postorius/site_settings.html',
context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda u: u.is_superuser)
def domain_index(request):
try:
existing_domains = Domain.objects.all()
except MailmanApiError:
return utils.render_api_error(request)
return render_to_response('postorius/domain_index.html',
{'domains': existing_domains},
context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda u: u.is_superuser)
def domain_new(request):
message = None
if request.method == 'POST':
form = DomainNew(request.POST)
if form.is_valid():
domain = Domain(mail_host=form.cleaned_data['mail_host'],
base_url=form.cleaned_data['web_host'],
description=form.cleaned_data['description'],
owner=request.user.email)
try:
domain.save()
except MailmanApiError:
return utils.render_api_error(request)
except HTTPError, e:
messages.error(request, e)
else:
messages.success(request, _("New Domain registered"))
return redirect("domain_index")
else:
form = DomainNew()
    return render_to_response('postorius/domain_new.html',
{'form': form, 'message': message},
context_instance=RequestContext(request))
def domain_delete(request, domain):
"""Deletes a domain but asks for confirmation first.
"""
if request.method == 'POST':
try:
client = utils.get_client()
            client.delete_domain(domain)
messages.success(request,
_('The domain %s has been deleted.' % domain))
return redirect("domain_index")
except HTTPError as e:
print e.__dict__
messages.error(request, _('The domain could not be deleted:'
' %s' % e.msg))
return redirect("domain_index")
submit_url = reverse('domain_delete',
kwargs={'domain': domain})
return render_to_response('postorius/domain_confirm_delete.html',
{'domain': domain, 'submit_url': submit_url},
context_instance=RequestContext(request))
|
iniweb/deployCD
|
manage.py
|
Python
|
mit
| 266
| 0.003759
|
#!env/bin/python
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
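# Hedged usage sketch (assumed Flask-Migrate workflow; the 'db' subcommands come
# from MigrateCommand registered above, not from code in this repo):
#
#   python manage.py db init      # create the migrations/ directory once
#   python manage.py db migrate   # autogenerate a revision from model changes
#   python manage.py db upgrade   # apply pending revisions to the database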
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/overload_bit/__init__.py
|
Python
|
apache-2.0
| 25,678
| 0.001675
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
from . import reset_triggers
class overload_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines Overload Bit configuration.
"""
__slots__ = (
"_path_helper", "_extmethods", "__config", "__state", "__reset_triggers"
)
_yang_name = "overload-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__reset_triggers = YANGDynClass(
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
"overload-bit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
YANG Description: This container defines ISIS Overload Bit configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', d
|
efining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
        self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
YANG Description: This container defines state for ISIS Overload Bit.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state for ISIS Overload Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="contai
|
Aravinthu/odoo
|
addons/account/tests/account_test_classes.py
|
Python
|
agpl-3.0
| 2,749
| 0.003638
|
# -*- coding: utf-8 -*-
from odoo.tests.common import HttpCase
from odoo.exceptions import ValidationError
class AccountingTestCase(HttpCase):
""" This class extends the base TransactionCase, in order to test the
accounting with localization setups. It is configured to run the tests after
the installation of all modules, and will SKIP TESTS ifit cannot find an already
configured accounting (which means no localization module has been installed).
"""
post_install = True
at_install = False
def setUp(self):
super(AccountingTestCase, self).setUp()
domain = [('company_id', '=', self.env.ref('base.main_company').id)]
if not self.env['account.account'].search_count(domain):
self.skipTest("No Chart of account found")
    def check_complete_move(self, move, theoretical_lines):
        for aml in move.line_ids:
            line = (aml.name, round(aml.debit, 2), round(aml.credit, 2))
            if line in theoretical_lines:
                theoretical_lines.remove(line)
            else:
                raise ValidationError('Unexpected journal item. (label: %s, debit: %s, credit: %s)' % (aml.name, round(aml.debit, 2), round(aml.credit, 2)))
        if theoretical_lines:
            raise ValidationError('Remaining theoretical lines (not found): %s' % ([(aml[0], aml[1], aml[2]) for aml in theoretical_lines]))
return True
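    # Hedged usage sketch (labels and amounts are made up): a localization test
    # would typically assert a posted move against its expected journal items as
    #
    #   self.check_complete_move(invoice.move_id, [
    #       ('Product A',    0.0, 100.0),
    #       ('Tax 10%',      0.0,  10.0),
    #       ('Receivable', 110.0,   0.0),
    #   ])
    #
    # where each tuple is (label, debit, credit) rounded to two decimals.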
def ensure_account_property(self, property_name):
        '''Ensure the ir.property targeting an account.account passed as parameter exists.
        In case it's not: create it with a random account. This is useful when testing with
        partially defined localization (missing stock properties for example).
        :param property_name: The name of the property.
        '''
company_id = self.env.user.company_id
field_id = self.env['ir.model.fields'].search(
[('model', '=', 'product.template'), ('name', '=', property_name)], limit=1)
property_id = self.env['ir.property'].search([
            ('company_id', '=', company_id.id),
('name', '=', property_name),
('res_id', '=', None),
('fields_id', '=', field_id.id)], limit=1)
account_id = self.env['account.account'].search([('company_id', '=', company_id.id)], limit=1)
value_reference = 'account.account,%d' % account_id.id
if property_id and not property_id.value_reference:
property_id.value_reference = value_reference
else:
self.env['ir.property'].create({
'name': property_name,
'company_id': company_id.id,
'fields_id': field_id.id,
'value_reference': value_reference,
})
|
torshid/foodnow
|
server.py
|
Python
|
gpl-3.0
| 2,035
| 0.009337
|
import datetime
import json
import os
import psycopg2 as dbapi2
import re
from werkzeug.exceptions import NotFound, Forbidden
from flask import Flask, app, render_template
from common import *
from config import *
import jinja
app = Flask(__name__)
# jinja-python functions
@app.context_processor
def processor():
functions = {}
for function in jinja.__dict__.values():
if callable(function):
functions[function.__name__] = function
return functions
# dynamically load all entities + register blueprints
for name in os.listdir("entities
|
"):
if name.endswith(".py"):
module = name[:-3]
globals()[module] = __import__('entities.' + module, fromlist = ['page'])
app.register_blueprint(getattr(globals()[module], 'page'))
@app.errorhandler(NotFound)
@app.errorhandler(Forbidden)
def error(e):
    return render_template('errors/' + str(e.code) + '.html'), e.code
def get_elephantsql_dsn(vcap_services):
"""Returns the data source name for ElephantSQL."""
parsed = json.loads(vcap_services)
uri = parsed["elephantsql"][0]["credentials"]["uri"]
    match = re.match(r'postgres://(.*?):(.*?)@(.*?)(:(\d+))?/(.*)', uri)
user, password, host, _, port, dbname = match.groups()
dsn = """user='{}' password='{}' host='{}' port={}
dbname='{}'""".format(user, password, host, port, dbname)
return dsn
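# Hedged worked example (credentials are made up): for a VCAP_SERVICES payload of
#   {"elephantsql": [{"credentials": {"uri": "postgres://u:pw@db.example.com:5432/mydb"}}]}
# the regex above captures user='u', password='pw', host='db.example.com',
# port='5432', dbname='mydb', and the returned DSN reads
#   user='u' password='pw' host='db.example.com' port=5432 dbname='mydb'
# (split across two lines by the triple-quoted template).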
if __name__ == '__main__':
app.secret_key = flaskkey
VCAP_APP_PORT = os.getenv('PORT')
if VCAP_APP_PORT is not None:
port, debug = int(VCAP_APP_PORT), False
else:
port, debug = 5000, True
VCAP_SERVICES = os.getenv('VCAP_SERVICES')
if VCAP_SERVICES is not None:
app.config['dsn'] = get_elephantsql_dsn(VCAP_SERVICES)
else:
app.config['dsn'] = "user='" + dbuser + "' password='" + dbpass + "' host='localhost' port=5432 dbname='" + dbname + "'"
app.run(host = '0.0.0.0', port = port, debug = debug)
|
ellert/doxygen
|
src/scan_states.py
|
Python
|
gpl-2.0
| 1,591
| 0.0044
|
#!/usr/bin/python
# python script to generate an overview of the states based on the input lex file.
#
# Copyright (C) 1997-2019 by Dimitri van Heesch.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation under the terms of the GNU General Public License is hereby
# granted. No representations are made about the suitability of this software
# for any purpose. It is provided "as is" without express or implied warranty.
# See the GNU General Public License for more details.
#
# Documents produced by Doxygen are derivative works derived from the
# input used in their production; they are not affected by this license.
#
import sys
import os
import re
def main():
if len(sys.argv)!=2:
sys.exit('Usage: %s <lex_file>' % sys.argv[0])
    lex_file = sys.argv[1]
if (os.path.exists(lex_file)):
#write preamble
print("static const char *stateToString(int state)")
print("{")
print(" switch(state)")
print(" {")
print(" case INITIAL: return \"INITIAL\";")
with open(lex_file) as f:
for line in f:
if re.search(r'^%x', line) or re.search(r'^%s', line):
state = line.split()[1]
print(" case %s: return \"%s\";" % (state,sta
|
te))
elif re.search(r'^%%', line):
break
else:
pass
f.close()
#write post
print(" }")
print(" return \"Unknown\";")
print("}")
if __name__ == '__main__':
main()
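# Hedged input/output sketch (lex declarations are made up): for a scanner containing
#   %x CopyString
#   %s Comment
# the script emits a C helper along the lines of
#   static const char *stateToString(int state)
#   {
#     switch(state)
#     {
#       case INITIAL: return "INITIAL";
#       case CopyString: return "CopyString";
#       case Comment: return "Comment";
#     }
#     return "Unknown";
#   }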
|
qtux/instmatcher
|
tests/test_parser.py
|
Python
|
apache-2.0
| 21,291
| 0.039547
|
# Copyright 2016 Matthias Gazzari
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from .util import GrobidServer
from instmatcher import parser
import xml.etree.ElementTree as et
class test_parser(unittest.TestCase):
def setUp(self):
host = 'localhost'
port = 8081
self.url = 'http://' + host + ':' + str(port)
self.server = GrobidServer(host, port)
self.server.start()
def tearDown(self):
self.server.stop()
def test_parse_None(self):
actual = list(parser.parseAll(None, self.url))
expected = []
self.assertEqual(actual, expected)
def test_parse_empty(self):
self.server.setResponse(__name__, '')
actual = list(parser.parseAll(__name__, self.url))
expected = []
self.assertEqual(actual, expected)
def test_parse_no_institution(self):
self.server.setResponse(
__name__,
'''<affiliation>
<address>
<country key="AQ">Irrelevant</country>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = []
self.assertEqual(actual, expected)
def test_parse_no_alpha2(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName type="institution">institA</orgName>
<address>
<country>country</country>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
'institution': 'institA',
'institutionSource': 'grobid',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
def test_parse_no_country(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName type="institution">institB</orgName>
<address>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
'institution': 'institB',
'institutionSource': 'grobid',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
def test_parse_no_settlement(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName type="institution">institC</orgName>
<address>
<country key="AQ">Irrelevant</country>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
'institution': 'institC',
'institutionSource': 'grobid',
'alpha2': 'AQ',
'country': 'Antarctica',
'countrySource': 'grobid',
'settlement':[],
},]
self.assertEqual(actual, expected)
	def test_parse_not_recognised_country(self):
affiliation = 'institA, settlement, INDIA'
self.server.setResponse(
affiliation,
'''<affiliation>
<orgName type="institution">institA</orgName>
<address>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(affiliation, self.url))
expected = [{
'institution': 'institA',
'institutionSource': 'regexReplace',
'alpha2': 'IN',
'country': 'India',
'countrySource': 'regex',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
	def test_parse_not_recognised_bad_country(self):
affiliation = 'institA, settlement, Fantasia'
self.server.setResponse(
affiliation,
'''<affiliation>
<orgName type="institution">institA</orgName>
<address>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(affiliation, self.url))
expected = [{
'institution': 'institA',
'institutionSource': 'regexReplace',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
def test_parse_not_recognised_country_no_comma_in_affiliation_string(self):
affiliation = 'institA settlement Algeria'
self.server.setResponse(
affiliation,
'''<affiliation>
<orgName type="institution">institA</orgName>
<address>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(affiliation, self.url))
expected = [{
'institution': 'institA',
'institutionSource': 'grobid',
'alpha2': 'DZ',
'country': 'Algeria',
'countrySource': 'regex',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
def test_parse_multiple_not_recognised_countries(self):
affiliation = 'institA settlement Algeria India'
self.server.setResponse(
affiliation,
'''<affiliation>
<orgName type="institution">institA</orgName>
<address>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(affiliation, self.url))
expected = [{
'institution': 'institA',
'institutionSource': 'grobid',
'alpha2': 'IN',
'country': 'India',
'countrySource': 'regex',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
	def test_parse_relevant_tags(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName t
|
ype="institution">institD</orgName>
<address>
<country key="AQ">Irrelevant</country>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
			'institution': 'institD',
'institutionSource': 'grobid',
'alpha2': 'AQ',
'country': 'Antarctica',
'countrySource': 'grobid',
'settlement': ['settlement',],
},]
self.assertEqual(actual, expected)
def test_parse_every_tags(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName type="laboratory">lab</orgName>
<orgName type="department">dep</orgName>
<orgName type="institution">institE</orgName>
<address>
<addrLine>addrLine</addrLine>
<country key="AQ">Irrelevant</country>
<postCode>postCode</postCode>
<region>region</region>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
'institution': 'institE',
'institutionSource': 'grobid',
'department': 'dep',
'laboratory': 'lab',
'alpha2': 'AQ',
'country': 'Antarctica',
'countrySource': 'grobid',
'settlement': ['settlement',],
'region': 'region',
'postCode': 'postCode',
},]
self.assertEqual(actual, expected)
def test_parse_multiple_institutions(self):
self.server.setResponse(
__name__,
'''<affiliation>
<orgName type="laboratory" key="lab1">lab1</orgName>
<orgName type="laboratory" key="lab2">lab2</orgName>
<orgName type="laboratory" key="lab3">lab3</orgName>
<orgName type="department" key="dep1">dep1</orgName>
<orgName type="department" key="dep2">dep2</orgName>
<orgName type="department" key="dep3">dep3</orgName>
<orgName type="institution" key="instit1">instit1</orgName>
<orgName type="institution" key="instit2">instit2</orgName>
<orgName type="institution" key="instit3">instit3</orgName>
<address>
<addrLine>addrLine1</addrLine>
<addrLine>addrLine2</addrLine>
<addrLine>addrLine3</addrLine>
<country key="AQ">Irrelevant</country>
<postCode>postCode</postCode>
<region>region</region>
<settlement>settlement</settlement>
</address>
</affiliation>'''
)
actual = list(parser.parseAll(__name__, self.url))
expected = [{
'institution': 'instit1',
'institutionSource': 'grobid',
'department': 'dep1',
'laboratory': 'lab1',
'alpha2': 'AQ',
'country': 'Antarctica',
'countrySource': 'grobid',
'settlement': ['settlement',],
'region': 'region',
'postCode': 'postCode',
},{
'institution': 'instit2',
'institutionSource': 'grobid',
'department': 'dep2',
'laboratory'
|
gnulinooks/sympy
|
sympy/utilities/runtests.py
|
Python
|
bsd-3-clause
| 21,885
| 0.002467
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly (or
identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
import os
import sys
import inspect
import traceback
import pdb
from glob import glob
from timeit import default_timer as clock
def isgeneratorfunction(object):
"""
Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
"""
CO_GENERATOR = 0x20
if (inspect.isfunction(object) or inspect.ismethod(object)) and \
object.func_code.co_flags & CO_GENERATOR:
return True
return False
def test(*paths, **kwargs):
"""
Runs the tests specified by paths, or all tests if paths=[].
Note: paths are specified relative to the sympy root directory in a unix
format (on all platforms including windows).
Examples:
Run all tests:
>> import sympy
>> sympy.test()
Run one file:
>> import sympy
>> sympy.test("sympy/core/tests/test_basic.py")
Run all tests in sympy/functions/ and some particular file:
>> import sympy
>> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions")
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", "")
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
r = PyTestReporter(verbose, tb, colors)
t = SymPyTests(r, kw, post_mortem)
if len(paths) > 0:
t.add_paths(paths)
else:
t.add_paths(["sympy"])
return t.test()
def doctest(*paths, **kwargs):
"""
Runs the doctests specified by paths, or all tests if paths=[].
Note: paths are specified relative to the sympy root directory in a unix
format (on all platforms including windows).
Examples:
Run all tests:
>> import sympy
>> sympy.doctest()
Run one file:
>> import sympy
>> sympy.doctest("sympy/core/tests/test_basic.py")
Run all tests in sympy/functions/ and some particular file:
>> import sympy
>> sympy.doctest("sympy/core/tests/test_basic.py", "sympy/functions")
"""
verbose = kwargs.get("verbose", False)
blacklist = kwargs.get("blacklist", [])
blacklist.extend([
"sympy/thirdparty/pyglet", # segfaults
"sympy/mpmath", # needs to be fixed upstream
"sympy/plotting", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/galgebra/GA.py", # needs numpy
"sympy/galgebra/latex_ex.py", # needs numpy
"sympy/conftest.py", # needs py.test
"sympy/utilities/benchmarking.py", # needs py.test
])
r = PyTestReporter(verbose)
t = SymPyDocTests(r, blacklist=blacklist)
if len(paths) > 0:
t.add_paths(paths)
else:
t.add_paths(["sympy"])
return t.test()
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
name = "test%d" % self._count
name = os.path.splitext(os.path.basename(filename))[0]
self._count += 1
gl = {'__file__':filename}
try:
execfile(filename, gl)
except (ImportError, SyntaxError):
self._reporter.import_error(filename, sys.exc_info())
return
pytestfile = ""
if gl.has_key("XFAIL"):
            pytestfile = inspect.getsourcefile(gl["XFAIL"])
disabled = gl.get("disabled", False)
if disabled:
funcs = []
else:
            # we need to filter only those functions that begin with 'test_'
            # and are defined in the testing file or in the file where
            # the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f])
or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
        while i != len(funcs):
if isgeneratorfunction(funcs[i]):
                # some tests can be generators that return the actual
                # test functions. We unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
self._reporter.entering_filename(filename, len(funcs))
for f in funcs:
self._reporter.entering_test(f)
try:
f()
except KeyboardInterrupt:
raise
except:
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip()
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if self._kw == "":
return True
return x.__name__.find(self._kw) != -1
def get_paths(self, dir="", level=15):
"""
Generates a set of paths for testfiles searching.
Example:
>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'symp
|
alex-ip/geophys2netcdf
|
utils/set_attribute.py
|
Python
|
apache-2.0
| 1,500
| 0.003333
|
'''
Created on Apr 7, 2016
@author: Alex Ip, Geoscience Australia
'''
import sys
import netCDF4
import subprocess
import re
from geophys2netcdf import ERS2NetCDF
def main():
assert len(
sys.argv) == 5, 'Usage: %s <root_dir> <file_template> <attribute_name> <attribute_value>' % sys.argv[0]
root_dir = sys.argv[1]
file_template = sys.argv[2]
attribute_name = sys.argv[3]
attribute_value = sys.argv[4]
nc_path_list = [filename for filename in subprocess.check_output(
['find', root_dir, '-name', file_template]).split('\n') if re.search('\.nc$', filename)]
for nc_path in nc_path_list:
print 'Setting attribute in %s' % nc_path
nc_dataset = netCDF4.Dataset(nc_path, 'r+')
try:
            # Set attribute
setattr(nc_dataset, attribute_name, attribute_value)
print '%s.%s set to %s' % (nc_path, attribute_name, attribute_value)
except Exception as e:
print 'Unable to set attribute %s to %s: %s' % (attribute_name, attribute_value, e.message)
nc_dataset.close()
print 'Updating metadata in %s' % nc_path
try:
g2n_object = ERS2NetCDF()
g2n_object.update_nc_metadata(nc_path, do_stats=True)
# Kind of redundant, but possibly useful for debugging
g2n_object.check_json_metadata()
except Exception as e:
print 'Metadata update failed: %s' % e.message
if __name__ == '__main__':
main()
|
rfhk/awo-custom
|
sale_line_quant_extended/wizard/__init__.py
|
Python
|
lgpl-3.0
| 60
| 0
|
# -*- coding: utf-8 -*-
from . import stock_return_picking
|
DJArmstrong/autovet
|
FPPcalc/priorutils.py
|
Python
|
gpl-3.0
| 16,928
| 0.021148
|
#significant input and copied functions from T. Morton's VESPA code (all mistakes are my own)
#coords -- RA and DEC of target in degrees. Needed for GAIA querying.
# Degrees, 0-360 and -90 to +90. List format [RA,DEC].
import numpy as np
import pandas as pd
from scipy.integrate import quad
from scipy import stats
import astropy.constants as const
import astropy.units as u
from astropy.coordinates import SkyCoord
import subprocess as sp
import os, re
import time
AU = const.au.cgs.value
RSUN = const.R_sun.cgs.value
REARTH = const.R_earth.cgs.value
MSUN = const.M_sun.cgs.value
DAY = 86400 #seconds
G = const.G.cgs.value
import logging
def semimajor(P,mtotal=1.):
"""
Returns semimajor axis in AU given P in days, total mass in solar masses.
"""
return ((P*DAY/2/np.pi)**2*G*mtotal*MSUN)**(1./3)/AU
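# Hedged sanity check (rounded values): the Kepler relation above recovers the
# Earth-Sun system, semimajor(365.25, mtotal=1.0) ~= 1.0 AU, while a 10-day
# period around one solar mass gives semimajor(10.0) ~= 0.09 AU.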
def eclipse_probability(R1, R2, P, M1, M2):
    return (R1 + R2) * RSUN / (semimajor(P, M1 + M2) * AU)
def centroid_PDF_source(pos,centroiddat):
cent_x, cent_y = centroiddat[0], centroiddat[1]
sig_x, sig_y = centroiddat[2], centroiddat[3]
return stats.multivariate_normal.pdf([pos[0],pos[1]],mean=[cent_x,cent_y],
cov=[[sig_x**(1/2.),0],[0,sig_y**(1/2.)]])
def bgeb_prior(centroid_val, star_density, skyarea, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.12):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def bgtp_prior(centroid_val, star_density, skyarea, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
def eb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.027):
'''
centroid pdf at source location
f_binary = 0.3 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
could improve to be average over all types?
f_close = 0.027 (moe + di stefano 2017) fraction of binaries with P between 3.2-32d
eclipse prob
works for defined source EBs too, just use appropriate centroid pdf value.
'''
return centroid_val * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def heb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_triple=0.1, f_close=1.0):
'''
centroid pdf at source location
f_triple = 0.1 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
    could improve to be average over all types?
f_close = 1.0 implies all triples have a close binary. May be over-generous
eclipse prob
'''
return centroid_val * f_triple * f_close * eclipse_probability(r1, r2, P, m1, m2)
def planet_prior(centroid_val, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2957):
'''
centroid pdf at source location
planet occurrence (fressin, any planet<29d)
eclipse prob
works for defined source planets too, just use appropriate centroid pdf value.
possibly needs a more general f_planet - as classifier will be using a range of planets.
should prior then be the prior of being in the whole training set, rather than the specific depth seen?
if so, need to change to 'fraction of ALL stars with planets' (i.e. including EBs etc).
Also look into default radii and masses. Precalculate mean eclipse probability for training set?
'''
return centroid_val * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
def fp_fressin(rp,dr=None):
if dr is None:
dr = rp*0.3
fp = quad(fressin_occurrence,rp-dr,rp+dr)[0]
return max(fp, 0.001) #to avoid zero
def fressin_occurrence(rp):
"""
Occurrence rates per bin from Fressin+ (2013)
"""
rp = np.atleast_1d(rp)
sq2 = np.sqrt(2)
bins = np.array([1/sq2,1,sq2,2,2*sq2,
4,4*sq2,8,8*sq2,
16,16*sq2])
rates = np.array([0,0.155,0.155,0.165,0.17,0.065,0.02,0.01,0.012,0.01,0.002,0])
return rates[np.digitize(rp,bins)]
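# Hedged worked example: rp = 1.5 Earth radii falls in the [sqrt(2), 2) bin, so
# fressin_occurrence(1.5) returns 0.165; fp_fressin(1.5) then integrates the rate
# over rp +/- 30% (1.05..1.95), spanning the 0.155 and 0.165 bins.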
def trilegal_density(ra,dec,kind='target',maglim=21.75,area=1.0,mapfile=None):
if kind=='interp' and mapfile is None:
print('HEALPIX map file must be passed')
return 0
if kind not in ['target','interp']:
print('kind not recognised. Setting kind=target')
kind = 'target'
if kind=='target':
basefilename = 'trilegal_'+str(ra)+'_'+str(dec)
h5filename = basefilename + '.h5'
if not os.path.exists(h5filename):
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
else:
print('Using cached trilegal file. Sky area may be different.')
if os.path.exists(h5filename):
stars = pd.read_hdf(h5filename,'df')
with pd.HDFStore(h5filename) as store:
trilegal_args = store.get_storer('df').attrs.trilegal_args
if trilegal_args['maglim'] < maglim:
print('Re-calling trilegal with extended magnitude range')
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
stars = pd.read_hdf(h5filename,'df')
stars = stars[stars['TESS_mag'] < maglim] #in case reading from file
#c = SkyCoord(trilegal_args['l'],trilegal_args['b'],
# unit='deg',frame='galactic')
#self.coords = c.icrs
area = trilegal_args['area']*(u.deg)**2
density = len(stars)/area
return density.value
else:
return 0
else:
import healpy as hp
#interpolate pre-calculated densities
coord = SkyCoord(ra,dec,unit='deg')
if np.abs(coord.galactic.b.value)<5:
print('Near galactic plane, Trilegal density may be inaccurate.')
#Density map will set mag limits
densitymap = hp.read_map(mapfile)
density = hp.get_interp_val(densitymap,ra,dec,lonlat=True)
return density
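# Hedged usage sketch (coordinates and map file name are made up):
#   density = trilegal_density(150.0, -60.0, kind='target', maglim=21.0)
#   density = trilegal_density(150.0, -60.0, kind='interp',
#                              mapfile='trilegal_density_healpix.fits')
# 'target' runs (or reuses) a line-of-sight TRILEGAL simulation for the field,
# while 'interp' interpolates a pre-computed HEALPix density map.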
#maglim of 21 used following sullivan 2015
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
filterset='TESS_2mass_kepler',area=1,maglim=21,binaries=False,
trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
"""Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
:param ra,dec:
Coordinates (ecliptic) for line-of-sight simulation.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
:param maglim: (optional)
Limiting magnitude in first mag (by default will be Kepler band)
If want to limit in different band, then you have to
got directly to the ``get_trilegal`` perl script.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
"""
|
ThinkmanWang/NotesServer
|
models/Alarm.py
|
Python
|
apache-2.0
| 151
| 0.02649
|
class Alarm(object):
id = ''
uid = 0
note_id = ''
date = 0
update_date = 0
    is_deleted = 0
#note = None()
|
kfwang/Glance-OVA-OVF
|
glance/async/taskflow_executor.py
|
Python
|
apache-2.0
| 5,199
| 0.000385
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver
from taskflow import engines
from taskflow.listeners import logging as llistener
from taskflow.types import futures
from taskflow.utils import eventlet_utils
import glance.async
from glance.common.scripts import utils as script_utils
from glance import i18n
_ = i18n._
_LE = i18n._LE
LOG = logging.getLogger(__name__)
_deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size',
group='task')
taskflow_executor_opts = [
cfg.StrOpt('engine_mode',
default='parallel',
choices=('serial', 'parallel'),
help=_("The mode in which the engine will run. "
"Can be 'serial' or 'paralle
|
l'.")),
cfg.IntOpt('max_workers',
default=10,
help=_("The number of parallel activities executed at the "
"same time by the engine. The value can be greater "
"than one when the engine mode is 'parallel'."),
deprecated_opts=[_deprecated_opt])
]
CONF = cfg.CONF
CONF.register_opts(taskflow_executor_opts, group='taskflow_executor')
class TaskExecutor(glance.async.TaskExecutor):
def __init__(self, context, task_repo, image_repo, image_factory):
self.context = context
self.task_repo = task_repo
self.image_repo = image_repo
self.image_factory = image_factory
self.engine_conf = {
'engine': CONF.taskflow_executor.engine_mode,
}
self.engine_kwargs = {}
if CONF.taskflow_executor.engine_mode == 'parallel':
self.engine_kwargs['max_workers'] = (
CONF.taskflow_executor.max_workers)
super(TaskExecutor, self).__init__(context, task_repo, image_repo,
image_factory)
@contextlib.contextmanager
def _executor(self):
if CONF.taskflow_executor.engine_mode != 'parallel':
yield None
else:
max_workers = CONF.taskflow_executor.max_workers
if eventlet_utils.EVENTLET_AVAILABLE:
yield futures.GreenThreadPoolExecutor(max_workers=max_workers)
else:
yield futures.ThreadPoolExecutor(max_workers=max_workers)
def _get_flow(self, task):
try:
task_input = script_utils.unpack_task_input(task)
uri = script_utils.validate_location_uri(
task_input.get('import_from'))
kwds = {
'uri': uri,
'task_id': task.task_id,
'task_type': task.type,
'context': self.context,
'task_repo': self.task_repo,
'image_repo': self.image_repo,
'image_factory': self.image_factory
}
return driver.DriverManager('glance.flows', task.type,
invoke_on_load=True,
invoke_kwds=kwds).driver
except RuntimeError:
raise NotImplementedError()
def _run(self, task_id, task_type):
LOG.debug('Taskflow executor picked up the execution of task ID '
'%(task_id)s of task type '
'%(task_type)s' % {'task_id': task_id,
'task_type': task_type})
task = script_utils.get_task(self.task_repo, task_id)
if task is None:
# NOTE: This happens if task is not found in the database. In
# such cases, there is no way to update the task status so,
# it's ignored here.
return
flow = self._get_flow(task)
try:
with self._executor() as executor:
engine = engines.load(flow, self.engine_conf,
executor=executor, **self.engine_kwargs)
with llistener.DynamicLoggingListener(engine, log=LOG):
engine.run()
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
{'task_id': task_id, 'exc': exc.message})
# TODO(sabari): Check for specific exceptions and update the
# task failure message.
task.fail(_('Task failed due to Internal Error'))
self.task_repo.save(task)
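# Hedged configuration sketch (glance-api.conf is the assumed config file): the
# options registered above are read from a [taskflow_executor] section, e.g.
#
#   [taskflow_executor]
#   engine_mode = parallel
#   max_workers = 10
#
# With engine_mode = serial, _executor() yields None and taskflow runs inline.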
|
mahandra/recipes_video_conv
|
rec_hls_server/check_rec_stream.py
|
Python
|
gpl-2.0
| 4,889
| 0.004909
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import time
__author__ = 'mah'
__email__ = 'andrew.makhotin@gmail.com'
import MySQLdb as mdb
import sys
import ConfigParser
import logging
import logging.handlers
import re
import os
from ffprobe import FFProbe
#### LOG ###
logger = logging.getLogger('Logging for check_sound')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = logging.handlers.RotatingFileHandler('/var/log/eservices/ffmpeg_sound.log', maxBytes=(1048576*5), backupCount=5)
handler.setFormatter(formatter)
consolehandler = logging.StreamHandler() # for stdout
consolehandler.setFormatter(formatter)
logger.addHandler(consolehandler)
logger.addHandler(handler)
### LOG ###
dbcon = {}
conf = ConfigParser.ConfigParser()
conf.read('/etc/eservices/ffmpeg_sound.cfg')
dbcon.update({'service_id': conf.get('main', 'service_id')})
dbcon.update({'dbhost': conf.get("mysql", "host")})
dbcon.update({'dbuser': conf.get("mysql", "user")})
dbcon.update({'dbpasswd': conf.get("mysql", "passwd")})
dbcon.update({'dbbase': conf.get("mysql", "base")})
def channelsinsrv(srvid):
'''
What channels in this service id
:param srvid:
:return: cch id
'''
chids = []
try:
con = mdb.connect(dbcon['dbhost'], dbcon['dbuser'], dbcon['dbpasswd'], dbcon['dbbase'], charset='utf8')
cur = con.cursor()
cur.execute('''SELECT service_id, btv_channel_id, file_name FROM ma_internet_v2.hls_collector_report_view
where service_id = %s''' % (srvid,))
rows = cur.fetchall()
ch = []
for row in rows:
ch.append(row)
except con.Error, e:
logger.error("Error %d: %s", e.args[0], e.args[1])
#print "Error %d: %s" % (e.args[0], e.args[1])
#sys.exit(1)
finally:
if con:
con.close()
return ch
def checksound(pls):
'''
Check sound in ffprobe class and return status sound restart ch pid if needed
:param src: pls.m3u8
:return: status sound in ch
'''
status = {}
meta = False
try:
meta = FFProbe(pls)
except IOError, e:
logger.error('====Error:%s', e)
return 'nofile'
if meta:
for stream in meta.streams:
if stream.isVideo():
status['video'] = stream.codec()
elif stream.isAudio():
status['audio'] = stream.codec()
else:
return False
logger.debug('status: %s, %s', status, pls)
return status #status
def restartchid(ch):
'''
    Restart channel id
:param ch: (89L, 326L, u'/var/lastxdays/326/5a9f3bad8adba3a5')
:return:
'''
logger.warning('to do restart ch:%s', ch[1])
try:
con = mdb.connect(dbcon['dbhost'], dbcon['dbuser'], dbcon['dbpasswd'], dbcon['dbbase'], charset='utf8')
cur = con.cursor()
cur.execute('''UPDATE ma_internet_v2.hls_collector_report_view set restart = 1 where service_id = %s
AND btv_channel_id = %s;''' % (ch[0], ch[1],))
con.commit()
logger.warning('Restart Done')
except con.Error, e:
logger.error("Error %d: %s", e.args[0], e.args[1])
#sys.exit(1)
finally:
if con:
con.close()
def create_m3u8(pls, ch):
with open(pls+'.m3u8', 'r') as f:
data = f.readlines()
last = data[:6] + data[-4:]
file = os.path.split(pls+'.m3u8')
f = '/run/sound/'+str(ch)+'.m3u8'
with open(f, 'w') as tempfile:
for i in last:
m = re.search(r'.ts', i)
if m:
tempfile.write(file[0]+'/'+i)
else:
tempfile.write(i)
tempfile.close()
return '/run/sound/'+str(ch)+'.m3u8'
#########################
def main():
if not os.path.isdir('/run/sound'):
os.mkdir('/run/sound')
for id in dbcon['service_id'].split(','):
chids = channelsinsrv(id)
logger.info('service: %s', id)
'''
chid is:[0] [1] [2]
(service_id, btv_channel_id, file_name)
'''
for ch in chids:
#print ch[1]
pls = create_m3u8(ch[2], ch[1])
#print 'pls:',pls
if ch[1] == 159:
print '!!!!! 159 !!!!!!'
            status = checksound(pls)
            if status and 'audio' not in status:
                logger.warning('not audio in %s, %s', status, ch[1])
                #TODO if not video do not restart ch
                if status != 'nofile':
                    restartchid(ch)
if __name__ == '__main__':
    while 1:
try:
main()
except KeyboardInterrupt:
sys.exit(0)
        #logger.info('waiting...')
time.sleep(30)
|
tyb0807/angr
|
angr/analyses/decompiler/clinic.py
|
Python
|
bsd-2-clause
| 5,861
| 0.002901
|
import logging
import networkx
from .. import Analysis, register_analysis
from ...codenode import BlockNode
from ..calling_convention import CallingConventionAnalysis
import ailment
import ailment.analyses
l = logging.getLogger('angr.analyses.clinic')
class Clinic(Analysis):
"""
A Clinic deals with AILments.
"""
def __init__(self, func):
self.function = func
self.graph = networkx.DiGraph()
self._ail_manager = None
self._blocks = { }
# sanity checks
if not self.kb.functions:
l.warning('No function is available in kb.functions. It will lead to a suboptimal conversion result.')
self._analyze()
#
# Public methods
#
def block(self, addr, size):
"""
Get the converted block at the given specific address with the given size.
:param int addr:
:param int size:
:return:
"""
try:
return self._blocks[(addr, size)]
except KeyError:
return None
def dbg_repr(self):
"""
:return:
"""
s = ""
for block in sorted(self.graph.nodes(), key=lambda x: x.addr):
s += str(block) + "\n\n"
return s
#
# Private methods
#
def _analyze(self):
CallingConventionAnalysis.recover_calling_conventions(self.project)
# initialize the AIL conversion manager
self._ail_manager = ailment.Manager(arch=self.project.arch)
self._convert_all()
self._recover_and_link_variables()
self._simplify_all()
self._update_graph()
ri = self.project.analyses.RegionIdentifier(self.function, graph=self.graph) # pylint:disable=unused-variable
# print ri.region.dbg_print()
def _convert_all(self):
"""
:return:
"""
for block_node in self.function.transition_graph.nodes():
ail_block = self._convert(block_node)
if type(ail_block) is ailment.Block:
self._blocks[(block_node.addr, block_node.size)] = ail_block
def _convert(self, block_node):
"""
:param block_node:
:return:
"""
        if type(block_node) is not BlockNode:
return block_node
block = self.project.factory.block(block_node.addr, block_node.size)
ail_block = ailment.IRSBConverter.convert(block.vex, self._ail_manager)
return ail_block
def _simplify_all(self):
"""
:return:
"""
for key in self._blocks.iterkeys():
ail_block = self._blocks[key]
simplified = self._simplify(ail_block)
self._blocks[key] = simplified
def _simplify(self, ail_block):
simp = self.project.analyses.AILSimplifier(ail_block)
csm = self.project.analyses.AILCallSiteMaker(simp.result_block)
if csm.result_block:
ail_block = csm.result_block
simp = self.project.analyses.AILSimplifier(ail_block)
return simp.result_block
def _recover_and_link_variables(self):
# variable recovery
vr = self.project.analyses.VariableRecoveryFast(self.function, clinic=self, kb=self.kb) # pylint:disable=unused-variable
        # TODO: The current mapping implementation is kinda hackish...
for block in self._blocks.itervalues():
self._link_variables_on_block(block)
def _link_variables_on_block(self, block):
"""
:param block:
:return:
"""
var_man = self.kb.variables[self.function.addr]
for stmt_idx, stmt in enumerate(block.statements):
# I wish I could do functional programming in this method...
stmt_type = type(stmt)
if stmt_type is ailment.Stmt.Store:
# find a memory variable
mem_vars = var_man.find_variables_by_stmt(block.addr, stmt_idx, 'memory')
if len(mem_vars) == 1:
stmt.variable = mem_vars[0][0]
self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.data)
elif stmt_type is ailment.Stmt.Assignment:
self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.dst)
self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.src)
def _link_variables_on_expr(self, variable_manager, block, stmt_idx, stmt, expr):
# TODO: Make it recursive
if type(expr) is ailment.Expr.Register:
# find a register variable
reg_vars = variable_manager.find_variables_by_stmt(block.addr, stmt_idx, 'register')
# TODO: make sure it is the correct register we are looking for
if len(reg_vars) == 1:
reg_var = reg_vars[0][0]
expr.variable = reg_var
elif type(expr) is ailment.Expr.Load:
# self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.addr)
pass
elif type(expr) is ailment.Expr.BinaryOp:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[0])
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[1])
def _update_graph(self):
node_to_block_mapping = {}
for node in self.function.transition_graph.nodes():
ail_block = self._blocks.get((node.addr, node.size), node)
node_to_block_mapping[node] = ail_block
self.graph.add_node(ail_block)
for src_node, dst_node, data in self.function.transition_graph.edges(data=True):
src = node_to_block_mapping[src_node]
dst = node_to_block_mapping[dst_node]
self.graph.add_edge(src, dst, **data)
register_analysis(Clinic, 'Clinic')
|
AstroMatt/esa-time-perception
|
backend/api_v2/migrations/0007_auto_20170101_0101.py
|
Python
|
mit
| 3,986
| 0.003512
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 01:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_v2', '0006_remove_event_is_valid'),
]
operations = [
migrations.AlterField(
model_name='trial',
name='percentage_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - all', max_digits=3, null=True, verbose_name='P'),
),
migrations.AlterField(
model_name='trial',
name='percentage_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - blue', max_digits=3, null=True, verbose_name='PB'),
),
migrations.AlterField(
model_name='trial',
name='percentage_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - red', max_digits=3, null=True, verbose_name='PR'),
),
migrations.AlterField(
model_name='trial',
name='percentage_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - white', max_digits=3, null=True, verbose_name='PW'),
),
migrations.AlterField(
model_name='trial',
name='regularity',
            field=models.PositiveSmallIntegerField(help_text='Click every X seconds', verbose_name='Regularity'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - all', max_digits=3, null=True, verbose_name='TM'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - blue', max_digits=3, null=True, verbose_name='TMB'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - red', max_digits=3, null=True, verbose_name='TMR'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - white', max_digits=3, null=True, verbose_name='TMW'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - all', max_digits=3, null=True, verbose_name='TSD'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - blue', max_digits=3, null=True, verbose_name='TSDB'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - red', max_digits=3, null=True, verbose_name='TSDR'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - white', max_digits=3, null=True, verbose_name='TSDW'),
),
migrations.AlterField(
model_name='trial',
name='timeout',
field=models.DecimalField(decimal_places=2, help_text='Seconds per color', max_digits=3, verbose_name='Timeout'),
),
]
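# Note on the field parameters above: max_digits=3 with decimal_places=2 bounds
# every coefficient to the range -9.99..9.99. A quick standalone check (a
# sketch, assuming a configured Django settings module):
#
#     from decimal import Decimal
#     from django.core.validators import DecimalValidator
#     DecimalValidator(3, 2)(Decimal('9.99'))   # passes silently
#     DecimalValidator(3, 2)(Decimal('10.00'))  # raises ValidationError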
|
donbright/piliko
|
examples/example05.py
|
Python
|
bsd-3-clause
| 1,688
| 0.034953
|
from piliko import *
print
print 'example 5'
v1,v2 = vector(3,0),vector(0,4)
print 'vectors v1, v2:', v1, v2
print ' v1 + v2, v1 - v2: ', v1 + v2, v1 - v2
print ' v1 * 5/4:', v1 * Fraction(5,4)
print ' v1 perpendicular v1? ', v1.perpendicular( v1 )
print ' v1 perpendicular v2? ', v1.perpendicular( v2 )
print ' v2 perpendicular v1? ', perpendicular( v2, v1 )
print ' v1 perpendicular v1+v2? ', perpendicular( v1, v1+v2 )
print ' v1 parallel v1? ', v1.parallel( v1 )
print ' v1 parallel v2? ', v1.parallel( v2 )
print ' v1 parallel 5*v1? ', parallel( v1, 5*v1 )
print ' v1 parallel v1+v2? ', parallel( v1, v1+v2 )
v3 = v2 - v1
print 'vector v3 = v2-v1: ', v3
lhs = quadrance( v1 ) + quadrance( v2 )
rhs = quadrance( v3 )
print 'v1 dot v2, v2 dot v3, v1 dot 5*v1:', v1.dot(v2), v2.dot(v3), v1.dot(5*v1)
print 'v1 dot (v2+v3), (v1 dot v2)+(v1 dot v3):', v1.dot(v2+v3), v1.dot(v2) + v1.dot(v3)
print ' pythagoras: Q(v1)+Q(v2)=Q(v3)?: lhs:', lhs, 'rhs:',rhs
v4 = vector( -5, 0 )
v5 = 3 * v4
v6 = v5 - v4
print 'vector v4, v5, and v6=v5-v4:', v4, v5, v6
lhs = sqr( quadrance( v4 ) + quadrance( v5 ) + quadrance( v6 ) )
rhs = 2*(sqr(quadrance(v4))+sqr(quadrance(v5))+sqr(quadrance(v6)))
print ' triplequad for v4,v5,v6 : lhs:', lhs, 'rhs:',rhs
print 'spread( v1, v1 ):', spread( v1, v1 )
print 'spread( v2, v1 ):', spread( v2, v1 )
print 'spread( v2, 5*v1 ):', spread( v2, 5*v1 )
print 'spread( v1, v2 ):', spread( v1, v2 )
print 'spread( v1, v3 ):', spread( v1, v3 )
print 'spread( v1, 5*v3 ):', spread( v1, 5*v3 )
print 'spread( v2, v3 ):', spread( v2, v3 )
print 'spread( 100*v2, -20*v2 ):', spread( 100*v2, -20*v2 )
print 'quadrance v1 == v1 dot v1?', quadrance(v1), '=?=', v1.dot(v1)
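# A small follow-up check, using only the functions demonstrated above: in
# rational trigonometry the spread of perpendicular vectors is exactly 1 and
# the spread of parallel vectors is exactly 0, both as exact fractions.
print 'spread( v1, v2 ) == 1 (perpendicular)?', spread( v1, v2 ) == 1
print 'spread( v1, 5*v1 ) == 0 (parallel)?', spread( v1, 5*v1 ) == 0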
|
ofek/hatch
|
backend/src/hatchling/builders/plugin/hooks.py
|
Python
|
mit
| 229
| 0
|
from ...plugin import hookimpl
from ..custom import CustomBuilder
from ..sdist import SdistBuilder
from ..wheel import WheelBuilder
@hookimpl
def hatch_register_builder():
    return [CustomBuilder, SdistBuilder, WheelBuilder]
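# The classes returned above become selectable build targets keyed by their
# PLUGIN_NAME (e.g. "wheel" for WheelBuilder). A pyproject.toml sketch that
# exercises the registered wheel target (the package path is illustrative):
#
#     [build-system]
#     requires = ["hatchling"]
#     build-backend = "hatchling.build"
#
#     [tool.hatch.build.targets.wheel]
#     packages = ["src/mypackage"]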
|
miyucy/oppia
|
core/domain/dependency_registry_test.py
|
Python
|
apache-2.0
| 4,450
| 0
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for JavaScript library dependencies."""
__author__ = 'Sean Lip'
from core.domain import dependency_registry
from core.domain import exp_services
from core.domain import widget_registry
from core.tests import test_utils
import feconf
class DependencyRegistryTests(test_utils.GenericTestBase):
"""Tests for the dependency registry."""
def test_get_dependency_html(self):
self.assertIn(
'jsrepl',
dependency_registry.Registry.get_dependency_html('jsrepl'))
with self.assertRaises(IOError):
dependency_registry.Registry.get_dependency_html('a')
class DependencyControllerTests(test_utils.GenericTestBase):
"""Tests for dependency loading on user-facing pages."""
def test_no_dependencies_in_non_exploration_pages(self):
response = self.testapp.get(feconf.LEARN_GALLERY_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
response = self.testapp.get('/about')
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
self.register_editor('editor@example.com')
self.login('editor@example.com')
response = self.testapp.get(feconf.CONTRIBUTE_GALLERY_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
self.logout()
def test_dependencies_loaded_in_exploration_editor(self):
exp_services.load_demo('0')
# Register and login as an editor.
self.register_editor('editor@example.com')
self.login('editor@example.com')
# Verify that the exploration does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id('0')
interactive_widget_ids = exploration.get_interactive_widget_ids()
all_dependency_ids = (
widget_registry.Registry.get_deduplicated_dependency_ids(
interactive_widget_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# However, jsrepl is loaded in the exploration editor anyway, since
# all dependencies are loaded in the exploration editor.
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
self.logout()
def test_dependency_does_not_load_in_exploration_not_containing_it(self):
EXP_ID = '0'
exp_services.load_demo(EXP_ID)
# Verify that exploration 0 does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
        interactive_widget_ids = exploration.get_interactive_widget_ids()
all_dependency_ids = (
widget_registry.Registry.get_deduplicated_dependency_ids(
interactive_widget_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is not loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
def test_dependency_loads_in_exploration_containing_it(self):
EXP_ID = '1'
exp_services.load_demo(EXP_ID)
# Verify that exploration 1 has a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
interactive_widget_ids = exploration.get_interactive_widget_ids()
all_dependency_ids = (
widget_registry.Registry.get_deduplicated_dependency_ids(
interactive_widget_ids))
self.assertIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
|
EducationalTestingService/rsmtool
|
rsmtool/test_utils.py
|
Python
|
apache-2.0
| 66,934
| 0.001404
|
import os
import re
import sys
import warnings
from ast import literal_eval as eval
from filecmp import clear_cache, dircmp
from glob import glob
from importlib.machinery import SourceFileLoader
from inspect import getmembers, getsourcelines, isfunction
from os import remove
from os.path import basename, exists, join
from pathlib import Path
from shutil import copyfile, copytree, rmtree
import numpy as np
from bs4 import BeautifulSoup
from nose.tools import assert_equal, ok_
from pandas.testing import assert_frame_equal
from .modeler import Modeler
from .reader import DataReader
from .rsmcompare import run_comparison
from .rsmeval import run_evaluation
from .rsmpredict import compute_and_save_predictions
from .rsmsummarize import run_summary
from .rsmtool import run_experiment
from .rsmxval import run_cross_validation
html_error_regexp = re.compile(r'Traceback \(most recent call last\)')
html_warning_regexp = re.compile(r'<div class=".*?output_stderr.*?>([^<]+)')
section_regexp = re.compile(r'<h2>(.*?)</h2>')
# get the directory containing the tests
rsmtool_test_dir = Path(__file__).absolute().parent.parent.joinpath('tests')
tools_with_input_data = ['rsmsummarize', 'rsmcompare']
tools_with_output = ['rsmtool', 'rsmeval',
'rsmsummarize', 'rsmpredict']
# check if tests are being run in strict mode
# if so, any warnings found in HTML
# reports should not be ignored
STRICT_MODE = os.environ.get('STRICT', None)
IGNORE_WARNINGS = not STRICT_MODE
def check_run_experiment(source,
experiment_id,
subgroups=None,
consistency=False,
skll=False,
file_format='csv',
given_test_dir=None,
config_obj_or_dict=None,
suppress_warnings_for=[]):
"""
Run a parameterized rsmtool experiment test.
Parameters
----------
source : str
The name of the source directory containing the experiment
configuration.
experiment_id : str
The experiment ID of the experiment.
subgroups : list of str, optional
List of subgroup names used in the experiment. If specified,
outputs pertaining to subgroups are also checked as part of the
test.
Defaults to ``None``.
consistency : bool, optional
Whether to check consistency files as part of the experiment test.
        Generally, this should be true if the second human score column is
specified.
Defaults to ``False``.
skll : bool, optional
Whether the model being used in the experiment is a SKLL model
        in which case the coefficients, predictions, etc. will not be
checked since they can vary across machines, due to parameter tuning.
Defaults to ``False``.
file_format : str, optional
Which file format is being used for the output files of the experiment.
Defaults to "csv".
given_test_dir : str, optional
Path where the test experiments are located. Unless specified, the
rsmtool test directory is used. This can be useful when using these
experiments to run tests for RSMExtra.
Defaults to ``None``.
config_obj_or_dict: configuration_parser.Configuration or dict, optional
Configuration object or dictionary to use as an input, if any.
If ``None``, the function will construct a path to the config file
using ``source`` and ``experiment_id``.
suppress_warnings_for : list, optional
Categories for which warnings should be suppressed when running the
experiments.
Defaults to ``[]``.
"""
# use the test directory from this file unless it's been overridden
test_dir = given_test_dir if given_test_dir else rsmtool_test_dir
if config_obj_or_dict is None:
config_input = join(test_dir,
'data',
'experiments',
source,
'{}.json'.format(experiment_id))
else:
config_input = config_obj_or_dict
model_type = 'skll' if skll else 'rsmtool'
do_run_experiment(source,
experiment_id,
config_input,
suppress_warnings_for=suppress_warnings_for)
output_dir = join('test_outputs', source, 'output')
expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))
output_files = glob(join(output_dir, '*.{}'.format(file_format)))
for output_file in output_files:
output_filename = basename(output_file)
expected_output_file = join(expected_output_dir, output_filename)
if exists(expected_output_file):
check_file_output(output_file, expected_output_file, file_format=file_format)
check_generated_output(output_files, experiment_id, model_type, file_format=file_format)
if not skll:
check_scaled_coefficients(output_dir, experiment_id, file_format=file_format)
if subgroups:
check_subgroup_outputs(output_dir, experiment_id, subgroups, file_format=file_format)
if consistency:
check_consistency_files_exist(output_files, experiment_id, file_format=file_format)
# check report for any errors but ignore warnings
# which we check below separately
check_report(html_report, raise_warnings=False)
# make sure that there are no warnings in the report
# but ignore warnings if appropriate
if not IGNORE_WARNINGS:
warning_msgs = collect_warning_messages_from_report(html_report)
assert_equal(len(warning_msgs), 0)
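# A hypothetical invocation sketch (the source directory, experiment id, and
# subgroup name below are illustrative, not taken from the real test data):
#
#     check_run_experiment('lr-with-subgroups', 'lr_subgroups',
#                          subgroups=['QUESTION'], consistency=True,
#                          file_format='csv')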
def check_run_evaluation(source,
experiment_id,
subgroups=None,
consistency=False,
file_format='csv',
given_test_dir=None,
config_obj_or_dict=None,
suppress_warnings_for=[]):
"""
Run a parameterized rsmeval experiment test.
Parameters
----------
source : str
The name of the source directory containing the experiment
configuration.
experiment_id : str
The experiment ID of the experiment.
subgroups : list of str, optional
List of subgroup names used in the experiment. If specified,
outputs pertaining to subgroups are also checked as part of the
test.
Defaults to ``None``.
consistency : bool, optional
Whether to check consistency files as part of the experiment test.
Generally, this should be true if the second human score column is
specified.
Defaults to ``False``.
file_format : str, optional
Which file format is being used for the output files of the experiment.
Defaults to "csv".
given_test_dir : str, optional
Path where the test experiments are located. Unless specified, the
rsmtool test directory is used. This can be useful when using these
experiments to run tests for RSMExtra.
Defaults to ``None``.
config_obj_or_dict: configuration_parser.Configuration or dict, optional
Configuration object or dictionary to use as an input, if any.
If ``None``, the function will construct a path to the config file
using ``source`` and ``experiment_id``.
Defaults to ``None``.
suppress_warnings_for : list, optional
Categories for which warnings should be suppressed when running the
experiments.
Defaults to ``[]``.
"""
# use the test directory from this file unless it's been overridden
test_dir = given_test_dir if given_test_dir else rsmtool_test_dir
if config_obj_or_dict is None:
config_input = join(test_dir,
'data',
'experiments',
source,
'{}.json'.format(experiment_id))
else:
        config_input = config_obj_or_dict
|
dan1/horizon-proto
|
openstack_dashboard/api/rest/keystone.py
|
Python
|
apache-2.0
| 18,340
| 0
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the keystone service.
"""
import django.http
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api.rest import urls
@urls.register
class Version(generic.View):
"""API for active keystone version.
"""
url_regex = r'keystone/version/$'
@rest_utils.ajax()
def get(self, request):
"""Get active keystone version.
"""
return {'version': api.keystone.get_version()}
@urls.register
class Users(generic.View):
"""API for keystone users.
"""
url_regex = r'keystone/users/$'
client_keywords = {'project_id', 'domain_id', 'group_id'}
@rest_utils.ajax()
def get(self, request):
"""Get a list of users.
        By default, a listing of all users for the current domain is
        returned. You may specify GET parameters for project_id, domain_id and
group_id to change that listing's context.
The listing result is an object with property "items".
"""
domain_context = request.session.get('domain_context')
filters = rest_utils.parse_filters_kwargs(request,
self.client_keywords)[0]
if len(filters) == 0:
            filters = None
result = api.keystone.user_list(
request,
project=request.GET.get('project_id'),
domain=request.GET.get('domain_id', domain_context),
group=request.GET.get('group_id'),
filters=filters
)
return {'items': [u.to_dict() for u in result]}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a user.
Create a user using the parameters supplied in the POST
application/json object. The base parameters are name (string), email
(string, optional), password (string, optional), project_id (string,
optional), enabled (boolean, defaults to true). The user will be
created in the default domain.
This action returns the new user object on success.
"""
# not sure why email is forced to None, but other code does it
domain = api.keystone.get_default_domain(request)
new_user = api.keystone.user_create(
request,
name=request.DATA['name'],
email=request.DATA.get('email') or None,
password=request.DATA.get('password'),
project=request.DATA.get('project_id'),
enabled=True,
domain=domain.id
)
return rest_utils.CreatedResponse(
'/api/keystone/users/%s' % new_user.id,
new_user.to_dict()
)
@rest_utils.ajax(data_required=True)
def delete(self, request):
"""Delete multiple users by id.
The DELETE data should be an application/json array of user ids to
delete.
This method returns HTTP 204 (no content) on success.
"""
for user_id in request.DATA:
if user_id != request.user.id:
api.keystone.user_delete(request, user_id)
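# Example interactions with the Users collection above (a sketch; host, ids and
# payloads are hypothetical):
#
#     GET    /api/keystone/users/?project_id=abc123
#            -> {"items": [{"id": "...", "name": "..."}, ...]}
#     POST   /api/keystone/users/   {"name": "alice", "password": "s3cret"}
#            -> 201 Created, Location: /api/keystone/users/<new id>
#     DELETE /api/keystone/users/   ["id1", "id2"]
#            -> 204 No Content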
@urls.register
class User(generic.View):
"""API for a single keystone user.
"""
url_regex = r'keystone/users/(?P<id>[0-9a-f]+|current)$'
@rest_utils.ajax()
def get(self, request, id):
"""Get a specific user by id.
If the id supplied is 'current' then the current logged-in user
will be returned, otherwise the user specified by the id.
"""
if id == 'current':
id = request.user.id
return api.keystone.user_get(request, id).to_dict()
@rest_utils.ajax()
def delete(self, request, id):
"""Delete a single user by id.
This method returns HTTP 204 (no content) on success.
"""
if id == 'current':
            return django.http.HttpResponseNotFound('current')
api.keystone.user_delete(request, id)
@rest_utils.ajax(data_required=True)
def patch(self, request, id):
"""Update a single user.
The PATCH data should be an application/json object with attributes to
set to new values: password (string), project (string),
enabled (boolean).
A PATCH may contain any one of those attributes, but
if it contains more than one it must contain the project, even
if it is not being altered.
This method returns HTTP 204 (no content) on success.
"""
keys = tuple(request.DATA)
user = api.keystone.user_get(request, id)
if 'password' in keys:
password = request.DATA['password']
api.keystone.user_update_password(request, user, password)
elif 'enabled' in keys:
enabled = request.DATA['enabled']
api.keystone.user_update_enabled(request, user, enabled)
else:
# note that project is actually project_id
# but we can not rename due to legacy compatibility
# refer to keystone.api user_update method
api.keystone.user_update(request, user, **request.DATA)
@urls.register
class Roles(generic.View):
"""API over all roles.
"""
url_regex = r'keystone/roles/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of roles.
        By default a listing of all roles is returned.
If the GET parameters project_id and user_id are specified then that
user's roles for that project are returned. If user_id is 'current'
then the current user's roles for that project are returned.
The listing result is an object with property "items".
"""
project_id = request.GET.get('project_id')
user_id = request.GET.get('user_id')
if project_id and user_id:
if user_id == 'current':
user_id = request.user.id
roles = api.keystone.roles_for_user(request, user_id,
project_id) or []
items = [r.to_dict() for r in roles]
else:
items = [r.to_dict() for r in api.keystone.role_list(request)]
return {'items': items}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a role.
Create a role using the "name" (string) parameter supplied in the POST
application/json object.
This method returns the new role object on success.
"""
new_role = api.keystone.role_create(request, request.DATA['name'])
return rest_utils.CreatedResponse(
'/api/keystone/roles/%s' % new_role.id,
new_role.to_dict()
)
@rest_utils.ajax(data_required=True)
def delete(self, request):
"""Delete multiple roles by id.
The DELETE data should be an application/json array of role ids to
delete.
This method returns HTTP 204 (no content) on success.
"""
for role_id in request.DATA:
api.keystone.role_delete(request, role_id)
@urls.register
class Role(generic.View):
"""API for a single role.
"""
url_regex = r'keystone/roles/(?P<id>[0-9a-f]+|default)$'
@rest_utils.ajax()
def get(self, request, id):
"""Get a specific role by id.
If the id supplied is 'default' then the default role will be
returned, otherwise the role specified by the id.
"""
if id == 'default':
return api.keystone.get_default_role(request).to_dict()
        return api.keystone.role_get(request, id).to_dict()
|
blckshrk/Weboob
|
modules/grooveshark/test.py
|
Python
|
agpl-3.0
| 1,879
| 0.001597
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.video import BaseVideo
class GroovesharkTest(BackendTest):
BACKEND = 'grooveshark'
def test_grooveshark_video_search(self):
result = list(self.backend.search_videos("Loic Lantoine"))
self.assertTrue(len(result) > 0)
    def test_grooveshark_user_playlist(self):
l1 = list(self.backend.iter_resources([BaseVideo], [u'playlists']))
assert len(l1)
c = l1[0]
l2 = list(self.backend.iter_resources([BaseVideo], c.split_path))
assert len(l2)
v = l2[0]
self.backend.fillobj(v, ('url',))
self.assertTrue(v.url is not None, 'URL for video "%s" not found: %s' % (v.id, v.url))
def test_grooveshark_album_search(self):
l1 = list(self.backend.iter_resources([BaseVideo], [u'albums', u'live']))
assert len(l1)
c = l1[0]
l2 = list(self.backend.iter_resources([BaseVideo], c.split_path))
assert len(l2)
v = l2[0]
self.backend.fillobj(v, ('url',))
self.assertTrue(v.url is not None, 'URL for video "%s" not found: %s' % (v.id, v.url))
|
pynamodb/PynamoDB
|
pynamodb/exceptions.py
|
Python
|
mit
| 4,009
| 0.001746
|
"""
PynamoDB exceptions
"""
from typing import Any, Optional
import botocore.exceptions
class PynamoDBException(Exception):
"""
A common exception class
"""
def __init__(self, msg: Optional[str] = None, cause: Optional[Exception] = None) -> None:
self.msg = msg
self.cause = cause
super(PynamoDBException, self).__init__(self.msg)
@property
def cause_response_code(self) -> Optional[str]:
return getattr(self.cause, 'response', {}).get('Error', {}).get('Code')
@property
def cause_response_message(self) -> Optional[str]:
return getattr(self.cause, 'response', {}).get('Error', {}).get('Message')
class PynamoDBConnectionError(PynamoDBException):
"""
A base class for connection errors
"""
msg = "Connection Error"
class DeleteError(PynamoDBConnectionError):
"""
Raised when an error occurs deleting an item
"""
msg = "Error deleting item"
class QueryError(PynamoDBConnectionError):
"""
Raised when queries fail
"""
msg = "Error performing query"
class ScanError(PynamoDBConnectionError):
"""
Raised when a scan operation fails
"""
msg = "Error performing scan"
class PutError(PynamoDBConnectionError):
"""
Raised when an item fails to be created
"""
msg = "Error putting item"
class UpdateError(PynamoDBConnectionError):
"""
Raised when an item fails to be updated
"""
msg = "Error updating item"
class GetError(PynamoDBConnectionError):
"""
Raised when an item fails to be retrieved
"""
msg = "Error getting item"
class TableError(PynamoDBConnectionError):
"""
An error involving a dynamodb table operation
"""
msg = "Error performing a table operation"
class DoesNotExist(PynamoDBException):
"""
Raised when an item queried does not exist
"""
msg = "Item does not exist"
class TableDoesNotExist(PynamoDBException):
"""
Raised when an operation is attempted on a table that doesn't exist
"""
    def __init__(self, table_name: str) -> None:
msg = "Table does not exist: `{}`".format(table_name)
super(TableDoesNotExist, self).__init__(msg)
class TransactWriteError(PynamoDBException):
"""
Raised when a TransactWrite operation fails
"""
pass
class TransactGetError(PynamoDBException):
"""
Raised when a TransactGet operation fails
"""
pass
class InvalidStateError(PynamoDBException):
"""
Raises when the internal state of an operation context is invalid
"""
msg = "Operation in invalid state"
class AttributeDeserializationError(TypeError):
"""
Raised when attribute type is invalid
"""
def __init__(self, attr_name: str, attr_type: str):
msg = "Cannot deserialize '{}' attribute from type: {}".format(attr_name, attr_type)
super(AttributeDeserializationError, self).__init__(msg)
class AttributeNullError(ValueError):
def __init__(self, attr_name: str) -> None:
self.attr_path = attr_name
def __str__(self):
return f"Attribute '{self.attr_path}' cannot be None"
def prepend_path(self, attr_name: str) -> None:
self.attr_path = attr_name + '.' + self.attr_path
class VerboseClientError(botocore.exceptions.ClientError):
def __init__(self, error_response: Any, operation_name: str, verbose_properties: Optional[Any] = None):
""" Modify the message template to include the desired verbose properties """
if not verbose_properties:
verbose_properties = {}
self.MSG_TEMPLATE = (
'An error occurred ({{error_code}}) on request ({request_id}) '
'on table ({table_name}) when calling the {{operation_name}} '
'operation: {{error_message}}'
).format(request_id=verbose_properties.get('request_id'), table_name=verbose_properties.get('table_name'))
super(VerboseClientError, self).__init__(error_response, operation_name)
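# A minimal self-contained sketch showing how cause_response_code and
# cause_response_message surface the underlying botocore error details:
#
#     error_response = {'Error': {'Code': 'ConditionalCheckFailedException',
#                                 'Message': 'The conditional request failed'}}
#     cause = botocore.exceptions.ClientError(error_response, 'PutItem')
#     err = PutError(cause=cause)
#     assert err.cause_response_code == 'ConditionalCheckFailedException'
#     assert err.cause_response_message == 'The conditional request failed'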
|
insta-code1/ecommerce
|
src/products/views.py
|
Python
|
mit
| 3,661
| 0.029227
|
from django.contrib import messages
from django.db.models import Q
from django.http import Http404
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
# Create your views here.
from .forms import VariationInventoryFormSet
from .mixins import StaffRequiredMixin
from .models import Product, Variation, Category
class CategoryListView(ListView):
model = Category
queryset = Category.objects.all()
template_name = "products/product_list.html"
class CategoryDetailView(DetailView):
model = Category
def get_context_data(self, *args, **kwargs):
context = super(CategoryDetailView, self).get_context_data(*args, **kwargs)
obj = self.get_object()
product_set = obj.product_set.all()
default_products = obj.default_category.all()
products = ( product_set | default_products ).distinct()
context["products"] = products
return context
class VariationListView(StaffRequiredMixin, ListView):
model = Variation
queryset = Variation.objects.all()
def get_context_data(self, *args, **kwargs):
context = super(VariationListView, self).get_context_data(*args, **kwargs)
context["formset"] = VariationInventoryFormSet(queryset=self.get_queryset())
return context
def get_queryset(self, *args, **kwargs):
product_pk = self.kwargs.get("pk")
if product_pk:
product = get_object_or_404(Product, pk=product_pk)
queryset = Variation.objects.filter(product=product)
return queryset
def post(self, request, *args, **kwargs):
formset = VariationInventoryFormSet(request.POST, request.FILES)
if formset.is_valid():
formset.save(commit=False)
for form in formset:
new_item = form.save(commit=False)
#if new_item.title:
product_pk = self.kwargs.get("pk")
product = get_object_or_404(Product, pk=product_pk)
new_item.product = product
new_item.save()
messages.success(request, "Your inventory and pricing has been updated.")
return redirect("products")
raise Http404
class ProductListView(ListView):
model = Product
queryset = Product.objects.all()
def get_context_data(self, *args, **kwargs):
context = super(ProductListView, self).get_context_data(*args, **kwargs)
context["now"] = timezone.now()
context["query"] = self.request.GET.get("q") #None
return context
def get_queryset(self, *args, **kwargs):
        qs = super(ProductListView, self).get_queryset(*args, **kwargs)
query = self.request.GET.get("q")
if query:
qs = self.model.objects.filter(
Q(title__icontains=query) |
Q(description__icontains=query)
)
try:
qs2 = self.model.objects.filter(
Q(price=query)
)
qs = (qs | qs2).distinct()
            except Exception:
pass
return qs
import random
class ProductDetailView(DetailView):
model = Product
#template_name = "product.html"
#template_name = "<appname>/<modelname>_detail.html"
def get_context_data(self, *args, **kwargs):
context = super(ProductDetailView, self).get_context_data(*args, **kwargs)
instance = self.get_object()
#order_by("-title")
context["related"] = sorted(Product.objects.get_related(instance)[:6], key= lambda x: random.random())
return context
def product_detail_view_func(request, id):
#product_instance = Product.objects.get(id=id)
product_instance = get_object_or_404(Product, id=id)
try:
product_instance = Product.objects.get(id=id)
except Product.DoesNotExist:
raise Http404
    except Exception:
raise Http404
template = "products/product_detail.html"
context = {
"object": product_instance
}
return render(request, template, context)
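# A sketch of the search semantics implemented in ProductListView.get_queryset
# above (an illustrative shell session; the field lookups come from that
# method):
#
#     >>> qs = Product.objects.filter(
#     ...     Q(title__icontains='shirt') | Q(description__icontains='shirt'))
#     >>> # a numeric query string additionally matches on exact price:
#     >>> (qs | Product.objects.filter(Q(price='19.99'))).distinct()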
|
lovelylinus35/Cinnamon
|
files/usr/lib/cinnamon-settings/bin/XletSettings.py
|
Python
|
gpl-2.0
| 14,098
| 0.003263
|
#!/usr/bin/env python2
import sys
try:
import os
import glob
import gettext
import json
import collections
import XletSettingsWidgets
import dbus
from SettingsWidgets import SectionBg
from gi.repository import Gio, Gtk, GObject, GdkPixbuf
except Exception, detail:
print detail
sys.exit(1)
home = os.path.expanduser("~")
translations = {}
def translate(uuid, string):
#check for a translation for this xlet
if uuid not in translations:
try:
translations[uuid] = gettext.translation(uuid, home + "/.local/share/locale").ugettext
except IOError:
try:
translations[uuid] = gettext.translation(uuid, "/usr/share/locale").ugettext
except IOError:
translations[uuid] = None
#do not translate whitespaces
if not string.strip():
return string
if translations[uuid]:
result = translations[uuid](string)
if result != string:
return result
return _(string)
class XletSetting:
def __init__(self, uuid, parent, _type):
self.parent = parent
self.type = _type
self.current_id = None
self.builder = Gtk.Builder()
self.builder.add_from_file("/usr/lib/cinnamon-settings/bin/xlet-settings.ui")
self.content = self.builder.get_object("content")
self.back_to_list_button = self.builder.get_object("back_to_list")
self.highlight_button = self.builder.get_object("highlight_button")
self.more_button = self.builder.get_object("more_button")
self.remove_button = self.builder.get_object("remove_xlet")
self.uuid = uuid
self.content.connect("hide", self.on_hide)
self.applet_meta = {}
self.applet_settings = collections.OrderedDict()
self.setting_factories = collections.OrderedDict()
self.load_applet_data (self.uuid)
if "icon" in self.applet_meta:
image = Gtk.Image().new_from_icon_name(self.applet_meta["icon"], Gtk.IconSize.BUTTON)
self.back_to_list_button.set_image(image)
self.back_to_list_button.get_property('image').set_padding(5, 0)
self.back_to_list_button.set_label(translate(uuid, self.applet_meta["name"]))
self.back_to_list_button.set_tooltip_text(_("Back to list"))
self.more_button.set_tooltip_text(_("More actions..."))
self.remove_button.set_label(_("Remove"))
self.remove_button.set_tooltip_text(_("Remove the current instance of this %s") % self.type)
self.highlight_button.set_label(_("Highlight"))
self.highlight_button.set_tooltip_text(_("Momentarily highlight the %s on your desktop") % self.type)
if len(self.applet_settings.keys()) > 1:
self.build_notebook()
else:
self.build_single()
self.back_to_list_button.connect("clicked", self.on_back_to_list_button_clicked)
if self.type != "extension":
self.highlight_button.connect("clicked", self.on_highlight_button_clicked)
self.highlight_button.show()
else:
self.highlight_button.hide()
self.more_button.connect("clicked", self.on_more_button_clicked)
self.remove_button.connect("clicked", self.on_remove_button_clicked)
def show (self):
self.content.show_all()
try:
self.back_to_list_button.get_property('image').show()
except:
pass
def on_hide (self, widget):
self.content.hide()
self.content.destroy()
self.applet_meta = None
self.applet_settings = None
for _id in self.setting_factories.keys():
self.setting_factories[_id].pause_monitor()
self.setting_factories = None
def load_applet_data (self, uuid):
found = self.get_meta_data_for_applet("/usr/share/cinnamon/%ss/%s" % (self.type, uuid))
if not found:
found = self.get_meta_data_for_applet("%s/.local/share/cinnamon/%ss/%s" % (home, self.type, uuid))
if not found:
print("Could not find %s metadata - are you sure it's installed correctly?" % self.type)
return
found = self.get_settings_for_applet("%s/.cinnamon/configs/%s" % (home, uuid))
if not found:
print("Could not find any instance settings data for this %s - are you sure it is loaded, and supports settings?" % self.type)
def get_meta_data_for_applet(self, path):
if os.path.exists(path) and os.path.isdir(path):
if os.path.exists("%s/metadata.json" % path):
raw_data = open("%s/metadata.json" % path).read()
self.applet_meta = json.loads(raw_data.decode('utf-8'))
return True
return False
def get_settings_for_applet(self, path):
if "max-instances" in self.applet_meta:
try:
self.multi_instance = int(self.applet_meta["max-instances"]) != 1
except:
self.multi_instance = False
else:
self.multi_instance = False
if os.path.exists(path) and os.path.isdir(path):
instances = sorted(os.listdir(path))
if len(instances) != 0:
for instance in instances:
raw_data = open("%s/%s" % (path, instance)).read()
try:
js = json.loads(raw_data.decode('utf-8'), object_pairs_hook=collections.OrderedDict)
except:
                        raise Exception("Failed to parse settings JSON data for %s %s" % (self.type, self.uuid))
instance_id = instance.split(".json")[0]
self.applet_settings[instance_id] = js
self.setting_factories[instance_id] = XletSettingsWidgets.Factory("%s/%s" % (path, instance), instance_id, self.multi_instance, self.uuid)
return True
else:
raise Exception("Could not find any active setting files for %s %s" % (self.type, self.uuid))
return False
def build_single(self):
self.nb = None
self.view = SectionBg()
self.content_box = Gtk.VBox()
self.view.add(self.content_box)
self.content_box.set_border_width(5)
for instance_key in self.applet_settings.keys():
for setting_key in self.applet_settings[instance_key].keys():
if setting_key == "__md5__" or self.applet_settings[instance_key][setting_key]["type"] == "generic":
continue
self.setting_factories[instance_key].create(setting_key,
self.applet_settings[instance_key][setting_key]["type"],
self.uuid)
widgets = self.setting_factories[instance_key].widgets
for widget_key in widgets.keys():
if widgets[widget_key].get_indented():
indent = XletSettingsWidgets.IndentedHBox()
indent.add_fill(widgets[widget_key])
self.content_box.pack_start(indent, False, False, 2)
else:
self.content_box.pack_start(widgets[widget_key], False, False, 2)
if len(widgets[widget_key].dependents) > 0:
widgets[widget_key].update_dependents()
self.current_id = instance_key
self.content.pack_start(self.view, True, True, 2)
def build_notebook(self):
self.nb = Gtk.Notebook()
i = 0
target_instance = -1
target_page = -1
if len(sys.argv) > 3:
target_instance = sys.argv[3]
for instance_key in self.applet_settings.keys():
view = Gtk.ScrolledWindow()
content_box = Gtk.VBox()
view.add_with_viewport(content_box)
content_box.set_border_width(5)
for setting_key in self.applet_settings[instance_key].keys():
if setting_key == "__md5__" or self.applet_settings[instance_key][setting_key]["type"] == "generic":
continue
|
google/ctfscoreboard
|
scoreboard/config_defaults.py
|
Python
|
apache-2.0
| 1,499
| 0
|
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Defaults(object):
ATTACHMENT_BACKEND = 'file://attachments'
COUNT_QUERIES = False
CSP_POLICY = None
CWD = os.path.dirname(os.path.realpath(__file__))
DEBUG = False
EXTEND_CSP_POLICY = None
ERROR_404_HELP = False
FIRST_BLOOD = 0
FIRST_BLOOD_MIN = 0
GAME_TIME = (None, None)
INVITE_KEY = None
    LOGIN_METHOD = 'local'
MAIL_FROM = None
MAIL_FROM_NAME = None
MAIL_HOST = 'localhost'
    NEWS_POLL_INTERVAL = 60000
PROOF_OF_WORK_BITS = 0
RULES = '/rules'
SCOREBOARD_ZEROS = True
SCORING = 'plain'
SECRET_KEY = None
TEAM_SECRET_KEY = None
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
SESSION_EXPIRATION_SECONDS = 60 * 60
SYSTEM_NAME = 'root'
TEAMS = True
TEASE_HIDDEN = True
TITLE = 'Scoreboard'
SUBMIT_AFTER_END = True
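# A minimal sketch of how a Flask app would consume these defaults and then
# override selected values from a deployment file (paths are illustrative):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('scoreboard.config_defaults.Defaults')
#     app.config.from_pyfile('config.py', silent=True)  # e.g. sets SECRET_KEY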
|
vinegret/youtube-dl
|
youtube_dl/extractor/foxnews.py
|
Python
|
unlicense
| 5,156
| 0.002715
|
from __future__ import unicode_literals
import re
from .amp import AMPIE
from .common import InfoExtractor
class FoxNewsIE(AMPIE):
IE_NAME = 'foxnews'
IE_DESC = 'Fox News and Fox Business Video'
_VALID_URL = r'https?://(?P<host>video\.(?:insider\.)?fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
_TESTS = [
{
'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
'md5': '32aaded6ba3ef0d1c04e238d01031e5e',
'info_dict': {
'id': '3937480',
'ext': 'flv',
'title': 'Frozen in Time',
'description': '16-year-old girl is size of toddler',
'duration': 265,
'timestamp': 1304411491,
'upload_date': '20110503',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips',
'md5': '5846c64a1ea05ec78175421b8323e2df',
'info_dict': {
'id': '3922535568001',
'ext': 'mp4',
'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal",
'description': "Congressman discusses president's plan",
'duration': 292,
'timestamp': 1417662047,
'upload_date': '20141204',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
'only_matching': True,
},
{
'url': 'http://video.foxbusiness.com/v/4442309889001',
'only_matching': True,
},
{
# From http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words
'url': 'http://video.insider.foxnews.com/v/video-embed.html?video_id=5099377331001&autoplay=true&share_url=http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words&share_title=Student%20Group:%20Saying%20%27Politically%20Correct,%27%20%27Trash%27%20and%20%27Lame%27%20Is%20Offensive&share=true',
'only_matching': True,
},
]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<(?:amp-)?iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.foxnews\.com/v/video-embed\.html?.*?\bvideo_id=\d+.*?)\1',
webpage)]
def _real_extract(self, url):
host, video_id = re.match(self._VALID_URL, url).groups()
info = self._extract_feed_info(
'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id))
info['id'] = video_id
return info
class FoxNewsArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:insider\.)?foxnews\.com/(?!v)([^/]+/)+(?P<id>[a-z-]+)'
IE_NAME = 'foxnews:article'
_TESTS = [{
# data-video-id
'url': 'http://www.foxnews.com/politics/2016/09/08/buzz-about-bud-clinton-camp-denies-claims-wore-earpiece-at-forum.html',
'md5': '83d44e1aff1433e7a29a7b537d1700b5',
'info_dict': {
'id': '5116295019001',
'ext': 'mp4',
'title': 'Trump and Clinton asked to defend positions on Iraq War',
'description': 'Veterans react on \'The Kelly File\'',
'timestamp': 1473301045,
'upload_date': '20160908',
},
}, {
# iframe embed
        'url': 'http://www.foxnews.com/us/2018/03/09/parkland-survivor-kyle-kashuv-on-meeting-trump-his-app-to-prevent-another-school-shooting.amp.html?__twitter_impression=true',
'info_dict': {
'id': '5748266721001',
'ext': 'flv',
'title': 'Kyle Kashuv has a positive message for the Trump White House',
'description': 'Marjory Stoneman Douglas student disagrees with classmates.',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 229,
'timestamp': 1520594670,
'upload_date': '20180309',
},
        'params': {
'skip_download': True,
},
}, {
'url': 'http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._html_search_regex(
r'data-video-id=([\'"])(?P<id>[^\'"]+)\1',
webpage, 'video ID', group='id', default=None)
if video_id:
return self.url_result(
'http://video.foxnews.com/v/' + video_id, FoxNewsIE.ie_key())
return self.url_result(
FoxNewsIE._extract_urls(webpage)[0], FoxNewsIE.ie_key())
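# A minimal usage sketch via youtube-dl's public API (the URL is taken from the
# _TESTS entries above):
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
#             download=False)
#         print(info['id'], info['title'])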
|
max00xam/service.maxxam.teamwatch
|
lib/socketio/asyncio_server.py
|
Python
|
gpl-3.0
| 24,559
| 0
|
import asyncio
import engineio
import six
from . import asyncio_manager
from . import exceptions
from . import packet
from . import server
class AsyncServer(server.Server):
"""A Socket.IO server for asyncio.
This class implements a fully compliant Socket.IO web server with support
for websocket and long-polling transports, compatible with the asyncio
framework on Python 3.5 or newer.
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
    :param async_handlers: If set to ``True``, event handlers are executed in
                           separate threads. To run handlers synchronously,
                           set to ``False``. The default is ``True``.
:param kwargs: Connection parameters for the underlying Engine.IO server.
The Engine.IO configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are "aiohttp". If
this argument is not given, an async mode is chosen
based on the installed packages.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting.
:param ping_interval: The interval in seconds at which the client pings
the server.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport.
:param allow_upgrades: Whether to allow transport upgrades or not.
:param http_compression: Whether to compress packages when using the
polling transport.
:param compression_threshold: Only compress messages when their byte size
is greater than this value.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``.
"""
def __init__(self, client_manager=None, logger=False, json=None,
async_handlers=True, **kwargs):
if client_manager is None:
client_manager = asyncio_manager.AsyncManager()
super().__init__(client_manager=client_manager, logger=logger,
binary=False, json=json,
async_handlers=async_handlers, **kwargs)
def is_asyncio_based(self):
return True
def attach(self, app, socketio_path='socket.io'):
"""Attach the Socket.IO server to an application."""
self.eio.attach(app, socketio_path)
async def emit(self, event, data=None, room=None, skip_sid=None,
namespace=None, callback=None, **kwargs):
"""Emit a custom event to one or more connected clients.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param room: The recipient of the message. This can be set to the
session ID of a client to address that client's room, or
                     to any custom room created by the application. If this
                     argument is omitted the event is broadcast to all
connected clients.
:param skip_sid: The session ID of a client to skip when broadcasting
to a room or to all clients. This can be used to
prevent a message from being sent to the sender.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
when addressing an individual client.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
clients directly, without going through the queue.
This is more efficient, but only works when a
single server process is used. It is recommended
to always leave this parameter with its default
value of ``False``.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
self.logger.info('emitting event "%s" to %s [%s]', event,
room or 'all', namespace)
await self.manager.emit(event, data, namespace, room=room,
skip_sid=skip_sid, callback=callback,
**kwargs)
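    # A usage sketch for emit (assumptions: an aiohttp deployment and
    # illustrative event names):
    #
    #     sio = AsyncServer(async_mode='aiohttp')
    #
    #     @sio.on('chat')
    #     async def chat(sid, data):
    #         await sio.emit('reply', {'ok': True}, room=sid)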
async def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None, **kwargs):
"""Send a message to one or more connected clients.
This function emits an event with the name ``'message'``. Use
:func:`emit` to issue custom event names.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param room: The recipient of the message. This can be set to the
session ID of a client to address that client's room, or
                     to any custom room created by the application. If this
                     argument is omitted the event is broadcast to all
connected clients.
:param skip_sid: The session ID of a client to skip when broadcasting
to a room or to all clients. This can be used to
prevent a message from being sent to the sender.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
                         when addressing an individual client.
|
ufo-kit/concert
|
concert/tests/integration/test_scan.py
|
Python
|
lgpl-3.0
| 4,356
| 0.002296
|
from itertools import product
import numpy as np
from concert.quantities import q
from concert.tests import assert_almost_equal, TestCase
from concert.devices.motors.dummy import LinearMotor
from concert.processes.common import scan, ascan, dscan
def compare_sequences(first_sequence, second_sequence, assertion):
assert len(first_sequence) == len(second_sequence)
for x, y in zip(first_sequence, second_sequence):
assertion(x[0], y[0])
assertion(x[1], y[1])
class TestScan(TestCase):
def setUp(self):
super(TestScan, self).setUp()
self.motor = LinearMotor()
async def feedback(self):
return 1 * q.dimensionless
async def test_ascan(self):
async def run(include_last=True):
scanned = []
async for pair in ascan(self.motor['position'], 0 * q.mm, 10 * q.mm,
5 * q.mm, self.feedback, include_last=include_last):
                scanned.append(pair)
return scanned
expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless),
(10 * q.mm, 1 * q.dimensionless)]
scanned = await run()
compare_sequences(expected, scanned, assert_almost_equal)
# Second scan, values must be same
scanned = await run()
compare_sequences(expected, scanned, assert_almost_equal)
# Exclude last
scanned = await run(include_last=False)
compare_sequences(expected[:-1], scanned, assert_almost_equal)
async def test_ascan_units(self):
scanned = []
expected = [(0 * q.mm, 1 * q.dimensionless), (50 * q.mm, 1 * q.dimensionless),
(100 * q.mm, 1 * q.dimensionless)]
async for pair in ascan(self.motor['position'], 0 * q.mm, 10 * q.cm,
5 * q.cm, self.feedback):
scanned.append(pair)
compare_sequences(expected, scanned, assert_almost_equal)
async def test_dscan(self):
async def run(include_last=True):
scanned = []
async for pair in dscan(self.motor['position'], 10 * q.mm, 5 * q.mm, self.feedback,
include_last=include_last):
scanned.append(pair)
return scanned
scanned = await run()
expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless),
(10 * q.mm, 1 * q.dimensionless)]
compare_sequences(expected, scanned, assert_almost_equal)
# Second scan, x values must be different
scanned = await run()
expected = [(10 * q.mm, 1 * q.dimensionless), (15 * q.mm, 1 * q.dimensionless),
(20 * q.mm, 1 * q.dimensionless)]
compare_sequences(expected, scanned, assert_almost_equal)
# Exclude last
scanned = await run(include_last=False)
expected = [(20 * q.mm, 1 * q.dimensionless), (25 * q.mm, 1 * q.dimensionless)]
compare_sequences(expected, scanned, assert_almost_equal)
async def test_scan(self):
async def run():
scanned = []
async for pair in scan(self.motor['position'], np.arange(0, 10, 5) * q.mm,
self.feedback):
scanned.append(pair)
return scanned
scanned = await run()
expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless)]
compare_sequences(expected, scanned, assert_almost_equal)
async def test_multiscan(self):
"""A 2D scan."""
values_0 = np.arange(0, 10, 5) * q.mm
values_1 = np.arange(20, 30, 5) * q.mm
async def run():
other = LinearMotor()
scanned = []
async for pair in scan((self.motor['position'], other['position']),
(values_0, values_1),
self.feedback):
vec, res = pair
scanned.append((vec[0], vec[1], res))
return scanned
scanned = await run()
expected = list(product(values_0, values_1, [1 * q.dimensionless]))
x, y, z = list(zip(*scanned))
x_gt, y_gt, z_gt = list(zip(*expected))
assert_almost_equal(x, x_gt)
assert_almost_equal(y, y_gt)
assert_almost_equal(z, z_gt)
|
sandeva/appspot
|
astro/birth/urls.py
|
Python
|
apache-2.0
| 1,038
| 0.003854
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
import re
urlpatterns = patterns(re.sub(r'[^.]*$', "views", __name__),
(r'^$', 'index'),
(r'^(?P<admin>admin)/(?P<user>.*?)/$', 'index'),
(r'^((?P<event_key>.*?)/)?edit/$', 'edit'),
    (r'^(?P<ref_key>.*?)/((?P<event_key>.*?)/)?edit/event/$', 'editPureEvent'),
#(r'^(?P<location_key>\w+)/update/$', 'update'),
# Uncomment this for admin:
# (r'^admin/', include('django.contrib.admin.urls')),
)
|
ga4gh/CGT
|
tests/populate.py
|
Python
|
apache-2.0
| 1,290
| 0.00155
|
import uuid
import json
import requests
import argparse
parser = argparse.ArgumentParser(
description="Upload a series of test submissions with randomized ids")
parser.add_argument('host', nargs='?', default="http://localhost:5000",
help="URL of server to upload to")
args = parser.parse_args()
with open("tests/ALL/ALL-US.json") as f:
submissions = json.loads(f.read())
submitted = []
# Submit but don't publish to make it faster
for name, fields in submissions.iteritems():
print("Submitting {}".format(name))
# Change raw_data_accession so each run adds new records
fields["raw_data_accession"] = str(uuid.uuid4())
print(fields["raw_data_accession"])
    r = requests.post("{}/v0/submissions?publish=false".format(args.host),
files=[
("files[]", (fields["vcf_filename"],
open("tests/ALL/{}".format(fields["vcf_filename"]), "rb")))],
data=fields)
assert(r.status_code == requests.codes.ok)
submitted.append(json.loads(r.text)["multihash"])
print("Publishing submissions...")
r = requests.put("{}/v0/submissions".format(args.host), json={"submissions": submitted})
assert(r.status_code == requests.codes.ok)
print("Done.")
|
xplv/qtile
|
libqtile/widget/currentlayout.py
|
Python
|
mit
| 2,371
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2012 roger
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2012 Maximilian Köhl
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .. import bar, hook
class CurrentLayout(base._TextBox):
"""
    Display the name of the current layout of the current
    group of the screen the bar containing the widget is on.
"""
orientations = base.ORIENTATION_HORIZONTAL
def __init__(self, width=bar.CALCULATED, **config):
base._TextBox.__init__(self, "", width, **config)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.text = self.bar.screen.group.layouts[0].name
self.setup_hooks()
def setup_hooks(self):
def hook_response(layout, group):
if group.screen is not None and group.screen == self.bar.screen:
self.text = layout.name
self.bar.draw()
hook.subscribe.layout_change(hook_response)
def button_press(self, x, y, button):
if button == 1:
self.qtile.cmd_next_layout()
elif button == 2:
self.qtile.cmd_prev_layout()
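# A minimal configuration sketch (assuming a standard qtile config.py) placing
# this widget in a bar:
#
#     from libqtile import bar, widget
#     from libqtile.config import Screen
#
#     screens = [Screen(bottom=bar.Bar([widget.CurrentLayout()], 24))]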
|
alex-dow/psistatsrd
|
psistatsrd/config.py
|
Python
|
mit
| 920
| 0.004348
|
import sys
import os
import pygame
class Config(object):
def __init__(self, config_file):
self.config_file = config_file
self.params = {}
def parse(self):
with open(self.config_file) as f:
for line in f:
if line[0] == ";":
continue;
if len(line) >= 3:
parts = line.split('=', 1)
                    self.params[parts[0]] = parts[1].strip("\n")
val = self.params[parts[0]]
if val[0] == "#":
self.params[parts[0]] = pygame.Color(val)
if val == "false":
self.params[parts[0]] = False
elif val == "true":
self.params[parts[0]] = True
                    elif val == "none":
self.params[parts[0]] = None
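# A usage sketch with a hypothetical config file (semicolon lines are skipped,
# '#'-prefixed values become pygame colors, and true/false/none are coerced):
#
#     ; psistatsrd.conf
#     bg_color=#336699
#     show_cpu=true
#     title=none
#
#     config = Config("psistatsrd.conf")
#     config.parse()
#     config.params["show_cpu"]   # -> True
#     config.params["bg_color"]   # -> pygame.Color('#336699')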
|
D-K-E/cltk
|
src/cltk/prosody/lat/verse_scanner.py
|
Python
|
mit
| 18,030
| 0.002609
|
"""Parent class and utility class for producing a scansion pattern for a line of Latin verse.
Some useful methods:
* Performs a conservative i to j transformation
* Performs elisions
* Accents vowels by position
* Breaks the line into a list of syllables by calling a Syllabifier class which may be injected
  into this class's constructor.
"""
import logging
import re
from typing import Any, Dict, List
import cltk.prosody.lat.string_utils as string_utils
from cltk.prosody.lat.metrical_validator import MetricalValidator
from cltk.prosody.lat.scansion_constants import ScansionConstants
from cltk.prosody.lat.scansion_formatter import ScansionFormatter
from cltk.prosody.lat.syllabifier import Syllabifier
from cltk.prosody.lat.verse import Verse
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
__author__ = ["Todd Cook <todd.g.cook@gmail.com>"]
__license__ = "MIT License"
class VerseScanner:
"""
The scansion symbols used can be configured by passing a suitable constants class to
the constructor.
"""
def __init__(
self, constants=ScansionConstants(), syllabifier=Syllabifier(), **kwargs
):
self.constants = constants
self.remove_punct_map = string_utils.remove_punctuation_dict()
self.punctuation_substitutions = string_utils.punctuation_for_spaces_dict()
self.metrical_validator = MetricalValidator(constants)
self.formatter = ScansionFormatter(constants)
self.syllabifier = syllabifier
self.inverted_amphibrach_re = re.compile(
r"{}\s*{}\s*{}".format(
self.constants.STRESSED,
self.constants.UNSTRESSED,
self.constants.STRESSED,
)
)
self.syllable_matcher = re.compile(
r"[{}]".format(
self.constants.VOWELS
+ self.constants.ACCENTED_VOWELS
+ self.constants.LIQUIDS
+ self.constants.MUTES
)
)
def transform_i_to_j(self, line: str) -> str:
"""
Transform instances of consonantal i to j
:param line:
:return:
>>> print(VerseScanner().transform_i_to_j("iactātus"))
jactātus
>>> print(VerseScanner().transform_i_to_j("bracchia"))
bracchia
"""
words = line.split(" ")
space_list = string_utils.space_list(line)
corrected_words = []
for word in words:
found = False
for prefix in self.constants.PREFIXES:
if word.startswith(prefix) and word != prefix:
corrected_words.append(
self.syllabifier.convert_consonantal_i(prefix)
)
corrected_words.append(
self.syllabifier.convert_consonantal_i(word[len(prefix) :])
)
found = True
break
if not found:
corrected_words.append(self.syllabifier.convert_consonantal_i(word))
new_line = string_utils.join_syllables_spaces(corrected_words, space_list)
char_list = string_utils.overwrite(
list(new_line),
r"\b[iī][{}]".format(
self.constants.VOWELS + self.constants.ACCENTED_VOWELS
),
"j",
)
char_list = string_utils.overwrite(
char_list, r"\b[I][{}]".format(self.constants.VOWELS_WO_I), "J"
)
char_list = string_utils.overwrite(
char_list,
r"[{}][i][{}]".format(self.constants.VOWELS_WO_I, self.constants.VOWELS),
"j",
1,
)
return "".join(char_list)
def transform_i_to_j_optional(self, line: str) -> str:
"""
Sometimes for the demands of meter a more permissive i to j transformation is warranted.
:param line:
:return:
>>> print(VerseScanner().transform_i_to_j_optional("Italiam"))
Italjam
>>> print(VerseScanner().transform_i_to_j_optional("Lāvīniaque"))
Lāvīnjaque
>>> print(VerseScanner().transform_i_to_j_optional("omnium"))
omnjum
"""
words = line.split(" ")
space_list = string_utils.space_list(line)
corrected_words = []
for word in words:
found = False
for prefix in self.constants.PREFIXES:
if word.startswith(prefix) and word != prefix:
corrected_words.append(
self.syllabifier.convert_consonantal_i(prefix)
)
corrected_words.append(
self.syllabifier.convert_consonantal_i(word[len(prefix) :])
)
found = True
break
if not found:
corrected_words.append(self.syllabifier.convert_consonantal_i(word))
new_line = string_utils.join_syllables_spaces(corrected_words, space_list)
# the following two may be tunable and subject to improvement
char_list = string_utils.overwrite(
list(new_line),
"[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format(
self.constants.VOWELS_WO_I
),
"j",
1,
)
char_list = string_utils.overwrite(
char_list,
"[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I),
"j",
1,
)
return "".join(char_list)
def accent_by_position(self, verse_line: str) -> str:
"""
Accent vowels according to the rules of scansion.
:param verse_line: a line of unaccented verse
:return: the same line with vowels accented by position
>>> print(VerseScanner().accent_by_position(
... "Arma virumque cano, Troiae qui primus ab oris").lstrip())
Ārma virūmque canō Trojae qui primus ab oris
"""
line = verse_line.translate(self.punctuation_substitutions)
line = self.transform_i_to_j(line)
marks = list(line)
# locate and save dipthong positions since we don't want them being accented
dipthong_positions = []
for dipth in self.constants.DIPTHONGS:
if dipth in line:
dipthong_positions.append(line.find(dipth))
# Vowels followed by 2 consonants
# The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants.
# see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm
marks = string_utils.overwrite(
marks,
"[{}][{}][{}]".format(
self.constants.VOWELS,
self.constants.CONSONANTS,
self.constants.CONSONANTS_WO_H,
),
self.constants.STRESSED,
)
        # one space (or more, where 'dropped' punctuation may intervene)
marks = string_utils.overwrite(
marks,
r"[{}][{}]\s*[{}]".format(
self.constants.VOWELS,
self.constants.CONSONANTS,
self.constants.CONSONANTS_WO_H,
),
self.constants.STRESSED,
)
# ... if both consonants are in the next word, the vowel may be long
# .... but it could be short if the vowel is not on the thesis/emphatic part of the foot
# ... see Gildersleeve and Lodge p.446
marks = string_utils.overwrite(
marks,
r"[{}]\s*[{}][{}]".format(
self.constants.VOWELS,
self.constants.CONSONANTS,
self.constants.CONSONANTS_WO_H,
),
self.constants.STRESSED,
)
# x is considered as two letters
marks = string_utils.overwrite(
marks, "[{}][xX]".format(self.constants.VOWELS), self.constants.STRESSED
)
# z is considered as two letters
marks = string_utils.overwrite(
marks, r"[{}][zZ]".format(self.constants.VOWELS), self.constants.STRESSED
)
original_verse = list(line)
for idx, word in e

david58/gradertools | gradertools/isolation/isolate_simple.py | Python | mit | 1,622 | 0.006165

import subprocess
import shutil
import os
import time
from .interface import IsolateInterface
class IsolateSimple(IsolateInterface):
def isolate(self, files, command, parameters, envvariables, directories, allowmultiprocess, stdinfile, stdoutfile):
        if os.path.isdir("/tmp/gradertools/isolation/"):
shutil.rmtree("/tmp/gradertools/isolation/")
os.makedirs("/tmp/gradertools/isolation/")
box = "/tmp/gradertools/isolation/"
for file in files:
shutil.copy(file, os.path.join(box, os.path.basename(file)))
isolateio=" "
if stdinfile is not None:
isolateio+="< "+stdinfile
if stdoutfile is not None:
            isolateio+="> "+stdoutfile
t0 = time.perf_counter()
out = subprocess.run(" ".join(["cd "+ box+ ";"]+[command]+parameters+[isolateio]), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
t1 = time.perf_counter()
self._boxdir = box
self._status = 'OK'
self._runtime = t1-t0
self._walltime = t1-t0
self._maxrss = 0 # Maximum resident set size of the process (in kilobytes).
self._cswv = 0 # Number of context switches caused by the process giving up the CPU voluntarily.
self._cswf = 0 # Number of context switches forced by the kernel.
self._cgmem = 0 # Total memory use by the whole control group (in kilobytes).
self._exitcode = out.returncode
self._stdout = out.stdout
def clean(self):
shutil.rmtree("/tmp/gradertools/isolation/")
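# --- usage sketch (editor's addition; argument values are illustrative,
# and it is assumed the IsolateInterface base needs no constructor args) ---
# isolate() copies the listed files into /tmp/gradertools/isolation/, runs
# the command there (optionally redirecting stdin/stdout), and records
# wall time, exit code and captured output on the instance.
if __name__ == "__main__":
    box = IsolateSimple()
    box.isolate(files=["solution.py"], command="python3",
                parameters=["solution.py"], envvariables={}, directories=[],
                allowmultiprocess=False, stdinfile=None, stdoutfile=None)
    print(box._exitcode, box._runtime, box._stdout)
    box.clean()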

openstack/horizon | openstack_dashboard/contrib/developer/profiler/api.py | Python | apache-2.0 | 3,772 | 0

# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import json
from osprofiler import _utils as utils
from osprofiler.drivers.base import get_driver as profiler_get_driver
from osprofiler import notifier
from osprofiler import profiler
from osprofiler import web
from horizon.utils import settings as horizon_settings
ROOT_HEADER = 'PARENT_VIEW_TRACE_ID'
def init_notifier(connection_str, host="localhost"):
_notifier = notifier.create(
connection_str, project='horizon', service='horizon', host=host)
notifier.set(_notifier)
@contextlib.contextmanager
def traced(request, name, info=None):
if info is None:
info = {}
profiler_instance = profiler.get()
if profiler_instance is not None:
trace_id = profiler_instance.get_base_id()
info['user_id'] = request.user.id
with profiler.Trace(name, info=info):
yield trace_id
else:
yield
def _get_engine():
connection_str = horizon_settings.get_dict_config(
'OPENSTACK_PROFILER', 'receiver_connection_string')
return profiler_get_driver(connection_str)
def list_traces():
engine = _get_engine()
fields = ['base_id', 'timestamp', 'info.request.path', 'info']
traces = engine.list_traces(fields)
return [{'id': trace['base_id'],
'timestamp': trace['timestamp'],
'origin': trace['info']['request']['path']} for trace in traces]
def get_trace(trace_id):
def rec(_data, level=0):
_data['level'] = level
_data['is_leaf'] = not _data['children']
_data['visible'] = True
_data['childrenVisible'] = True
finished = _data['info']['finished']
for child in _data['children']:
__, child_finished = rec(child, level + 1)
# NOTE(tsufiev): in case of async requests the root request usually
            # finishes before the dependent requests do, so we need to
# normalize the duration of all requests by the finishing time of
# the one which took longest
if child_finished > finished:
finished = child_finished
return _data, finished
engine = _get_engine()
trace = engine.get_report(trace_id)
data, max_finished = rec(trace)
data['info']['max_finished'] = max_finished
return data
def update_trace_headers(keys, **kwargs):
trace_headers = web.get_trace_id_headers()
trace_info = utils.signed_unpack(
trace_headers[web.X_TRACE_INFO], trace_headers[web.X_TRACE_HMAC],
keys)
trace_info.update(kwargs)
p = profiler.get()
trace_data = utils.signed_pack(trace_info, p.hmac_key)
trace_data = [key.decode() if isinstance(key, bytes)
else key for key in trace_data]
return json.dumps({web.X_TRACE_INFO: trace_data[0],
web.X_TRACE_HMAC: trace_data[1]})
if not horizon_settings.get_dict_config('OPENSTACK_PROFILER', 'enabled'):
def trace(function):
return function
else:
def trace(function):
func_name = function.__module__ + '.' + function.__name__
decorator = profiler.trace(func_name)
return decorator(function)
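# --- usage sketch (editor's addition; the view function is hypothetical) ---
# trace() as defined above is a no-op decorator when profiling is disabled
# and wraps the function in an osprofiler trace point named
# "<module>.<function>" when it is enabled, so call sites read the same
# either way:
#
# @trace
# def get_instances(request):
#     ...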

murat1985/bagpipe-bgp | bagpipe/exabgp/message/update/__init__.py | Python | apache-2.0 | 2,921 | 0.030127

# encoding: utf-8
"""
update/__init__.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2012 Exa Networks. All rights reserved.
Modified by Orange - 2014
"""
from copy import deepcopy
from bagpipe.exabgp.structure.address import AFI,SAFI
from bagpipe.exabgp.message import Message,prefix
from bagpipe.exabgp.message.update.attribute.mprnlri import MPRNLRI
from bagpipe.exabgp.message.update.attribute.mpurnlri import MPURNLRI
# =================================================================== Update
#def bgp_mp (self):
# if AttributeID.NEXT_HOP in self:
# if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4:
# return MPRNLRI(self).pack()
# return ''
#
#def bgp_resdraw (self):
# if AttributeID.NEXT_HOP in self:
# if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4:
# return MPURNLRI(self.afi,self.safi,self).pack()
# return ''
from bagpipe.exabgp.message.update.attribute import AttributeID
class Update (Message):
TYPE = chr(0x02)
    # All the routes must be of the same family and have the same next-hop
def __init__ (self,routes):
self.routes = routes
        self.afi = routes[0].nlri.afi
        self.safi = routes[0].nlri.safi
# The routes MUST have the same attributes ...
def announce (self,asn4,local_asn,remote_asn):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
else:
nlri = ''
mp = MPRNLRI(self.routes).pack()
# FIXME: needs same fix as below for next hop ?
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix('') + prefix(attr + mp) + nlri)
def update (self,asn4,local_asn,remote_asn):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
else:
nlri = ''
#mp = MPURNLRI(self.routes).pack() + MPRNLRI(self.routes).pack()
mp = MPRNLRI(self.routes).pack()
# remove NEXT_HOP from attributes, because it's already been encoded in the MPNLRI
if AttributeID.NEXT_HOP not in self.routes[0].attributes:
raise Exception("Routes advertised need a NEXT_HOP attribute")
attributes = deepcopy(self.routes[0].attributes)
del attributes[AttributeID.NEXT_HOP]
attr = attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix(nlri) + prefix(attr + mp) + nlri)
def withdraw (self,asn4=False,local_asn=None,remote_asn=None):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
attr = ''
else:
nlri = ''
mp = MPURNLRI(self.routes).pack()
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix(nlri) + prefix(attr + mp))

Karosuo/Linux_tools | xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_internal/commands/list.py | Python | gpl-3.0 | 10,150 | 0

from __future__ import absolute_import
import json
import logging
from pip._vendor import six
from pip._vendor.six.moves import zip_longest
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.exceptions import CommandError
from pip._internal.index import PackageFinder
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.packaging import get_installer
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
default="columns",
choices=('columns', 'freeze', 'json'),
help="Select the output format among: columns (default), freeze, "
"or json",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
cmd_opts.add_option(
'--exclude-editable',
action='store_false',
            dest='include_editable',
help='Exclude editable package from output.',
)
cmd_opts.add_option(
'--include-editable',
action='store_true',
dest='include_editable',
help='Include editable package from output.',
default=True,
)
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group, self.parser
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
session=session,
)
def run(self, options, args):
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
)
        # get_not_required must be called first in order to find and
        # filter out all dependencies correctly. Otherwise a package
        # can't be identified as a requirement because some parent
        # packages could be filtered out before.
if options.not_required:
packages = self.get_not_required(packages, options)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return {pkg for pkg in packages if pkg.key not in dep_keys}
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
if options.verbose >= 1:
logger.info("%s==%s (%s)", dist.project_name,
dist.version, dist.location)
else:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len
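# --- editor's note: the dump truncates tabulate() mid-line above. Below is
# a self-contained sketch of the idea it implements (pad each cell to the
# widest entry in its column); a reconstruction, not necessarily pip's
# exact code.
from itertools import zip_longest as _zip_longest

def tabulate_sketch(vals):
    # Per-column width = widest cell seen in that column.
    sizes = [0] * max(len(row) for row in vals)
    for row in vals:
        sizes = [max(s, len(str(c)))
                 for s, c in _zip_longest(sizes, row, fillvalue='')]
    # Left-justify every cell to its column width.
    result = []
    for row in vals:
        cells = (str(c).ljust(s) for s, c in _zip_longest(sizes, row, fillvalue=''))
        result.append(" ".join(cells).rstrip())
    return result, sizes

# tabulate_sketch([["Package", "Version"], ["pip", "19.0"]])
# -> (['Package Version', 'pip     19.0'], [7, 7])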

pcdummy/socketrpc | examples/gevent_srpc.py | Python | bsd-3-clause | 5,981 | 0.00535

#!/usr/bin/python -OO
# -*- coding: utf-8 -*-
# vim: set et sts=4 sw=4 encoding=utf-8:
#
###############################################################################
#
# This file is part of socketrpc.
#
# Copyright (C) 2011 Rene Jochum <rene@jrit.at>
#
###############################################################################
### START Library location
# Set import Library to ../socketrpc in dev mode
import sys
import os
if os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), os.pardir, 'socketrpc')):
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), os.pardir))
### END library location
from gevent import monkey; monkey.patch_all()
from gevent.pool import Pool
from socketrpc import __version__
from socketrpc.gevent_srpc import SocketRPCProtocol, SocketRPCServer, SocketRPCClient, set_serializer
import logging
from optparse import OptionParser
def parse_commandline(parser=None):
if parser is None:
parser = OptionParser(usage="""%prog [-v] [-s <serializer>] [-H <host>] [-p <port>] [-r <# of requests>] MODE
Use this to test/benchmark socketrpc on gevent or to learn using it.
Available MODEs:
server: Run a single thread server,
you need to start this before you can do client* calls.
clientbounce: Run a single request on the server.
clientlarge: Request 1mb of zeros from the server
clientparallel: Run parallel requests (specify with -r)
clientserial: Run serial requests (specify with -r)""")
parser.add_option("-v", "--version", dest="print_version",
help="print current Version", action="store_true")
parser.add_option("-H", "--host", dest="host", default='127.0.0.1',
help="HOST to connect/listen. Default: 127.0.0.1", metavar="HOST")
parser.add_option("-p", "--port", dest="port", default='9990',
help="PORT to connect/listen. Default: 9990", metavar="PORT")
parser.add_option("-s", "--serializer", dest="serializer", default='pickle2',
help="Use serializer SERIALIZER, available are: bson, json and pickle2. Default: pickle2", metavar="SERIALIZER")
parser.add_option("-r", "--requests", dest="requests", default=100000,
help="NUMBER of parallel/serial requests. Default: 100000", metavar="NUMBER")
parser.add_option("-d", "--debug", dest="debug", default=False,
help="Debug print lots of data? Default: False", action="store_true")
# Parse the commandline
parser.set_defaults(verbose=True)
(options, args) = parser.parse_args()
# Print Version and exit if requested
if options.print_version:
print "%s: %s" % ('socketrpc', __version__)
sys.exit(0)
if len(args) < 1:
print 'Please give a MODE'
sys.exit(1)
result = {
'serializer': options.serializer,
'requests': int(options.requests),
'mode': args[0],
'host': options.host,
'port': int(options.port),
'debug': options.debug,
}
return result
def start(options):
logging.basicConfig(level=logging.NOTSET, format='%(asctime)s\t%(name)-35s\t%(levelname)s\t%(message)s')
SocketRPCProtocol.debug = options['debug']
set_serializer(options['serializer'])
mode = options['mode']
if mode == 'server':
class ServerProtocol(SocketRPCProtocol):
def docall_echo(self, args):
""" RPC Call, the result will be passed
to the client.
"""
return args
def docall_largedata(self, args):
return "\0" * 1024 * 1024 * 3
def docall_bounce(self, args):
""" This is just here to show that server is able to do
a "call" on the client
"""
return self.call(args[0], args[1]).get()
SocketRPCServer((options['host'], options['port']), ServerProtocol, backlog=2048).serve_forever()
elif mode.startswith('client'):
# The test data to transfer
params = {'g': 'is',
'e': 'very',
'v': 'cool',
'e': 'fast',
'n': 'and',
't': 'sexy!'}
class ClientProtocol(SocketRPCProtocol):
def docall_log(self, args):
self.logger.log(args[0], '"%s" logged from the server' % args[1])
return '%s: logged on the client, facility: %d' % (args[1], args[0])
if mode == 'clientbounce':
            client = SocketRPCClient((options['host'], options['port']), ClientProtocol)
for i in xrange(options['requests']):
client.call('bounce', ['log', (logging.WARN, 'test')]).get()
elif mode == 'clientlarge':
client = SocketRPCClient((options['host'], options['port']), ClientProtocol)
for i in xrange(options['requests']):
client.call('largedata', []).get()
elif mode == 'clientparallel':
# Parallel execution, sliced
client = SocketRPCClient((options['host'], options['port']), ClientProtocol)
def run100():
                # I'm not using gevent.pool.Pool for memory efficiency
pool = Pool()
for b in xrange(1000):
pool.add(client.call('echo', params))
# Blocks until all results arrived
pool.join()
for i in xrange(options['requests'] / 1000):
run100()
elif mode == 'clientserial':
# One after another
client = SocketRPCClient((options['host'], options['port']), ClientProtocol)
for i in xrange(options['requests']):
# The ".get" blocks until the result arrives
client.call('echo', params).get()
if __name__ == '__main__':
options = parse_commandline()
start(options)

dogukantufekci/workplace_saas | workplace_saas/_apps/places/models.py | Python | mit | 1,622 | 0.000617

from django.db import models
class PlaceType(models.Model):
name = models.CharField(
max_length=100,
unique=True,
)
class Place(models.Model):
    type = models.ForeignKey(
PlaceType,
related_name='places',
)
name = models.CharField(
max_length=100,
)
class PlaceAltName(models.Model):
    place = models.ForeignKey(
Place,
related_name='place_alt_names'
)
alt_name = models.CharField(
max_length=100,
)
class Meta:
unique_together = (('place', 'alt_name',),)
# Place Type: Country -----------------------------------
class Country(models.Model):
place = models.OneToOneField(
Place,
related_name='country',
)
tld = models.CharField(
max_length=100,
)
cca2 = models.CharField(
max_length=2,
)
    cca3 = models.CharField(
max_length=3,
)
ccn3 = models.CharField(
max_length=3,
)
    world_region = models.ForeignKey(
        Place,
        related_name='countries_world_region',
)
    world_sub_region = models.ForeignKey(
Place,
related_name='countries_world_sub_region'
)
class CountryCallingCode(models.Model):
    country = models.ForeignKey(
Country,
related_name='country_calling_codes'
)
calling_code = models.CharField(
max_length=100,
)
class CountryCurrency(models.Model):
    country = models.ForeignKey(
Country,
related_name='country_currencies'
)
currency = models.CharField(
max_length=100,
)
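# --- usage sketch (editor's addition; object names are illustrative, and
# `asia`/`western_asia` stand for existing Place rows) ---
# Country hangs off a generic Place via a one-to-one field, with calling
# codes and currencies stored as separate rows, e.g.:
#
# country_type = PlaceType.objects.create(name="Country")
# turkey = Place.objects.create(type=country_type, name="Turkey")
# tr = Country.objects.create(place=turkey, tld=".tr", cca2="TR",
#                             cca3="TUR", ccn3="792",
#                             world_region=asia,
#                             world_sub_region=western_asia)
# CountryCallingCode.objects.create(country=tr, calling_code="90")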

rajashreer7/autotest-client-tests | linux-tools/sblim_sfcb/sblim_sfcb.py | Python | gpl-2.0 | 1,610 | 0.004969

#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class sblim_sfcb(test.test):
"""
Autotest module for testing basic functionality
of sblim_sfcb
@author Wang Tao <wangttao@cn.ibm.com>
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
if not sm.check_installed('gcc'):
logging.debug("gcc missing - trying to install")
            sm.install('gcc')
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/sblim_sfcb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./sblim-sfcb-test.sh'], cwd="%s/sblim_sfcb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')

lfairchild/PmagPy | programs/conversion_scripts/irm_magic.py | Python | bsd-3-clause | 3,055 | 0.008838

#!/usr/bin/env python
"""
NAME
irm_magic.py
DESCRIPTION
    Creates a MagIC file from an IRM Excel file. If you have multiple Excel files you will have to run the
program for each Excel file and combine each type of file (locations.txt, sites.txt, etc.) manually
using "combine_magic.py"
The program creates the standard file names for MagIC uploading (locations.txt, sites.txt, sample.txt,
specimens.txt, measurements.txt) and creates measurements files of each Excel measurement worksheet name
attached so that individual table conversions can be inspected, if desired. You will have to fill in the
meta-data missing in these files before they will pass data verification. Many empty required data columns
have been included in the files for convenience.
SYNTAX
irm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits
-ID DIRECTORY: directory for input files, default = current directory
-WD DIRECTORY: directory for output files, default = current directory
-f FILE: the IRM Excel data file name, required
        (the file name flag may be omitted and just the file name used, if no other flags are present)
    -cit CITATION: specify the citation, default = This study (use "This study" unless you already
        have the DOI for the paper the dataset is associated with.)
-M flag: the MPMSdc file type (default:0)
use 0 for IRM file type as of July 7th, 2021. Data has header with the specimen name on 4 columns
use 1 for earlier type where the first two header columns are "specimen" and the specimen name
example IRM data file Sprain is of this type
EXAMPLE
Command line for the example dataset:
    irm_magic.py example.xlsx - (example dataset yet to be chosen)
"""
import sys
from pmagpy import convert_2_magic as convert
def do_help():
"""
returns help string of script
"""
return __doc__
def main():
kwargs = {}
if '-h' in sys.argv:
help(__name__)
sys.exit()
if '-ID' in sys.argv:
ind=sys.argv.index('-ID')
kwargs['input_dir_path'] = sys.argv[ind+1]
else:
kwargs['input_dir_path'] = './'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
kwargs['output_dir_path'] = sys.argv[ind+1]
else:
kwargs['output_dir_path'] = './'
if '-f' in sys.argv:
ind=sys.argv.index('-f')
kwargs['mag_file'] = sys.argv[ind+1]
elif len(sys.argv) == 2:
kwargs['mag_file'] = sys.argv[1]
else:
print("You must specify the IRM excel data file name with the -f flag.")
exit()
if '-cit' in sys.argv:
        ind=sys.argv.index('-cit')
kwargs['citation'] = sys.argv[ind+1]
else:
kwargs['citation'] = 'This study'
if '-M' in sys.argv:
ind=sys.argv.index('-M')
kwargs['MPMSdc_type'] = sys.argv[ind+1]
else:
kwargs['MPMSdc_type'] = '0'
convert.irm(**kwargs)
if __name__ == "__main__":
main()

jeremiahyan/odoo | addons/mail/models/ir_model.py | Python | gpl-3.0 | 4,660 | 0.003219

# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class IrModel(models.Model):
_inherit = 'ir.model'
_order = 'is_mail_thread DESC, name ASC'
is_mail_thread = fields.Boolean(
string="Mail Thread", default=False,
help="Whether this model supports messages and notifications.",
)
    is_mail_activity = fields.Boolean(
string="Mail Activity", default=False,
help="Whether this model supports activities.",
)
is_mail_blacklist = fields.Boolean(
string="Mail Blacklist", default=False,
help="Whether this model supports blacklist.",
)
def unlink(self):
# Delete followers, messages and attachments for models that will be unlinked.
models = tuple(self.mapped('model'))
query = "DELETE FROM mail_activity_type WHERE res_model IN %s"
self.env.cr.execute(query, [models])
query = "DELETE FROM mail_followers WHERE res_model IN %s"
self.env.cr.execute(query, [models])
query = "DELETE FROM mail_message WHERE model in %s"
self.env.cr.execute(query, [models])
# Get files attached solely by the models
query = """
SELECT DISTINCT store_fname
FROM ir_attachment
WHERE res_model IN %s
EXCEPT
SELECT store_fname
FROM ir_attachment
WHERE res_model not IN %s;
"""
self.env.cr.execute(query, [models, models])
fnames = self.env.cr.fetchall()
query = """DELETE FROM ir_attachment WHERE res_model in %s"""
self.env.cr.execute(query, [models])
for (fname,) in fnames:
self.env['ir.attachment']._file_delete(fname)
return super(IrModel, self).unlink()
def write(self, vals):
if self and ('is_mail_thread' in vals or 'is_mail_activity' in vals or 'is_mail_blacklist' in vals):
if any(rec.state != 'manual' for rec in self):
raise UserError(_('Only custom models can be modified.'))
if 'is_mail_thread' in vals and any(rec.is_mail_thread > vals['is_mail_thread'] for rec in self):
raise UserError(_('Field "Mail Thread" cannot be changed to "False".'))
if 'is_mail_activity' in vals and any(rec.is_mail_activity > vals['is_mail_activity'] for rec in self):
raise UserError(_('Field "Mail Activity" cannot be changed to "False".'))
if 'is_mail_blacklist' in vals and any(rec.is_mail_blacklist > vals['is_mail_blacklist'] for rec in self):
raise UserError(_('Field "Mail Blacklist" cannot be changed to "False".'))
res = super(IrModel, self).write(vals)
self.flush()
# setup models; this reloads custom models in registry
self.pool.setup_models(self._cr)
# update database schema of models
models = self.pool.descendants(self.mapped('model'), '_inherits')
self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
else:
res = super(IrModel, self).write(vals)
return res
def _reflect_model_params(self, model):
vals = super(IrModel, self)._reflect_model_params(model)
vals['is_mail_thread'] = issubclass(type(model), self.pool['mail.thread'])
vals['is_mail_activity'] = issubclass(type(model), self.pool['mail.activity.mixin'])
vals['is_mail_blacklist'] = issubclass(type(model), self.pool['mail.thread.blacklist'])
return vals
@api.model
def _instanciate(self, model_data):
model_class = super(IrModel, self)._instanciate(model_data)
if model_data.get('is_mail_thread') and model_class._name != 'mail.thread':
parents = model_class._inherit or []
parents = [parents] if isinstance(parents, str) else parents
model_class._inherit = parents + ['mail.thread']
if model_data.get('is_mail_activity') and model_class._name != 'mail.activity.mixin':
parents = model_class._inherit or []
parents = [parents] if isinstance(parents, str) else parents
model_class._inherit = parents + ['mail.activity.mixin']
if model_data.get('is_mail_blacklist') and model_class._name != 'mail.thread.blacklist':
parents = model_class._inherit or []
parents = [parents] if isinstance(parents, str) else parents
model_class._inherit = parents + ['mail.thread.blacklist']
return model_class
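# --- illustration (editor's addition; the model name is hypothetical) ---
# Effect of _instanciate() above: a custom model stored with
# is_mail_thread=True comes back from the registry as if it had been
# declared with the mixin in its inheritance chain, roughly:
#
# class XTicket(models.Model):
#     _name = 'x_ticket'
#     _inherit = ['mail.thread']   # injected because is_mail_thread is set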

divio/django-shop | shop/admin/product.py | Python | bsd-3-clause | 6,026 | 0.004315

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django import forms
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.contrib import admin
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from adminsortable2.admin import SortableInlineAdminMixin
from cms.models import Page
from shop.models.related import ProductPageModel, ProductImageModel
class ProductImageInline(SortableInlineAdminMixin, admin.StackedInline):
model = ProductImageModel
extra = 1
ordering = ('order',)
def _find_catalog_list_apphook():
from shop.cms_apphooks import CatalogListCMSApp
from cms.apphook_pool import apphook_pool
for name, app in apphook_pool.apps.items():
if isinstance(app, CatalogListCMSApp):
return name
else:
raise ImproperlyConfigured("You must register a CMS apphook of type `CatalogListCMSApp`.")
class CategoryModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
        if Site.objects.count() >= 2:
            page_sitename = str(Site.objects.filter(djangocms_nodes=obj.node_id).first().name)
            return '{} | {}'.format(str(obj), page_sitename)
else:
return str(obj)
class CMSPageAsCategoryMixin(object):
"""
Add this mixin class to the ModelAdmin class for products wishing to be assigned to djangoCMS
pages when used as categories.
"""
def __init__(self, *args, **kwargs):
        super(CMSPageAsCategoryMixin, self).__init__(*args, **kwargs)
if not hasattr(self.model, 'cms_pages'):
raise ImproperlyConfigured("Product model requires a field named `cms_pages`")
def get_fieldsets(self, request, obj=None):
fieldsets = list(super(CMSPageAsCategoryMixin, self).get_fieldsets(request, obj=obj))
fieldsets.append((_("Categories"), {'fields': ('cms_pages',)}),)
return fieldsets
def get_fields(self, request, obj=None):
# In ``get_fieldsets()``, ``cms_pages`` is added, so remove it from ``fields`` to
# avoid showing it twice.
fields = list(super(CMSPageAsCategoryMixin, self).get_fields(request, obj))
try:
fields.remove('cms_pages')
except ValueError:
pass
return fields
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'cms_pages':
# restrict many-to-many field for cms_pages to ProductApp only
limit_choices_to = {
'publisher_is_draft': False,
'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook()),
}
queryset = Page.objects.filter(**limit_choices_to)
widget = admin.widgets.FilteredSelectMultiple(_("CMS Pages"), False)
required = not db_field.blank
field = CategoryModelMultipleChoiceField(queryset=queryset, widget=widget, required=required)
return field
return super(CMSPageAsCategoryMixin, self).formfield_for_manytomany(db_field, request, **kwargs)
def save_related(self, request, form, formsets, change):
old_cms_pages = form.instance.cms_pages.all()
new_cms_pages = form.cleaned_data.pop('cms_pages')
# remove old
for page in old_cms_pages:
if page not in new_cms_pages:
for pp in ProductPageModel.objects.filter(product=form.instance, page=page):
pp.delete()
# add new
for page in new_cms_pages:
if page not in old_cms_pages:
ProductPageModel.objects.create(product=form.instance, page=page)
return super(CMSPageAsCategoryMixin, self).save_related(request, form, formsets, change)
class InvalidateProductCacheMixin(object):
"""
If caching is enabled, add this class as the first mixin to Django's model admin for the
corresponding product.
"""
def __init__(self, *args, **kwargs):
if not hasattr(cache, 'delete_pattern'):
warnings.warn("\n"
"Your caching backend does not support deletion by key patterns.\n"
"Please use 'django-redis-cache', or wait until the product's HTML\n"
"snippet cache expires by itself.")
super(InvalidateProductCacheMixin, self).__init__(*args, **kwargs)
def save_model(self, request, product, form, change):
if change:
self.invalidate_cache(product)
super(InvalidateProductCacheMixin, self).save_model(request, product, form, change)
def invalidate_cache(self, product):
"""
The method ``ProductCommonSerializer.render_html`` caches the rendered HTML snippets.
Invalidate them after changing something in the product.
"""
try:
cache.delete_pattern('product:{}|*'.format(product.id))
except AttributeError:
pass
class UnitPriceMixin(object):
def get_list_display(self, request):
list_display = super(UnitPriceMixin, self).get_list_display(request)
if 'get_unit_price' not in list_display:
list_display.append('get_unit_price')
return list_display
def get_unit_price(self, obj):
return str(obj.unit_price)
get_unit_price.short_description = _("Unit Price")
class CMSPageFilter(admin.SimpleListFilter):
title = _("Category")
parameter_name = 'category'
def lookups(self, request, model_admin):
limit_choices_to = {
'publisher_is_draft': False,
'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook())
}
queryset = Page.objects.filter(**limit_choices_to)
return [(page.id, page.get_title()) for page in queryset]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(cms_pages__id=self.value())
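# --- usage sketch (editor's addition; `Product` is a hypothetical concrete
# product model) ---
# The pieces above combine with Django's ModelAdmin; per the
# InvalidateProductCacheMixin docstring, that mixin must come first.
#
# @admin.register(Product)
# class ProductAdmin(InvalidateProductCacheMixin, CMSPageAsCategoryMixin,
#                    UnitPriceMixin, admin.ModelAdmin):
#     inlines = [ProductImageInline]
#     list_filter = [CMSPageFilter]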

rogerthat-platform/rogerthat-backend | src-test/rogerthat_tests/mobicage/capi/test_feature_version.py | Python | apache-2.0 | 1,999 | 0.001501

# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import mc_unittest
from rogerthat.bizz.profile import create_user_profile
from rogerthat.bizz.system import update_app_asset_response
from rogerthat.capi.system import updateAppAsset
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.models.properties.profiles import MobileDetails
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.rpc.rpc import logError
from rogerthat.to.app import UpdateAppAssetRequestTO
class Test(mc_unittest.TestCase):
def testSendNews(self):
self.set_datastore_hr_probability(1)
scale_x = 1
request = UpdateAppAssetRequestTO(u"kind", u"url", scale_x)
app_user = users.User('geert@example.com')
user_profile = create_user_profile(app_user, 'geert', language='en')
mobile = users.get_current_mobile()
user_profile.mobiles = MobileDetails()
user_profile.mobiles.addNew(mobile.account, Mobile.TYPE_ANDROID_HTTP, None, u"rogerthat")
user_profile.put()
ms = get_mobile_settings_cached(mobile)
ms.majorVersion = 0
ms.minorVersion = 2447
ms.put()
updateAppAsset(update_app_asset_response, logError, app_user, request=request)
ms.minorVersion = 2449
ms.put()
updateAppAsset(update_app_asset_response, logError, app_user, request=request)

spektom/incubator-airflow | airflow/ti_deps/deps/exec_date_after_start_date_dep.py | Python | apache-2.0 | 1,807 | 0.001107

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class ExecDateAfterStartDateDep(BaseTIDep):
NAME = "Execution Date"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.start_date and ti.execution_date < ti.task.start_date:
yield self._failing_status(
reason="The execution date is {0} but this is before the task's start "
"date {1}.".format(
ti.execution_date.isoformat(),
ti.task.start_date.isoformat()))
if (ti.task.dag and ti.task.dag.start_date and
ti.execution_date < ti.task.dag.start_date):
yield self._failing_status(
reason="The execution date is {0} but this is before the task's "
"DAG's start date {1}.".format(
ti.execution_date.isoformat(),
ti.task.dag.start_date.isoformat()))

dsoprea/protobufp | protobufp/read_buffer.py | Python | gpl-2.0 | 4,934 | 0.004459

import logging
from threading import RLock
from struct import unpack
class ReadBuffer(object):
"""This class receives incoming data, and stores it in a list of extents
(also referred to as buffers). It allows us to leisurely pop off sequences
of bytes, which we build from the unconsumed extents. As the extents are
depleted, we maintain an index to the first available, non-empty extent.
We will only occasionally cleanup.
"""
__locker = RLock()
# TODO: Reduce this for testing.
__cleanup_interval = 100
def __init__(self):
self.__log = logging.getLogger(self.__class__.__name__)
self.__buffers = []
self.__length = 0
self.__read_buffer_index = 0
self.__hits = 0
def push(self, data):
with self.__class__.__locker:
self.__buffers.append(data)
self.__length += len(data)
def read_message(self):
"""Try to read a message from the buffered data. A message is defined
        as a 32-bit integer size, followed by that number of bytes. First we
        try to non-destructively read the integer. Then, we try to non-
        destructively read the remaining bytes. If both are successful, we
        then go back to remove the span from the front of the buffers.
"""
with self.__class__.__locker:
            result = self.__passive_read(4)
if result is None:
return None
(four_bytes, last_buffer_index, updates1) = result
(length,) = unpack('>I', four_bytes)
result = self.__passive_read(length, last_buffer_index)
if result is None:
return None
(data, last_buffer_index, updates2) = result
# If we get here, we found a message. Remove it from the buffers.
for updates in (updates1, updates2):
for update in updates:
(buffer_index, buffer_, length_consumed) = update
self.__buffers[buffer_index] = buffer_ if buffer_ else ''
self.__length -= length_consumed
self.__read_buffer_index = last_buffer_index
self.__hits += 1
if self.__hits >= self.__class__.__cleanup_interval:
self.__cleanup()
self.__hits = 0
return data
def __passive_read(self, length, start_buffer_index=None):
"""Read the given length of bytes, or return None if we can't provide
[all of] them yet. When the given length is available but ends in the
middle of a buffer, we'll split the buffer. We do this to make it
simpler to continue from that point next time (it's always simpler to
start at the beginning of a buffer), as well as simpler to remove the
found bytes later, if need be.
"""
if length > self.__length:
return None
with self.__class__.__locker:
collected = []
need_bytes = length
i = start_buffer_index if start_buffer_index is not None \
else self.__read_buffer_index
updates = []
while need_bytes > 0:
len_current_buffer = len(self.__buffers[i])
if need_bytes >= len_current_buffer:
# We need at least as many bytes as are in the current
# buffer. Consume them all.
collected.append(self.__buffers[i][:])
updates.append((i, [], len_current_buffer))
need_bytes -= len_current_buffer
else:
# We need less bytes than are in the current buffer. Slice
# the current buffer in half, even if the data isn't going
# anywhere [yet].
first_half = self.__buffers[i][:need_bytes]
second_half = self.__buffers[i][need_bytes:]
self.__buffers[i] = first_half
self.__buffers.insert(i + 1, second_half)
# We only mark the buffer that came from the first half as
# having an update (the second half of the buffer wasn't
# touched).
collected.append(first_half)
updates.append((i, [], need_bytes))
need_bytes = 0
i += 1
sequence = ''.join(collected)
return (sequence, i, updates)
    def __cleanup(self):
        """Clip buffers at the top of our list that have been completely
        exhausted.
"""
# TODO: Test this.
with self.__class__.__locker:
while self.__read_buffer_index > 0:
del self.__buffers[0]
self.__read_buffer_index -= 1
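# --- usage sketch (editor's addition; Python 2, like the module itself) ---
# Messages are framed as a big-endian 32-bit length followed by that many
# bytes (matching the unpack('>I', ...) above), and may arrive split
# across arbitrary push() calls.
if __name__ == '__main__':
    from struct import pack
    rb = ReadBuffer()
    frame = pack('>I', 5) + 'hello'
    rb.push(frame[:3])                 # partial frame: no message yet
    assert rb.read_message() is None
    rb.push(frame[3:])                 # remainder arrives
    assert rb.read_message() == 'hello'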

rangadi/beam | sdks/python/apache_beam/runners/direct/direct_runner_test.py | Python | apache-2.0 | 4,539 | 0.003084

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import threading
import unittest
import hamcrest as hc
import apache_beam as beam
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metric import Metrics
from apache_beam.metrics.metricbase import MetricName
from apache_beam.pipeline import Pipeline
from apache_beam.runners import DirectRunner
from apache_beam.runners import TestDirectRunner
from apache_beam.runners import create_runner
from apache_beam.testing import test_pipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class DirectPipelineResultTest(unittest.TestCase):
def test_waiting_on_result_stops_executor_threads(self):
pre_test_threads = set(t.ident for t in threading.enumerate())
for runner in ['DirectRunner', 'BundleBasedDirectRunner',
'SwitchingDirectRunner']:
pipeline = test_pipeline.TestPipeline(runner=runner)
_ = (pipeline | beam.Create([{'foo': 'bar'}]))
result = pipeline.run()
result.wait_until_finish()
post_test_threads = set(t.ident for t in threading.enumerate())
new_threads = post_test_threads - pre_test_threads
self.assertEqual(len(new_threads), 0)
def test_direct_runner_metrics(self):
class MyDoFn(beam.DoFn):
def start_bundle(self):
count = Metrics.counter(self.__class__, 'bundles')
count.inc()
def finish_bundle(self):
count = Metrics.counter(self.__class__, 'finished_bundles')
count.inc()
def process(self, element):
gauge = Metrics.gauge(self.__class__, 'latest_element')
gauge.set(element)
count = Metrics.counter(self.__class__, 'elements')
count.inc()
distro = Metrics.distribution(self.__class__, 'element_dist')
distro.update(element)
return [element]
p = Pipeline(DirectRunner())
pcoll = (p | beam.Create([1, 2, 3, 4, 5])
| 'Do' >> beam.ParDo(MyDoFn()))
assert_that(pcoll, equal_to([1, 2, 3, 4, 5]))
result = p.run()
result.wait_until_finish()
metrics = result.metrics().query()
namespace = '{}.{}'.format(MyDoFn.__module__,
MyDoFn.__name__)
hc.assert_that(
metrics['counters'],
hc.contains_inanyorder(
MetricResult(
MetricKey('Do', MetricName(namespace, 'elements')),
5, 5),
MetricResult(
MetricKey('Do', MetricName(namespace, 'bundles')),
1, 1),
MetricResult(
MetricKey('Do', MetricName(namespace, 'finished_bundles')),
1, 1)))
hc.assert_that(
metrics['distributions'],
hc.contains_inanyorder(
MetricResult(
MetricKey('Do', MetricName(namespace, 'element_dist')),
DistributionResult(DistributionData(15, 5, 1, 5)),
DistributionResult(DistributionData(15, 5, 1, 5)))))
gauge_result = metrics['gauges'][0]
hc.assert_that(
gauge_result.key,
hc.equal_to(MetricKey('Do', MetricName(namespace, 'latest_element'))))
hc.assert_that(gauge_result.committed.value, hc.equal_to(5))
hc.assert_that(gauge_result.attempted.value, hc.equal_to(5))
def test_create_runner(self):
self.assertTrue(
isinstance(create_runner('DirectRunner'),
DirectRunner))
self.assertTrue(
isinstance(create_runner('TestDirectRunner'),
TestDirectRunner))
if __name__ == '__main__':
unittest.main()

smmribeiro/intellij-community | python/testData/completion/superMethodWithAnnotation.after.py | Python | apache-2.0 | 202 | 0.009901

from typing import Dict
class Parent:
    def overridable_method(self, param: str) -> Dict[str, str]:
pass
class Child(Parent):
def overridable_method(self, param: str) -> Dict[str, str]:

ThinkmanWang/NotesServer | 3rd-lib/DBUtils-1.1/setup.py | Python | apache-2.0 | 2,421 | 0.003717

"""Setup Script for DBUtils"""
__version__ = '1.1'
__revision__ = "$Rev: 8220 $"
__date__ = "$Date: 2011-08-14 14:01:04 +0200 (So, 14. Aug 2011) $"
from sys import version_info
py_version = version_info[:2]
if not (2, 3) <= py_version < (3, 0):
raise ImportError('Python %d.%d is not supported by DBUtils.' % py_version)
import warnings
warnings.filterwarnings('ignore', 'Unknown distribution option')
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.dist import DistributionMetadata
except ImportError:
pass
else:
try:
DistributionMetadata.classifiers
except AttributeError:
DistributionMetadata.classifiers = None
try:
DistributionMetadata.download_url
except AttributeError:
DistributionMetadata.download_url = None
try:
DistributionMetadata.package_data
except AttributeError:
DistributionMetadata.package_data = None
try:
DistributionMetadata.zip_safe
except AttributeError:
        DistributionMetadata.zip_safe = None
setup(
name='DBUtils',
version=__version__,
description='Database connections for multi-threaded environments.',
long_description='''\
DBUtils is a suite of tools providing solid, persistent and pooled connections
to a database that can be used in all kinds of multi-threaded environments
like Webware for Python or other web application servers. The suite supports
DB-API 2 compliant database interfaces and the classic PyGreSQL interface.
''',
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Open Software License',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
author='Christoph Zwerschke',
author_email='cito@online.de',
url='http://www.webwareforpython.org/DBUtils',
download_url='http://www.webwareforpython.org/downloads/DBUtils/',
platforms=['any'],
license='Open Software License',
packages=['DBUtils', 'DBUtils.Examples', 'DBUtils.Tests'],
package_data={'DBUtils': ['Docs/*']},
zip_safe=0
)

abantos/bolt | test/test_btoptions.py | Python | mit | 3,939 | 0.007616

"""
"""
import logging
import unittest
import bolt._btoptions as btoptions
class TestOptions(unittest.TestCase):
def setUp(self):
# If we don't initialize it from an empty options list, it parses
        # the arguments to nosetests and the tests fail. The actual code using
# it doesn't need to do it because it will parse the arguments to
# bolt.
self.default_options = btoptions.Options([])
return super(TestOptions, self).setUp()
    def test_default_command_is_properly_initialized(self):
self.assertEqual(self.default_options.command, btoptions.Default.COMMAND)
def test_command_is_version_if_switch_specified(self):
self.given(btoptions.OptionSwitch.VERSION_LONG)
self.assertEqual(self.options.command, btoptions.Commands.VERSION)
    def test_default_task_is_properly_initialized(self):
self.assertEqual(self.default_options.task, btoptions.Default.TASK)
def test_returns_correct_task_if_specified(self):
task = 'a_task'
self.given(task)
self.assertEqual(self.options.task, task)
def test_default_boltfile_is_properly_initialized(self):
self.assertEqual(self.default_options.bolt_file, btoptions.Default.BOLTFILE)
def test_sets_boltfile_with_long_switch(self):
boltfile = 'a_bolt_file.py'
self.given(btoptions.OptionSwitch.BOLTFILE_LONG, boltfile)
self.assertEqual(self.options.bolt_file, boltfile)
def test_sets_boltfile_with_short_switch(self):
boltfile = 'a_bolt_file.py'
self.given(btoptions.OptionSwitch.BOLTFILE_SHORT, boltfile)
self.assertEqual(self.options.bolt_file, boltfile)
def test_default_log_level_is_properly_initialized(self):
self.assertEqual(self.default_options.log_level, btoptions.Default.LOG_LEVEL)
def test_sets_log_level_with_long_switch(self):
log_level = 'error'
self.given(btoptions.OptionSwitch.LOG_LEVEL_LONG, log_level)
self.assertEqual(self.options.log_level, logging.ERROR)
def test_sets_log_level_with_short_switch(self):
log_level = 'debug'
self.given(btoptions.OptionSwitch.LOG_LEVEL_SHORT, log_level)
self.assertEqual(self.options.log_level, logging.DEBUG)
def test_converts_correctly_from_log_level_string_to_logging_level(self):
# NOTSET
self.verify_log_level('', logging.NOTSET)
self.verify_log_level('n', logging.NOTSET)
self.verify_log_level('notset', logging.NOTSET)
# DEBUG
self.verify_log_level('d', logging.DEBUG)
self.verify_log_level('dbg', logging.DEBUG)
self.verify_log_level('debug', logging.DEBUG)
def test_default_log_file_is_properly_initialized(self):
self.assertEqual(self.default_options.log_file, btoptions.Default.LOG_FILE)
def test_sets_the_log_file_with_long_switch(self):
log_file = 'log.txt'
self.given(btoptions.OptionSwitch.LOG_FILE_LONG, log_file)
self.assertEqual(self.options.log_file, log_file)
def test_sets_the_log_file_with_short_switch(self):
log_file = 'log.txt'
self.given(btoptions.OptionSwitch.LOG_FILE_SHORT, log_file)
self.assertEqual(self.options.log_file, log_file)
def test_continue_on_error_is_properly_initialized(self):
self.assertEqual(self.default_options.continue_on_error, btoptions.Default.CONTINUE_ON_ERROR)
def test_sets_continue_on_error_with_long_switch(self):
self.given(btoptions.OptionSwitch.CONTINUE_ON_ERROR_LONG)
self.assertTrue(self.options.continue_on_error)
def given(self, *args):
self.options = btoptions.Options(args)
def verify_log_level(self, str_level, expected):
self.given(btoptions.OptionSwitch.LOG_LEVEL_LONG, str_level)
self.assertEqual(self.options.log_level, expected)
if __name__ == "__main__":
unittest.main()
|
brokendata/bigmler
|
bigmler/command.py
|
Python
|
apache-2.0
| 6,139
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - Command and Stored Command class for command retrieval
"""
from __future__ import absolute_import
import os
import shlex
import bigmler.processing.args as a
import bigmler.utils as u
from bigml.multivote import PLURALITY
from bigmler.defaults import DEFAULTS_FILE
from bigmler.defaults import get_user_defaults
from bigmler.prediction import MAX_MODELS
from bigmler.parser import create_parser
COMMAND_LOG = u".bigmler"
DIRS_LOG = u".bigmler_dir_stack"
SESSIONS_LOG = u"bigmler_sessions"
def tail(file_handler, window=1):
"""Returns the last n lines of a file.
"""
bufsiz = 1024
file_handler.seek(0, 2)
file_bytes = file_handler.tell()
size = window + 1
block = -1
data = []
    while size > 0 and file_bytes > 0:
if (file_bytes - bufsiz) > 0:
# Seek back one whole bufsiz
file_handler.seek(block * bufsiz, 2)
# read BUFFER
new_data = [file_handler.read(bufsiz)]
new_data.extend(data)
data = new_data
else:
            # file too small, start from beginning
file_handler.seek(0, 0)
# only read what was not read
data.append(file_handler.read(file_bytes))
lines_found = data[0].count('\n')
size -= lines_found
file_bytes -= bufsiz
block -= 1
return ''.join(data).splitlines()[-window:]
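# Editor's note (hedged sketch, not in the original module): tail() expects a
# file object whose read() returns str; a minimal use, assuming a hypothetical
# "session.log" file exists, would be:
#     with open("session.log", "r") as handler:
#         last_two = tail(handler, window=2)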
def get_log_reversed(file_name, stack_level):
"""Reads the line of a log file that has the chosen stack_level
"""
    with open(file_name, "r") as file_handler:
        lines_list = tail(file_handler, window=(stack_level + 1))
    return lines_list[0].decode(u.SYSTEM_ENCODING)
def get_stored_command(args, debug=False, command_log=COMMAND_LOG,
dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG):
"""Restoring the saved command from stack to the arguments object
"""
# Restore the args of the call to resume from the command log file
stored_command = StoredCommand(args, command_log, dirs_log)
command = Command(None, stored_command=stored_command)
# Logs the issued command and the resumed command
session_file = os.path.join(stored_command.output_dir, sessions_log)
stored_command.log_command(session_file=session_file)
# Parses resumed arguments.
command_args = a.parse_and_check(command)
if debug:
# set debug on if it wasn't in the stored command but now is
command_args.debug = True
return command_args, session_file, stored_command.output_dir
class Command(object):
"""Objects derived from user given command and the user defaults file
"""
def __init__(self, args, stored_command=None):
self.stored = (args is None and
isinstance(stored_command, StoredCommand))
self.args = args if not self.stored else stored_command.args
self.resume = not self.stored and '--resume' in self.args
self.defaults_file = (None if not self.stored else
os.path.join(stored_command.output_dir,
DEFAULTS_FILE))
self.user_defaults = get_user_defaults(self.defaults_file)
self.command = (a.get_command_message(self.args) if not self.stored
else stored_command.command)
self.parser, self.common_options = create_parser(
general_defaults=self.user_defaults,
constants={'NOW': a.NOW,
'MAX_MODELS': MAX_MODELS,
'PLURALITY': PLURALITY})
self.flags, self.train_stdin, self.test_stdin = a.get_flags(self.args)
class StoredCommand(object):
"""Objects derived from a stored bigmler command
"""
def __init__(self, resume_args, command_log, dirs_log, stack_level=0):
"""Constructor that extracts the command from the file
``command_log``: file for stored commands
``dirs_log``: file for associated work directories
``stack_level``: index in the stack for the command to be retrieved
"""
self.resume_command = a.get_command_message(resume_args)
self.command = get_log_reversed(command_log, stack_level)
self.output_dir = get_log_reversed(dirs_log, stack_level)
self.defaults_file = os.path.join(self.output_dir, DEFAULTS_FILE)
self.args = [arg.decode(u.SYSTEM_ENCODING) for arg in
shlex.split(self.command.encode(u.SYSTEM_ENCODING))[1:]]
if not ("--output" in self.args or "--output-dir" in self.args):
current_directory = u"%s%s" % (os.getcwd(), os.sep)
if self.output_dir.startswith(current_directory):
self.output_dir = self.output_dir.replace(current_directory,
"", 1)
self.args.append("--output-dir")
self.args.append(self.output_dir)
def log_command(self, session_file=None):
"""Logging the resumed command in the sessions_log file
"""
u.log_message(self.resume_command, log_file=session_file)
message = u"\nResuming command:\n%s\n\n" % self.command
u.log_message(message, log_file=session_file, console=True)
try:
with open(self.defaults_file, 'r') as defaults_handler:
contents = defaults_handler.read()
message = u"\nUsing the following defaults:\n%s\n\n" % contents
u.log_message(message, log_file=session_file, console=True)
except IOError:
pass
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_30/ar_/test_artificial_1024_Quantization_PolyTrend_30__0.py
|
Python
|
bsd-3-clause
| 268
| 0.085821
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
|
Osmose/monews
|
monews/wsgi.py
|
Python
|
mit
| 387
| 0.002584
|
"""
WSGI config for monews project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monews.setti
|
ngs")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
thkim107/sim
|
KAKAO_DATA_PREPARE_eval_multithread.py
|
Python
|
mit
| 1,759
| 0.030131
|
import h5py
from scipy.spatial import distance
import scipy.misc
import numpy as np
from joblib import Parallel, delayed # installation by 'conda install joblib'
#path = '/home/sungkyun/Dropbox/kakao coversong git/sim/data/training/CP_1000ms_training_s2113_d2113_170106223452.h5'
path = '/home/sungkyun/Dropbox/kakao coversong git/sim/data/eval/Eval1000_CP_1000ms.h5'
f1 = h5py.File(path)
datasetNames=[n for n in f1.keys()]
X = f1['X']
#%%
def oti(cover1,cover2,chroma_dim):
cover1_mean = np.sum(cover1,axis=0)/np.max(np.sum(cover1,axis=0))
    cover2_mean = np.sum(cover2,axis=0)/np.max(np.sum(cover2,axis=0))
dist_store = np.zeros(chroma_dim)
for i in range(0,chroma_dim):
cover2_mean_shifted = np.roll(cover2_mean, i)
dist = np.dot(cover1_mean,cover2_mean_shifted)
dist_store[i] = dist
oti = np.argmax(dist_store)
cover2_shifted = np.roll(cover2, oti, axis=1)
return cover1, cover2_shifted
def simple_matrix(X,Y):
    XX, YY = oti(X, Y, 12)
M = [[0 for col in range(180)] for row in range(180)]
for i in range(180):
for j in range(180):
M[i][j] = distance.euclidean(XX[i,:],YY[j,:])
return np.asarray(M)
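# Editor's note (hedged): oti() above appears to compute an Optimal
# Transposition Index, rotating one mean chroma vector until its dot product
# with the other is maximal, so both songs are compared in a common key;
# simple_matrix() then builds a 180x180 Euclidean distance matrix over the
# first 180 frames of the aligned chromagrams. Assuming X holds
# (n_frames, 12) chroma arrays:
#     D = simple_matrix(X[0], X[1])  # D.shape == (180, 180)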
#%% Preprocess & Save Eval data
def my_func(idx_start):
for i in range(idx_start, idx_start+1):
print((str)(i)+'th start processing')
for j in range(1000):
scipy.misc.imsave('/home/sungkyun/Data/KAKAO_ALL_PAIR_EVAL/'+'{:0=4}'.format(i)+'_'+'{:0=4}'.format(j)+'.jpg',simple_matrix(X[i],X[j]))
print((str)(i)+'th complete')
return 0
#%% multithread : n_jobs=-1 uses all available cores
idx_start=range(0,330)
n_thread = -1
_ = Parallel(n_jobs=n_thread, verbose=10, backend="multiprocessing")(map(delayed(my_func), idx_start ))
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/effective_network_security_group_list_result.py
|
Python
|
mit
| 1,400
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
"""Response for list effective network security groups API service call.
Variables are only populated by the server, and will be ignored when
sending a request.
:param value: A list of effective network security groups.
:type value:
list[~azure.mgmt.network.v2017_08_01.models.EffectiveNetworkSecurityGroup]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None):
super(EffectiveNetworkSecurityGroupListResult, self).__init__()
self.value = value
self.next_link = None
|
yuanxu/django-scaffold
|
scaffold_toolkit/zui/forms.py
|
Python
|
gpl-2.0
| 5,477
| 0.000183
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.admin.widgets import AdminFileWidget
from django.forms import (
HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput,
PasswordInput
)
from django.forms.widgets import CheckboxInput
from .zui import (
    get_zui_setting, get_form_renderer, get_field_renderer,
get_formset_renderer
)
from .text import text_concat, text_value
from .exceptions import BootstrapError
from .utils import add_css_class, render_tag, split_css_classes
from .components import render_icon
FORM_GROUP_CLASS = 'form-group'
def render_formset(formset, **kwargs):
"""
Render a formset to a Bootstrap layout
"""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render()
def render_formset_errors(formset, **kwargs):
"""
Render formset errors to a Bootstrap layout
"""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors()
def render_form(form, **kwargs):
"""
Render a form to a Bootstrap layout
"""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render()
def render_form_errors(form, type='all', **kwargs):
"""
Render form errors to a Bootstrap layout
"""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(type)
def render_field(field, **kwargs):
"""
Render a field to a Bootstrap layout
"""
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render()
def render_label(content, label_for=None, label_class=None, label_title=''):
"""
Render a label with content
"""
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
return render_tag('label', attrs=attrs, content=content)
def render_button(
content, button_type=None, icon=None, button_class='', size='',
href='', name=None, value=None):
"""
Render a button with content
"""
attrs = {}
classes = add_css_class('btn', button_class)
size = text_value(size).lower().strip()
if size == 'xs':
classes = add_css_class(classes, 'btn-xs')
elif size == 'sm' or size == 'small':
classes = add_css_class(classes, 'btn-sm')
elif size == 'lg' or size == 'large':
classes = add_css_class(classes, 'btn-lg')
elif size == 'md' or size == 'medium':
pass
elif size:
        raise BootstrapError(
            'Parameter "size" should be "xs", "sm", "md", "lg" or ' +
            'empty ("{}" given).'.format(size))
if button_type:
if button_type == 'submit':
if not any([c.startswith('btn-') for c in split_css_classes(classes)]):
classes = add_css_class(classes, 'btn-primary')
elif button_type not in ('reset', 'button', 'link'):
raise BootstrapError(
'Parameter "button_type" should be "submit", "reset", ' +
'"button", "link" or empty ("{}" given).'.format(button_type))
attrs['type'] = button_type
attrs['class'] = classes
icon_content = render_icon(icon) if icon else ''
if href:
attrs['href'] = href
tag = 'a'
else:
tag = 'button'
if name:
attrs['name'] = name
if value:
attrs['value'] = value
return render_tag(
tag, attrs=attrs, content=text_concat(
icon_content, content, separator=' '))
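# Editor's note (hedged usage sketch): a direct call such as
#     render_button('Save', button_type='submit', size='sm')
# should yield a <button type="submit" class="btn btn-sm btn-primary"> tag,
# since submit buttons without an explicit btn- class default to btn-primary
# above.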
def render_field_and_label(
field, label, field_class='', label_for=None, label_class='',
layout='', **kwargs):
"""
Render a field with its label
"""
if layout == 'horizontal':
if not label_class:
label_class = get_zui_setting('horizontal_label_class')
if not field_class:
field_class = get_zui_setting('horizontal_field_class')
if not label:
label = ' '
label_class = add_css_class(label_class, 'control-label')
html = field
if field_class:
html = '<div class="{klass}">{html}</div>'.format(
klass=field_class, html=html)
if label:
html = render_label(
label, label_for=label_for, label_class=label_class) + html
return html
def render_form_group(content, css_class=FORM_GROUP_CLASS):
"""
Render a Bootstrap form group
"""
return '<div class="{klass}">{content}</div>'.format(
klass=css_class,
content=content,
)
def is_widget_required_attribute(widget):
"""
Is this widget required?
"""
if not get_zui_setting('set_required'):
return False
if not widget.is_required:
return False
if isinstance(
widget, (
AdminFileWidget, HiddenInput, FileInput,
CheckboxInput, CheckboxSelectMultiple)):
return False
return True
def is_widget_with_placeholder(widget):
"""
Is this a widget that should have a placeholder?
Only text, search, url, tel, e-mail, password, number have placeholders
    These are all derived from TextInput, except for Textarea
"""
# PasswordInput inherits from Input in Django 1.4.
# It was changed to inherit from TextInput in 1.5.
return isinstance(widget, (TextInput, Textarea, PasswordInput))
|
oesteban/tract_querier
|
tract_querier/setup.py
|
Python
|
bsd-3-clause
| 766
| 0.001305
|
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('tract_querier', parent_package, top_path)
config.add_subpackage('tractography')
config.add_subpackage('tract_math')
config.add_subpackage('code_util')
config.add_subpackage('tensor')
config.add_subpackage('nipype')
config.add_data_files(('queries', [
'data/FreeSurfer.qry',
'data/JHU_MNI_SS_WMPM_Type_I.qry',
'data/JHU_MNI_SS_WMPM_Type_II.qry',
        'data/freesurfer_queries.qry',
'data/mori_queries.qry',
]))
return config
if __name__ == '__main__':
from distutils.core import setup
    setup(**configuration(top_path='').todict())
|
maruohon/Minecraft-Overviewer
|
overviewer_core/files.py
|
Python
|
gpl-3.0
| 6,437
| 0.005127
|
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import tempfile
import shutil
import logging
import stat
default_caps = {"chmod_works": True, "rename_works": True}
def get_fs_caps(dir_to_test):
return {"chmod_works": does_chmod_work(dir_to_test),
"rename_works": does_rename_work(dir_to_test)
}
def does_chmod_work(dir_to_test):
"Detects if chmod works in a given directory"
# a CIFS mounted FS is the only thing known to reliably not provide chmod
if not os.path.isdir(dir_to_test):
return True
f1 = tempfile.NamedTemporaryFile(dir=dir_to_test)
try:
f1_stat = os.stat(f1.name)
os.chmod(f1.name, f1_stat.st_mode | stat.S_IRUSR)
chmod_works = True
logging.debug("Detected that chmods work in %r" % dir_to_test)
except OSError:
chmod_works = False
logging.debug("Detected that chmods do NOT work in %r" % dir_to_test)
return chmod_works
def does_rename_work(dir_to_test):
with tempfile.NamedTemporaryFile(dir=dir_to_test) as f1:
with tempfile.NamedTemporaryFile(dir=dir_to_test) as f2:
try:
os.rename(f1.name,f2.name)
except OSError:
renameworks = False
logging.debug("Detected that overwriting renames do NOT work in %r" % dir_to_test)
else:
renameworks = True
logging.debug("Detected that overwriting renames work in %r" % dir_to_test)
# re-make this file so it can be deleted without error
open(f1.name, 'w').close()
return renameworks
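# Editor's note (hedged usage sketch): the two probes above are meant to feed
# the copy helpers below, e.g. (both paths are hypothetical):
#     caps = get_fs_caps("/var/www/map")
#     mirror_dir("render_out", "/var/www/map", capabilities=caps)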
## useful recursive copy, that ignores common OS cruft
def mirror_dir(src, dst, entities=None, capabilities=default_caps):
'''copies all of the entities from src to dst'''
chmod_works = capabilities.get("chmod_works")
if not os.path.exists(dst):
os.mkdir(dst)
if entities and type(entities) != list: raise Exception("Expected a list, got a %r instead" % type(entities))
# files which are problematic and should not be copied
# usually, generated by the OS
skip_files = ['Thumbs.db', '.DS_Store']
for entry in os.listdir(src):
if entry in skip_files:
continue
if entities and entry not in entities:
continue
if os.path.isdir(os.path.join(src,entry)):
mirror_dir(os.path.join(src, entry), os.path.join(dst, entry), capabilities=capabilities)
elif os.path.isfile(os.path.join(src,entry)):
try:
if chmod_works:
shutil.copy(os.path.join(src, entry), os.path.join(dst, entry))
else:
shutil.copyfile(os.path.join(src, entry), os.path.join(dst, entry))
except IOError as outer:
try:
# maybe permission problems?
src_stat = os.stat(os.path.join(src, entry))
os.chmod(os.path.join(src, entry), src_stat.st_mode | stat.S_IRUSR)
dst_stat = os.stat(os.path.join(dst, entry))
os.chmod(os.path.join(dst, entry), dst_stat.st_mode | stat.S_IWUSR)
except OSError: # we don't care if this fails
pass
# try again; if this stills throws an error, let it propagate up
                if chmod_works:
                    shutil.copy(os.path.join(src, entry), os.path.join(dst, entry))
else:
shutil.copyfile(os.path.join(src, entry), os.path.join(dst, entry))
# Define a context manager to handle atomic renaming or "just forget it write
# straight to the file" depending on whether os.rename provides atomic
# overwrites.
# Detect whether os.rename will overwrite files
doc = """This class acts as a context manager for files that are to be written
out overwriting an existing file.
The parameter is the destination filename. The value returned into the context
is the filename that should be used. On systems that support an atomic
os.rename(), the filename will actually be a temporary file, and it will be
atomically replaced over the destination file on exit.
On systems that don't support an atomic rename, the filename returned is the
filename given.
If an error is encountered, the file is attempted to be removed, and the error
is propagated.
Example:
with FileReplacer("config") as configname:
        with open(configname, 'w') as configout:
configout.write(newconfig)
"""
class FileReplacer(object):
__doc__ = doc
def __init__(self, destname, capabilities=default_caps):
self.caps = capabilities
self.destname = destname
if self.caps.get("rename_works"):
self.tmpname = destname + ".tmp"
def __enter__(self):
if self.caps.get("rename_works"):
# rename works here. Return a temporary filename
return self.tmpname
return self.destname
def __exit__(self, exc_type, exc_val, exc_tb):
if self.caps.get("rename_works"):
if exc_type:
# error
try:
os.remove(self.tmpname)
except Exception, e:
logging.warning("An error was raised, so I was doing "
"some cleanup first, but I couldn't remove "
"'%s'!", self.tmpname)
else:
# copy permission bits, if needed
if self.caps.get("chmod_works") and os.path.exists(self.destname):
shutil.copymode(self.destname, self.tmpname)
# atomic rename into place
os.rename(self.tmpname, self.destname)
|
tbttfox/TwistyTools
|
ttUI/QtFiles/combo.py
|
Python
|
gpl-3.0
| 899
| 0.002225
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'combotest.ui'
#
# Created: Wed Feb 29 11:35:04 2012
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Form(object):
    def setupUi(self, Form):
Form.setObjectName("Form")
self.comboMode = QtGui.QComboBox(Form)
self.comboMode.setMaxCount(2)
self.comboMode.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
self.comboMode.setObjectName("comboMode")
self.comboMode.addItem("Puzzle Mode")
self.comboMode.addItem("Piece Mode")
QtCore.QMetaObject.connectSlotsByName(Form)
if __name__ == "__main__":
import sys
    app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
petewarden/tensorflow
|
tensorflow/python/lib/core/bfloat16_test.py
|
Python
|
apache-2.0
| 17,940
| 0.005128
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.core import _pywrap_bfloat16
from tensorflow.python.platform import test
bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
def numpy_assert_allclose(a, b, **kwargs):
a = a.astype(np.float32) if a.dtype == bfloat16 else a
b = b.astype(np.float32) if b.dtype == bfloat16 else b
return np.testing.assert_allclose(a, b, **kwargs)
epsilon = float.fromhex("1.0p-7")
# Values that should round trip exactly to float and back.
FLOAT_VALUES = [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"),
float("-inf"),
float("nan")
]
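# Editor's note (hedged): float.fromhex("1.0p-7") == 2**-7 == 0.0078125, the
# machine epsilon of bfloat16 (7 explicit significand bits), so the
# 1.0 +/- epsilon entries above probe exact representability at that precision.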
class Bfloat16Test(parameterized.TestCase):
"""Tests the non-numpy Python methods of the bfloat16 type."""
def testRoundTripToFloat(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, float(bfloat16(v)))
def testRoundTripNumpyTypes(self):
for dtype in [np.float16, np.float32, np.float64]:
np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75))))
np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5))))
np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype))))
np.testing.assert_equal(
np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + dtype.__name__,
"dtype": dtype
} for dtype in [bfloat16, np.float16, np.float32, np.float64]))
def testRoundTripToNumpy(self, dtype):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, bfloat16(dtype(v)))
np.testing.assert_equal(v, dtype(bfloat16(dtype(v))))
np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype))))
if dtype != bfloat16:
np.testing.assert_equal(
np.array(FLOAT_VALUES, dtype),
bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("0", repr(bfloat16(0)))
self.assertEqual("1", repr(bfloat16(1)))
self.assertEqual("-3.5", repr(bfloat16(-3.5)))
self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", repr(bfloat16(float("inf"))))
self.assertEqual("-inf", repr(bfloat16(float("-inf"))))
self.assertEqual("nan", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
# Tests for Python operations
def testNegate(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(-v, float(-bfloat16(v)))
def testAdd(self):
np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1)))
np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5)))
np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
# Test type promotion against Numpy scalar values.
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25)))
self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25)))
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25)))
self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25)))
self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25)))
self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25)))
    self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25)))
self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25)))
    self.assertEqual(np.float32,
                     type(bfloat16(3.5) + np.array(2.25, np.float32)))
self.assertEqual(np.float32,
type(np.array(3.5, np.float32) + bfloat16(2.25)))
def testSub(self):
np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0)))
np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1)))
np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5)))
np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf"))))
np.testing.assert_equal(
float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1)))
np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1)))
np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def
|
agfor/chipy.org
|
chipy_org/apps/profiles/models.py
|
Python
|
mit
| 772
| 0
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
display_name = models.CharField(
        max_length=200, verbose_name='Name for Security Check In')
    show = models.BooleanField(
default=False, verbose_name="Show my information in the member list")
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
"""Create a matching profile whenever a user object is created."""
if created:
profile, new = UserProfile.objects.get_or_create(
user=instance, display_name=instance.get_full_name())
|
endlessm/chromium-browser
|
third_party/catapult/third_party/gsutil/third_party/apitools/samples/fusiontables_sample/fusiontables_v1/fusiontables_v1_client.py
|
Python
|
bsd-3-clause
| 34,754
| 0.004892
|
"""Generated client library for fusiontables version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from samples.fusiontables_sample.fusiontables_v1 import fusiontables_v1_messages as messages
class FusiontablesV1(base_api.BaseApiClient):
"""Generated client library for service fusiontables version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://www.googleapis.com/fusiontables/v1/'
_PACKAGE = u'fusiontables'
_SCOPES = [u'https://www.googleapis.com/auth/fusiontables', u'https://www.googleapis.com/auth/fusiontables.readonly']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'FusiontablesV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new fusiontables handle."""
url = url or self.BASE_URL
super(FusiontablesV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.column = self.ColumnService(self)
self.query = self.QueryService(self)
self.style = self.StyleService(self)
self.table = self.TableService(self)
self.task = self.TaskService(self)
self.template = self.TemplateService(self)
class ColumnService(base_api.BaseApiService):
"""Service class for the column resource."""
_NAME = u'column'
def __init__(self, client):
super(FusiontablesV1.ColumnService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes the column.
Args:
request: (FusiontablesColumnDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesColumnDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.column.delete',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
        relative_path=u'tables/{tableId}/columns/{columnId}',
request_field='',
        request_type_name=u'FusiontablesColumnDeleteRequest',
response_type_name=u'FusiontablesColumnDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves a specific column by its id.
Args:
request: (FusiontablesColumnGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.get',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field='',
request_type_name=u'FusiontablesColumnGetRequest',
response_type_name=u'Column',
supports_download=False,
)
def Insert(self, request, global_params=None):
r"""Adds a new column to the table.
Args:
request: (FusiontablesColumnInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
Insert.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.column.insert',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns',
request_field=u'column',
request_type_name=u'FusiontablesColumnInsertRequest',
response_type_name=u'Column',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of columns.
Args:
request: (FusiontablesColumnListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ColumnList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/columns',
request_field='',
request_type_name=u'FusiontablesColumnListRequest',
response_type_name=u'ColumnList',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the name or type of an existing column. This method supports patch semantics.
Args:
request: (FusiontablesColumnPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'fusiontables.column.patch',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field=u'column',
request_type_name=u'FusiontablesColumnPatchRequest',
response_type_name=u'Column',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates the name or type of an existing column.
Args:
request: (FusiontablesColumnUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'fusiontables.column.update',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field=u'column',
request_type_name=u'FusiontablesColumnUpdateRequest',
response_type_name=u'Column',
supports_download=False,
)
class QueryService(base_api.BaseApiService):
"""Service class for the query resource."""
_NAME = u'query'
def __init__(self, client):
super(FusiontablesV1.QueryService, self).__init__(client)
self._upload_configs = {
}
def Sql(self, request, global_params=None, download=None):
r"""Executes an SQL
|
z411/weabot
|
tenjin.py
|
Python
|
agpl-3.0
| 81,524
| 0.006526
|
##
## $Release: 1.1.1 $
## $Copyright: copyright(c) 2007-2012 kuwata-lab.com all rights reserved. $
## $License: MIT License $
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
"""Very fast and light-weight template engine based embedded Python.
See User's Guide and examples for details.
http://www.kuwata-lab.com/tenjin/pytenjin-users-guide.html
http://www.kuwata-lab.com/tenjin/pytenjin-examples.html
"""
__version__ = "$Release: 1.1.1 $"[10:-2]
__license__ = "$License: MIT License $"[10:-2]
__all__ = ('Template', 'Engine', )
import sys, os, re, time, marshal
from time import time as _time
from os.path import getmtime as _getmtime
from os.path import isfile as _isfile
random = pickle = unquote = None # lazy import
python3 = sys.version_info[0] == 3
python2 = sys.version_info[0] == 2
logger = None
##
## utilities
##
def _write_binary_file(filename, content):
global random
if random is None: from random import random
tmpfile = filename + str(random())[1:]
    f = open(tmpfile, 'w+b')   # on windows, 'w+b' is preferred over 'wb'
try:
f.write(content)
finally:
f.close()
if os.path.exists(tmpfile):
try:
os.rename(tmpfile, filename)
except:
os.remove(filename) # on windows, existing file should be removed before renaming
os.rename(tmpfile, filename)
def _read_binary_file(filename):
f = open(filename, 'rb')
try:
return f.read()
finally:
f.close()
codecs = None # lazy import
def _read_text_file(filename, encoding=None):
global codecs
if not codecs: import codecs
f = codecs.open(filename, encoding=(encoding or 'utf-8'))
try:
return f.read()
finally:
f.close()
def _read_template_file(filename, encoding=None):
s = _read_binary_file(filename) ## binary(=str)
if encoding: s = s.decode(encoding) ## binary(=str) to unicode
return s
_basestring = basestring
_unicode = unicode
_bytes = str
def _ignore_not_found_error(f, default=None):
try:
return f()
except OSError, ex:
if ex.errno == 2: # error: No such file or directory
return default
raise
def create_module(module_name, dummy_func=None, **kwargs):
"""ex. mod = create_module('tenjin.util')"""
try:
mod = type(sys)(module_name)
except:
# The module creation above does not work for Jython 2.5.2
import imp
mod = imp.new_module(module_name)
mod.__file__ = __file__
mod.__dict__.update(kwargs)
sys.modules[module_name] = mod
if dummy_func:
exec(dummy_func.func_code, mod.__dict__)
return mod
def _raise(exception_class, *args):
raise exception_class(*args)
##
## helper method's module
##
def _dummy():
global unquote
unquote = None
global to_str, escape, echo, new_cycle, generate_tostrfunc
global start_capture, stop_capture, capture_as, captured_as, CaptureContext
global _p, _P, _decode_params
def generate_tostrfunc(encode=None, decode=None):
"""Generate 'to_str' function with encode or decode encoding.
ex. generate to_str() function which encodes unicode into binary(=str).
to_str = tenjin.generate_tostrfunc(encode='utf-8')
repr(to_str(u'hoge')) #=> 'hoge' (str)
ex. generate to_str() function which decodes binary(=str) into unicode.
to_str = tenjin.generate_tostrfunc(decode='utf-8')
repr(to_str('hoge')) #=> u'hoge' (unicode)
"""
if encode:
if decode:
raise ValueError("can't specify both encode and decode encoding.")
else:
def to_str(val, _str=str, _unicode=unicode, _isa=isinstance, _encode=encode):
"""Convert val into string or return '' if None. Unicode will be encoded into binary(=str)."""
if _isa(val, _str): return val
if val is None: return ''
#if _isa(val, _unicode): return val.encode(_encode) # unicode to binary(=str)
if _isa(val, _unicode):
return val.encode(_encode) # unicode to binary(=str)
return _str(val)
else:
if decode:
def to_str(val, _str=str, _unicode=unicode, _isa=isinstance, _decode=decode):
"""Convert val into string or return '' if None. Binary(=str) will be decoded into unicode."""
#if _isa(val, _str): return val.decode(_decode) # binary(=str) to unicode
if _isa(val, _str):
return val.decode(_decode)
if val is None: return ''
if _isa(val, _unicode): return val
return _unicode(val)
else:
def to_str(val, _str=str, _unicode=unicode, _isa=isinstance):
"""Convert val into string or return '' if None. Both binary(=str) and unicode will be retruned as-is."""
if _isa(val, _str): return val
if val is None: return ''
if _isa(val, _unicode): return val
return _str(val)
return to_str
to_str = generate_tostrfunc(encode='utf-8') # or encode=None?
def echo(string):
"""add string value into _buf. this is equivarent to '#{string}'."""
lvars = sys._getframe(1).f_locals # local variables
lvars['_buf'].append(string)
def new_cycle(*values):
"""Generate cycle object.
ex.
cycle = new_cycle('odd', 'even')
          print(cycle()) #=> 'odd'
          print(cycle()) #=> 'even'
print(cycle()) #=> 'odd'
print(cycle()) #=> 'even'
"""
def gen(values):
i, n = 0, len(values)
while True:
yield values[i]
i = (i + 1) % n
return gen(values).next
class CaptureContext(object):
def __init__(self, name, store_to_context=True, lvars=None):
self.name = name
self.store_to_context = store_to_context
self.lvars = lvars or sys._getframe(1).f_locals
def __enter__(self):
lvars = self.lvars
self._buf_orig = lvars['_buf']
lvars['_buf'] = _buf = []
lvars['_extend'] = _buf.extend
return self
def __exit__(self, *args):
lvars = self.lvars
_buf = lvars['_buf']
lvars['_buf'] = self._buf_orig
lvars['_extend'] = self._buf_orig.extend
lvars[self.name] = self.captured = ''.join(_buf)
if self.store_to_context and '_context' in lvars:
lvars['_context'][self.name] = self.captured
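    # Editor's note (hedged sketch): inside a compiled template body, where a
    # local list _buf collects output, capture works roughly as
    #     _buf = []
    #     with CaptureContext('sidebar', store_to_context=False):
    #         _buf.append('<p>hi</p>')
    # after which the captured text is stored under the name 'sidebar' in the
    # caller's locals (and in _context when store_to_context is true).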
|
kencochrane/django-intercom
|
django_intercom/settings.py
|
Python
|
bsd-3-clause
| 925
| 0.005405
|
from django.conf import settings
INTERCOM_APPID = getattr(settings, 'INTERCOM_APPID', None)
INTERCOM_SECURE_KEY = getattr(settings, 'INTERCOM_SECURE_KEY', None)
INTERCOM_ENABLE_INBOX = getattr(settings, 'INTERCOM_ENABLE_INBOX', True)
INTERCOM_ENABLE_INBOX_COUNTER = getattr(settings, 'INTERCOM_ENABLE_INBOX_COUNTER', True)
INTERCOM_INBOX_CSS_SELECTOR = getattr(settings, 'INTERCOM_INBOX_CSS_SELECTOR', '#Intercom')
INTERCOM_USER_DATA_CLASS = getattr(settings, 'INTERCOM_USER_DATA_CLASS', None)
INTERCOM_CUSTOM_DATA_CLASSES = getattr(settings, 'INTERCOM_CUSTOM_DATA_CLASSES', None)
INTERCOM_COMPANY_DATA_CLASS = getattr(settings, 'INTERCOM_COMPANY_DATA_CLASS', None)
INTERCOM_DISABLED = getattr(settings, 'INTERCOM_DISABLED', False)
INTERCOM_INCLUDE_USERID = getattr(settings, 'INTERCOM_INCLUDE_USERID', True)
INTERCOM_UNAUTHENTICATED_USER_EMAIL = getattr(settings, 'INTERCOM_UNAUTHENTICATED_USER_EMAIL', 'lead@example.com')
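# Editor's note (hedged example): a host project overrides these in its own
# settings module, e.g. (both values below are hypothetical):
#     INTERCOM_APPID = "abc123"
#     INTERCOM_USER_DATA_CLASS = "myapp.intercom.UserData"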
|
enthought/etsproxy
|
enthought/kiva/pdfmetrics.py
|
Python
|
bsd-3-clause
| 45
| 0
|
# proxy module
from kiva.pdfmetrics import *
|
|
GuLinux/PySpectrum
|
qtcommons.py
|
Python
|
gpl-3.0
| 3,204
| 0.009363
|
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QFileDialog, QToolBar, QToolButton, QMenu, QAction, QLabel, QApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QStandardPaths, QTimer
import os
class QtCommons:
def nestWidget(parent, child):
l = QVBoxLayout()
l.setContentsMargins(0,0,0,0)
l.setSpacing(0)
parent.setLayout(l)
parent.layout().addWidget(child)
return child
def open_files(title, file_types, on_ok, dir='', parent=None):
        def setup_dialog(dialog):
dialog.setFileMode(QFileDialog.ExistingFiles)
dialog.setAcceptMode(QFileDialog.AcceptOpen)
dialog.filesSelected.connect(on_ok)
        QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent)
def open_file(title, file_types, on_ok, dir='', parent=None):
def setup_dialog(dialog):
dialog.setFileMode(QFileDialog.ExistingFile)
dialog.setAcceptMode(QFileDialog.AcceptOpen)
dialog.fileSelected.connect(lambda file:on_ok((file,dialog.selectedNameFilter)))
QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent)
def open_dir(title, on_ok, dir='', parent=None):
def setup_dialog(dialog):
dialog.setFileMode(QFileDialog.Directory)
dialog.setOption(QFileDialog.ShowDirsOnly)
dialog.setAcceptMode(QFileDialog.AcceptOpen)
dialog.fileSelected.connect(lambda f: on_ok((f, )))
QtCommons.__open_dialog__(title, None, dir, setup_dialog, parent)
def save_file(title, file_types, on_ok, dir='', parent=None):
def setup_dialog(dialog):
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setDefaultSuffix('fit')
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.fileSelected.connect(lambda file:on_ok((file,dialog.selectedNameFilter)))
QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent)
def __open_dialog__(title, file_types, dir, setup_dialog, parent=None):
dialog = QFileDialog(parent)
if file_types: dialog.setNameFilter(file_types)
dialog.setDirectory(dir)
dialog.setWindowTitle(title)
setup_dialog(dialog)
dialog.finished.connect(lambda: dialog.deleteLater())
dialog.show()
def addToolbarPopup(toolbar, text = None, icon_name = None, icon_file = None, actions = [], popup_mode = QToolButton.InstantPopup, toolbutton_style=Qt.ToolButtonTextBesideIcon):
button = QToolButton()
button.setToolButtonStyle(toolbutton_style)
button.setDefaultAction(QAction(button))
if text:
button.defaultAction().setText(text)
button.defaultAction().setIconText(text)
button.setPopupMode(popup_mode)
button.setMenu(QMenu())
if icon_name:
button.defaultAction().setIcon(QIcon.fromTheme(icon_name))
if icon_file:
button.defaultAction().setIcon(QIcon(icon_file))
for action in actions:
button.menu().addAction(action)
toolbar.addWidget(button)
return button
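    # Editor's note (hedged usage sketch): attach a popup button with two
    # hypothetical QActions to a toolbar, e.g.
    #     btn = QtCommons.addToolbarPopup(toolbar, text='Open',
    #                                     icon_name='document-open',
    #                                     actions=[open_act, recent_act])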
|
Astutech/Pushwoosh-Python-library
|
setup.py
|
Python
|
mit
| 2,681
| 0
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pushwoosh',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.2',
    description='A simple client for the Pushwoosh push notification service.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Astutech/Pushwoosh-Python-library',
# Author details
author='Astutech',
author_email='matthew@astutech.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='push pushwoosh interface client',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['pushwoosh'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['six'],
)
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/tpgamefiles/scr/Spell600 - Frog Tongue.py
|
Python
|
mit
| 1,771
| 0.040655
|
from toee import *
def OnBeginSpellCast( spell ):
print "Frog Ton
|
gue OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect( spell ):
print "Frog Tongue OnSpellEffect"
# it takes 1 round to pull the target to the frog (normally)
spell.duration = 0
target_item = spell.target_list[0]
# if the target is larger than the frog, it takes 2 turns to "pull" the target in
	if target_item.obj.get_size > spell.caster.get_size:
spell.duration = 1
has_freedom = 0
if target_item.obj.d20_query(Q_Critter_Has_Freedom_of_Movement):
has_freedom = 1
ranged_touch_res = spell.caster.perform_touch_attack( target_item.obj )
if (ranged_touch_res & D20CAF_HIT) and not has_freedom:
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 21000 )
# hit
#target_item.obj.condition_add_with_args( 'sp-Frog Tongue', spell.id, spell.duration, 0 )
spell.caster.condition_add_with_args( 'sp-Frog Tongue', spell.id, spell.duration, 0 )
target_item.partsys_id = game.particles( 'sp-Frog Tongue', target_item.obj )
else:
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 21001 )
spell.caster.anim_callback( ANIM_CALLBACK_FROG_FAILED_LATCH )
# missed
if not (ranged_touch_res & D20CAF_HIT):
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30007 )
game.particles( 'Fizzle', target_item.obj )
spell.target_list.remove_target( target_item.obj )
spell.spell_end( spell.id )
def OnBeginRound( spell ):
if spell.caster == OBJ_HANDLE_NULL:
spell.spell_end(spell.id, 1)
elif spell.caster.is_unconscious():
spell.spell_end(spell.id, 1)
print "Frog Tongue OnBeginRound"
def OnEndSpellCast( spell ):
print "Frog Tongue OnEndSpellCast"
|
antoinecarme/pyaf
|
tests/artificial/transf_Fisher/trend_ConstantTrend/cycle_30/ar_12/test_artificial_128_Fisher_ConstantTrend_30_12_20.py
|
Python
|
bsd-3-clause
| 267
| 0.086142
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 12);
|
healthchecks/healthchecks
|
hc/front/tests/test_add_msteams.py
|
Python
|
bsd-3-clause
| 1,478
| 0
|
from django.test.utils import override_settings
from hc.api.models import Channel
from hc.test import BaseTestCase
class AddMsTeamsTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_msteams/" % self.project.code
def test_instructions_work(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertContains(r, "Integration Settings", status_code=200)
def test_it_works(self):
form = {"value": "https://example.com/foo"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, form)
self.assertRedirects(r, self.channels_url)
c = Channel.objects.get()
self.assertEqual(c.kind, "msteams")
self.assertEqual(c.value, "https://example.com/foo")
self.assertEqual(c.project, self.project)
    def test_it_requires_rw_access(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
self.client.login(username="bob@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
@override_settings(MSTEAMS_ENABLED=False)
def test_it_handles_disabled_integration(self):
        self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
|
jvdm/AutobahnPython
|
examples/asyncio/wamp/session/series/backend.py
|
Python
|
mit
| 1,661
| 0
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import datetime
from autobahn.asyncio.wamp import ApplicationSession
class Component(ApplicationSession):
"""
A simple time service application component.
"""
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, u'com.timeservice.now')
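# Editor's note (hedged sketch): this component is normally launched with an
# ApplicationRunner; router URL and realm below are assumptions:
#     from autobahn.asyncio.wamp import ApplicationRunner
#     runner = ApplicationRunner(url=u"ws://localhost:8080/ws", realm=u"realm1")
#     runner.run(Component)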
|
savoirfairelinux/sous-chef
|
src/notification/urls.py
|
Python
|
agpl-3.0
| 77
| 0
|
from django.conf.urls import url
app_name = "notification"
urlpatterns = []
|
DIRACGrid/DIRACWeb
|
dirac/lib/helpers.py
|
Python
|
gpl-3.0
| 1,413
| 0.035386
|
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""
from webhelpers import *
from webhelpers.html import tags
from routes import url_for
from pylons import request
def javascript_link( *urls, **attrs ):
return _modTag( urls, attrs, tags.javascript_link )
def stylesheet_link( *urls, **attrs ):
return _modTag( urls, attrs, tags.stylesheet_link )
def _modTag( urls, attrs, functor ):
nUrls = urls
sN = request.environ[ 'SCRIPT_NAME' ]
if sN:
if sN[0] == "/":
sN = sN[1:]
nUrls = []
for url in urls:
if url.find( "http" ) == 0:
nUrls.append( url )
else:
if url[0] == "/":
url = "/%s%s" % ( sN, url )
nUrls.append( url )
return functor( *nUrls, **attrs )
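# Editor's note (hedged example): with SCRIPT_NAME set to '/DIRAC',
#     javascript_link('/js/main.js')     -> tag pointing at '/DIRAC/js/main.js'
#     javascript_link('http://cdn/x.js') -> URL passed through unchanged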
def logo_wrap( fn ):
def wrap( self = None ):
return "<html><body><img src='/images/logos/logo.png'/><br><br><br><br><p class='lrg'>\
The <a href='http://diracgrid.org'>DIRAC</a> project is a complete \
Grid solution for a community of users needing access to \
distributed computing resources.</p><br><p class='lrg'>Do you want \
to help your community? Get <a href='https://github.com/DIRACGrid'>\
involved</a>!</p><br>\
<p class='footer'>" + fn( self ) + "</p></body></html>"
return wrap
|
Smiter/voodoo
|
voodoo/wsgi.py
|
Python
|
gpl-2.0
| 1,522
| 0.000657
|
"""
WSGI config for voodoo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voodoo.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# import newrelic.agent
# newrelic.agent.initialize('/home/smiter/python_home/projects/birt_django/voodoo/voodoo/newrelic-1.10.2.38/newrelic.ini')
# import os
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voodoo.settings")
# from django.core.wsgi import get_wsgi_application
# application = get_wsgi_application()
# application = newrelic.agent.wsgi_application()(application)
|
Sciprios/EvolutionaryPartyProblemSimulator
|
PartyProblemSimulator/Experiments/Experiment.py
|
Python
|
mit
| 3,577
| 0.00615
|
from PartyProblemSimulator.BooleanEquation.Equation import Equation
from threading import Thread
class Experiment(Thread):
""" An experiment to be run on the Party Problem Simulator. """
def run(self):
""" Should be implemented to execute the experiment and save results. """
results = self._do_experiment()
self._save_results(results)
def _do_experiment(self):
""" Execute the experiment and return results. """
raise NotImplementedError("The do_experiment method of Experiment is not implemented.")
def _test_method(self, method, no_trials, test_cases):
""" Tests the given method with x trials on all test cases provided. """
results = []
for test_case in test_cases:
test_case_aes = 0
test_case_sr = 0
trial_count =
|
0
|
while trial_count < no_trials:
trial_res = self._do_trial(method(), Equation(test_case['Equation']), test_case['NumVars']) # Do the trial
if trial_res['Success']: # Only add information if it was successful
test_case_sr = test_case_sr + 1
test_case_aes = test_case_aes + trial_res['Evaluations']
trial_count = trial_count + 1
try:
test_case_aes = test_case_aes / test_case_sr # Divide by the number of successes
except ZeroDivisionError:
test_case_aes = 0
            test_case_sr = test_case_sr / no_trials  # successful trials / total trials = success rate
results.append({
"AES": test_case_aes,
"SR": test_case_sr
})
return results
def _do_trial(self, method, equation, variable_count):
""" Does a single trial of the algorithm provided. """
method.run(equation, variable_count)
results = {} # Build response
results['Evaluations'] = method.get_num_evaluations()
if (method.get_best_genome() is None) or (method.get_best_genome().evaluate(equation) == 1): # Did the method find a solution?
results['Success'] = True
else:
results['Success'] = False
return results
def _save_results(self, results):
""" Saves the results of this experiment to disk. """
for res in results:
            with open(r'PartyProblemSimulator\Experiments\Results\{}.res'.format(res['Method']), 'w') as file:  # Open file with name of method used (raw string keeps the backslashes literal)
file.write("METHOD NAME: {}\n".format(res['Method'])) # Output the goodies
file.write("AES: {}\n".format(res['Overall']['AES']))
file.write("SR: {}\n".format(res['Overall']['SR']))
file.write("--------------------------\n")
for case_res in res['CaseResults']:
file.write("Case AES: {}\t\tCase SR: {}\n".format(case_res['AES'], case_res['SR']))
def _load_test_cases(self):
""" Loads or creates the test cases to be used. """
raise NotImplementedError("The _load_test_cases method of Experiment is not implemented.")
def _calculate_results(self, results):
""" Calculates the SR (Success Rate) and AES (Average Evaluations per Solution) based on the results given."""
sr = 0
aes = 0
        for result in results:
            aes = aes + result['AES']
            sr = sr + result['SR']
        aes = aes / len(results)
        sr = sr / len(results)
return {"AES": aes, "SR": sr}
|
indiofish/lov-o-meter
|
src/analysis/analyser.py
|
Python
|
gpl-3.0
| 2,602
| 0
|
from collections import namedtuple, Counter
from datetime import timedelta
from analysis import sentiment
from analysis import qa_analysis
ChatData = namedtuple('ChatData',
['interval',
'avg_chats',
'sentiments',
'qa_ratio'])
class Analyser(object):
"""with the parsed data, gather information"""
def __init__(self):
super(Analyser, self).__init__()
self.senti = sentiment.Sentiment()
# self.__get_words__()
def analyse(self, chat):
interval = self.__interval__(chat)
avg_chat = self.__chat_per_day__(chat)
senti = self.__sentiment__(chat)
qa_ratio = self.__questions__(chat)
ret = ChatData(interval=interval,
avg_chats=avg_chat,
sentiments=senti,
qa_ratio=qa_ratio)
return ret
# calculate interval between chats
def __interval__(self, chat):
tmp_time = timedelta(seconds=0)
for i in range(1, len(chat)):
tmp_tim
|
e += chat[i].time - chat[i-1].time
avg_interval = tmp_time.total_seconds() // len(chat)
return avg_interval
# TODO: should we use n of chats, or length?
def __chat_per_day__(self, chat):
c
|
nt = Counter()
for c in chat:
cnt[c.time.date()] += 1
return sum(cnt.values()) // len(cnt)
def __questions__(self, chat):
total_q = 0
ans = 0
# self, other
questions = [[], []]
for c in chat:
if qa_analysis.is_question(c.contents):
score = qa_analysis.score(c.contents)
questions[c.user].append(score)
total_q += score
elif qa_analysis.reply(c.contents) == 1:
# the other speaker's question is answered
if questions[not(c.user)]:
ans += questions[not(c.user)].pop()
elif qa_analysis.reply(c.contents) == -1:
if questions[not(c.user)]:
questions[not(c.user)].pop()
if total_q == 0:
return 0
return ans / total_q
def __sentiment__(self, chat):
ret = [0, 0]
cnt = 0
for c in chat:
p = self.senti.analyse(c.contents)
ret[0] += p[0]
ret[1] += p[1]
if ret[0] != 0 or ret[1] != 0:
cnt += 1
if cnt != 0:
ret[0] /= cnt
ret[1] /= cnt
ret[0] *= 100
ret[1] *= 100
return ret
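# --- editor's sketch (not part of the original module) ---
# Minimal usage, assuming chat items expose .time, .user and .contents as
# the attribute access above implies, and that the sentiment resources are
# available; the Chat shape here is hypothetical.
if __name__ == '__main__':
    from datetime import datetime
    Chat = namedtuple('Chat', ['time', 'user', 'contents'])
    chat = [Chat(datetime(2017, 1, 1, 12, 0), 0, 'how are you?'),
            Chat(datetime(2017, 1, 1, 12, 1), 1, 'fine, thanks!')]
    print(Analyser().analyse(chat))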
|
ecreall/lagendacommun
|
lac/views/user_management/deactivate.py
|
Python
|
agpl-3.0
| 857
| 0.003501
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.content.processes.user_management.behaviors import (
Deactivate)
from lac.content.person import Person
from lac import _
@view_config(
name='deactivate',
context=
|
Person,
renderer='pontus:templates/views_templates/grid.pt',
)
class DeactivateView(Basi
|
cView):
title = _('Deactivate the member')
name = 'deactivate'
behaviors = [Deactivate]
viewid = 'deactivateview'
def update(self):
results = self.execute(None)
return results[0]
DEFAULTMAPPING_ACTIONS_VIEWS.update({Deactivate: DeactivateView})
|
blckshrk/Weboob
|
weboob/tools/capabilities/paste.py
|
Python
|
agpl-3.0
| 2,825
| 0.004248
|
# -*- coding: utf-8 -*-
# Copyright(C) 2011 Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.paste import ICapPaste
class BasePasteBackend(ICapPaste):
EXPIRATIONS = {}
"""
List of expirations and their corresponding remote codes (any type can be used).
The expirations, i.e. the keys, are integers representing the duration
    in seconds. There can also be one False key, for the "forever" expiration.
"""
def get_closest_expiration(self, max_age):
"""
        Get the expiration closest to (and less than or equal to) max_age (int, in seconds).
max_age set to False means we want it to never expire.
@return int or False if found, else None
"""
# "forever"
if max_age is False and False in self.EXPIRATIONS:
return max_age
# get timed expirations, longest first
expirations = sorted([e for e in self.EXPIRATIONS if e is not False], reverse=True)
# find the first expiration that is below or equal to the maximum wanted age
for e in expirations:
if max_age is False or max_age >= e:
return e
def test():
class MockPasteBackend(BasePasteBackend):
def __init__(self, expirations):
self.EXPIRATIONS = expirations
# all expirations are too high
assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1) is None
# we found a suitable lower or equal expiration
asser
|
t MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_e
|
xpiration(84) == 42
    assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(False) is False
    assert MockPasteBackend({1337: '', 42: ''}).get_closest_expiration(False) == 1337
    assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1336) == 42
    assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1337) == 1337
    assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1338) == 1337
    # this format should work, though of doubtful usage
    assert MockPasteBackend([1337, 42, False]).get_closest_expiration(84) == 42
|
StackStorm/st2
|
contrib/examples/actions/pythonactions/yaml_string_to_object.py
|
Python
|
apache-2.0
| 795
| 0
|
# C
|
opyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or
|
agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from st2common.runners.base_action import Action
class YamlStringToObject(Action):
def run(self, yaml_str):
return yaml.safe_load(yaml_str)
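# --- editor's sketch (not part of the original action) ---
# Direct invocation outside of st2, assuming the Action base accepts a
# config argument; run() only delegates to yaml.safe_load.
if __name__ == '__main__':
    action = YamlStringToObject(config={})
    print(action.run('{a: 1, b: [x, y]}'))  # -> {'a': 1, 'b': ['x', 'y']}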
|
alfasin/st2
|
st2common/tests/unit/test_rbac_resolvers.py
|
Python
|
apache-2.0
| 7,834
| 0.002553
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2common.services import rbac as rbac_services
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.rbac.types import SystemRole
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.pack import Pack
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.pack import PackDB
from st2common.rbac.resolvers import get_resolver_for_resource_type
from st2common.rbac.migrations import insert_system_roles
from st2tests.base import CleanDbTestCase
__all__ = [
'BasePermissionsResolverTestCase',
'PermissionsResolverUtilsTestCase'
]
class BasePermissionsResolverTestCase(CleanDbTestCase):
def setUp(self):
super(BasePermissionsResolverTestCase, self).setUp()
        # Make sure RBAC is enabled
cfg.CONF.set_override(name='enable', override=True, group='rbac')
self.users = {}
self.roles = {}
self.resources = {}
# Run role "migrations"
insert_system_roles()
# Insert common mock objects
self._insert_common_mocks()
def _user_has_resource_db_permissions(self, resolver, user_db, resource_db, permission_types):
"""
|
Method which verifies that user
|
has all the provided permissions.
"""
self.assertTrue(isinstance(permission_types, (list, tuple)))
self.assertTrue(len(permission_types) > 1)
for permission_type in permission_types:
result = resolver.user_has_resource_db_permission(
user_db=user_db,
resource_db=resource_db,
permission_type=permission_type)
if not result:
return False
return True
def _insert_common_mocks(self):
self._insert_common_mock_users()
self._insert_common_mock_resources()
self._insert_common_mock_roles()
self._insert_common_mock_role_assignments()
def _insert_common_mock_users(self):
# Insert common mock users
user_1_db = UserDB(name='admin')
user_1_db = User.add_or_update(user_1_db)
self.users['admin'] = user_1_db
user_2_db = UserDB(name='observer')
user_2_db = User.add_or_update(user_2_db)
self.users['observer'] = user_2_db
user_3_db = UserDB(name='no_roles')
user_3_db = User.add_or_update(user_3_db)
self.users['no_roles'] = user_3_db
user_4_db = UserDB(name='1_custom_role_no_permissions')
user_4_db = User.add_or_update(user_4_db)
self.users['1_custom_role_no_permissions'] = user_4_db
user_5_db = UserDB(name='1_role_pack_grant')
user_5_db = User.add_or_update(user_5_db)
self.users['custom_role_pack_grant'] = user_5_db
def _insert_common_mock_resources(self):
pack_1_db = PackDB(name='test_pack_1', ref='test_pack_1', description='',
version='0.1.0', author='foo', email='test@example.com')
pack_1_db = Pack.add_or_update(pack_1_db)
self.resources['pack_1'] = pack_1_db
pack_2_db = PackDB(name='test_pack_2', ref='test_pack_2', description='',
version='0.1.0', author='foo', email='test@example.com')
pack_2_db = Pack.add_or_update(pack_2_db)
self.resources['pack_2'] = pack_2_db
def _insert_common_mock_roles(self):
# Insert common mock roles
admin_role_db = rbac_services.get_role_by_name(name=SystemRole.ADMIN)
observer_role_db = rbac_services.get_role_by_name(name=SystemRole.OBSERVER)
self.roles['admin_role'] = admin_role_db
self.roles['observer_role'] = observer_role_db
# Custom role 1 - no grants
role_1_db = rbac_services.create_role(name='custom_role_1')
self.roles['custom_role_1'] = role_1_db
# Custom role 2 - one grant on pack_1
# "pack_create" on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.PACK_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_3_db = RoleDB(name='custom_role_pack_grant', permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['custom_role_pack_grant'] = role_3_db
def _insert_common_mock_role_assignments(self):
# Insert common mock role assignments
role_assignment_admin = UserRoleAssignmentDB(user=self.users['admin'].name,
role=self.roles['admin_role'].name)
role_assignment_admin = UserRoleAssignment.add_or_update(role_assignment_admin)
role_assignment_observer = UserRoleAssignmentDB(user=self.users['observer'].name,
role=self.roles['observer_role'].name)
role_assignment_observer = UserRoleAssignment.add_or_update(role_assignment_observer)
user_db = self.users['1_custom_role_no_permissions']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_1'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_pack_grant']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_pack_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
class PermissionsResolverUtilsTestCase(unittest2.TestCase):
def test_get_resolver_for_resource_type_valid_resource_type(self):
valid_resources_types = [ResourceType.PACK, ResourceType.SENSOR, ResourceType.ACTION,
ResourceType.RULE, ResourceType.EXECUTION,
ResourceType.KEY_VALUE_PAIR,
ResourceType.WEBHOOK]
for resource_type in valid_resources_types:
resolver_instance = get_resolver_for_resource_type(resource_type=resource_type)
resource_name = resource_type.split('_')[0].lower()
class_name = resolver_instance.__class__.__name__.lower()
self.assertTrue(resource_name in class_name)
def test_get_resolver_for_resource_type_unsupported_resource_type(self):
expected_msg = 'Unsupported resource: alias'
self.assertRaisesRegexp(ValueError, expected_msg, get_resolver_for_resource_type,
resource_type='alias')
|
jeanpimentel/contents
|
tests/functional/test_file_with_long_levels.py
|
Python
|
gpl-3.0
| 3,294
| 0.003947
|
import sure
import tempfile
from contents import contents
def test_file_with_long_levels():
content = '''/**
* Project X
* Author: Jean Pimentel
* Date: August, 2013
*/
/* > Intro */
Toc toc! Penny! Toc toc! Penny! Toc toc! Penny!
/* >> The Big Bang Theory << */
The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill Prady.
/* ==>>> Characters ========================================================= */
Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny
/* >>>> Production
============================================================================= */
Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro
Producer(s): Faye Oshima Belyeu
/* =>>>>> Info section: number of seasons - number of episodes
============================================================================= */
No. of seasons: 5
No. of episodes: 111
/* =>>>>>> A collection of our favorite quotes from the show <=============== */
* Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors.
* Sheldon: I'm not insane, my mother had me tested!
'''
new_content = '''/* TABLE OF CONTENTS
Intro ............................................................... 17
The Big Bang Theory ............................................. 20
Characters .................................................. 23
Production .............................................. 26
Info section: number of seasons - number of e[...] .. 31
A collection of our favorite quotes from [...] .. 36
================
|
============================================================= */
/**
* Project X
* Author: Jean Pimentel
* Date: August, 2013
*/
/* > Intro */
Toc toc! Penny! Toc toc! Penny! Toc toc! Penny!
/* >> The Big Bang Theory << */
The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill
|
Prady.
/* ==>>> Characters ========================================================= */
Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny
/* >>>> Production
============================================================================= */
Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro
Producer(s): Faye Oshima Belyeu
/* =>>>>> Info section: number of seasons - number of episodes
============================================================================= */
No. of seasons: 5
No. of episodes: 111
/* =>>>>>> A collection of our favorite quotes from the show <=============== */
* Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors.
* Sheldon: I'm not insane, my mother had me tested!
'''
temp = tempfile.NamedTemporaryFile()
try:
temp.write(content)
temp.seek(0)
contents(temp.name)
temp.seek(0)
temp.read().should.be.equal(new_content)
finally:
temp.close()
|
mindriot101/bokeh
|
bokeh/io/tests/test_showing.py
|
Python
|
bsd-3-clause
| 7,140
| 0.004762
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from mock import Mock, patch
# External imports
# Bokeh imports
from bokeh.application.application import Application
from bokeh.io.doc import curdoc
from bokeh.io.output import output_notebook
from bokeh.io.state import curstate, State
from bokeh.models.plots import Plot
from bokeh.models.renderers import GlyphRenderer
# Module under test
import bokeh.io.showing as bis
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@patch('bokeh.io.showing._show_with_state')
def test_show_with_default_args(mock__show_with_state):
curstate().reset()
default_kwargs = dict(browser=None, new="tab", notebook_handle=False)
p = Plot()
bis.show(p, **default_kwargs)
assert mock__show_with_state.call_count == 1
assert mock__show_with_state.call_args[0] == (p, curstate(), None, "tab")
assert mock__show_with_state.call_args[1] == {'notebook_handle': False}
assert curdoc().roots == []
@patch('bokeh.io.showing._show_with_state')
def test_show_with_explicit_args(mock__show_with_state):
curstate().reset()
kwargs = dict(browser="browser", new="new", notebook_handle=True)
p = Plot()
bis.show(p, **kwargs)
assert mock__show_with_state.call_count == 1
assert mock__show_with_state.call_args[0] == (p, curstate(), "browser", "new")
assert mock__show_with_state.call_args[1] =
|
= {'notebook_handle': True}
assert curdoc().roots == []
@patch('bokeh.io.showing.run_notebook_hook')
def test_show_with_app(mock_run_notebook_hook):
curstate().reset()
app = Application()
output_notebook()
bis.show(app, notebook_url="baz")
assert curstate().notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 1
assert mock_run_notebook_hook.call_args[0][0] == curstate().notebook_type
assert mock_run_notebook_hook.call
|
_args[0][1:] == ("app", app, curstate(), "baz")
assert mock_run_notebook_hook.call_args[1] == {}
@patch('bokeh.io.showing._show_with_state')
def test_show_does_not_add_obj_to_curdoc(m):
curstate().reset()
assert curstate().document.roots == []
p = Plot()
bis.show(p)
assert curstate().document.roots == []
p = Plot()
bis.show(p)
assert curstate().document.roots == []
@pytest.mark.parametrize('obj', [1, 2.3, None, "str", GlyphRenderer()])
@pytest.mark.unit
def test_show_with_bad_object(obj):
with pytest.raises(ValueError):
bis.show(obj)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
@patch('bokeh.io.showing.run_notebook_hook')
@patch('bokeh.io.showing._show_file_with_state')
@patch('bokeh.io.showing.get_browser_controller')
def test__show_with_state_with_notebook(mock_get_browser_controller,
mock__show_file_with_state,
mock_run_notebook_hook):
mock_get_browser_controller.return_value = "controller"
s = State()
p = Plot()
s.output_notebook()
bis._show_with_state(p, s, "browser", "new")
assert s.notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 1
assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False)
assert mock_run_notebook_hook.call_args[1] == {}
assert mock__show_file_with_state.call_count == 0
s.output_file("foo.html")
bis._show_with_state(p, s, "browser", "new")
assert s.notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 2
assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False)
assert mock_run_notebook_hook.call_args[1] == {}
assert mock__show_file_with_state.call_count == 1
assert mock__show_file_with_state.call_args[0] == (p, s, "new", "controller")
assert mock__show_file_with_state.call_args[1] == {}
@patch('bokeh.io.notebook.get_comms')
@patch('bokeh.io.notebook.show_doc')
@patch('bokeh.io.showing._show_file_with_state')
@patch('bokeh.io.showing.get_browser_controller')
def test__show_with_state_with_no_notebook(mock_get_browser_controller,
mock__show_file_with_state,
mock_show_doc,
mock_get_comms):
mock_get_browser_controller.return_value = "controller"
mock_get_comms.return_value = "comms"
s = State()
s.output_file("foo.html")
bis._show_with_state("obj", s, "browser", "new")
    assert s.notebook_type is None
assert mock_show_doc.call_count == 0
assert mock__show_file_with_state.call_count == 1
assert mock__show_file_with_state.call_args[0] == ("obj", s, "new", "controller")
assert mock__show_file_with_state.call_args[1] == {}
@patch('os.path.abspath')
@patch('bokeh.io.showing.save')
def test__show_file_with_state(mock_save, mock_abspath):
controller = Mock()
mock_save.return_value = "savepath"
s = State()
s.output_file("foo.html")
bis._show_file_with_state("obj", s, "window", controller)
assert mock_save.call_count == 1
assert mock_save.call_args[0] == ("obj",)
assert mock_save.call_args[1] == {"state": s}
assert controller.open.call_count == 1
assert controller.open.call_args[0] == ("file://savepath",)
assert controller.open.call_args[1] == {"new": 1}
bis._show_file_with_state("obj", s, "tab", controller)
assert mock_save.call_count == 2
assert mock_save.call_args[0] == ("obj",)
assert mock_save.call_args[1] == {"state": s}
assert controller.open.call_count == 2
assert controller.open.call_args[0] == ("file://savepath",)
assert controller.open.call_args[1] == {"new": 2}
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|