repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
andyofmelbourne/crappy-crystals | utils/phasing_3d/src/mappers.py | Python | gpl-3.0 | 8,434 | 0.018852 | import numpy as np
import sys
import copy
def isValid(thing, d=None):
"""
checks if 'thing' is valid. If d (a dictionary is not None) then
check if d['thing'] is valid.
"""
valid = False
if d is not None :
if thing not in d.keys():
return valid
else :
thing2 = d[thing]
if thing2 is not None and thing2 is not False :
valid = True
return valid
class Modes(dict):
def __init__(self, **args):
dict.__init__(self, **args)
def __ | add__(self, value):
out = self.copy()
for k in self.keys():
if type(value) == Modes :
out[k] = self[k] + value[k]
else :
out[k] = self[k] + value
return out
def __iadd__(self, value):
for k in self.keys():
if type(value) == Modes :
self[k] += value[k]
else :
self[k] += value
return self
def __sub__(se | lf, value):
out = self.copy()
for k in self.keys():
if type(value) == Modes :
out[k] = self[k] - value[k]
else :
out[k] = self[k] - value
return out
def __isub__(self, value):
for k in self.keys():
if type(value) == Modes :
self[k] -= value[k]
else :
self[k] -= value
return self
def __mul__(self, value):
out = self.copy()
for k in self.keys():
if type(value) == Modes :
out[k] = self[k] * value[k]
else :
out[k] = self[k] * value
return out
def __imul__(self, value):
for k in self.keys():
if type(value) == Modes :
self[k] *= value[k]
else :
self[k] *= value
return self
def copy(self):
out = Modes()
for k in self.keys():
out[k] = self[k].copy()
return out
class Mapper():
def __init__(self, I, **args):
modes = Modes()
# check if there is a background
if isValid('background', args):
if args['background'] is True :
modes['B'] = np.random.random((I.shape)).astype(args['dtype'])
else :
modes['B'] = np.sqrt(args['background']).astype(args['dtype'])
if isValid('O', args):
modes['O'] = args['O']
else :
modes['O'] = np.random.random(I.shape).astype(args['c_dtype'])
# this is the radial value for every pixel
# in the volume
self.rs = None
self.mask = 1
if isValid('mask', args):
self.mask = args['mask']
self.alpha = 1.0e-10
if isValid('alpha', args):
self.alpha = args['alpha']
self.I_norm = (self.mask * I).sum()
self.amp = np.sqrt(I.astype(args['dtype']))
# define the data projection
# --------------------------
if 'B' in modes.keys() :
self.Pmod = self.Pmod_back
else :
self.Pmod = self.Pmod_single
# define the support projection
# -----------------------------
if isValid('voxel_number', args) :
self.voxel_number = args['voxel_number']
else :
self.voxel_number = False
self.S = args['support']
self.support = None
if isValid('support', args):
self.support = args['support']
self.modes = modes
def object(self, modes):
return modes['O']
def Psup(self, modes):
out = modes.copy()
if self.voxel_number :
O = out['O']
self.S = choose_N_highest_pixels( (O * O.conj()).real, self.voxel_number, support = self.support)
out['O'] *= self.S
if 'B' in modes.keys() :
out['B'], self.rs, self.r_av = radial_symetry(out['B'], rs = self.rs)
return out
def Pmod_single(self, modes):
out = modes.copy()
out['O'] = pmod_single(self.amp, modes['O'], self.mask, alpha = self.alpha)
return out
def Pmod_back(self, modes):
out = modes.copy()
out['O'], out['B'] = pmod_back(self.amp, modes['B'], modes['O'], self.mask, alpha = self.alpha)
return out
def Imap(self, modes):
O = np.fft.fftn(modes['O'])
if 'B' in modes.keys() :
I = (O.conj() * O).real + modes['B']**2
else :
I = (O.conj() * O).real
return I
def Emod(self, modes):
M = self.Imap(modes)
eMod = np.sum( self.mask * ( np.sqrt(M) - self.amp )**2 )
eMod = np.sqrt( eMod / self.I_norm )
return eMod
def finish(self, modes):
out = {}
out['support'] = self.S
out['I'] = self.Imap(modes)
if 'B' in modes.keys() :
out['background'] = modes['B']**2
out['r_av'] = self.r_av
return out
def l2norm(self, delta, array0):
num = 0
den = 0
for k in delta.keys():
num += np.sum( (delta[k] * delta[k].conj()).real )
den += np.sum( (array0[k] * array0[k].conj()).real )
return np.sqrt(num / den)
def choose_N_highest_pixels_slow(array, N):
percent = (1. - float(N) / float(array.size)) * 100.
thresh = np.percentile(array, percent)
support = array > thresh
# print '\n\nchoose_N_highest_pixels'
# print 'percentile :', percent, '%'
# print 'intensity threshold:', thresh
# print 'number of pixels in support:', np.sum(support)
return support
def choose_N_highest_pixels(array, N, tol = 1.0e-5, maxIters=1000, support=None):
"""
Use bisection to find the root of
e(x) = \sum_i (array_i > x) - N
then return (array_i > x) a boolean mask
This is faster than using percentile (surprising)
If support is not None then values outside the support
are ignored.
"""
s0 = array.max()
s1 = array.min()
if support is not None :
a = array[support > 0]
else :
a = array
support = 1
for i in range(maxIters):
s = (s0 + s1) / 2.
e = np.sum(a > s) - N
if np.abs(e) < tol :
break
if e < 0 :
s0 = s
else :
s1 = s
S = (array > s) * support
#print 'number of pixels in support:', np.sum(support), i, s, e
return S
def pmod_single(amp, O, mask = 1, alpha = 1.0e-10):
O = np.fft.fftn(O)
O = Pmod_single(amp, O, mask = mask, alpha = alpha)
O = np.fft.ifftn(O)
return O
def Pmod_single(amp, O, mask = 1, alpha = 1.0e-10):
out = mask * O * amp / (abs(O) + alpha)
out += (1 - mask) * O
return out
def pmod_back(amp, background, O, mask = 1, alpha = 1.0e-10):
O = np.fft.fftn(O)
O, background = Pmod_back(amp, background, O, mask = mask, alpha = alpha)
O = np.fft.ifftn(O)
return O, background
def Pmod_back(amp, background, O, mask = 1, alpha = 1.0e-10):
M = mask * amp / np.sqrt((O.conj() * O).real + background**2 + alpha)
out = O * M
background *= M
out += (1 - mask) * O
return out, background
def radial_symetry(background, rs = None, is_fft_shifted = True):
if rs is None :
i = np.fft.fftfreq(background.shape[0]) * background.shape[0]
j = np.fft.fftfreq(background.shape[1]) * background.shape[1]
k = np.fft.fftfreq(background.shape[2]) * background.shape[2]
i, j, k = np.meshgrid(i, j, k, indexing='ij')
rs = np.sqrt(i**2 + j**2 + k**2).astype(np.int16)
if is_fft_shifted is False :
rs = np.fft.fftshift(rs)
rs = rs.ravel()
########### Find the radial average
# get the r histogram
r_hist = np.bincount(rs)
# get the radial total
r_av = np.bincount(rs, background.ravel())
# prevent divide by zero
nonzero = np.where(r_hist != 0)
zero = np.where(r_hist == 0)
# get the ave |
NoctuaNivalis/qutebrowser | qutebrowser/commands/cmdexc.py | Python | gpl-3.0 | 1,545 | 0 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Exception classes for commands modules.
Defined here to avoid circular dependency hell.
"""
class Error(Exception):
"""Base class for all cmdexc errors."""
class CommandError(Error):
"""Raised | when a command encounters an error while running."""
pass
class NoSuchCommandError(Error):
"""Raised when a command wasn't found."""
pass
class ArgumentTypeError(Error):
"""Raised when an argument had an invalid type."""
pass
class PrerequisitesError(Error):
"""Raised when a cmd can't be used because some prerequisites aren't met.
This is raised for example when we're in the wrong mode whil | e executing the
command, or we need javascript enabled but don't have done so.
"""
pass
|
sakura-internet/saklient.python | tests/test_enum.py | Python | mit | 1,117 | 0.016115 | # -*- coding:utf-8 -*-
import unittest, sys, os
sys.path[:0] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from saklient.cloud.enums.eserverinstancestatus import EServerInstanceStatus
class TestEnum(unittest.TestCase):
def test_should_be_defined(self):
self.assertEqual(EServerInstanceStatus.UP, "up");
self.assertEqual(EServerInstanceStatus.DOWN, "down");
def test_should_be_compared(self):
self.assertEqual(EServerInstanceStatus.compare(" | up", "up"), 0);
self.assertEqual(EServerInstanceStatus.compare("up", "down"), 1);
self.assertEqual(EServerInstanceStatus.compare("down", "up"), -1);
self.assertEqual(EServerInstanceStatus.compare("UNDEFINED-SYMBOL", "up"), None);
self.assertEqual(EServerInstanceStatus.compare("up", "UNDEFINED-SYMBOL"), None);
self.assertEqual(EServerInstanceStatus.compare(None, "up"), None);
| self.assertEqual(EServerInstanceStatus.compare("up", None), None);
self.assertEqual(EServerInstanceStatus.compare(None, None), None);
if __name__ == '__main__':
unittest.main()
|
jucacrispim/toxicbuild | toxicbuild/master/signals.py | Python | agpl-3.0 | 1,360 | 0 | # -*- coding: utf-8 -*-
# Copyright 2015 Juca Crispim <juca@poraodojuca.net>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT | ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You | should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
from asyncblink import signal
revision_added = signal('revision-added')
step_started = signal('step-started')
step_finished = signal('step-finished')
step_output_arrived = signal('step-output-arrived')
build_preparing = signal('build-preparing')
build_started = signal('build-started')
build_finished = signal('build-finished')
build_added = signal('build-added')
repo_status_changed = signal('repo-status-changed')
build_cancelled = signal('build-cancelled')
buildset_added = signal('buildset-added')
buildset_started = signal('buildset-started')
buildset_finished = signal('buildset-finished')
|
bcorfman/stage | const.py | Python | mit | 301 | 0.003322 | __author__ = 'brandon.corfman'
# Global constants used across the application.
AVERAGED_OVER_ALL_AZIMUTHS = -1
AVERAGED_ON_NON_ZERO_AVS = 0
BY_AZIMUTH = 1
GYPSY_PINK = (0.6745, 0.196, 0.3882) # color coveted by JJS; used for blast volumes
W | HITE = (1.0, 1.0, 1.0)
CMPID, R1, R2, R3, Z1, Z2 = range | (6)
|
fichtner/libucl | python/setup.py | Python | bsd-2-clause | 1,142 | 0.024518 | try:
fro | m setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
import os
import sys
tests_require = []
if sys.version < '2.7':
tests_require.append('unittest2')
uclmodule = Extension(
'ucl', |
libraries = ['ucl'],
sources = ['src/uclmodule.c'],
language = 'c'
)
setup(
name = 'ucl',
version = '0.8',
description = 'ucl parser and emmitter',
ext_modules = [uclmodule],
test_suite = 'tests',
tests_require = tests_require,
author = "Eitan Adler, Denis Volpato Martins",
author_email = "lists@eitanadler.com",
url = "https://github.com/vstakhov/libucl/",
license = "MIT",
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: DFSG approved",
"License :: OSI Approved :: MIT License",
"Programming Language :: C",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries",
]
)
|
ntymtsiv/tempest | tempest/api/network/base_security_groups.py | Python | apache-2.0 | 2,428 | 0 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law o | r agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from | tempest.common.utils import data_utils
class BaseSecGroupTest(base.BaseNetworkTest):
@classmethod
def setUpClass(cls):
super(BaseSecGroupTest, cls).setUpClass()
def _create_security_group(self):
# Create a security group
name = data_utils.rand_name('secgroup-')
resp, group_create_body = self.client.create_security_group(name)
self.assertEqual('201', resp['status'])
self.addCleanup(self._delete_security_group,
group_create_body['security_group']['id'])
self.assertEqual(group_create_body['security_group']['name'], name)
return group_create_body, name
def _delete_security_group(self, secgroup_id):
resp, _ = self.client.delete_security_group(secgroup_id)
self.assertEqual(204, resp.status)
# Asserting that the security group is not found in the list
# after deletion
resp, list_body = self.client.list_security_groups()
self.assertEqual('200', resp['status'])
secgroup_list = list()
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
self.assertNotIn(secgroup_id, secgroup_list)
def _delete_security_group_rule(self, rule_id):
resp, _ = self.client.delete_security_group_rule(rule_id)
self.assertEqual(204, resp.status)
# Asserting that the security group is not found in the list
# after deletion
resp, list_body = self.client.list_security_group_rules()
self.assertEqual('200', resp['status'])
rules_list = list()
for rule in list_body['security_group_rules']:
rules_list.append(rule['id'])
self.assertNotIn(rule_id, rules_list)
|
ddico/odoo | addons/website_mass_mailing/models/website_mass_mailing.py | Python | agpl-3.0 | 635 | 0.00315 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class MassMailingPopup(models.Model):
_name = 'website.mass_mailing.popup'
_description = "Mailing list popup"
def _de | fault_popup_content(self):
return self.env['ir.ui.view']._render_template('website_mass_mailing.s_newsletter_subscribe_popup_content')
mailing_list_id = fields.Many2one('mailing.list')
website_id = fields.Many2one('website')
| popup_content = fields.Html(string="Website Popup Content", default=_default_popup_content, translate=True, sanitize=False)
|
marvel-explorer/marvel-explorer | m_explorer/m_profile/permissions.py | Python | mit | 439 | 0 | from rest_framework import permissions
class IsObjectOwner(permissions.BasePermission):
"""
Custom permission to only allow
owners of an | object to view and edit it. Returns
true if user is owner
"""
def has_object_permission(self, request, view, obj):
"""Return Bool representing object permissions."""
| if request.method == "POST":
return True
return obj.user == request.user
|
luci/luci-py | appengine/components/components/auth/machine_auth.py | Python | apache-2.0 | 7,005 | 0.007566 | # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Implements authentication based on LUCI machine tokens.
LUCI machine tokens are short lived signed protobuf blobs that (among other
information) contain machines' FQDNs.
Each machine has a TLS certificate (and corresponding private key) it uses
to authenticate to LUCI token server when periodically refreshing machine
tokens. Other LUCI backends then simply verifies that the short lived machine
token was signed by the trusted LUCI token server key. That way all the
complexities of dealing with PKI (like checking for certificate revocation) are
implemented in a dedicated LUCI token server, not in the each individual
service.
See:
* https://github.com/luci/luci-go/tree/main/appengine/cmd/tokenserver
* https://github.com/luci/luci-go/tree/main/client/cmd/luci_machine_tokend
* https://github.com/luci/luci-go/tree/main/server/auth/machine
"""
import base64
import logging
from google.protobuf import message
from components import utils
from . import api
from . import model
from . import signature
from .proto import machine_token_pb2
# Part of public API of 'auth' component, exposed by this module.
__all__ = [
'BadTokenError',
'TransientError',
'machine_authentication',
'optional_machine_authentication',
]
# HTTP header that carries the machine token.
MACHINE_TOKEN_HEADER = 'X-Luci-Machine-Token'
# Name of a group with trusted token servers. This group should contain service
# account emails of token servers we trust.
TOKEN_SERVERS_GROUP = 'auth-token-servers'
# How much clock difference we tolerate.
ALLOWED_CLOCK_DRIFT_SEC = 10
# For how long to cache the certificates of the token server in the memory.
CERTS_CACHE_EXP_SEC = 60 * 60
class BadTokenError(api.AuthenticationError):
"""Raised if the supplied machine token is not valid.
See app logs for more details.
"""
def __init__(self):
super(BadTokenError, self).__init__('Bad machine token')
class TransientError(Exception):
"""Raised on transient errors.
Supposed to trigger HTTP 500 response.
"""
def machine_authentication(request):
"""Implementation of the machine authentication.
See components.auth.handler.AuthenticatingHandler.get_auth_methods for details
of the expected interface.
Args:
request: webapp2.Request with the incoming request.
Returns:
(auth.Identity, None) with machine ID ("bot:<fqdn>") on success or
(None, None) if there's no machine token header (which means this
authentication method is not applicable).
Raises:
BadTokenError (which is api.AuthenticationError) if machine token header is
present, but the token is invalid. We also log the error details, but return
only generic error message to the user.
"""
token = request.headers.get(MACHINE_TOKEN_HEADER)
if not token:
return None, None
# Deserialize both envelope and the body.
try:
token = b64_decode(token)
except TypeError as exc:
log_error(request, None, exc, 'Failed to decode base64')
raise BadTokenError()
try:
envelope = machine_token_pb2.MachineTokenEnvelope()
envelope.MergeFromString(token)
body = machine_token_pb2.MachineTokenBody()
body.MergeFromString(envelope.token_body)
except message.DecodeError as exc:
log_error(request, None, exc, 'Failed to deserialize the token')
raise BadTokenError()
# Construct an identity of a token server that signed the token to check that
# it belongs to "auth-token-servers" group.
try:
signer_service_account = model.Identity.from_bytes('user:' + body.issued_by)
except ValueError as exc:
log_error(request, body, exc, 'Bad issued_by field - %s', body.issued_by)
raise BadTokenError()
# Reject tokens from unknown token servers right away.
if not api.is_group_member(TOKEN_SERVERS_GROUP, signer_service_account):
log_error(request, body, None, 'Unknown token issuer - %s', body.issued_by)
raise BadTokenError()
# Check the expiration time before doing any heavier checks.
now = utils.time_time()
if now < body.issued_at - ALLOWED_CLOCK_DRIFT_SEC:
log_error(request, body, None, 'The token is not yet valid')
raise BadTokenError()
if now > body.issued_at + body.lifetime + ALLOWED_CLOCK_DRIFT_SEC:
log_error(request, body, None, 'The token has expired')
raise BadTokenError()
# Check the token was actually signed by the server.
try:
certs = signature.get_service_account_certificates(body.issued_by)
is_valid_sig = certs.check_signature(
blob=envelope.token_body,
key_name=envelope.key_id,
signature=envelope.rsa_sha256)
if not is_valid_sig:
log_error(request, body, None, 'Bad signature')
raise BadTokenError()
except signature.CertificateError as exc:
if exc.transient:
raise TransientError(str(exc))
log_error(
request, body, exc, 'Unexpected error when checking the signature')
raise BadTokenError()
# The token is valid. Construct the bot identity.
try:
ident = model.Identity.from_bytes('bot:' + body.machine_fqdn)
except ValueError as exc:
log_error(request, body, exc, 'Bad machine_fqdn - %s', body.machine_fqdn)
raise BadTokenError()
# Unfortunately 'bot:*' identity namespace is shared between token-based
# identities and old IP-whitelist based identity. They shouldn't intersect,
# but better to enforce this.
if ident == model.IP_WHITELISTED_BOT_ID:
log_error(request, body, None, 'Bot ID %s is forbidden', ident.to_bytes())
raise BadTokenError()
return ident, None
| def optional_machine_authentication(request):
"""It's like machine_authentication except it ignores broken tokens.
Usable during development and initial roll out when machine tokens may not
be working all the time.
"""
try:
return machine_authentication(request)
except BadTokenError:
return None, None # error details are already logged
def b64_decode(data):
"" | "Decodes standard unpadded base64 encoded string."""
mod = len(data) % 4
if mod:
data += '=' * (4 - mod)
return base64.b64decode(data)
def log_error(request, token_body, exc, msg, *args):
"""Logs details about the request and the token, along with error message."""
lines = [('machine_auth: ' + msg) % args]
if exc:
lines.append(' exception: %s (%s)' % (exc, exc.__class__.__name__))
if request:
lines.append(' remote_addr: %s' % request.remote_addr)
if token_body:
lines.extend([
' machine_fqdn: %s' % token_body.machine_fqdn,
' issued_by: %s' % token_body.issued_by,
' issued_at: %s' % token_body.issued_at,
' now: %s' % int(utils.time_time()), # for comparison with issued_at
' lifetime: %s' % token_body.lifetime,
' ca_id: %s' % token_body.ca_id,
' cert_sn: %s' % token_body.cert_sn,
])
logging.warning('\n'.join(lines))
|
MarvinTeichmann/tensorflow_examples | input/input.py | Python | mit | 8,318 | 0.013104 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 11:50:47 2015
@author: teichman
"""
import tensorflow as tf
from tensorflow.python | .framework import ops
from tensorflow.python.framework impo | rt dtypes
from tensorflow.python.training import queue_runner
import os
import numpy as np
# Global constants describing out car data set.
IMAGE_SIZE = 32
NUM_CLASSES = 2
def input_pipeline(filename, batch_size, num_labels,
processing_image=lambda x:x,
processing_label=lambda y:y,
num_epochs=None):
"""The input pipeline for reading images classification data.
The data should be stored in a single text file of using the format:
/path/to/image_0 label_0
/path/to/image_1 label_1
/path/to/image_2 label_2
...
Args:
filename: the path to the txt file
batch_size: size of batches produced
num_epochs: optionally limited the amount of epochs
Returns:
List with all filenames in file image_list_file
"""
# Reads pfathes of images together with there labels
image_list, label_list = read_labeled_image_list(filename)
images = ops.convert_to_tensor(image_list, dtype=dtypes.string)
labels = ops.convert_to_tensor(label_list, dtype=dtypes.int32)
# Makes an input queue
input_queue = tf.train.slice_input_producer([images, labels],
num_epochs=num_epochs,
shuffle=True)
# Reads the actual images from
image, label = read_images_from_disk(input_queue,num_labels=num_labels)
pr_image = processing_image(image)
pr_label = processing_label(label)
image_batch, label_batch = tf.train.batch([pr_image, pr_label],
batch_size=batch_size)
tf.image_summary('images', image_batch)
return image_batch, label_batch
def inputs(filename, batch_size, num_labels,num_epochs=None):
def pr_image(image):
return tf.image.per_image_whitening(image)
return input_pipeline(filename, batch_size,num_labels, processing_image=pr_image
,num_epochs=None)
def distorted_inputs(filename, batch_size, num_labels,num_epochs=None):
def pr_image(image):
distorted_image = image
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Because these operations are not commutative, consider randomizing
# randomize the order their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
return tf.image.per_image_whitening(distorted_image)
return input_pipeline(filename, batch_size,num_labels, processing_image=pr_image
,num_epochs=None)
def read_images_from_disk(input_queue, num_labels):
"""Consumes a single filename and label as a ' '-delimited string.
Args:
filename_and_label_tensor: A scalar string tensor.
Returns:
Two tensors: the decoded image, and the string label.
"""
label = input_queue[1]
file_contents = tf.read_file(input_queue[0])
example = tf.image.decode_png(file_contents, channels=3)
processed_example = preprocessing(example)
# processed_labels = create_one_hot(label,num_labels)
processed_label = label
return processed_example, processed_label
def preprocessing(image):
resized_image = tf.image.resize_images(image, IMAGE_SIZE,
IMAGE_SIZE, method=0)
resized_image.set_shape([IMAGE_SIZE,IMAGE_SIZE,3])
return resized_image
def create_one_hot(label, num_labels = 10):
"""Produces one_hot vectors out of numerical labels
Args:
label_batch: a batch of labels
num_labels: maximal number of labels
Returns:
Label Coded as one-hot vector
"""
labels = tf.sparse_to_dense(label, [num_labels], 1.0, 0.0)
return labels
def read_labeled_image_list(image_list_file):
"""Reads a .txt file containing pathes and labeles
Args:
image_list_file: a .txt file with one /path/to/image per line
label: optionally, if set label will be pasted after each line
Returns:
List with all filenames in file image_list_file
"""
f = open(image_list_file, 'r')
filenames = []
labels = []
for line in f:
filename, label = line[:-1].split(' ')
filenames.append(filename)
labels.append(int(label))
return filenames, labels
def create_input_queues(image, label, capacity=100):
"""Creates Queues a FIFO Queue out of Input tensor objects.
This function is no longer used in the input pipeline.
However it took me a while to understand queuing and it might be useful
fot someone at some point.
Args:
image: an image tensor object, generated by queues.
label: an label tensor object, generated by queues.
Returns: Two FiFO Queues
"""
#create input queues
im_queue = tf.FIFOQueue(capacity, dtypes.uint8)
enqueue_op = im_queue.enqueue(image)
queue_runner.add_queue_runner(queue_runner.QueueRunner(im_queue,
[enqueue_op]))
label_queue = tf.FIFOQueue(capacity, dtypes.uint8)
enqueue_op = label_queue.enqueue(label)
queue_runner.add_queue_runner(queue_runner.QueueRunner(label_queue,
[enqueue_op]))
return im_queue, label_queue
def test_one_hot():
data_folder = "/fzi/ids/teichman/no_backup/DATA/"
data_file = "Vehicle_Data/train.txt"
filename = os.path.join(data_folder, data_file)
# Reads pfathes of images together with there labels
image_list, label_list = read_labeled_image_list(filename)
images = ops.convert_to_tensor(image_list, dtype=dtypes.string)
labels = ops.convert_to_tensor(label_list, dtype=dtypes.int32)
# Makes an input queue
input_queue = tf.train.slice_input_producer([images, labels],
num_epochs=None,
shuffle=True)
# Reads the actual images from
image, label = read_images_from_disk(input_queue, NUM_CLASSES)
label_one_hot = create_one_hot(label,2)
init_op = tf.initialize_all_variables()
sess = tf.Session()
with sess.as_default():
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print(sess.run([label,label_one_hot]))
print(sess.run([label,label_one_hot]))
print(sess.run([label,label_one_hot]))
print(sess.run([label,label_one_hot]))
print(sess.run([label,label_one_hot]))
print(sess.run([label,label_one_hot]))
sess.close()
def test_pipeline():
data_folder = "/fzi/ids/teichman/no_backup/DATA/"
data_file = "Vehicle_Data/test.txt"
filename = os.path.join(data_folder, data_file)
image_batch, label_batch = inputs(filename, 75,2)
# Create the graph, etc.
init_op = tf.initialize_all_variables()
sess = tf.Session()
with sess.as_default():
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print(label_batch.eval())
coord.request_stop()
coord.join(threads)
prin |
cesc-park/CRCN | doc2vec_training.py | Python | mit | 1,588 | 0.0233 |
from gensim import corpora, models, similarities
import json
import os
# from cpython cimport PyCObject_AsVoidPtr
# from scipy.li | nalg.blas import cblasfrom scipy.linalg.blas import cblas
# ctypedef void (*saxpy_ptr) (const int *N, const float *alpha, const float *X, const int *incX, float *Y, const int *incY) nogil
# cdef saxpy_ptr saxpy=<saxpy_ptr>PyCObject_AsVoidPtr(cblas.saxpy._cpointer)
jsonfile = open('./da | ta/example.json', 'r')
json_data=jsonfile.read()
jsondata=json.loads(json_data)
json_imgs=jsondata['images']
sentences=[]
for i,jsonimg in enumerate(json_imgs):
concatpara=""
for sentence in jsonimg['sentences']:
ensent=sentence['raw'].encode('ascii','ignore')
if ensent not in concatpara:
concatpara+=ensent
key=str(i)
sentences.append(models.doc2vec.TaggedDocument(concatpara.split(), [key]))
model = models.Doc2Vec(size=300,alpha=0.025, min_alpha=0.025,window=8, min_count=5, seed=1,sample=1e-5, workers=4) # use fixed learning rate
model.build_vocab(sentences)
for epoch in range(100):
print epoch
model.train(sentences)
model.alpha -= 0.0001 # decrease the learning rate
model.min_alpha = model.alpha
# if epoch%200==0 and epoch!=0:
# print "save check point"
# accuracy_list=model.accuracy('./model/questions-words.txt')
# error=0
# correct=0
# for accuracy in accuracy_list:
# error=error+len(accuracy['incorrect'])
# correct=correct+len(accuracy['correct'])
# print "accuracy :", correct*1.0/(correct+error)
# model.save('./model/disney_model.doc2vec')
#model.init_sims(replace=True)
model.save('./model/example.doc2vec')
|
flavoi/diventi | diventi/products/migrations/0073_remove_product_order_index.py | Python | apache-2.0 | 337 | 0 | # Genera | ted by Django 2.2.24 on 2021-10-02 14:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0072_product_order_index'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='order_index',
),
] | |
wilsonssun/baseball-gamethread | build/lxml/src/lxml/html/tests/test_clean.py | Python | bsd-3-clause | 1,225 | 0.008163 | import unittest, sys
from lxml.tests.common_imports import make_doctest
from lxml.etree import LIBXML_VERSION
import lxml.html
from lxml.html.clean import Cleaner
class CleanerTest(unittest.TestCase):
def test_allow_tags(self):
html = """
<html>
< | head>
</head>
<body>
<p>some text</p>
<table>
<tr>
<td>hello</td><td>world</td>
</tr>
<tr>
<td>hello</td><td>world</td>
</tr>
</table>
<img>
</body>
</html>
"""
html_root = lxml.html.document_fromstring(html)
cleaner = | Cleaner(
remove_unknown_tags = False,
allow_tags = ['table', 'tr', 'td'])
result = cleaner.clean_html(html_root)
self.assertEquals(12-5+1, len(list(result.iter())))
def test_suite():
suite = unittest.TestSuite()
if sys.version_info >= (2,4):
suite.addTests([make_doctest('test_clean.txt')])
if LIBXML_VERSION >= (2,6,31):
suite.addTests([make_doctest('test_clean_embed.txt')])
suite.addTests(unittest.makeSuite(CleanerTest))
return suite
|
ad-m/django-atom | atom/models.py | Python | bsd-3-clause | 587 | 0.001704 | from os.path import basename
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AttachmentBase(models.Model):
    """Abstract base model that stores one uploaded file per row."""

    attachment = models.FileField(upload_to="letters/%Y/%m/%d", verbose_name=_("File"))

    @property
    def filename(self):
        # Strip the dated upload directory, keeping the final path component.
        return basename(self.attachment.name)

    def __unicode__(self):
        # Python 2 string representation: just the bare file name.
        return "%s" % self.filename

    def get_absolute_url(self):
        return self.attachment.url

    class Meta:
        abstract = True
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')
|
mediatum/mediatum | mediatum.py | Python | gpl-3.0 | 257 | 0.003891 | #! /usr/bi | n/env nix-shell
#! nix-shell -i python
import sys
if __name__ == '__main__':
    from bin.mediatum import main

    try:
        main()
    except KeyboardInterrupt:
        # Report the interrupt and exit with a non-zero status.
        print(" - Mediatum stopped by KeyboardInterrupt.")
        sys.exit(1)
|
lucasdavid/grapher | grapher/environment.py | Python | mit | 638 | 0 | import abc
from flask import Flask
from . import settings
class Environment(metaclass=abc.ABCMeta):
    """Singleton-style application environment wrapping a Flask app.

    Only one instance may exist at a time; constructing a second raises.
    """

    instance = None

    def __init__(self, name):
        if self.instance is not None:
            running = Environment.instance.name
            raise RuntimeError(
                'The environment %s is already running. '
                'Only one instance is allowed.' % running)

        self.name = name
        self.settings = settings.effective

        self.app = Flask(name)
        self.app.config.from_object(self.settings)

        self.user_artifacts = {}
        self.schemas = {}

        # Register this instance as the single running environment.
        Environment.instance = self
|
EndPointCorp/lg_ros_nodes | lg_nav_to_device/src/lg_nav_to_device/background_stopper.py | Python | apache-2.0 | 2,636 | 0 | import rospy
import threading
import types
from lg_common.helpers import load_director_message, find_window_with_activity
from lg_msg_defs.msg import ApplicationState
class BackgroundStopper:
    """Toggles a device writer on/off based on the current scene contents,
    a scene slug that was explicitly disabled, and externally reported
    application visibility states. All public handlers are lock-protected."""

    def __init__(self, disable_activities, device_writer):
        self.disable_activities = disable_activities
        self.device_writer = device_writer
        self._current_scene_slug = ''
        self._disabled_scene_slug = ''
        self._activity_disabled = False
        self._slug_disabled = False
        self._lock = threading.Lock()
        self._states = {}

    def _set_writer_state(self, state):
        # The writer exposes enable/disable through its ``state`` attribute.
        self.device_writer.state = state

    def _consider_scene_slugs(self):
        current = self._current_scene_slug
        if current == '':
            # No scene observed yet; nothing to decide.
            return
        # Disabled exactly when the current scene matches the disabled slug
        # (an empty disabled slug never matches a non-empty current one).
        self._slug_disabled = (current == self._disabled_scene_slug)
        self._set_writer_state(not self._slug_disabled)

    def _consider_disabled_states(self):
        if self._activity_disabled or self._slug_disabled:
            # A stronger disable reason is already in effect; keep it.
            return
        # Enable the writer only when no tracked topic reports visibility.
        self._set_writer_state(not any(self._states.values()))

    def handle_scene(self, msg):
        with self._lock:
            self._handle_scene(msg)

    def _handle_scene(self, msg):
        # A new scene invalidates all previously reported topic states.
        self._states = {}
        scene = load_director_message(msg)
        self._current_scene_slug = scene.get('slug')
        self._consider_scene_slugs()
        for activity in self.disable_activities:
            window = find_window_with_activity(scene, activity)
            if len(window) > 0:
                self._activity_disabled = True
                self._set_writer_state(False)
                return
        self._activity_disabled = False

    def handle_slug(self, msg):
        with self._lock:
            self._handle_disable_for_scene_slug(msg)

    def _handle_disable_for_scene_slug(self, msg):
        self._disabled_scene_slug = msg.data
        self._consider_scene_slugs()

    def handle_disabled_state(self, topic, msg):
        with self._lock:
            self._handle_disabled_state(topic, msg)

    def _handle_disabled_state(self, topic, msg):
        self._states[topic] = (msg.state == ApplicationState.VISIBLE)
        self._consider_disabled_states()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
antoinecarme/pyaf | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_NoCycle_SVR.py | Python | bsd-3-clause | 147 | 0.047619 | import tests.model_control.test_ozone_custom_models_enabled | as testmod
# Build the ozone model with the BoxCox transform, polynomial trend,
# no cycle component, and an SVR autoregressive model.
testmod.build_model(['BoxCox'], ['PolyTrend'], ['NoCycle'], ['SVR'])
harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/elb/instancestate.py | Python | gpl-3.0 | 2,657 | 0 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class InstanceState(object):
    """
    Represents the state of an EC2 Load Balancer Instance.
    """
    def __init__(self, load_balancer=None, description=None,
                 state=None, instance_id=None, reason_code=None):
        """
        :ivar boto.ec2.elb.loadbalancer.LoadBalancer load_balancer: The
            load balancer this instance is registered to.
        :ivar str description: A description of the instance.
        :ivar str instance_id: The EC2 instance ID.
        :ivar str reason_code: Provides information about the cause of
            an OutOfService instance. Specifically, it indicates whether the
            cause is Elastic Load Balancing or the instance behind the
            LoadBalancer.
        :ivar str state: Specifies the current state of the instance.
        """
        self.load_balancer = load_balancer
        self.description = description
        self.state = state
        self.instance_id = instance_id
        self.reason_code = reason_code

    def __repr__(self):
        return 'InstanceState:(%s,%s)' % (self.instance_id, self.state)

    def startElement(self, name, attrs, connection):
        # No nested elements are expected; returning None keeps this object
        # as the active handler for the SAX-style response parser.
        return None

    def endElement(self, name, value, connection):
        # Map each closing XML element to its matching attribute.
        if name == 'Description':
            self.description = value
        elif name == 'State':
            self.state = value
        elif name == 'InstanceId':
            self.instance_id = value
        elif name == 'ReasonCode':
            self.reason_code = value
        else:
            # Keep unrecognized elements verbatim so newly added API fields
            # are not silently dropped.
            setattr(self, name, value)
|
simvisage/oricreate | oricreate/hu/__init__.py | Python | gpl-3.0 | 50 | 0 | from .hu_psi_constraints import HuPsiConstraint | s
| |
AdaptiveApplications/carnegie | tarc_bus_locator_client/numpy-1.8.1/numpy/doc/broadcasting.py | Python | mit | 5,576 | 0.000179 | """
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a
``ValueError: frames are not aligned`` exception is thrown, indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 | x 3
When either | of the dimensions compared is one, the larger of the two is
used. In other words, the smaller of two axes is stretched or "copied"
to match the other.
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://www.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
"""
from __future__ import division, absolute_import, print_function
|
pozar87/apts | apts/objects/objects.py | Python | apache-2.0 | 1,992 | 0.006024 | import ephem
import numpy
import pytz
import datetime
from datetime import timedelta
from ..constants import ObjectTableLabels
class Objects:
    """
    Base helper for catalogues of astronomical objects observed from a place.

    Subclasses provide ``self.objects`` -- a pandas DataFrame with at least
    Transit, Altitude and Magnitude columns (see ``get_visible``).
    """

    def __init__(self, place):
        self.place = place

    def get_visible(self, conditions, start, stop, hours_margin=0, sort_by=None):
        """
        Return the rows of ``self.objects`` whose transit falls between
        *start* and *stop* (widened by *hours_margin* hours on both sides)
        and that satisfy the altitude and magnitude limits of *conditions*.

        :param sort_by: list of column labels to sort by; defaults to
            ``[ObjectTableLabels.TRANSIT]``.
        """
        if sort_by is None:
            # Build the default here rather than in the signature: mutable
            # default arguments are shared across calls.
            sort_by = [ObjectTableLabels.TRANSIT]
        visible = self.objects
        # Add ID column mirroring the index so callers can reference rows.
        visible['ID'] = visible.index
        margin = timedelta(hours=hours_margin)
        visible = visible[
            # Filter objects by their transit time
            (visible.Transit > start - margin) &
            (visible.Transit < stop + margin) &
            # Filter objects by their minimum altitude at transit
            (visible.Altitude > conditions.min_object_altitude) &
            # Filter objects by their magnitude
            (visible.Magnitude < conditions.max_object_magnitude)]
        # Sort ascending on every requested column. The previous
        # ``ascending=[1]`` raised in pandas whenever ``sort_by`` held more
        # than one column (length mismatch between ``by`` and ``ascending``).
        visible = visible.sort_values(sort_by, ascending=True)
        return visible

    @staticmethod
    def fixed_body(RA, Dec):
        # Create an ephem body at the given equatorial coordinates.
        # ``@staticmethod`` added: the original definition lacked ``self``,
        # so calling it on an instance would have bound the instance to RA.
        body = ephem.FixedBody()
        body._ra = str(RA)
        body._dec = str(Dec)
        return body

    def _compute_tranzit(self, body):
        # Next transit time converted to local time.
        # NOTE(review): "tranzit" spelling kept for caller compatibility.
        self.place.date = datetime.datetime.now()
        return self.place.next_transit(body).datetime().replace(
            tzinfo=pytz.UTC).astimezone(self.place.local_timezone)

    def _compute_setting(self, body):
        # Next setting time converted to local time.
        self.place.date = datetime.datetime.now()
        return self.place.next_setting(body).datetime().replace(
            tzinfo=pytz.UTC).astimezone(self.place.local_timezone)

    def _compute_rising(self, body):
        # Next rising time converted to local time.
        self.place.date = datetime.datetime.now()
        return self.place.next_rising(body).datetime().replace(
            tzinfo=pytz.UTC).astimezone(self.place.local_timezone)

    def _altitude_at_transit(self, body, transit):
        # Object altitude (in degrees) at the given transit time.
        self.place.date = transit.astimezone(pytz.UTC)
        body.compute(self.place)
        return numpy.degrees(body.alt)
|
drorlab/dabble | test/runtests.py | Python | gpl-2.0 | 233,788 | 0.000038 | #! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it let's you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
eNrsvYt2G0mSKDb3+nG9uN6Xvb72tX2PayBrq6oFQqS6Z2eWt9G9GjU1q51uSUePnV5zeKEiUARr
CFRBVQWRvDPj4y/wL/gXfI4/wt/iv3C88llZAKjunlmfY3aLBKoyIzMjIyMjIiMj/rd/+fv3P0re
/sX6djxdVovxdFqURTudvv8Xb/9hOBxG8GxRlIvo8ctnURKv62q+meV1E0dZOY/iWVU2mxV9h49l
PmvzefShyKKr/Pa6qudNGgGQweD9v3z7r7CFpp2//0/e/B//4kc/Klbrqm6j5rYZDGbLrGmi1+08
qc5/AzDS40EEP9j8KrvKm6it1gfL/EO+jNa37WVVRivoxhJeZB+yYpmdL/Mogy9llLVtXZxv2nxE
EPCHG8IhtJf5KoLKF0XdtFE2m+VNM1YtDejDPL+IFAaSJl9eSFfwB78CeubFDF5GE+z6WPphV17k
LfZC6o+iMlvlFpS2vjVf8GcFoKBJ6iVUouK6QH4zy9dt9IzentR1VbuV66xo8uixGjWVSIaAaUD0
MUzJZjmPyqoVJET3m2F0P3KbqPN2UwNGBwOoA33BaUgH7//Tt3+KEzar5vkYf73/z978yZWetvXt
wJrAi7paRUXZrGHuVFNPXkz/8fGrx69+8Xokn3958k+/evHqq9eDwfmmWMKMTOt8XUOL+GcwwN/L
4hy+Q7tSYjwFdDHAJMYC8SiKpWCcDgbFBc3CByDAoiph3i6q08Oz6ItJ9CnjiXrW1tksP89mV6pv
F1W9ytopIxcrVuXydpAvm9yqpUc/Xd8+2hOEUPITqNYl5es6W6/zOsrqagNr5yVTMjYRcdmG6DBI
hiOY6WssalESDB6n9jJrkN4SKTCKhrNqelEsc5zmYerTCxViJNPogFzloYKQ9tMqLQEFG2eOa4yt
FkPlYbktizIvK7+KeXEQHXVrdltxWpDF4VJ/aH28uV2rpYEYy2ykH0f3a1gUGn1p6i54eG66YK/z
/L2emwo4S21hWpaUqT/hIvjFBlHmu0Bgd7GABsHV/w74MJBSe6uBrbP20mdYSHQCJ6MCMuRoXRUl
c8QqaqpNPcsZIwk0lwObzGAVtxrMqlhcttQTqoeVkNPO2k22XN7CLBQNAUMKSMeahvFnzYSGbY+X
1SxbJgonNskYjN8Dfn97nkfzqoxbJD/oTNFEs8t8dgVN+KS/HtObxCPye9G3334rkBDGZVbPYd0t
iyscXB5d50U9x42tmHn1ipIKNC1sbhmWAX50ihQ6y6Cl8WY9z1r+fAZ9zJsvnfo42tD4/Eld903i
xWa55PnYPpWydF/z1MmkAkeiziMQNavYg6i6oOdEvxY8/dnhdoq/MQBTBoCOItr16AUs6nJudRWH
3NlSsJIh9+88MGvRxo0aIbHd0Kju6VVoCgooIO91BmMExDhIUdPj9MIanh4KbvH1opGl+yGrJ08z
2Dz6htVu1jAN1wUsQBwHVAWRCRYS0kYTGt7AISsg9hjaiCNYCU3eRm/qDQABQlEtYG2GJVMNpQsW
isq5A0qkMt2FJrq+zGHEdd7Atx48XgIUJqpLoIzZhmcEcECrHhExsLYXaw3ox1AGRBEYMTFSXBrq
ib2iodfuOtbVHuh6F8ts0UR/bUkXd6uhZRBvzqUwdIEQeXqsIJ2pPf1pDS86m/qv3D09U7v6BZaO
LqvlnDjjlJhfQzLzxXSxrM7hG8EAjnN9WcwugY3iLKAYA/wO+CvwrvxDttwAw5mP++XTETfli6l6
u6W3Y+hAd5vlzVn1xipr988qKGOwYNKD0HZJJdwXnthBIpICxFJHD1MEZtzmSKwh1qFf8kbBaIcP
sMRtIkY5UXVibLgslnxelbknMwTZwHCYBvd3DyTKU26PZS4s9oHzKpPHEtsnnwDhNd7Q1OyjkjXP
Y7U3MWqdDiN3QIWsBv5Bwmi2jLL5vJCPNE2aJzSDwGAbAg30t1m2iolI+wAjvGsYenDoAxCyvk3S
TjnZPBMaqY9JwgjjwqXKka5v4+8mn033QCAU+xjk/fttyPvhUGFpPTzA7fiILISgRqTkSJtBdXai
uMkuABsg55UHdT7bgNr0AdqAJXCAVJrCeqqRYZFihlw+lv02OG6zUIpqjJCpH9ID07uiAS1uk/d1
UKDYO99H7LFLEEKJcnGvbSJSo7HacgOjwpGArGq2vX032KKcLTfzvLOpqo3U33z23VSh29A1oJfT
M0Md2Ml6gaRqGIvCAjTuCbkd3czAHeOeVM6TBKqOXJI8hUdnlopjqVG/zG8DChRRJu5/LAuwOA7b
UzUD6uGBbhokmZfN7azqbKvUH7WFvlFK9EkJve8qyFmEkADDOb5HRGSW7q73QDIbTJv2dokbCvLv
AQ9jXaMBQD3bokgT/I5lR71gKYU+9myq6vW4Pbc3VrNl5a3VSWl35WrvgBDY5ugpjjQZknQ1BPV9
WZWLYep3zh7zSquiAR2C5BRvq/S2tKe6jBk1joVFiT7Idb7EwfbAtjF0IERH2ztvkEbVtyeGRjXt
gTj83CWY6H5zfH/+BSrrPnhUMEd2Fx4cfZw8sUMD2dQ1yhpG6rAXtcgUk+7gtXTQQdpeMsP+2j4p
+aCfkxJrKfYBrh1CocN9mWFsm+2AHKg3QtXlREMa0bJUv4dSElg2CM55vcxuSVKuyWhlb21F2eY1
MNPQfL0yb3l/z4olgjEThMxayTgZQGyhRD6PkFGgAc+WbnBRnleg3lyjhojdp/cNyQbwDWuILO7L
lZr3BAVKQxgtb9Nj3b90jDvuOnE58o0lKE8dDBCkkYX+EWplm+V8ikOf4M6V+nsb2X+Bs6JVAwTZ
mxH2Iw1sHr6tzOCWMZijLbk8EBmBzGYRgPO2Exchk+gmSDuqgENymkuEbQX30Hj/D6xgsbhvGRBF
Zzo4imD3y3CZWoYBZePObpItnGkUHbpKvtWNETDsliw/E5zgsBiiyc8svbGnTF/nsF+yQIHbKpGi
Bo1LF2cL2CKIhnim0bZ1bkmd98iKADW03ZkoOUJctraBzLVgK22HTTs2t6mzcpFPoZ2P4mKFsups
VaT0Di0WCIANDZasDzovAZ5GBUBEVHShMoQg+/IIH0v2guE9WHVDNYusOIF6zKasTbhFg5E0G6DU
LTZzaWQUTUcg2OAJS3ACbL4/ErSOOj3e50canMjfzmHS69uyzW4Csh73zt7IH1jmAnM8ckcUE2JP
oeSZmfnwRnhKaD6GfpzxOtTEaG8n/NBRMC6L+Twvt9gWSaQvLpxdXGw0eHCIwj2II1rYBHj5dOoR
c1MtP4jRHMG5OsSqAnpgAyOxTdQdYaEHpf8OifRvqqdxp1fx2WAvyb1PQei0JKrl9qb2UhQEOilq
tpjXtAEpr9O/i9LeInFBd0gXOFqOTY8DHaDq8ZdffmmUVTk/8nmFY5PvdANJv2evXvqbtcHIeZXV
82c48/Vm3QYOobw6wTaH0PuOoDaMoqdoxr9fg/SLu8X95tdlRL9RFL4oPcGXT5xHBNNaJGW/WuDj
R4yhGk2CRr0GGb4jzUlxX5xDcCjNefpfgtq6pfjpF/ogNC9n2brZLNH+hSJcdXGR19FlsbjEgxz0
BDCKFJ3j86pUYJCxFrl1uI9/T0S5c7WKPjWxPfc4Cb4tsmXxH3PeXRfFB9TyRRzxRjD2zY/qlLY9
H0UxqFplftPGnhBG6lsC7CkgnF1fIg2gzr2V3eLPbZEv5zyprGgjxGBJBDfB32PpkUeUTTv2jc0w
AEve6+4JoUpQxdDhbNPKY1zhE6Yfpl35Yklk8gSWDJphdIU+I5AhACXt8qE7kqI68SHBURd0mfj5
LRL5B7LZZ+UtkO/qvChJC8CqrGXK1kimfFt2hK3F5UfkZcKbDJ6zkgTRkpB3cJ4faJHaci1oUEHJ
6xVAnLs9o15ny2V | 13SAGlTuLNKLGFsQAyDRjt2NVLe4LLdv6sga1nKTOV9UHFl+hy5uS9rGcz3bP
i7bhk7N5ni0dcHSuhWdEJPoq47GSTx/q4aVh2yl05kbZvFxSkhODG4s1dd6TwjuJekXAJKESIp5G
0JipNaEJTTuHZPiTWCRn17YdLhQkcklZtlWcQ | onuOsMqquiYCtrA0572hcqspm+0/WkiNNhT1daK
nPphrQfhWV/TtHdfN/z7xtiNgqcinj9UARuo5gYgCFpNsBm02cDOkmj4vKOlY7syVrMZqqXRkrze
gt6aNMsCvh+m/iCkFXbgos0IIMLDTufJWql5cbGEFaA4XzlZZqvzeRbdHNOc3oy13JnehSHhcpnB
PpoB0ePYmogWnr/iQZzBJR9dbMoZMSBafSj+GjupsjiP7KaeAUx3GUjTI+JZbC5w5GKy4uKqxe6o
AqBLZzA4l77EtGRNFMl6DAGQ0mGngMYMz5SIf/E4iY+5YJ4RGvgcFI0tjFYHFsxCbgT9FC01H/J0
27GEoVaZRyUppa6SP6uz5pJIeYv+ACTTkvGDO2AxbTYZ0+Qs88ygS1BlFHXiz7reuJcbFsgNSS1M
DkCYO1gqxYa+HaW+zsZCDZY4Lc5Cth827xrc9a5v1+RtLefTg6Mz2yRHB0cVbBDz/GYL0oiksIza
FYgBPXSmHUmnzg1Mp29VXSxw/wWaQdPAGiXQuoDvLHfyAE1dPpSoLZq1cctmhUn029+76B6Z44a8
RF9WPJrzBiXeQXPHWYPOulEay/M57uNVdF3VV+IK4FVlryKa1WiVtxmMZAHIWOGWKWeT83xWQdtV
TV5HYsBZFx4gXiSLvKR+Nq77IFHhZfaBtNrLh3T6FeXvNyC1trcuIPSQwo4jNwE4bcDEwnTTsbIX
86TzBt1jBI+yS7mtkX0KtATx4yI0mmnL8NgD2nQFSdzWySTT5K2wEeb0p2cdE+eyS9MX7gg670G/
RkeFrhuDTRzkcoclYY6WYWkbWr8YqyPOi7GcZE8J6/32Gzz6kOHTIKUT06MJfLh7tUcT1dPQ9u2t
aJek9GmhPanm0D1kyxuo8ZHktVqDZpLEvSNC+aK333FwrPGX6OeLqIy18niiGOmz8qIKO9c25AwM
HJccgWGTUOtCa5Bmli/z5ZqmuMw+FItMC9Qeg1YMZEqafwsqEhoZ4l6lcbPWKgvbt319paAj6rC9
FF9MvDH4lO6fNNDYLFkIYJwenY2ix3S8COgiS0mAKCwLvXis67rxqlnEvgV0Sx/CFGc10Gjg2+Hh
WNSXMSlMDYpLSSyHpXEPcbNg50yRO/7jKPbOUwHD0jnomLGvH3ubNtGeWxUFTa52enjWX1PNiFuZ
WTLXPtpSG7cWTYpe++dS/9GW+tTJsuOEhY9toxh+B4kYH1nmzi40Le0k6liqK9l6J9amjjnLMiKZ
jCS9w2GwwwCi+7DbnYNoNOET4ShxxgdaukhNph+p40E0Q39QtWrrWzp83+Zh4iKEbMZ8DuaKvCQI
xwpgLFbjvFFGY5bBPUoBYK4fsTr6c4l4xD6s5EJKztcWDLEAeDYDh6JycUjJVKPaApE0VWDBQBHP
SE1ailypANjnOQhnIAQuwmI4nY7gFhu6KWGma2QtDOsURXHa8W+qoiRtuOm8xT/j2rfJIocV/HeO
K6iGxVg8xhFgL1ZTp5qm7BpnPqXiY4vS6trizkxvgIrdpxQufRh+grI0vAtpPF5DjFNuzlpneOED
aIHUHn+xybpQ7EEtE71A3PNlW/cw62zs6lSWg5ttPguIl57XfI88KWN9BUNAs/jXIEgglhIbOhrB
pe+urmc5ysGArlkK4WWPp+K3y3zC7jeuWJKdN2R7lILtOWuUE17RqKGjk9Y29oE7YEreY946VBZJ
90DXNdiZrh7TZ+VlgY7HniLq1sMBHUc4oN/R/P2urH6H5swPlpzDpVzGIeM7RkU8V8b0KGFVrXNK
hATaEmF4ZgeQ3xuiV8bshJs2+FP2CBkcwikWZQUKXFg7LgQSSpQxA4uDB2lIHK68iE/07vOcqiZd
Pze6NmdRss+6kn7pg1hZ/2teiluqqzZBpjC2tW6FNBV+6S7j1ML9xQoZ1lM28ObzExZ0EovezUdF
9PQ7TPPy16J69cGifPWhewVkBRwa9/VcdQMZzs6zwTBnITOEq7912ADzDO6xzT/wKHvkcrW0a7qE
VYo6lN2/TVkgd/pn00fpj/RTXU7wZ7ujHwnTYXuD9n0Shyflt/JUbI2sMf2CjQxV3ZjTrHtsAfEP
2vjW5LK6nq6y+irHU6XhF1wDYVtPT/ovMuzgyJoimevuy4T5DNcwmYnVjldIvCtdhiiSqnCsiW7X
ux0hzaO8Ix/dAtJ59IDgT95r5fZC9ilns0YLWqnOzMT3wfaJuigWG3Reh3nkonw9h04nPX+d7n1O
ddAd8EMkaYebOzhKv/8z76B/gtshXEx9vsrbGu92oK8T1sI69FfaZ7AeGQtpdMAKhfYASD2Jipax
4y7W8TFWriy84l3vMj3/adi3Knzga1y1ePrmudBKGnavsbqsvfW1U37QW9jz6/euHEwtB31/uMoz
Uq2Gzs2uxvV+tzzgWfiDD6wsF6UjKloe8SyOBnyO4L3tA88ARXJXw1HwU8990ZYvoe+PsS3e2mxB
curMtXLLRDY+JYv35IBFUG0CGkU7VcwLxcSJ+xLLnEebtZpm0oHGQRXLwuMOlzzjUOXdbkLPk7Tj
k8KDgeKHdgPWm8+jw+O+Wg8mkcVDzEJYw+Yxhf3ookDIQ8KC0/+u6sZjlyl1ADxQxE9FTk3zZ8qz
dpu3wgVZ20siMgfOsQWoS2DbusNuN/2laVFYO+UDwsBwy0DS/bpsajw4Ov5OnSaT3ExZ+MK8R7aa
CXBJNWWEXyOdqn3Lm37TFdCTNUZwSSmZR9oeSRsT/jOiFZEtxcm5w+IIprtGXSuPD/YzA9FflKF1
pcY7hP8+ka/WLrwQD/Q6d+wytqwFO7S1Iao2pFs9uva4WS+LNol/XcbWPTIS16Q/TFCWkPVAOnd6
dOxeLlJUI21vWWFWAyGClnNFwV7g6M |
gallenmu/MoonGen | libmoon/deps/tbb/build/build.py | Python | mit | 8,400 | 0.009167 | #!/usr/bin/env python
#
# Copyright (c) 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
#
# Provides unified tool for preparing TBB for packaging
from __future__ import print_function
import os
import re
import sys
import shutil
import platform
import argparse
from glob import glob
from collections import OrderedDict
jp = os.path.join
is_win = (platform.system() == 'Windows')
is_lin = (platform.system() == 'Linux')
is_mac = (platform.system() == 'Darwin')
default_prefix = os.getenv('PREFIX', 'install_prefix')
if is_win:
default_prefix = jp(default_prefix, 'Library') # conda-specific by default on Windows
parser = argparse.ArgumentParser()
parser.add_argument('--tbbroot', default='.', help='Take Intel TBB from here')
parser.add_argument('--prefix', default=default_prefix, help='Prefix')
parser.add_argument('--prebuilt', default=[], action='append', help='Directories to | find prebuilt files')
parser.add_argument('--no-rebuild', default=False, action='store_true', help='do not rebuild')
parser.add_argument('--install', default=False, action='store_true', help='install all')
parser.add_argument('--install-libs', default=False, action='store_true', help=' | install libs')
parser.add_argument('--install-devel', default=False, action='store_true', help='install devel')
parser.add_argument('--install-docs', default=False, action='store_true', help='install docs')
parser.add_argument('--install-python',default=False, action='store_true', help='install python module')
parser.add_argument('--make-tool', default='make', help='Use different make command instead')
parser.add_argument('--copy-tool', default=None, help='Use this command for copying ($ tool file dest-dir)')
parser.add_argument('--build-args', default="", help='specify extra build args')
parser.add_argument('--build-prefix', default='local', help='build dir prefix')
if is_win:
parser.add_argument('--msbuild', default=False, action='store_true', help='Use msbuild')
parser.add_argument('--vs', default="2012", help='select VS version for build')
parser.add_argument('--vs-platform', default="x64", help='select VS platform for build')
parser.add_argument('ignore', nargs='?', help="workaround conda-build issue #2512")
args = parser.parse_args()
if args.install:
args.install_libs = True
args.install_devel = True
args.install_docs = True
args.install_python= True
def custom_cp(src, dst):
    # Shell out to the user-supplied copy tool ("$ tool file dest-dir").
    cmd = ' '.join([args.copy_tool, src, dst])
    assert os.system(cmd) == 0
if args.copy_tool:
install_cp = custom_cp # e.g. to use install -p -D -m 755 on Linux
else:
install_cp = shutil.copy
bin_dir = jp(args.prefix, "bin")
lib_dir = jp(args.prefix, "lib")
inc_dir = jp(args.prefix, 'include')
doc_dir = jp(args.prefix, 'share', 'doc', 'tbb')
if is_win:
os.environ["OS"] = "Windows_NT" # make sure TBB will interpret it corretly
libext = '.dll'
libpref = ''
dll_dir = bin_dir
else:
libext = '.dylib' if is_mac else '.so.2'
libpref = 'lib'
dll_dir = lib_dir
tbb_names = ["tbb", "tbbmalloc", "tbbmalloc_proxy"]
##############################################################
def run_make(arg):
    """Run make in parallel; on failure, retry serially so the log stays
    readable and the build gets a second chance."""
    parallel = '%s -j %s' % (args.make_tool, arg)
    if os.system(parallel) != 0:
        print("\nBummer. Running serial build in order to recover the log and have a chance to fix the build")
        serial = '%s %s' % (args.make_tool, arg)
        assert os.system(serial) == 0
os.chdir(args.tbbroot)
if args.prebuilt:
release_dirs = sum([glob(d) for d in args.prebuilt], [])
print("Using pre-built files from ", release_dirs)
else:
if is_win and args.msbuild:
preview_release_dir = release_dir = jp(args.tbbroot, 'build', 'vs'+args.vs, args.vs_platform, 'Release')
if not args.no_rebuild or not os.path.isdir(release_dir):
assert os.system('msbuild /m /p:Platform=%s /p:Configuration=Release %s build/vs%s/makefile.sln'% \
(args.vs_platform, args.build_args, args.vs)) == 0
preview_debug_dir = debug_dir = jp(args.tbbroot, 'build', 'vs'+args.vs, args.vs_platform, 'Debug')
if not args.no_rebuild or not os.path.isdir(debug_dir):
assert os.system('msbuild /m /p:Platform=%s /p:Configuration=Debug %s build/vs%s/makefile.sln'% \
(args.vs_platform, args.build_args, args.vs)) == 0
else:
release_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release')
debug_dir = jp(args.tbbroot, 'build', args.build_prefix+'_debug')
if not args.no_rebuild or not (os.path.isdir(release_dir) and os.path.isdir(debug_dir)):
run_make('tbb_build_prefix=%s %s'% (args.build_prefix, args.build_args))
preview_release_dir = jp(args.tbbroot, 'build', args.build_prefix+'_preview_release')
preview_debug_dir = jp(args.tbbroot, 'build', args.build_prefix+'_preview_debug')
if not args.no_rebuild or not (os.path.isdir(preview_release_dir) and os.path.isdir(preview_debug_dir)):
run_make('tbb_build_prefix=%s_preview %s tbb_cpf=1 tbb'% (args.build_prefix, args.build_args))
release_dirs = [release_dir, debug_dir, preview_release_dir, preview_debug_dir]
filemap = OrderedDict()
def append_files(names, dst, paths=release_dirs):
    """Register every file matching *names* under *paths* to be copied
    into *dst* (recorded in the global ``filemap``)."""
    global filemap
    matches = [f for d in paths for pattern in names for f in glob(jp(d, pattern))]
    filemap.update((f, dst) for f in matches)
if args.install_libs:
append_files([libpref+f+libext for f in tbb_names], dll_dir)
if args.install_devel:
dll_files = [libpref+f+'_debug'+libext for f in tbb_names] # adding debug libraries
if not is_win or not args.msbuild:
dll_files += [libpref+"tbb_preview"+libext, libpref+"tbb_preview_debug"+libext]
if is_win:
dll_files += ['tbb*.pdb'] # copying debug info
if is_lin:
dll_files += ['libtbb*.so'] # copying linker scripts
# symlinks .so -> .so.2 should not be created instead
# since linking with -ltbb when using links can result in
# incorrect dependence upon unversioned .so files
append_files(dll_files, dll_dir)
if is_win:
append_files(['*.lib', '*.def'], lib_dir) # copying linker libs and defs
for rootdir, dirnames, filenames in os.walk(jp(args.tbbroot,'include')):
files = [f for f in filenames if not '.html' in f]
append_files(files, jp(inc_dir, rootdir.split('include')[1][1:]), paths=(rootdir,))
if args.install_python: # RML part
irml_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release')
run_make('-C src tbb_build_prefix=%s %s python_rml'% (args.build_prefix, args.build_args))
if is_lin:
append_files(['libirml.so.1'], dll_dir, paths=[irml_dir])
if args.install_docs:
files = [
'CHANGES',
'LICENSE',
'README',
'README.md',
'Release_Notes.txt',
]
append_files(files, doc_dir, paths=release_dirs+[jp(args.tbbroot, d) for d in ('.', 'doc')])
for f in filemap.keys():
assert os.path.exists(f)
assert os.path.isfile(f)
if filemap:
print("Copying to prefix =", args.prefix)
for f, dest in filemap.items():
if not os.path.isdir(dest):
os.makedirs(dest)
print("+ %s to $prefix%s"%(f,dest.replace(args.prefix, '')))
install_cp(f, dest)
if args.install_python: # Python part
paths = [os.path.abspath(d) for d in (args.prefix, irml_dir, lib_dir, inc_dir)]
os.environ["TBBROOT"] = paths[0]
# all the paths must be relative to python/ directory or be absolute
assert os.syst |
andrius-momzyakov/grade | web/migrations/0009_auto_20170730_0156.py | Python | gpl-3.0 | 735 | 0.002809 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-30 01:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a header-placement flag to ContactEmail (mirroring the one added
    # to ContactPhone in 0008) and an optional photo to ContactPerson.

    dependencies = [
        ('web', '0008_contactphone_place_on_header'),
    ]

    operations = [
        migrations.AddField(
            model_name='contactemail',
            name='place_on_header',
            field=models.BooleanField(default=False, verbose_name='Размещать в заголовке'),
        ),
        migrations.AddField(
            model_name='contactperson',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='Фото'),
        ),
    ]
|
philanthropy-u/edx-platform | openedx/features/partners/migrations/0008_auto_20201203_0918.py | Python | agpl-3.0 | 1,673 | 0.002989 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2020-12-03 14:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes several Partner fields: the text/char fields become optional
    # (blank=True) with explicit length limits, and partnership_year becomes
    # nullable with a default of 2020.

    dependencies = [
        ('partners', '0007_auto_20201203_0823'),
    ]

    operations = [
        migrations.AlterField(
            model_name='partner',
            name='heading1',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='partner',
            name='heading1_description',
            field=models.TextField(blank=True, help_text='max 500 characters', max_length=500),
        ),
        migrations.AlterField(
            model_name='partner',
            name='heading2',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='partner',
            name='heading2_description',
            field=models.TextField(blank=True, help_text='max 500 characters', max_length=500),
        ),
        migrations.AlterField(
            model_name='partner',
            name='main_description',
            field=models.TextField(blank=True, help_text='max 500 characters', max_length=500),
        ),
        migrations.AlterField(
            model_name='partner',
            name='main_title',
            field=models.CharField(blank=True, help_text='max 100 characters', max_length=100),
        ),
        migrations.AlterField(
            model_name='partner',
            name='partnership_year',
            field=models.PositiveIntegerField(blank=True, default=2020, null=True),
        ),
    ]
|
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/polynomial/tests/test_polynomial.py | Python | mit | 19,204 | 0 | """Tests for polynomial module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
def trim(x):
    """Strip trailing coefficients of *x* whose magnitude is below 1e-6."""
    tol = 1e-6
    return poly.polytrim(x, tol=tol)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = | [1, 0, -8, 0, 8]
T | 5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestConstants(TestCase):
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
def test_polyzero(self):
assert_equal(poly.polyzero, [0])
def test_polyone(self):
assert_equal(poly.polyone, [1])
def test_polyx(self):
assert_equal(poly.polyx, [0, 1])
class TestArithmetic(TestCase):
def test_polyadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = poly.polyadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polysub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = poly.polysub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polymulx(self):
assert_equal(poly.polymulx([0]), [0])
assert_equal(poly.polymulx([1]), [0, 1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i + 1) + [1]
assert_equal(poly.polymulx(ser), tgt)
def test_polymul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += 1
res = poly.polymul([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polydiv(self):
# check zero division
assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
# check scalar division
quo, rem = poly.polydiv([2], [2])
assert_equal((quo, rem), (1, 0))
quo, rem = poly.polydiv([2, 2], [2])
assert_equal((quo, rem), ((1, 1), 0))
# check rest.
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0] * i + [1, 2]
cj = [0] * j + [1, 2]
tgt = poly.polyadd(ci, cj)
quo, rem = poly.polydiv(tgt, ci)
res = poly.polyadd(poly.polymul(quo, ci), rem)
assert_equal(res, tgt, err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
y = poly.polyval(x, [1., 2., 3.])
def test_polyval(self):
# check empty input
assert_equal(poly.polyval([], [1]).size, 0)
# check normal input)
x = np.linspace(-1, 1)
y = [x ** i for i in range(5)]
for i in range(5):
tgt = y[i]
res = poly.polyval(x, [0] * i + [1])
assert_almost_equal(res, tgt)
tgt = x * (x ** 2 - 1)
res = poly.polyval(x, [0, -1, 0, 1])
assert_almost_equal(res, tgt)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(poly.polyval(x, [1]).shape, dims)
assert_equal(poly.polyval(x, [1, 0]).shape, dims)
assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
def test_polyvalfromroots(self):
# check exception for broadcasting x values over root array with
# too few dimensions
assert_raises(ValueError, poly.polyvalfromroots,
[1], [1], tensor=False)
# check empty input
assert_equal(poly.polyvalfromroots([], [1]).size, 0)
assert_(poly.polyvalfromroots([], [1]).shape == (0,))
# check empty input + multidimensional roots
assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)
assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))
# check scalar input
assert_equal(poly.polyvalfromroots(1, 1), 0)
assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,))
# check normal input)
x = np.linspace(-1, 1)
y = [x ** i for i in range(5)]
for i in range(1, 5):
tgt = y[i]
res = poly.polyvalfromroots(x, [0] * i)
assert_almost_equal(res, tgt)
tgt = x * (x - 1) * (x + 1)
res = poly.polyvalfromroots(x, [-1, 0, 1])
assert_almost_equal(res, tgt)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)
assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)
assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)
# check compatibility with factorization
ptest = [15, 2, -16, -2, 1]
r = poly.polyroots(ptest)
x = np.linspace(-1, 1)
assert_almost_equal(poly.polyval(x, ptest),
poly.polyvalfromroots(x, r))
# check multidimensional arrays of roots and values
# check tensor=False
rshape = (3, 5)
x = np.arange(-3, 2)
r = np.random.randint(-5, 5, size=rshape)
res = poly.polyvalfromroots(x, r, tensor=False)
tgt = np.empty(r.shape[1:])
for ii in range(tgt.size):
tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])
assert_equal(res, tgt)
# check tensor=True
x = np.vstack([x, 2 * x])
res = poly.polyvalfromroots(x, r, tensor=True)
tgt = np.empty(r.shape[1:] + x.shape)
for ii in range(r.shape[1]):
for jj in range(x.shape[0]):
tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])
assert_equal(res, tgt)
def test_polyval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)
# test values
tgt = y1 * y2
res = poly.polyval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = poly.polyval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_polyval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)
# test values
tgt = y1 * y2 * y3
res = poly.polyval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = poly.polyval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_polygrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j->ij', y1, y2)
res = poly.polygrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = poly.polygrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3) * 2)
def test_polygrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = poly.polygrid3d(x1, x2, x3, self.c3d)
assert_almost_equal( |
stewartsmith/bzr | bzrlib/tests/test_diff.py | Python | gpl-2.0 | 65,261 | 0.005225 | # Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
from cStringIO import StringIO
import subprocess
import sys
import tempfile
from bzrlib import (
diff,
errors,
osutils,
patiencediff,
_patiencediff_py,
revision as _mod_revision,
revisionspec,
revisiontree,
tests,
transform,
)
from bzrlib.symbol_versioning import deprecated_in
from bzrlib.tests import features, EncodingAdapter
from bzrlib.tests.blackbox.test_diff import subst_dates
from bzrlib.tests import (
features,
)
def udiff_lines(old, new, allow_binary=False):
output = StringIO()
diff.internal_diff('old', old, 'new', new, output, allow_binary)
output.seek(0, 0)
return output.readlines()
def external_udiff_lines(old, new, use_stringio=False):
if use_stringio:
# StringIO has no fileno, so it tests a different codepath
output = StringIO()
else:
output = tempfile.TemporaryFile()
try:
diff.external_diff('old', old, 'new', new, output, diff_opts=['-u'])
except errors.NoDiff:
raise tests.TestSkipped('external "diff" not present to test')
output.seek(0, 0)
lines = output.readlines()
output.close()
return lines
class TestDiff(tests.TestCase):
def test_add_nl(self):
"""diff generates a valid diff for patches that add a newline"""
lines = udiff_lines(['boo'], ['boo\n'])
self.check_patch(lines)
self.assertEquals(lines[4], '\\ No newline at end of file\n')
## "expected no-nl, got %r" % lines[4]
def test_add_nl_2(self):
"""diff generates a valid diff for patches that change last line and
add a newline.
"""
lines = udiff_lines(['boo'], ['goo\n'])
self.check_patch(lines)
self.assertEquals(lines[4], '\\ No newline at end of file\n')
## "expected no-nl, got %r" % lines[4]
def test_remove_nl(self):
"""diff generates a valid diff for patches that change last line and
add a newline.
"""
lines = udiff_lines(['boo\n'], ['boo'])
self.check_patch(lines)
self.assertEquals(lines[5], '\\ No newline at end of file\n')
## "expected no-nl, got %r" % lines[5]
def check_patch(self, lines):
self.assert_(len(lines) > 1)
## "Not enough lines for a file header for patch:\n%s" % "".join(lines)
self.assert_(lines[0].startswith ('---'))
## 'No orig line for patch:\n%s' % "".join(lines)
self.assert_(lines[1].startswith ('+++'))
## 'No mod line for patch:\n%s' % "".join(lines)
self.assert_(len(lines) > 2)
## "No hunks for patch:\n%s" % "".join(lines)
self.assert_(lines[2].startswith('@@'))
## "No hunk header for patch:\n%s" % "".join(lines)
self.assert_('@@' in lines[2][2:])
## "Unterminated hunk header for patch:\n%s" % "".join(lines)
def test_binary_lines(self):
empty = []
uni_lines = [1023 * 'a' + '\x00']
self.assertRaises(errors.BinaryFile, udiff_lines, uni_lines , empty)
self.assertRaises(errors.BinaryFile, udiff_lines, empty, uni_lines)
udiff_lines(uni_lines , empty, allow_binary=True)
udiff_lines(empty, uni_lines, allow_binary=True)
def test_external_diff(self):
lines = external_udiff_lines(['boo\n'], ['goo\n'])
self.check_patch(lines)
self.assertEqual('\n', lines[-1])
def test_external_diff_no_fileno(self):
# Make sure that we can handle not having a fileno, even
# if the diff is large
lines = external_udiff_lines(['boo\n']*10000,
['goo\n']*10000,
use_stringio=True)
self.check_patch(lines)
def test_external_diff_binary_lang_c(self):
for lang in ('LANG', 'LC_ALL', 'LANGUAGE'):
self.overrideEnv(lang, 'C')
lines = external_udiff_lines(['\x00foobar\n'], ['foo\x00bar\n'])
# Older versions of diffutils say "Binary files", newer
# versions just say "Files".
self.assertContainsRe(lines[0], '(Binary f|F)iles old and new differ\n')
self.assertEquals(lines[1:], ['\n'])
def test_no_external_diff(self):
"""Check that NoDiff is raised when diff is not available"""
# Make sure no 'diff' command is available
# XXX: Weird, using None instead of '' breaks the test -- vila 20101216
sel | f.overrideEnv('PATH', '')
self.assertRaises(errors.NoDiff, diff.external_diff,
'old', ['boo\n'], 'new', ['goo\n'],
StringIO(), diff_opts=['-u'])
def test_internal_diff_default(self):
# Default internal diff encoding is utf | 8
output = StringIO()
diff.internal_diff(u'old_\xb5', ['old_text\n'],
u'new_\xe5', ['new_text\n'], output)
lines = output.getvalue().splitlines(True)
self.check_patch(lines)
self.assertEquals(['--- old_\xc2\xb5\n',
'+++ new_\xc3\xa5\n',
'@@ -1,1 +1,1 @@\n',
'-old_text\n',
'+new_text\n',
'\n',
]
, lines)
def test_internal_diff_utf8(self):
output = StringIO()
diff.internal_diff(u'old_\xb5', ['old_text\n'],
u'new_\xe5', ['new_text\n'], output,
path_encoding='utf8')
lines = output.getvalue().splitlines(True)
self.check_patch(lines)
self.assertEquals(['--- old_\xc2\xb5\n',
'+++ new_\xc3\xa5\n',
'@@ -1,1 +1,1 @@\n',
'-old_text\n',
'+new_text\n',
'\n',
]
, lines)
def test_internal_diff_iso_8859_1(self):
output = StringIO()
diff.internal_diff(u'old_\xb5', ['old_text\n'],
u'new_\xe5', ['new_text\n'], output,
path_encoding='iso-8859-1')
lines = output.getvalue().splitlines(True)
self.check_patch(lines)
self.assertEquals(['--- old_\xb5\n',
'+++ new_\xe5\n',
'@@ -1,1 +1,1 @@\n',
'-old_text\n',
'+new_text\n',
'\n',
]
, lines)
def test_internal_diff_no_content(self):
output = StringIO()
diff.internal_diff(u'old', [], u'new', [], output)
self.assertEqual('', output.getvalue())
def test_internal_diff_no_changes(self):
output = StringIO()
diff.internal_diff(u'old', ['text\n', 'contents\n'],
u'new', ['text\n', 'contents\n'],
output)
self.assertEqual('', output.getvalue())
def test_internal_diff_returns_bytes(self):
import StringIO
output = StringIO.StringIO()
diff.internal_diff(u'old_\xb5', ['old_text\n'],
u'new_\xe5', ['new_text\n'], output)
self.assertIsInstance(output.getvalue(), str,
'internal_diff should return bytestrings')
def test_interna |
geobricks/geobricks_dockerizer | geobricks_common_settings.py | Python | gpl-2.0 | 684 | 0.001462 | import logging
settings_app = {
"settings": {
# base url used by nginx
"base_url": "",
# Logging configurations
"logging": {
"level": logging.WARN,
"format": "%(asctime)s | %(levelname)-8s | %(name)-20s | Line: %(lineno)-5d | %(message)s",
"datefmt": "%d-%m-%Y | %H:%M:%s"
},
# Folders
"folders": {
"tmp": "/tmp/",
"geoserver_datadir": "/geoserver_data_dir/",
"distribution": "/distribution/",
| "distribution_sld": "/distribution_sld/",
"st | orage": "/storage/",
"workspace_layer_separator": ":"
}
}
}
|
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/TestCode/OpenCV/imutils-test/imultils-test2.py | Python | gpl-3.0 | 2,355 | 0.000425 | from threading import Thread
import winsound
import cv2
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indica | te if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
| t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
self.stream.release()
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
vs = WebcamVideoStream(src=0).start()
while 1:
img = vs.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
eyes = eye_cascade.detectMultiScale3(roi_gray, outputRejectLevels=True)
s = 0
a = 0
for i in eyes[2]:
s = s + i[0]
a = s / len(eyes[2])
if a < 0.25:
# frequency = 1500 # Set Frequency To 2500 Hertz
# duration = 1000 # Set Duration To 1000 ms == 1 second
# winsound.Beep
print("BEEP")
for (ex, ey, ew, eh) in eyes[0]:
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
cv2.imshow("img", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
vs.stop()
|
nwcrazysword/reversalcode | src/test.py | Python | apache-2.0 | 233 | 0.030043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017- | 07-21 18:15:11
import os
import tensorflow as tf
import numpy as np
x_data=np.float32(np.random.rand(2,100))
y_data=np.dot([0.100,0.200],x | _data)+0.300
b=tf.Variable() |
MSusik/invenio | invenio/ext/admin/__init__.py | Python | gpl-2.0 | 3,353 | 0.003877 | # -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Flask-Admin support in Invenio
------------------------------
Please see http://flask-admin.readthedocs.org/en/latest/quickstart/ prior to
reading this documentation, to understand how Flask-Admin works.
Flask admin allows you to easily create web administration interfaces for your
SQLAlchemy models. This extension takes care of using Blueprint as base class
for the admin views.
By default this extension will look for invenio.<name>_admin modules and call
the method register_admin(app, admin) in each module to allow to register its
administration views.
By default all view are restricted to super users only. This can be changed via
the acc_<action>_action class variables.
Usage example - create a file called <module>_admin.py::
from invenio.ext.admin import InvenioModelView
from invenio.ext.sqlalchemy import db
from invenio.<module>_models import MyModel
| class MyModelAdmin(InvenioModelView):
acc_edit_action = 'cfgmymodel'
_can_create = False
_can_edit = True
_can_delete = False
# ... Flaks-Admin options ...
# column_list = ( ... )
def __init__(self, model, session, **kwargs):
super(MyModelAdmin, self).__init__(model, session, **kwargs)
def register_admin(app, admin):
ad | min.add_view(MyModelAdmin(MyModel, db.session, name='My model',
category="My Category"))
"""
from __future__ import absolute_import
from flask.ext.admin import Admin
from flask.ext.registry import ModuleAutoDiscoveryRegistry
from .views import AdminIndexView
#
# Utility method
#
class AdminDiscoveryRegistry(ModuleAutoDiscoveryRegistry):
setup_func_name = 'register_admin'
def __init__(self, *args, **kwargs):
self.admin = kwargs.pop('admin', None)
super(AdminDiscoveryRegistry, self).__init__(*args, **kwargs)
def register(self, module, *args, **kwargs):
super(AdminDiscoveryRegistry, self).register(
module, self.app, self.admin, *args, **kwargs
)
def setup_app(app):
"""
Register all administration views with the Flask application
"""
app.config.setdefault("ADMIN_NAME", "Invenio")
# Initialize app
admin = Admin(
name=app.config['ADMIN_NAME'],
index_view=AdminIndexView(),
base_template="admin_base.html"
)
admin.init_app(app)
# Create registry and run discovery
app.extensions['registry']['admin'] = AdminDiscoveryRegistry(
'admin', app=app, with_setup=True, admin=admin
)
|
bmcage/centrifuge-1d | centrifuge1d/modules/base/options.desc.py | Python | gpl-2.0 | 16,567 | 0.002294 | # This file contains the description of base options
CONFIG_OPTIONS = {
'general': {'g': "Gravitational constant. Default is 981 cm/s^2. Type:float."},
'experiment': \
{'exp_type': ("Identifier of type of experiment do determine which "
"module should be used. Type: string"),
'tube_no': ("Tube number. The given tube number should correspond to "
"an entry in the 'constants.ini' file, where starting "
"and ending filter proberties should be specified"
"Type: integer"),
'tube_diam': "Diameter of the tube.",
're': ("Distance (radius) from the centrifuge axis to the end of the "
"ending filter; on the other hand, value r0 is the distance "
"the centrifuge axis to the beginning of soil sample, i.e. "
"re=r0 + L + fl2, where L is soil length (see 'l0') and "
"'fl2' is the thickness of the ending filter. Either 're' "
"or 'r0' has to be specified (but not both). Type: float"),
'r0': "See parameter re",
'r0_fall': ("When simulating falling head test in gravitational field "
"we use centrifuge with a very long arm that rotates at "
"slow speed such, that the effect is similar to the g "
"level. Corresponding rotational speed is calculated "
"automatically. Type: float"),
'ks': ("Saturated hydraulic conductivity of the soil sample in cm/s. "
"Type: float"),
'l0': "Soil sample (initial) length. Type: float or array of floats.",
'wt_out': ("Distance from 're' to the basis of the outflow cup. "
"Used for computing the force acting on the outflow cup. "
"If force is not computed, it can be any value (as it is) "
"ignored. Type: float"),
'wl0': ("Length of water on the inflow (above the soil) in cm. "
"Type: float"),
'ww0': ("Weight of water on the inflow (above the soil) in gramms. "
"Only one of 'wl0' and 'ww0' can be specified."
"Type: float"),
'descr': "(optional) Description of the experiment. Type: string"
},
'filters': \
# These values are set based on the value of 'tube_no'. See 'tube_no'
# for more.
{'fl1': "Length (thickness) of the starting filter. Type: float",
'fl2': "Length (thickness) of the ending filter. Type: float",
'ks1': ("Saturated hydraulic conductivity of starting filter [cm/s]."
"Type: float"),
'ks2': ("Saturated hydraulic conductivity of ending filter [cm/s]."
"Type: float"),
},
'fluid': \
# we use water, so these are probably not needed to be changed
{'density': "Fluid density. For water it is ~1.0 g/cm3. Type: float."},
'measurements': \
{'include_acceleration': ("Flag whether to include acceleration and/or "
"deceleration into simulation. If True, "
"acceleration is included and deceleration "
"only if also 'deceleration_duration' > 0. "
"See also 'deceleration_duration'. "
"Type: boolean"),
'duration': ("Duration of a phase for omega > 0. If the flag "
"'include_acceleration' is True, also centrifuge "
"acceleration is simulated during this phase."
"One iteration consists of an optional centrifugation "
"phase followed by an optional deceleration phase and an "
"also optional gravitational phase. In each iteration at "
"least one phase has to have duration > 0 (otherwise "
"there is not much to simulate:D), whereas deceleration "
"phase duration > 0 has obviously sense only if a "
"centrifugal phase duration is also > 0."
"Type: float or array of floats."),
'deceleration_duration': ("Duration of a deceleration phase.If the "
"flag 'include_acceleration' is True, then "
"also deceleration is simulated. Otherwise "
"this value is ignored. "
"Type: same as 'duration'."),
'fh_duration': ("Duration of a phase for omega = 0, i.e. only "
"gravitational force is applied. Used when sample was "
"for example left some time outside the centrifuge "
"or for falling-head test simulation. See also option "
"'r0_fall'. Type: same as 'duration'."),
'measurements_keep',: "See 'measurements_remove'.",
'measurements_remove': \
("For filtering measurements 'measurements_keep' (further 'keep') "
"and 'measurements_remove' (further 'remove') are used. They are "
"of type dict with keys being the measurements names and values "
"is a list of indices. If 'keep' is supplied, indices specified "
"will be preserved whereas the rest is dropped. On the other "
"if 'remove' is supplied, indices specified in there will be "
"removed an the rest is preserved. These two parameters are not "
"complementary, in fact what can be achieved using one, can be "
"achived also using only the other. But sometimes it is more "
"convenient to specify it one way than the other."),
'smoothing': ("Measured data can be 'smoothed', which may improve "
"found results. Type: dict with key being measurement "
"name and value is one of 'smlin' - linear averaging, "
"'smtri' - triangular averaging, 'smgau' - gaussian "
"averaging."),
'l1': ("Soil sample length at measured time. "
"Type: array of floats or None"),
'wl1': ("Measured length of water above the soil at given (measured) | "
"time in cm. Type: array of floats or None"),
'ww1': ("Measured weight of water above the soil at given (measured) "
"time, in gramms. Type: array of floats or None"),
'wl_out': ("Measured length of water in the outflow chamber at given "
"(measured) time. Units: array of floats or None"),
'gc1': "Measured gravitational center | . Units: array of floats or None",
'rm1': "Measured rotational momentum. Units: array of floats or None",
'measurements_scale_coefs': \
("When running inverse problem, multiple meaurements can be "
"given, potentially with different scales (e.g. MO ~ 0.5 whereas "
"GC ~ 2.5, which can cause that solver \"prefers\" the "
"optimization of GC in favour of MO, because it introduces adds "
"more error in least-squares. Therefore a scaling is important "
"to make the data of roughly the same order.) By default the "
"data is scaled so that the biggest number in measurements of "
"given type is in <1, 10) interval. See also *_weights, options, "
"which specify a weight - i.e. importance of given measurement."
"Value: dict of type: {measurement_name1: scale_coef1, ...}."),
'gf_mo': ("Measured centrifugal force of the expelled water. More "
"precisely it is the force divided by g (= gravitational "
"constant)."),
'gf_mt': ("Measured centrifugal force of the water inside the tube. "
"More precisely it is the force divided by g "
"(= gravitational constant)."),
'gf_mo_tara': ("Force implied on th |
jhseu/tensorflow | tensorflow/python/training/rmsprop.py | Python | apache-2.0 | 10,430 | 0.002972 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""One-line documentation for rmsprop module.
rmsprop algorithm [tieleman2012rmsprop]
A detailed description of rmsprop.
- maintain a moving (discounted) average of the square of gradients
- divide gradient by the root of this average
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square + epsilon)
delta = - mom
This implementation of RMSProp uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving (discounted) average of the
gradients, and uses that average to estimate the variance:
mean_grad = decay * mean_square{t-1} + (1-decay) * gradient
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t /
sqrt(mean_square - mean_grad**2 + epsilon)
delta = - mom
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.RMSPropOptimizer"])
class RMSPropOptimizer(optimizer.Optimizer):
"""Optimizer that implements the RMSProp algorithm (Tielemans et al.
2012).
References:
Coursera slide 29:
Hinton, 2012
([pdf](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf))
"""
def __init__(self,
learning_rate,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
use_locking=False,
centered=False,
name="RMSProp"):
"""Construct a new RMSProp optimizer.
Note that in the dense implementation of this algorithm, variables and their
corresponding accumulators (momentum, gradient moving average, square
gradient moving average) will be updated even if the gradient is zero
(i.e. accumulators will decay, momentum will be applied). The sparse
implementation (used when the gradient is an `IndexedSlices` object,
typically because of `tf.gather` or an embedding lookup in the forward pass)
will not update variable slices or their accumulators unless those slices
were used in the forward pass (nor is there an "eventual" correction to
account for these omitted updates). This leads to more efficient updates for
large embedding lookup tables (where most of the slices are not accessed in
a particular graph execution), but differs from the published algorithm.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
decay: Discounting factor for the history/coming gradient
momentum: A scalar tensor.
epsilon: Small value to avoid zero denominator.
use_locking: If True use locks for update operation.
centered: If True, gradients are normalized by the estimated variance of
the gradient; if False, by the uncentered second moment. Setting this to
True may help with training, but is slightly more expensive in terms of
computation and memory. Defaults to False.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "RMSProp".
@compatibility(eager)
When eager executio | n is enabled, `learning_rate`, `decay`, `momentum`, and
`epsilon` can each be a callable that takes no arguments and returns the
actual value to use. This can be useful for changing these values across
different invocations of optimizer functions.
@end_compatibility
"""
super(RMSPropOptimizer, self).__init__( | use_locking, name)
self._learning_rate = learning_rate
self._decay = decay
self._momentum = momentum
self._epsilon = epsilon
self._centered = centered
# Tensors for learning rate and momentum. Created in _prepare.
self._learning_rate_tensor = None
self._decay_tensor = None
self._momentum_tensor = None
self._epsilon_tensor = None
def _create_slots(self, var_list):
for v in var_list:
if v.get_shape().is_fully_defined():
init_rms = init_ops.ones_initializer(dtype=v.dtype.base_dtype)
else:
init_rms = array_ops.ones_like(v)
self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(),
v.dtype.base_dtype, "rms",
self._name)
if self._centered:
self._zeros_slot(v, "mg", self._name)
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
lr = self._call_if_callable(self._learning_rate)
decay = self._call_if_callable(self._decay)
momentum = self._call_if_callable(self._momentum)
epsilon = self._call_if_callable(self._epsilon)
self._learning_rate_tensor = ops.convert_to_tensor(lr, name="learning_rate")
self._decay_tensor = ops.convert_to_tensor(decay, name="decay")
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
self._epsilon_tensor = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.apply_centered_rms_prop(
var,
mg,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
else:
return training_ops.apply_rms_prop(
var,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking) |
almcc/cinder-data | example/server/app/settings.py | Python | mit | 4,215 | 0.001423 | """
"""
Django settings for app project.

Generated by 'django-admin startproject' using Django 1.9.5.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_^@_*(#&=lo-gt=1d)_c--27h7#hlqlt@(gteqt3$-awssiqr='

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'cars',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Host 'db' matches the docker-compose service name for postgres.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'postgres',
        'USER': 'postgres',
        'HOST': 'db',
        'PORT': 5432,
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'PAGE_SIZE': 10,
    'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
    'DEFAULT_PAGINATION_CLASS':
        'rest_framework_json_api.pagination.PageNumberPagination',
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework_json_api.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser'
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework_json_api.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
}
chirilo/mozillians | mozillians/users/api/v1.py | Python | bsd-3-clause | 7,048 | 0.001419 | from urllib2 import unquote
from urlparse import urljoin
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import Q
from funfactory import utils
from tastypie import fields, http
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from mozillians.api.v1.authenticators import AppAuthentication
from mozillians.api.v1.paginator import Paginator
from mozillians.api.v1.resources import (ClientCacheResourceMixIn,
GraphiteMixIn)
from mozillians.users.models import GroupMembership, UserProfile
class UserResource(ClientCacheResourceMixIn, GraphiteMixIn, ModelResource):
    """Read-only Tastypie resource exposing Mozillians user profiles."""

    email = fields.CharField(attribute='user__email', null=True, readonly=True)
    username = fields.CharField(attribute='user__username', null=True, readonly=True)
    vouched_by = fields.IntegerField(attribute='vouched_by__id',
                                     null=True, readonly=True)
    date_vouched = fields.DateTimeField(attribute='date_vouched', null=True,
                                        readonly=True)
    groups = fields.CharField()
    skills = fields.CharField()
    languages = fields.CharField()
    url = fields.CharField()
    accounts = fields.CharField()
    city = fields.CharField(attribute='geo_city__name', null=True, readonly=True, default='')
    region = fields.CharField(attribute='geo_region__name', null=True, readonly=True, default='')
    country = fields.CharField(attribute='geo_country__code', null=True, readonly=True, default='')
    photo_thumbnail = fields.CharField()

    class Meta:
        queryset = UserProfile.objects.all()
        authentication = AppAuthentication()
        authorization = ReadOnlyAuthorization()
        serializer = Serializer(formats=['json', 'jsonp'])
        paginator_class = Paginator
        cache_control = {'max-age': 0}
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'users'
        restrict_fields = False
        # Fields still exposed when the caller is a "restricted" app.
        restricted_fields = ['email', 'is_vouched']
        fields = ['id', 'full_name', 'is_vouched', 'vouched_by',
                  'date_vouched', 'groups', 'skills',
                  'bio', 'photo', 'ircname', 'country', 'region', 'city',
                  'date_mozillian', 'timezone', 'email', 'allows_mozilla_sites',
                  'allows_community_sites']

    def build_filters(self, filters=None):
        """Translate supported query-string parameters into ORM Q filters.

        Unknown parameters are ignored; recognized ones are returned as a
        dict of named Q objects combined later by apply_filters().
        """
        # Guard against a None default: the original iterated it directly.
        filters = filters or {}
        database_filters = {}
        valid_filters = [f for f in filters if f in
                         ['email', 'country', 'region', 'city', 'ircname',
                          'username', 'groups', 'skills',
                          'is_vouched', 'name', 'accounts']]

        def getvalue(key):
            # Values arrive URL-encoded; normalize to lowercase for matching.
            return unquote(filters[key].lower())

        if 'accounts' in valid_filters:
            database_filters['accounts'] = Q(
                externalaccount__identifier__icontains=getvalue('accounts'))

        if 'email' in valid_filters:
            database_filters['email'] = Q(
                user__email__iexact=getvalue('email'))

        if 'username' in valid_filters:
            database_filters['username'] = Q(
                user__username__iexact=getvalue('username'))

        if 'name' in valid_filters:
            database_filters['name'] = Q(full_name__iexact=getvalue('name'))

        if 'is_vouched' in valid_filters:
            value = getvalue('is_vouched')
            if value == 'true':
                database_filters['is_vouched'] = Q(is_vouched=True)
            elif value == 'false':
                database_filters['is_vouched'] = Q(is_vouched=False)

        if 'country' in valid_filters:
            database_filters['country'] = Q(geo_country__code=getvalue('country'))

        if 'region' in valid_filters:
            database_filters['region'] = Q(geo_region__name=getvalue('region'))

        if 'city' in valid_filters:
            database_filters['city'] = Q(geo_city__name=getvalue('city'))

        if 'ircname' in valid_filters:
            database_filters['ircname'] = Q(ircname__iexact=getvalue('ircname'))

        if 'groups' in valid_filters:
            # Only count full memberships, not pending requests.
            kwargs = {
                'groups__name__in': getvalue('groups').split(','),
                'groupmembership__status': GroupMembership.MEMBER
            }
            database_filters['groups'] = Q(**kwargs)

        if 'skills' in valid_filters:
            database_filters['skills'] = Q(skills__name__in=getvalue('skills').split(','))

        return database_filters

    def dehydrate(self, bundle):
        """Reduce the payload to restricted_fields for restricted callers or
        users who opted out of sharing with Mozilla sites."""
        if (bundle.request.GET.get('restricted', False) or not
                bundle.data['allows_mozilla_sites']):
            data = {}
            for key in self._meta.restricted_fields:
                data[key] = bundle.data[key]
            bundle = Bundle(obj=bundle.obj, data=data, request=bundle.request)
        return bundle

    def dehydrate_accounts(self, bundle):
        """Serialize external accounts as identifier/type dicts."""
        accounts = [{'identifier': a.identifier, 'type': a.type}
                    for a in bundle.obj.externalaccount_set.all()]
        return accounts

    def dehydrate_groups(self, bundle):
        """Group names the user belongs to."""
        groups = bundle.obj.groups.values_list('name', flat=True)
        return list(groups)

    def dehydrate_skills(self, bundle):
        """Skill names listed on the profile."""
        skills = bundle.obj.skills.values_list('name', flat=True)
        return list(skills)

    def dehydrate_languages(self, bundle):
        """Language codes spoken by the user."""
        languages = bundle.obj.languages.values_list('code', flat=True)
        return list(languages)

    def dehydrate_photo(self, bundle):
        """Absolute URL of the profile photo, or '' when unset."""
        if bundle.obj.photo:
            return urljoin(settings.SITE_URL, bundle.obj.photo.url)
        return ''

    def dehydrate_photo_thumbnail(self, bundle):
        """Absolute URL of the thumbnail (falls back to a default image)."""
        return urljoin(settings.SITE_URL, bundle.obj.get_photo_url())

    def dehydrate_url(self, bundle):
        """Absolute URL of the user's phonebook profile page."""
        url = reverse('phonebook:profile_view',
                      args=[bundle.obj.user.username])
        return utils.absolutify(url)

    def get_detail(self, request, **kwargs):
        # Restricted apps may not fetch arbitrary single profiles.
        if request.GET.get('restricted', False):
            raise ImmediateHttpResponse(response=http.HttpForbidden())
        return super(UserResource, self).get_detail(request, **kwargs)

    def apply_filters(self, request, applicable_filters):
        """AND together all built filters; restricted apps must filter by
        email only and only see users who allow community sites."""
        if (request.GET.get('restricted', False) and
                'email' not in applicable_filters and len(applicable_filters) != 1):
            raise ImmediateHttpResponse(response=http.HttpForbidden())

        mega_filter = Q()
        for db_filter in applicable_filters.values():
            mega_filter &= db_filter

        if request.GET.get('restricted', False):
            mega_filter &= Q(allows_community_sites=True)

        return UserProfile.objects.complete().filter(mega_filter).distinct().order_by('id')
|
Karuji/GMProjectImporter | PythonToGMX.py | Python | mit | 493 | 0.022312 | import xml.etree.ElementTree as ET
import | os
from Element import Element
class PythonToGMX(object):
    """Convert a Python-side Element tree back into a GMX (GameMaker) XML tree.

    NOTE(review): tag and text values are run through eval(), which implies
    they are stored as Python string literals (e.g. "'room'") by the
    exporter -- confirm against Element, and never feed untrusted project
    files through this class.
    """

    def __init__(self, pythonTree):
        # Root of the Python-side tree to convert.
        self.pythonroot = pythonTree
        # Root XML element, built from the evaluated root tag.
        self.root = ET.Element(eval(self.pythonroot.tag))
        for child in self.pythonroot.children:
            self.process(child, self.root)

    def process(self, element, parent):
        """Recursively append *element* and its subtree under *parent*."""
        elem = ET.SubElement(parent, eval(element.tag), element.attrib)
        elem.text = eval(element.text)
        for child in element.children:
            self.process(child, elem)
|
tanglu-org/laniakea | src/admin/lkadmin/cli.py | Python | gpl-3.0 | 2,762 | 0.000362 | # Copyright (C) 2018 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Gen | eral Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software i | s distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import sys
from argparse import ArgumentParser, HelpFormatter
__mainfile = None
def check_print_version(options):
    """Print Laniakea's version and exit, if --version was requested."""
    if not options.show_version:
        return
    from laniakea import __version__
    print(__version__)
    sys.exit(0)
class CustomArgparseFormatter(HelpFormatter):
    """Argparse help formatter that supports pre-formatted help strings.

    Help text prefixed with 'CF|' is split on its embedded newlines
    verbatim instead of being re-wrapped by argparse.
    """

    def _split_lines(self, text, width):
        # Fix: removed a leftover debug print(text) that spammed stdout on
        # every --help invocation.
        if text.startswith('CF|'):
            return text[3:].splitlines()
        return HelpFormatter._split_lines(self, text, width)
def create_parser(formatter_class=None):
    '''Create the lkadmin CLI argument parser.

    Builds the top-level parser with the generic --verbose/--version flags,
    then lets each lkadmin module register its own subcommands on the
    shared subparser set.  Defaults to CustomArgparseFormatter so modules
    can emit pre-formatted ('CF|') help text.
    '''
    if not formatter_class:
        formatter_class = CustomArgparseFormatter

    parser = ArgumentParser(description='Administer a Laniakea instance', formatter_class=formatter_class)
    subparsers = parser.add_subparsers(dest='sp_name', title='subcommands')

    # generic arguments
    parser.add_argument('--verbose', action='store_true', dest='verbose',
                        help='Enable debug messages.')
    parser.add_argument('--version', action='store_true', dest='show_version',
                        help='Display the version of Laniakea itself.')

    # Modules are imported lazily here so `lkadmin --version` stays cheap
    # and a broken module only fails when the parser is actually built.
    import lkadmin.core as core
    core.add_cli_parser(subparsers)

    import lkadmin.job as job
    job.add_cli_parser(subparsers)

    import lkadmin.synchrotron as synchrotron
    synchrotron.add_cli_parser(subparsers)

    import lkadmin.spears as spears
    spears.add_cli_parser(subparsers)

    import lkadmin.ariadne as ariadne
    ariadne.add_cli_parser(subparsers)

    import lkadmin.isotope as isotope
    isotope.add_cli_parser(subparsers)

    import lkadmin.planter as planter
    planter.add_cli_parser(subparsers)

    return parser
def run(mainfile, args):
    """CLI entry point: parse *args* and dispatch to the chosen subcommand.

    Args:
        mainfile: path of the executable entry script, stored in the
            module-global __mainfile for subcommands that need it.
        args: command-line arguments, excluding the program name.
    """
    if not args:
        print('Need a subcommand to proceed!')
        sys.exit(1)

    global __mainfile
    __mainfile = mainfile

    parser = create_parser()
    args = parser.parse_args(args)

    check_print_version(args)
    # Each subcommand registers its handler via set_defaults(func=...);
    # presumably always set by the add_cli_parser() calls -- TODO confirm.
    args.func(args)
|
google-research/google-research | es_maml/zero_order/adaptation_optimizers.py | Python | apache-2.0 | 6,752 | 0.007701 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS | ,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of adapters for Zero-Order ES-MAML."""
# pylint: disable=g-doc-return-or-yield,missing-docstring,g-doc-args,line-too-long,invalid-name,pointless-string-statement, super-init-not-called, unused-argument
from __future__ import absolute_import
from __future__ import division
from __future__ import print | _function
import abc
from absl import logging
import numpy as np
from es_maml.util.dpp.dpp import DPP
from es_maml.util.log_util import AlgorithmState
class Adaptation(abc.ABC):
  """Abstract interface for ES-MAML task-adaptation strategies.

  Fix: the original used the Python 2 idiom ``__metaclass__ = abc.ABCMeta``,
  which is silently ignored in Python 3, so instantiation of incomplete
  subclasses was never blocked.  Deriving from ``abc.ABC`` restores the
  intended abstract-class behavior.
  """

  @abc.abstractmethod
  def adaptation_step(self, policy_params, adaptation_params, task, **kwargs):
    """Adapt *policy_params* to *task* and return the adapted parameters."""
    raise NotImplementedError("Abstract method")

  @abc.abstractmethod
  def get_total_num_parameters(self):
    """Return the number of trainable parameters owned by this adapter."""
    raise NotImplementedError("Abstract method")
class BlackboxAdaptation(Adaptation):
  """Task adaptation via one step of a generic blackbox (ES-style) optimizer.

  Samples random perturbations of the policy parameters, evaluates the
  task value at each perturbed point, and delegates the parameter update
  to the provided blackbox optimizer.
  """

  def __init__(self, num_queries, adaptation_blackbox_optimizer,
               adaptation_precision_parameter, **kwargs):
    # Number of perturbed evaluations per adaptation step.
    self.num_queries = num_queries
    self.adaptation_blackbox_optimizer = adaptation_blackbox_optimizer
    # Std-dev scale applied to the sampled perturbations.
    self.adaptation_precision_parameter = adaptation_precision_parameter
    # 'Gaussian' for i.i.d. normal perturbations; 'DPP' to pick a diverse
    # subset via determinantal point process sampling.
    self.perturbation_type = kwargs["perturbation_type"]
    # Oversampling factor used when perturbation_type == 'DPP'.
    self.dpp_rho = kwargs["dpp_rho"]

  def adaptation_step(self, policy_params, adaptation_params, task_value_fn,
                      **kwargs):
    """Return policy parameters adapted to the task given by task_value_fn."""
    dim = policy_params.shape[0]
    if self.perturbation_type == "Gaussian":
      perturbations = np.random.normal(
          size=(self.num_queries, dim)) * self.adaptation_precision_parameter
    elif self.perturbation_type == "DPP":
      # Oversample by dpp_rho, then keep num_queries diverse perturbations
      # chosen by DPP sampling under an RBF kernel.
      perturbations = np.random.normal(
          size=(self.dpp_rho * self.num_queries,
                dim)) * self.adaptation_precision_parameter
      dpp = DPP(perturbations)
      dpp.compute_kernel(kernel_type="rbf")
      idx = dpp.sample_k(self.num_queries)
      perturbations = perturbations[idx]
    # NOTE(review): es_hess is accumulated below but never used or returned
    # in this method -- apparently dead computation; confirm before removing.
    es_hess = np.zeros((dim, dim))
    # Baseline value at the unperturbed parameters.
    pivot = task_value_fn(policy_params, **kwargs)
    function_values = []
    all_values = [pivot]
    for p in perturbations:
      temp_task_value = task_value_fn(policy_params + p, **kwargs)
      function_values.append(temp_task_value)
      all_values.append(temp_task_value)
      es_hess += temp_task_value * np.outer(p, p.T) / np.square(
          self.adaptation_precision_parameter)
    function_values = np.array(function_values)
    avg_fval = np.mean(function_values)
    for p in perturbations:
      es_hess -= avg_fval * np.outer(p, p.T) / np.square(
          self.adaptation_precision_parameter)
    es_hess /= len(perturbations)
    if np.std(all_values) == 0:  # in case function vals all equal
      # Flat landscape: no gradient signal, keep the parameters unchanged.
      return policy_params
    else:
      out = self.adaptation_blackbox_optimizer.run_step(
          perturbations=perturbations,
          function_values=function_values,
          current_input=policy_params,
          current_value=pivot)
      return out

  def get_total_num_parameters(self):
    # This adapter owns no trainable parameters of its own.
    return 0

  def get_initial(self):
    # Empty parameter vector, matching get_total_num_parameters() == 0.
    return np.array([])
class HillClimbAdaptation(Adaptation):
  """Task adaptation via random-search hill climbing.

  Repeatedly perturbs the best parameters found so far and keeps a
  perturbation only if it improves the (possibly rollout-averaged) task
  value.  All progress lives in an AlgorithmState so an interrupted
  adaptation can be resumed mid-iteration.
  """

  def __init__(self, num_queries, adaptation_precision_parameter, **kwargs):
    # Number of hill-climbing iterations.
    self.num_queries = num_queries
    # Std-dev scale of the random perturbations.
    self.adaptation_precision_parameter = adaptation_precision_parameter
    # Rollouts aggregated per candidate evaluation.
    self.parallel_evaluations = kwargs.get("parallel_evaluations", 1)
    # 'average': one perturbation scored by averaged rollouts;
    # 'batch': several candidate perturbations, keep the best-scoring one.
    self.parallel_alg = kwargs.get("parallel_alg", "batch")

  def adaptation_step(self,
                      policy_params,
                      adaptation_params,
                      task_value_fn,
                      loader=False,
                      **kwargs):
    """Return parameters adapted by hill climbing from policy_params.

    kwargs may carry an 'algorithm_state' (AlgorithmState) to resume a
    partially completed adaptation; remaining kwargs are forwarded to
    task_value_fn.  When *loader* is True, per-iteration objectives are
    logged.
    """
    parallel_alg = kwargs.get("parallel_alg", self.parallel_alg)
    parallel_evaluations = kwargs.get("parallel_evaluations",
                                      self.parallel_evaluations)
    dim = policy_params.shape[0]
    state = kwargs.pop("algorithm_state", AlgorithmState())
    if not state.meta_eval_passed:
      # Establish the baseline ('pivot') value of the unadapted parameters.
      while len(state.single_values) < parallel_evaluations:
        state.single_values.append(task_value_fn(policy_params, **kwargs))
      state.best_params_so_far = policy_params
      state.pivot = np.average(state.single_values)
      state.meta_eval_passed = True
      if loader:
        logging.info("Average Objective of HillClimbing Iteration %d is: %f", 0,
                     np.average(state.single_values))
      state.query_index += 1
      state.single_values = []
    while state.query_index <= self.num_queries:
      if parallel_alg == "average":
        # One perturbation, scored by the mean over repeated rollouts.
        state.p = np.random.normal(
            size=(dim)) * self.adaptation_precision_parameter
        while len(state.single_values) < parallel_evaluations:
          single_value = task_value_fn(state.best_params_so_far + state.p,
                                       **kwargs)
          state.single_values.append(single_value)
        temp_task_value = np.average(state.single_values)
        state.single_values = []
        if temp_task_value > state.pivot:
          state.pivot = temp_task_value
          state.best_params_so_far = state.best_params_so_far + state.p
      elif parallel_alg == "batch":
        # Several independent perturbations; adopt the best if it improves.
        while len(state.single_values) < parallel_evaluations:
          p = np.random.normal(size=(dim)) * self.adaptation_precision_parameter
          state.temp_perturbations.append(p)
          single_value = task_value_fn(state.best_params_so_far + p, **kwargs)
          state.single_values.append(single_value)
        best_index = np.argmax(state.single_values)
        if state.single_values[best_index] > state.pivot:
          state.pivot = state.single_values[best_index]
          state.best_params_so_far = state.best_params_so_far + state.temp_perturbations[
              best_index]
        state.temp_perturbations = []
      # NOTE(review): in the 'average' branch single_values is cleared above,
      # so this log line can only fire for 'batch' -- confirm intended.
      if loader and len(state.single_values) == parallel_evaluations:
        logging.info("Average Objective of HillClimbing Iteration %d is: %f",
                     state.query_index, np.average(state.single_values))
      state.query_index += 1
      state.single_values = []
    return state.best_params_so_far

  def get_total_num_parameters(self):
    # This adapter owns no trainable parameters of its own.
    return 0

  def get_initial(self):
    # Empty parameter vector, matching get_total_num_parameters() == 0.
    return np.array([])
|
agraubert/Beymax | beymax/bots/game_systems/story.py | Python | mit | 13,971 | 0.001432 | import re
from string import printable
import os
import queue
import subprocess
import discord
import asyncio
import threading
import time
from ...utils import DBView
from .base import GameSystem, GameError, JoinLeaveProhibited, GameEndException
from math import ceil, floor
# Printable ASCII characters as a set (O(1) membership checks); not
# referenced in this chunk -- presumably used further down the file.
printable_set = set(printable)
# The same printable alphabet, encoded as bytes.
printable_bytes = printable.encode()
def avg(n):
    """Return the arithmetic mean of the values in *n*."""
    total, count = sum(n), len(n)
    return total / count
class BackgroundGameExit(GameError):
pass
# Pagination prompts printed by the interpreter; seeing one means a blank
# line must be sent to fetch the rest of the output.
more_patterns = [
    re.compile(r'\*+(MORE|more)\*+'), # Match ****MORE****
    re.compile(r'.*\.\.\.+\s*$') # Match ....
]
# Status-line fragments that carry the current score (captured in group 1).
score_patterns = [
    re.compile(r'([0-9]+)/[0-9]+'),
    re.compile(r'Score:[ ]*([-]*[0-9]+)'),
    re.compile(r'([0-9]+):[0-9]+ [AaPp][Mm]'),
    re.compile(r'Your score is (\d+)')
]
# Interpreter chrome (move counters, prompts, warnings) stripped from the
# text before it is relayed to players.
clean_patterns = [
    # re.compile(r'[0-9]+/[0-9+]'),
    # re.compile(r'Score:[ ]*[-]*[0-9]+'),
    re.compile(r'Moves:[ ]*[0-9]+'),
    re.compile(r'Turns:[ ]*[0-9]+'),
    # re.compile(r'[0-9]+:[0-9]+ [AaPp][Mm]'),
    re.compile(r' [0-9]+ \.'),
    re.compile(r'^([>.][>.\s]*)'),
    re.compile(r'Warning: @[\w_]+ called .*? \(PC = \w+\) \(will ignore further occurrences\)')
] + more_patterns + score_patterns
def multimatch(text, patterns):
    """Return the first regex match of *text* against *patterns*, else False."""
    return next(
        (m for m in (p.search(text) for p in patterns) if m),
        False,
    )
class Player(object):
    """Wrapper around a dfrotz (z-machine interpreter) subprocess.

    The child process is wired to a pair of OS pipes; a daemon thread
    continuously drains its stdout into a queue so reads never block the
    caller.  readchunk() then assembles, de-paginates and cleans the
    interpreter's output for display.
    """

    def __init__(self, frotz, game):
        # Pipes wired to the child's stdin/stdout.
        (self.stdinRead, self.stdinWrite) = os.pipe()
        (self.stdoutRead, self.stdoutWrite) = os.pipe()
        # Chunks of interpreter output, produced by the reader thread.
        self.buffer = queue.Queue()
        self.remainder = b''
        # Last score parsed from the interpreter's status line.
        self.score = 0
        self.closed = False
        # NOTE(review): shell=True with interpolated paths -- acceptable for
        # trusted local config, but game names must not be user-controlled.
        self.proc = subprocess.Popen(
            '%s games/%s.z5' % (os.path.abspath(frotz), game),
            universal_newlines=False,
            shell=True,
            stdout=self.stdoutWrite,
            stdin=self.stdinRead
        )
        self._reader = threading.Thread(
            target=Player.reader,
            args=(self,),
            daemon=True,
        )
        self._reader.start()

    def write(self, text):
        """Send one line of input to the interpreter (newline appended)."""
        if not text.endswith('\n'):
            text+='\n'
        os.write(self.stdinWrite, text.encode())

    def reader(self):
        """Daemon-thread loop: drain interpreter stdout into the queue."""
        while not self.closed:
            try:
                line = self.readline()
                self.buffer.put(line)
            except OSError:
                # Pipe closed (quit() ran): terminate the thread.
                self.closed = True

    def readline(self):
        """Read up to 256 bytes of interpreter output as text.

        NOTE(review): os.read may split a multi-byte sequence, in which
        case .decode() would raise -- presumably game output is ASCII;
        confirm.
        """
        return os.read(self.stdoutRead, 256).decode()

    def readchunk(self, clean=True):
        """Collect the interpreter's pending output into one string.

        Waits for output to settle, auto-advances pagination prompts, and
        (when *clean* is True) extracts the score and strips interpreter
        chrome.  Raises BackgroundGameExit if the process has died or
        produced nothing within the timeout.
        """
        if self.proc.returncode is not None:
            raise BackgroundGameExit(
                "Player exited with returncode %d" % self.proc.returncode
            )
        try:
            content = [self.buffer.get(timeout=5)]
        except queue.Empty:
            raise BackgroundGameExit(
                "No content in buffer"
            )
        # Keep draining until the interpreter stays quiet for a beat.
        time.sleep(1)
        while not self.buffer.empty():
            try:
                while not self.buffer.empty():
                    content.append(self.buffer.get(timeout=1))
            except queue.Empty:
                pass
            time.sleep(1)
        #now merge up lines
        content = [line.rstrip() for line in ''.join(content).split('\n')]
        # clean metadata
        if multimatch(content[-1], more_patterns):
            # Pagination prompt: send a blank line and append the rest.
            self.write('\n')
            time.sleep(0.5)
            content += self.readchunk(False)
        if not clean:
            return content
        for i in range(len(content)):
            line = content[i]
            result = multimatch(line, score_patterns)
            if result:
                self.score = int(result.group(1))
            # Repeatedly strip interpreter chrome until nothing matches.
            result = multimatch(line, clean_patterns)
            while result:
                line = result.re.sub('', line)
                result = multimatch(line, clean_patterns)
            content[i] = line
        return '\n'.join(line for line in content if len(line.rstrip()))

    def quit(self):
        """Ask the game to quit, then tear down the process and pipes."""
        try:
            self.write('quit')
            self.write('y')
            self.closed = True
            try:
                self.proc.wait(1)
            except:
                self.proc.kill()
            os.close(self.stdinRead)
            os.close(self.stdinWrite)
            os.close(self.stdoutRead)
            os.close(self.stdoutWrite)
        except OSError:
            # Pipes already gone: nothing left to clean up.
            pass
class StorySystem(GameSystem):
name = 'Interactive Story'
instructions = (
'Here are the controls for the Interactive Story system:\n'
'`$` : Simply type `$` to enter a blank line to the game\n'
'That can be useful if the game is stuck or '
'if it ignored your last input\n'
'Some menus may ask you to type a space to continue.\n'
'`quit` : Quits the game in progress\n'
'This is also how you end the game if you finish it\n'
'`score` : View your score\n'
'Some games may have their own commands in addition to these'
' ones that I handle personally\n'
'Lastly, if you want to make a comment in the channel'
' without me forwarding your message to the game, '
'simply start the message with `$! `, for example:'
' `$! Any ideas on how to unlock this door?`'
)
    def __init__(self, bot, game):
        """Start a new interactive-story session running *game* under frotz."""
        super().__init__(bot, game)
        # Backing z-machine process; interpreter binary comes from the
        # 'zmachine' config option, defaulting to dfrotz.
        self.player = Player(bot.config_get('zmachine', default='dfrotz'), game)
        # Per-session state (host, players, transcript, score, ...).
        self.state = {}
@classmethod
def games(cls):
return (
[f[:-3] for f in os.listdir('games') if f.endswith('.z5')]
if os.path.isdir('games')
else []
)
@classmethod
async def restore(cls, bot, game):
# Attempt to restore the game state
# Return StorySystem if successful
# Return None if unable to restore state
try:
async with DBView(story={'host': None, 'game': ''}) as db:
if db['story']['host'] is None:
raise GameError("No primary player defined in state")
system = StorySystem(bot, db['story']['game'])
system.state.update(db['story'])
if 'transcript' in db['story']:
for msg in db['story']['transcript']:
print("Replaying", msg)
system.player.write(msg)
await asyncio.sleep(0.5)
print(system.player.readchunk())
if system.player.proc.returncode is not None:
break
return system
except Exception as e:
raise GameEn | dException("Unable to restore") from e
@property
def played(self):
return (
'transcript' in self.state and
len(self.state['transcript'])
)
def is_playing(self, user):
return user.id in self.state['players']
async def on_input(self, user, channel, message):
try:
| content = message.content.strip().lower()
if content == 'save':
await self.bot.send_message(
self.bot.fetch_channel('games'),
"Unfortunately, saved games are not supported by "
"the story system at this time."
)
elif content == 'score':
self.player.write('score')
self.player.readchunk()
self.state['score'] = self.player.score
await self.bot.send_message(
self.bot.fetch_channel('games'),
'Your score is %d' % self.player.score
)
if self.player.proc.returncode is not None:
|
xavitorne/mountain-rehub | config.py | Python | mit | 490 | 0.012245 | im | port os
# Base directory of the application (the directory containing this file).
basedir = os.path.abspath(os.path.dirname(__file__))

# if you have postgres use this uri:
SQLALCHEMY_DATABASE_URI = "postgresql://rehub:rehub@localhost/rehub"
# if you want to use sqlite, use this one:
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'rehub.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')

# SECURITY WARNING: debug mode and the hard-coded credentials/secret below
# are for local development only -- override them before deploying.
DEBUG = True
DATABASE = os.path.join(basedir, 'rehub.db')
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = '$cippalippa!'
CSRF_ENABLED = True
glarue-ol/sensorlab-observer | observer/m_sensorlab/sensorlab.py | Python | mpl-2.0 | 20,640 | 0.003004 | # -*- coding: utf-8 -*-
"""
`author` : Quentin Lampin <quentin.lampin@orange.com>
`license` : MPL
`date` : 2015/10/05
SensorLab Encapsulation Library
--------------------------
This module provides utility functions to generate SensorLab formatted event reports.
`Requires python 3.2 or above`
"""
import struct
from .frame_format import *
# Well-known SensorLab property IDs, referenced by property declaration
# and update payloads.
FIRMWARE_PROPERTY_ID = 0x00
STATE_PROPERTY_ID = 0x01
LATITUDE_PROPERTY_ID = 0x02
LONGITUDE_PROPERTY_ID = 0x03
ALTITUDE_PROPERTY_ID = 0x04
SHUNT_VOLTAGE_PROPERTY_ID = 0x05
BUS_VOLTAGE_PROPERTY_ID = 0x06
CURRENT_PROPERTY_ID = 0x07
POWER_PROPERTY_ID = 0x08
TIMESTAMP_PROPERTY_ID = 0x09
def sensorlab_header(node_id, event_id):
    """Build the 5-byte SensorLab record header.

    The header opens every SensorLab PCAP record and identifies which
    node and event the record describes.  Layout (little-endian):
    a 4-byte node ID followed by a 1-byte event ID (one of the
    EVENT_* constants: node/entity/link/frame add, update, remove,
    produce, tx, rx, consume).

    Args:
        node_id (unsigned int): ID of the node the report relates to.
        event_id (unsigned int): ID of the reported event.

    Returns:
        bytes: the packed header.
    """
    return struct.pack("<IB", node_id, event_id)
def node_add_header(properties_count):
    """Build a Node Add header (event EVENT_NODE_ADD).

    The header is a single byte giving the number of
    PropertyDeclarationPayload elements that must follow it.

    Args:
        properties_count (unsigned int): number of node properties (0-255).

    Returns:
        bytes: the packed header.
    """
    return struct.pack("<B", properties_count)
def node_property_add_header(properties_count):
    """Build a Node Property Add header (event EVENT_NODE_PROPERTY_ADD).

    The header is a single byte giving the number of
    PropertyDeclarationPayload elements that must follow it.

    Args:
        properties_count (unsigned int): number of declared properties (0-255).

    Returns:
        bytes: the packed header.
    """
    return struct.pack("<B", properties_count)
def node_property_update_header(properties_count):
    """Build a Node Property Update header (event EVENT_NODE_PROPERTY_UPDATE).

    The header is a single byte giving the number of PropertyUpdatePayload
    elements that must follow it.

    Args:
        properties_count (unsigned int): number of updated properties (0-255).

    Returns:
        bytes: the packed header.
    """
    return struct.pack("<B", properties_count)
def entity_add_header(entity_id, entity_name_length, properties_count, entity_name):
    """Build an Entity Add header (event EVENT_ENTITY_ADD).

    Layout: entity ID (1 byte), name length (1 byte), property count
    (1 byte), then the ASCII-encoded entity name.  When properties_count
    is non-zero, that many PropertyDeclarationPayload elements follow.

    Args:
        entity_id (unsigned char): ID of the new entity.
        entity_name_length (unsigned int): length of the entity's name.
        properties_count (unsigned int): number of entity properties (0-255).
        entity_name (str): name of the entity, ASCII encoded.

    Returns:
        bytes: the packed header.
    """
    fixed_part = struct.pack("<BBB", entity_id, entity_name_length, properties_count)
    return fixed_part + entity_name.encode('ascii')
def entity_property_add_header(entity_id, properties_count):
    """Build an Entity Property Add header (event EVENT_ENTITY_PROPERTY_ADD).

    Layout: entity ID (1 byte) then property count (1 byte).  When
    properties_count is non-zero, that many PropertyDeclarationPayload
    elements must follow.

    Args:
        entity_id (unsigned char): entity ID.
        properties_count (unsigned int): number of declared properties (0-255).

    Returns:
        bytes: the packed header.
    """
    header = struct.pack("<BB", entity_id, properties_count)
    return header
def entity_property_update_header(entity_id, properties_count):
    """
    Builds a Entity Property Update Header.

    An entity property is updated using the Entity Property Update Header
    (event ID: EVENT_ENTITY_PROPERTY_UPDATE).
    If propertiesCount > 0, the EntityPropertyUpdate header must be followed
    by the corresponding number of PropertyUpdatePayload.

    The EntityPropertyUpdate header structure is:

        - entityID          :    1-byte. ID of the entity.
        - propertiesCount   :    1-byte. Number of properties describing the entity node_state.

    Args:
        entity_id (unsigned char): entity ID.
        properties_count (unsigned char): number of updated properties (0-255).

    Returns:
        bytes: the Entity Property Update header.
    """
    header = struct.pack("<BB", entity_id, properties_count)
    return header
def entity_remove_header(entity_id):
    """Build an Entity Remove header (event EVENT_ENTITY_REMOVE).

    The header is a single byte: the ID of the entity being removed.

    Args:
        entity_id (unsigned char): ID of the entity to remove.

    Returns:
        bytes: the packed header.
    """
    return struct.pack("<B", entity_id)
def property_declaration_payload(property_id, unit_prefix, unit, data_type,
property_name_length, property_value_length,
property_name, property_value):
"""
Builds a Property Declaration Payload.
The property declaration payload declares a property.
The PropertyDeclarationPayload structure is:
- property_id : 1 byte field. Property ID.
- unit_prefix : 1 byte field. Unit prefix.
- unit |
Akuli/editor | porcupine/plugins/run/terminal.py | Python | mit | 5,179 | 0.001159 | """Run commands in a new terminal window."""
from __future__ import annotations
import logging
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from tkinter import messagebox
from porcupine import get_main_window, utils
# Module-level logger for this plugin.
log = logging.getLogger(__name__)
# Directory containing this file; the helper run scripts ship next to it.
_this_dir = Path(__file__).absolute().parent
# Choose the platform-specific helper that actually runs the user's command.
if sys.platform == "win32":
    run_script = _this_dir / "windows_run.py"
else:
    run_script = _this_dir / "bash_run.sh"
# getting this to work in powershell turned out to be hard :(
def _run_in_windows_cmd(command: str, cwd: Path) -> None:
    """Run *command* in directory *cwd* using the Windows command prompt."""
    log.debug("using Windows command prompt")
    args = [str(utils.python_executable), str(run_script), str(cwd), command]
    if not utils.running_pythonw:
        # When Porcupine was started from a console, Windows would reuse that
        # console for python. "start" is the only found way to open a brand
        # new command prompt window, and it is a cmd builtin, hence "cmd /c".
        args = ["cmd", "/c", "start", *args]
    subprocess.Popen(args)
def _run_in_macos_terminal_app(command: str, cwd: Path) -> None:
    """Run *command* in *cwd* in a new macOS Terminal.app window.

    Writes a self-deleting bash wrapper script to a temp file and asks
    Terminal.app to open it.
    """
    log.debug("using MacOS terminal.app")
    assert shutil.which("bash") is not None
    with tempfile.NamedTemporaryFile("w", delete=False, prefix="porcupine-run-") as file:
        print("#!/usr/bin/env bash", file=file)
        print("rm", shlex.quote(file.name), file=file) # runs even if command is interrupted
        print(
            shlex.quote(str(run_script)),
            "--dont-wait",
            shlex.quote(str(cwd)),
            shlex.quote(command),
            file=file,
        )
    # Make the wrapper executable so Terminal.app can run it directly.
    os.chmod(file.name, 0o755)
    subprocess.Popen(["open", "-a", "Terminal.app", file.name])
    # the terminal might be still opening when we get here, that's why
    # the file deletes itself
    # right now the file removes itself before it runs the actual command so
    # it's removed even if the command is interrupted
def _run_in_x11_like_terminal(command: str, cwd: Path) -> None:
    """Run *command* in *cwd* in a new terminal window on X11-like systems.

    The terminal program is taken from $TERMINAL, falling back to the
    Debian-style x-terminal-emulator alternative. Shows an error dialog
    and returns if no terminal can be found.
    """
    terminal: str = os.environ.get("TERMINAL", "x-terminal-emulator")
    # to config what x-terminal-emulator is:
    #
    #   $ sudo update-alternatives --config x-terminal-emulator
    #
    # TODO: document this
    if terminal == "x-terminal-emulator":
        log.debug("using x-terminal-emulator")
        terminal_or_none = shutil.which(terminal)
        if terminal_or_none is None:
            log.warning("x-terminal-emulator not found")
            # Ellusion told me on irc that porcupine didn't find his
            # xfce4-terminal, and turned out he had no x-terminal-emulator...
            # i'm not sure why, but this should work
            #
            # well, turns out he's using arch, so... anything could be wrong
            terminal_or_none = shutil.which("xfce4-terminal")
        if terminal_or_none is None:
            # not much more can be done
            messagebox.showerror(
                "x-terminal-emulator not found",
                "Cannot find x-terminal-emulator in $PATH. "
                "Are you sure that you have a terminal installed?",
            )
            return
        terminal_path = Path(terminal_or_none)
        log.info(f"found a terminal: {terminal_path}")
        terminal_path = terminal_path.resolve()
        log.debug(f"absolute path to terminal: {terminal_path}")
        # sometimes x-terminal-emulator points to mate-terminal.wrapper,
        # it's a python script that changes some command line options
        # and runs mate-terminal but it breaks passing arguments with
        # the -e option for some reason
        if terminal_path.name == "mate-terminal.wrapper":
            log.info("using mate-terminal instead of mate-terminal.wrapper")
            terminal = "mate-terminal"
        else:
            terminal = str(terminal_path)
    else:
        log.debug(f"using $TERMINAL or fallback 'x-terminal-emulator', got {terminal!r}")
        if shutil.which(terminal) is None:
            messagebox.showerror(
                f"{terminal!r} not found",
                f"Cannot find {terminal!r} in $PATH. Try setting $TERMINAL to a path to a working"
                " terminal program.",
            )
            return
    real_command = [str(run_script), str(cwd), command]
    # BUG FIX: a previous version also did real_command.extend(map(str, command)),
    # which appended every *character* of the command string as a separate shell
    # word; the command is already passed as a single argument above.
    subprocess.Popen([terminal, "-e", " ".join(map(shlex.quote, real_command))])
# this figures out which terminal to use every time the user wants to run
# something but it doesn't really matter, this way the user can install a
# terminal while porcupine is running without restarting porcupine
def run_command(command: str, cwd: Path) -> None:
    """Open a new terminal window and run *command* in directory *cwd*.

    Dispatches to the platform-specific helper based on tk's windowing system.
    """
    log.info(f"Running {command} in {cwd}")
    # Any tkinter widget can be used to query the windowing system.
    windowingsystem = get_main_window().tk.call("tk", "windowingsystem")
    if windowingsystem == "win32":
        _run_in_windows_cmd(command, cwd)
        return
    if windowingsystem == "aqua" and not os.environ.get("TERMINAL", ""):
        _run_in_macos_terminal_app(command, cwd)
        return
    _run_in_x11_like_terminal(command, cwd)
|
cosven/pat_play | classics/test_quick_sort.py | Python | gpl-3.0 | 103 | 0 | def quick_sort(array):
p | ass
def test_quick_sort():
assert quick_sort([1 | , 3, 2]) == [1, 2, 3]
|
indico/indico | indico/modules/events/surveys/forms.py | Python | mit | 7,812 | 0.006272 | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import time
from flask import request
from markupsafe import escape
from wtforms.fields import BooleanField, HiddenField, IntegerField, SelectField, StringField, TextAreaField
from wtforms.validators import DataRequired, NumberRange, Optional
from indico.core.db import db
from indico.modules.events.surveys.models.surveys import Survey
from indico.util.i18n import _
from indico.util.placeholders import get_missing_placeholders, render_placeholder_info
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, FileField, IndicoDateTimeField
from indico.web.forms.validators import HiddenUnless, LinkedDateTime, UsedIf, ValidationError
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
class SurveyForm(IndicoForm):
    """Settings form for creating/editing an event survey."""

    # Fields that belong to the notification section; shown/hidden together.
    _notification_fields = ('notifications_enabled', 'notify_participants', 'start_notification_emails',
                            'new_submission_emails')
    title = StringField(_('Title'), [DataRequired()], description=_('The title of the survey'))
    introduction = TextAreaField(_('Introduction'), description=_('An introduction to be displayed before the survey'))
    anonymous = BooleanField(_('Anonymous submissions'), widget=SwitchWidget(),
                             description=_('User information will not be attached to submissions'))
    require_user = BooleanField(_('Only logged-in users'), [HiddenUnless('anonymous')], widget=SwitchWidget(),
                                description=_('Require users to be logged in for submitting the survey'))
    limit_submissions = BooleanField(_('Limit submissions'), widget=SwitchWidget(),
                                     description=_('Whether there is a submission cap'))
    submission_limit = IntegerField(_('Capacity'),
                                    [HiddenUnless('limit_submissions'), DataRequired(), NumberRange(min=1)],
                                    description=_('Maximum number of submissions accepted'))
    private = BooleanField(_('Private survey'), widget=SwitchWidget(),
                           description=_('Only selected people can answer the survey.'))
    partial_completion = BooleanField(_('Partial completion'), widget=SwitchWidget(),
                                      description=_('Allow to save answers without submitting the survey.'))
    notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
                                         description=_('Send email notifications for specific events related to the '
                                                       'survey.'))
    notify_participants = BooleanField(_('Participants'), [HiddenUnless('notifications_enabled', preserve_data=True)],
                                       widget=SwitchWidget(),
                                       description=_('Notify participants of the event when this survey starts.'))
    start_notification_emails = EmailListField(_('Start notification recipients'),
                                               [HiddenUnless('notifications_enabled', preserve_data=True)],
                                               description=_('Email addresses to notify about the start of the survey'))
    new_submission_emails = EmailListField(_('New submission notification recipients'),
                                           [HiddenUnless('notifications_enabled', preserve_data=True)],
                                           description=_('Email addresses to notify when a new submission is made'))
    def __init__(self, *args, **kwargs):
        # The owning event is injected by the caller; needed for validation.
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
    def validate_title(self, field):
        # Reject duplicate (case-insensitive) survey titles within the event,
        # ignoring the survey being edited and deleted surveys.
        query = (Survey.query.with_parent(self.event)
                 .filter(db.func.lower(Survey.title) == field.data.lower(),
                         Survey.title != field.object_data,
                         ~Survey.is_deleted))
        if query.count():
            raise ValidationError(_('There is already a survey named "{}" on this event').format(escape(field.data)))
    def post_validate(self):
        # Non-anonymous surveys always require a logged-in user.
        if not self.anonymous.data:
            self.require_user.data = True
class ScheduleSurveyForm(IndicoForm):
    """Form for scheduling when a survey opens and closes."""

    start_dt = IndicoDateTimeField(_('Start'), [UsedIf(lambda form, field: form.allow_reschedule_start), Optional()],
                                   default_time=time(0, 0),
                                   description=_('Moment when the survey will open for submissions'))
    end_dt = IndicoDateTimeField(_('End'), [Optional(), LinkedDateTime('start_dt')],
                                 default_time=time(23, 59),
                                 description=_('Moment when the survey will close'))
    resend_start_notification = BooleanField(_('Resend start notification'), widget=SwitchWidget(),
                                             description=_('Resend the survey start notification.'))
    def __init__(self, *args, **kwargs):
        # Survey and reschedule permission are injected by the caller.
        survey = kwargs.pop('survey')
        self.allow_reschedule_start = kwargs.pop('allow_reschedule_start')
        self.timezone = survey.event.timezone
        super().__init__(*args, **kwargs)
        # Only offer resending the notification if one was already sent and
        # the start can actually be rescheduled.
        if not survey.start_notification_sent or not self.allow_reschedule_start:
            del self.resend_start_notification
class SectionForm(IndicoForm):
    """Form for creating/editing a survey section."""

    display_as_section = BooleanField(_('Display as section'), widget=SwitchWidget(), default=True,
                                      description=_('Whether this is going to be displayed as a section or standalone'))
    title = StringField(_('Title'), [HiddenUnless('display_as_section', preserve_data=True), DataRequired()],
                        description=_('The title of the section.'))
    description = TextAreaField(_('Description'), [HiddenUnless('display_as_section', preserve_data=True)],
                                description=_('The description text of the section.'))
class TextForm(IndicoForm):
    """Form for a static text item inside a survey."""

    description = TextAreaField(_('Text'),
                                description=_('The text that should be displayed.'))
class ImportQuestionnaireForm(IndicoForm):
    """Form to import a previously exported survey questionnaire (JSON)."""

    json_file = FileField(_('File'), [DataRequired(_('You need to upload a JSON file.'))],
                          accepted_file_types='application/json,.json',
                          description=_('Choose a previously exported survey content to import. '
                                        'Existing sections will be preserved.'))
class InvitationForm(IndicoForm):
    """Form to email survey invitations to a list of recipients."""

    from_address = SelectField(_('From'), [DataRequired()])
    subject = StringField(_('Subject'), [DataRequired()])
    body = TextAreaField(_('Email body'), [DataRequired()], widget=CKEditorWidget(simple=True))
    recipients = EmailListField(_('Recipients'), [DataRequired()], description=_('One email address per line.'))
    copy_for_sender = BooleanField(_('Send copy to me'), widget=SwitchWidget())
    submitted = HiddenField()
    def __init__(self, *args, **kwargs):
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Sender addresses depend on the event's configuration.
        self.from_address.choices = list(event.get_allowed_sender_emails().items())
        self.body.description = render_placeholder_info('survey-link-email', event=None, survey=None)
    def is_submitted(self):
        # Only treat the form as submitted when our hidden marker is present.
        return super().is_submitted() and 'submitted' in request.form
    def validate_body(self, field):
        # The body must contain all required placeholders (e.g. survey link).
        missing = get_missing_placeholders('survey-link-email', field.data, event=None, survey=None)
        if missing:
            raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
|
mher/pymongo | pymongo/son.py | Python | apache-2.0 | 600 | 0 | # | Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Lice | nse is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson.son import *
|
willingc/portal | systers_portal/membership/admin.py | Python | gpl-2.0 | 111 | 0 | from | django.contrib import admin
from membership.models import JoinRequest
admin.site.register(JoinRe | quest)
|
noroutine/ansible | lib/ansible/modules/network/cumulus/_cl_bond.py | Python | gpl-3.0 | 15,451 | 0.000647 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
deprecated: Deprecated in 2.3. Use M(nclu) instead.
description:
- Configures a bond interface on Cumulus Linux To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bonding found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com).
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode.
choices:
- true
- false
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode.
choices:
- true
- false
mstpctl_portadminedge:
description:
- Enables admin edge port.
choices:
- true
- false
clag_id:
description:
- Specify a unique clag_id for every dual connected bond on each
peer switch. The value must be between 1 and 65535 and must be the
same on both peer switches in order for the bond to be considered
dual-connected.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
miimon:
description:
- The mii link monitoring interval.
default: 100
mode:
description:
- The bond mode, as of Cum | ulus Linux 2.5 only LACP bond mode is
supported.
default: '802.3ad'
min_links:
description:
- Minimum number of links.
default: 1
lacp_bypass_allow:
description:
- Enable LACP bypass.
lacp_bypass_period:
description:
- | Period for enabling LACP bypass. Max value is 900.
lacp_bypass_priority:
description:
- List of ports and priorities. Example I("swp1=10, swp2=20").
lacp_bypass_all_active:
description:
- Activate all interfaces for bypass.
It is recommended to configure all_active instead
of using bypass_priority.
lacp_rate:
description:
- The lacp rate.
default: 1
slaves:
description:
- Bond members.
required: True
xmit_hash_policy:
description:
- Transmit load balancing algorithm. As of Cumulus Linux 2.5 only
I(layer3+4) policy is supported.
default: layer3+4
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e installed in the kernel,
"service networking reload" needs be be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
- cl_bond:
name: bond0
slaves:
- swp4-5
ipv4: 10.1.1.1/24
# configure bond as a dual-connected clag bond
- cl_bond:
name: bond1
slaves:
- swp1s0
- swp2s0
clag_id: 1
# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bond:
name: "{{ item.key }}"
slaves: "{{ item.value.slaves }}"
clag_id: "{{ item.value.clag_id|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: "{{ cl_bonds }}"
# In vars file
# ============
---
cl_bonds:
bond0:
alias_name: uplink to isp
slaves:
- swp1
- swp3
    ipv4: 10.1.1.1/24
bond2:
vids:
- 1
- 50
clag_id: 1
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
    """Run a system command via AnsibleModule.run_command.

    Returns stdout on success. If the command fails because the interface
    cannot be found, returns the string '[{}]' (an empty ifquery JSON
    result) so callers can treat it as "no current config". Any other
    failure aborts the module with a descriptive message.
    """
    rc, stdout, stderr = module.run_command(exec_path)
    if rc > 0:
        if re.search('cannot find interface', stderr):
            return '[{}]'
        module.fail_json(msg="Failed; %s Error: %s" % (exec_path, stderr))
    else:
        return stdout
def current_iface_config(module):
    """Load the interface's current config onto module.custom_current_config.

    Runs ifquery and stores its parsed JSON output; stores {} when the
    interface file does not exist yet.
    """
    iface = module.params.get('name')
    location = module.params.get('location')
    module.custom_current_config = {}
    # due to a bug in ifquery, have to check for presence of interface file
    # and not rely solely on ifquery. when bug is fixed, this check can be
    # removed
    if os.path.exists(location + '/' + iface):
        output = run_cmd(module, "/sbin/ifquery -o json %s" % iface)
        module.custom_current_config = module.from_json(output)[0]
def build_address(module):
# if addr_method == 'dhcp', don't add IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config'] |
chemelnucfin/tensorflow | tensorflow/python/keras/layers/cudnn_recurrent.py | Python | apache-2.0 | 20,970 | 0.003386 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers backed by cuDNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util.tf_export import keras_export
class _CuDNNRNN(RNN):
  """Private base class for CuDNNGRU and CuDNNLSTM layers.
  Arguments:
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    time_major: Boolean (default False). If true, the inputs and outputs will be
      in shape `(timesteps, batch, ...)`, whereas in the False case, it will
      be `(batch, timesteps, ...)`.
  """
  def __init__(self,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    # We invoke the base layer's initializer directly here because we do not
    # want to create RNN cell instance.
    super(RNN, self).__init__(**kwargs) # pylint: disable=bad-super-call
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    # cuDNN kernels have no masking support.
    self.supports_masking = False
    self.input_spec = [InputSpec(ndim=3)]
    # cell.state_size may be a single int or a sequence of ints; normalize
    # to a list so state_spec can be built uniformly.
    if hasattr(self.cell.state_size, '__len__'):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]
    self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
    self.constants_spec = None
    self._states = None
    self._num_constants = 0
    self._vector_shape = constant_op.constant([-1])
  def call(self, inputs, mask=None, training=None, initial_state=None):
    # Masking is rejected explicitly: the cuDNN kernels cannot honor it.
    if isinstance(mask, list):
      mask = mask[0]
    if mask is not None:
      raise ValueError('Masking is not supported for CuDNN RNNs.')
    # input shape: `(samples, time (padded with zeros), input_dim)`
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    if isinstance(inputs, list):
      # Initial states were passed along with the inputs.
      initial_state = inputs[1:]
      inputs = inputs[0]
    elif initial_state is not None:
      pass
    elif self.stateful:
      initial_state = self.states
    else:
      initial_state = self.get_initial_state(inputs)
    if len(initial_state) != len(self.states):
      raise ValueError('Layer has ' + str(len(self.states)) +
                       ' states but was passed ' + str(len(initial_state)) +
                       ' initial states.')
    if self.go_backwards:
      # Reverse time axis.
      inputs = K.reverse(inputs, 1)
    output, states = self._process_batch(inputs, initial_state)
    if self.stateful:
      # Persist the final states so the next batch starts from them.
      updates = []
      for i in range(len(states)):
        updates.append(state_ops.assign(self.states[i], states[i]))
      self.add_update(updates)
    if self.return_state:
      return [output] + states
    else:
      return output
  def get_config(self):
    config = {
        'return_sequences': self.return_sequences,
        'return_state': self.return_state,
        'go_backwards': self.go_backwards,
        'stateful': self.stateful,
        'time_major': self.time_major,
    }
    base_config = super( # pylint: disable=bad-super-call
        RNN, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    return cls(**config)
  @property
  def trainable_weights(self):
    # Weights only exist after build(); cuDNN layers always have exactly
    # kernel, recurrent_kernel and bias.
    if self.trainable and self.built:
      return [self.kernel, self.recurrent_kernel, self.bias]
    return []
  @property
  def non_trainable_weights(self):
    if not self.trainable and self.built:
      return [self.kernel, self.recurrent_kernel, self.bias]
    return []
  @property
  def losses(self):
    return super(RNN, self).losses
  def get_losses_for(self, inputs=None):
    return super( # pylint: disable=bad-super-call
        RNN, self).get_losses_for(inputs=inputs)
@keras_export(v1=['keras.layers.CuDNNGRU'])
class CuDNNGRU(_CuDNNRNN):
"""Fast GRU implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
|
rloliveirajr/sklearn_transformers | trans4mers/feature_extraction/relative_location_fingerprint.py | Python | gpl-2.0 | 463 | 0 | import numpy as np
from .fingerprint import Fingerprint
class RelativeLocationFingerprint(Fingerprint):
    """Fingerprint whose features are the pairwise ratios of the row values."""

    def trans_func_(self, row):
        """Return values[i] / values[j] for every ordered pair with i != j.

        For an input of length n this yields n*(n-1) features, ordered by
        (i, j). Division is float division; a zero entry in *row* raises
        ZeroDivisionError, as before.
        """
        values = row
        # An unused `max(values)` computation was removed; as a side effect an
        # empty row now returns [] instead of raising ValueError.
        return [
            values[i] / float(values[j])
            for i in range(len(values))
            for j in range(len(values))
            if i != j
        ]
|
NLViewJeroen/NLView-Repository | plugin.video.NLVIEW/foxsports.py | Python | gpl-3.0 | 17,021 | 0.021033 | import urllib, urllib2, re, cookielib, os, sys, socket
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
import utils, sqlite3
def Main():
    """Build the FOX Sports root menu (search, latest videos, categories)."""
    utils.addDir('Zoeken','http://www.foxsports.nl/search/videos/?q=',230,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png','',fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Laatste Video','http://www.foxsports.nl/video/filter/fragments/1/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Samenvattingen','',237,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Doelpunten','',238,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Interviews','',239,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Analyses','http://www.foxsports.nl/video/filter/fragments/1/analyses/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Voetbal','http://www.foxsports.nl/video/filter/fragments/1/alle/voetbal/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Tennis','http://www.foxsports.nl/video/filter/fragments/1/alle/tennis/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Overige','http://www.foxsports.nl/video/filter/fragments/1/alle/overige/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Aanbevolen','http://www.foxsports.nl/video/filter/fragments/1/aanbevolen/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Meest bekeken','http://www.foxsports.nl/video/meest_bekeken/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Videoklasiekers','http://www.foxsports.nl/video/filter/fragments/1/videoklassiekers/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Meer','http://www.foxsports.nl/video/filter/fragments/1/meer_video/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    xbmcplugin.endOfDirectory(utils.addon_handle)
def MainSamenvattingen():
    """Build the match-highlights ("samenvattingen") submenu per competition."""
    utils.addDir('Alle Samenvattingen','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Voetbal Samenvattingen','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Eredivisie','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/eredivisie/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Jupiler League','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/jupiler-league/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('KNVB Beker','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/knvb-beker/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('UEFA Europa League','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/uefa-europa-league/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Barclays Premier League','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/barclays-premier-league/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Bundesliga','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/bundesliga/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('FA Cup','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/fa-cup/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('DFB Pokal','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/dfb-pokal/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('UEFA Europa League Kwalificatie','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/uefa-europa-league-kwalificatie/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('EK Kwalificatie','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/ek-kwalificatie/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    utils.addDir('Tweede Bundesliga','http://www.foxsports.nl/video/filter/fragments/1/samenvattingen/voetbal/tweede-bundesliga/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
    xbmcplugin.endOfDirectory(utils.addon_handle)
def MainDoelpunten():
utils.addDir('Alle Doelpunten','http://www.foxsports.nl/video/filter/fragments/1/doelpunten/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
utils.addDir('Eredivisie','http://www.foxsports.nl/video/filter/fragments/1/doelpunten/voetbal/eredivisie/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
utils.addDir('Jupiler League','http://www.foxsports.nl/video/filter/fragments/1/doelpunten/voetbal/jupiler-league/',228,'https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fs.png',1,fanart='https://raw.githubusercontent.com/doki1/repo/master/NLView%20XML/fanart.jpg')
utils.addDir('KNVB Beker','http://www.foxsports.nl/video/filter/fragments/1/doelpunten/voetbal/knvb-beker/',228,'https://raw.git |
alexisflesch/texamator | partielatormods/guis/guiquit.py | Python | gpl-3.0 | 3,316 | 0.003317 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'quitter.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # pyuic5-generated UI class for the "quit TeXamator?" confirmation dialog.
    # Per the header warning, hand edits are lost on regeneration — change
    # quitter.ui and rerun pyuic5 instead of editing this file.
    def setupUi(self, Dialog):
        # Build a small fixed-size dialog: a question label stacked above a
        # right-aligned Cancel/Ok button row.
        Dialog.setObjectName("Dialog")
        Dialog.resize(274, 91)
        # Fixed size policy in both directions: the dialog is not resizable.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
        Dialog.setSizePolicy(sizePolicy)
        # Window icon comes from the compiled Qt resource file (icones_rc).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/all/icones/TeXamator.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        self.gridLayout = QtWidgets.QGridLayout(Dialog)
        self.gridLayout.setObjectName("gridLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        # Fixed vertical spacer between the label and the button row.
        spacerItem = QtWidgets.QSpacerItem(20, 5, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.verticalLayout.addItem(spacerItem)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Expanding spacer pushes the buttons to the right edge.
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.pushButton_cancel = QtWidgets.QPushButton(Dialog)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/all/icones/cancel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_cancel.setIcon(icon1)
        self.pushButton_cancel.setObjectName("pushButton_cancel")
        self.horizontalLayout.addWidget(self.pushButton_cancel)
        self.pushButton_ok = QtWidgets.QPushButton(Dialog)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/all/icones/apply.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_ok.setIcon(icon2)
        self.pushButton_ok.setObjectName("pushButton_ok")
        self.horizontalLayout.addWidget(self.pushButton_ok)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.retranslateUi(Dialog)
        # Cancel rejects the dialog, Ok accepts it (standard QDialog flow).
        self.pushButton_cancel.clicked.connect(Dialog.reject)
        self.pushButton_ok.clicked.connect(Dialog.accept)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Install translatable user-visible strings.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Quit"))
        # NOTE(review): the context string below is corrupted in this copy
        # ("Di | alog"); it should read "Dialog" — regenerate from quitter.ui.
        self.label.setText(_translate("Di | alog", "Do you really want to quit TeXamator ?"))
        self.pushButton_cancel.setText(_translate("Dialog", "Cancel"))
        self.pushButton_ok.setText(_translate("Dialog", "Ok"))
from . import icones_rc
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone.
    import sys
    # NOTE(review): the next line is corrupted in this copy ("app = | QtWidgets...");
    # it should read "app = QtWidgets.QApplication(sys.argv)".
    app = | QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
kenhys/sphinxcontrib-openstreetmap | sphinxcontrib/__init__.py | Python | bsd-2-clause | 365 | 0 | # -*- coding: utf-8 -*-
"""
    sphinxcontrib
    ~~~~~~~~~~~~~
    This package is a namespace package that contains all extensions
    distributed in the ``sphinx-contrib`` distribution.
    :copyright: Copyright 2015 by HAYASHI Kentaro <kenhys@gmail.com>
    :license: BSD, see LICENSE for details.
"""
# Declare ``sphinxcontrib`` as a setuptools (pkg_resources-style) namespace
# package so multiple sphinx-contrib distributions can install subpackages
# under this shared name.
# NOTE(review): the call below is corrupted in this copy
# ("declar | e_namespace"); it should read declare_namespace(__name__).
__import__('pkg_resources').declar | e_namespace(__name__)
|
pfschwartz/openelisglobal-core | liquibase/OE4.2/testCatalogCI_Regional_labs/scripts/populateNames.py | Python | mpl-2.0 | 1,560 | 0.005128 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generate populateNames.sql: for each test GUID, emit INSERT statements that
# create localization rows for the test's English name and report name, plus
# UPDATE statements linking those rows back to the test identified by the GUID.
# NOTE(review): Python 2 script (print statement). Rows in englishTestName.txt
# and guid.txt are paired by line index, so the files must stay in sync.
test_names = []
guids = []
populateNames = []
name_results = []
name_file = open('englishTestName.txt','r')
# NOTE(review): sample_type_file is opened but never read or closed.
sample_type_file = open("sampleType.txt")
guid_file = open("guid.txt")
# Output SQL file; NOTE(review): never explicitly closed or flushed.
result_file = open("../populateNames.sql", "w")
for line in name_file:
    test_names.append(line.strip())
name_file.close()
for line in guid_file:
    guids.append(line.strip())
guid_file.close()
insertString = "INSERT INTO localization( id, description, english, french, lastupdated)\n"
# NOTE(review): the two SQL-fragment assignments below are corrupted in this
# copy (" | " artifacts); they should read "... lastval() where guid ='" and
# "updateReportingString = ...".
updateNameString = "update clinlims.test set name_localization_id = lastval() wh | ere guid ='"
updateReportingStri | ng = "update clinlims.test set reporting_name_localization_id = lastval() where guid ='"
for row in range(0, len(guids)):
    if guids[row]:
        # Name localization: INSERT, then link via name_localization_id.
        name_results.append(insertString)
        name_results.append("\tVALUES ( nextval('localization_seq'), 'test name', '" + test_names[row] + "', (select name from clinlims.test where guid = '" + guids[row] + "' ), now());\n")
        name_results.append(updateNameString + guids[row] + "';\n")
        # Report-name localization: INSERT, then link via reporting_name_localization_id.
        name_results.append(insertString)
        name_results.append("\tVALUES ( nextval('localization_seq'), 'test report name', '" + test_names[row] + "', (select reporting_description from clinlims.test where guid = '" + guids[row] + "' ), now());\n")
        name_results.append(updateReportingString + guids[row] + "';\n")
for line in name_results:
    result_file.write(line)
print "Done look for results in populateNames.sql"
|
BroukPytlik/volunteer-organiser | organiser/board/migrations/0017_auto_20150804_2008.py | Python | gpl-3.0 | 430 | 0.002326 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Holiday.until optional
    # (blank/null allowed).
    dependencies = [
        ('board', '0016_auto_20150804_1938'),
    ]
    # NOTE(review): the next line is corrupted in this copy ("o | perations");
    # it should read "operations = [".
    o | perations = [
        migrations.AlterField(
            model_name='holiday',
            name='until',
            field=models.DateField(verbose_name='until', blank=True, null=True),
        ),
    ]
|
danwent/Perspectives-Server | notary_util/threaded_scanner.py | Python | gpl-3.0 | 10,401 | 0.030382 | # This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Scan a list of services and update Observation records in the notary database.
For running scans without connecting to the database see util/simple_scanner.py.
"""
from __future__ import print_function
import argparse
import errno
import logging
import os
import sys
import threading
import time
import notary_common
import notary_logs
from notary_db import ndb
# TODO: HACK
# add ..\util to the import path so we can import ssl_scan_sock
sys.path.insert(0,
os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from util.ssl_scan_sock import attempt_observation_for_service, SSLScanTimeoutException, SSLAlertException
DEFAULT_SCANS = 10
DEFAULT_WAIT = 20
DEFAULT_INFILE = "-"
LOGFILE = "scanner.log"
stats = None
results = None
class ResultStore(object):
    """Thread-safe accumulator for scan observation results.

    Scan threads call :meth:`add`; the main loop periodically drains the
    store with :meth:`get`.
    """

    def __init__(self):
        self.lock = threading.Lock()
        self.results = []

    def add(self, result):
        """Append *result* to the pending set while holding the lock."""
        with self.lock:
            self.results.append(result)

    def get(self):
        """Return every result collected so far and reset the store.

        The swap happens under a single lock acquisition, so a result added
        concurrently can never be lost between retrieval and reset.
        """
        with self.lock:
            drained, self.results = self.results, []
            return drained
# Worker thread: one instance scans a single service id.
# NOTE(review): relies on the module-level globals `stats` (GlobalStats) and
# `results` (ResultStore) that main() assigns before any thread is started.
class ScanThread(threading.Thread):
    """
    Scan a remote service and retrieve the fingerprint for its TLS certificate.
    """
    def __init__(self, db, sid, global_stats, timeout_sec, sni):
        # db: notary database handle (used for report_metric on failure).
        # sid: service id string; "host:port,<type>" per the split in main().
        self.db = db
        self.sid = sid
        self.global_stats = global_stats
        # Count this thread as active before start() so bookkeeping never
        # under-counts running scans.
        self.global_stats.active_threads += 1
        threading.Thread.__init__(self)
        self.timeout_sec = timeout_sec
        self.sni = sni
        # Record the scan start time keyed by service id.
        self.global_stats.threads[sid] = time.time()
    def _get_errno(self, e):
        """
        Return the error number attached to an Exception,
        or 0 if none exists.
        """
        try:
            return e.args[0]
        except Exception:
            return 0 # no error
    def _record_failure(self, e):
        """Record an exception that happened during a scan."""
        stats.failures += 1
        self.db.report_metric('ServiceScanFailure', str(e))
        # Classify the failure so per-cause counters can be analysed later.
        if (isinstance(e, SSLScanTimeoutException)):
            stats.failure_timeouts += 1
            return
        if (isinstance(e, SSLAlertException)):
            stats.failure_ssl_alert += 1
            return
        if (isinstance(e, ValueError)):
            stats.failure_other += 1
            return
        err = self._get_errno(e)
        if err == errno.ECONNREFUSED or err == errno.EINVAL:
            stats.failure_conn_refused += 1
        elif err == errno.EHOSTUNREACH or err == errno.ENETUNREACH:
            stats.failure_no_route += 1
        elif err == errno.ECONNRESET:
            stats.failure_conn_reset += 1
        # presumably getaddrinfo()/resolver error codes (e.g. EAI_NONAME=-2,
        # EAI_AGAIN=-3) — TODO confirm against the socket module constants
        elif err == -2 or err == -3 or err == -5 or err == 8:
            stats.failure_dns += 1
        else:
            # NOTE(review): the next line is corrupted in this copy
            # ("failure_othe | r"); it should read "stats.failure_other += 1".
            stats.failure_othe | r += 1
    def run(self):
        """
        Scan a remote service and retrieve the fingerprint for its TLS certificate.
        The fingerprint is appended to the global results list.
        """
        try:
            fp = attempt_observation_for_service(self.sid, self.timeout_sec, self.sni)
            if (fp != None):
                results.add((self.sid, fp))
            else:
                # error already logged, but tally error count
                stats.failures += 1
                # NOTE(review): the next line is corrupted in this copy
                # ("+ | ="); it should read "stats.failure_socket += 1".
                stats.failure_socket + | = 1
        # NOTE(review): Python 2-only except syntax; this module is Python 2.
        except Exception, e:
            self._record_failure(e)
            logging.error("Error scanning '{0}' - {1}".format(self.sid, e))
            logging.exception(e)
        # Always update progress counters, success or failure.
        self.global_stats.num_completed += 1
        self.global_stats.active_threads -= 1
        if self.sid in self.global_stats.threads:
            del self.global_stats.threads[self.sid]
class GlobalStats(object):
    """Mutable counters shared by all scan threads.

    Tracks overall scan progress plus a per-cause breakdown of failures so
    the causes can be analysed after the run.
    """

    # All counter attributes start at zero.
    _COUNTERS = (
        "failures", "num_completed", "active_threads", "num_started",
        # individual failure counts
        "failure_timeouts", "failure_no_route", "failure_conn_refused",
        "failure_conn_reset", "failure_dns", "failure_ssl_alert",
        "failure_socket", "failure_other",
    )

    def __init__(self):
        for counter in self._COUNTERS:
            setattr(self, counter, 0)
        # Map of service id -> start time for scans currently in flight.
        self.threads = {}
def _record_observations_in_db(db, results):
"""
Record a set of service observations in the database.
"""
if len(results) == 0:
return
try:
for r in results:
db.report_observation(r[0], r[1])
except Exception as e:
# TODO: we should probably retry here
logging.critical("DB Error: Failed to write results of length {0}".format(
len(results)))
logging.exception(e)
def get_parser():
    """Return an argument parser for this module."""
    # Inherits the database connection options from ndb's parent parser;
    # the module docstring becomes the program description.
    parser = argparse.ArgumentParser(parents=[ndb.get_parser()],
                                     description=__doc__)
    parser.add_argument('service_id_file', type=argparse.FileType('r'), nargs='?', default=DEFAULT_INFILE,
                        help="File that contains a list of service names - one per line. Will read from stdin by default.")
    parser.add_argument('--scans', '--scans-per-sec', '-s', nargs='?', default=DEFAULT_SCANS, const=DEFAULT_SCANS, type=int,
                        help="How many scans to run per second. Default: %(default)s.")
    # NOTE(review): "asychronously" in the help text below is a typo for
    # "asynchronously" (left as-is: help text is runtime output).
    parser.add_argument('--timeout', '--wait', '-w', nargs='?', default=DEFAULT_WAIT, const=DEFAULT_WAIT, type=int,
                        help="Maximum number of seconds each scan will wait (asychronously) for results before giving up. Default: %(default)s.")
    parser.add_argument('--sni', action='store_true', default=False,
                        help="use Server Name Indication. See section 3.1 of http://www.ietf.org/rfc/rfc4366.txt.\
                        Default: \'%(default)s\'")
    # NOTE(review): "File will written to" below is missing "be"
    # (left as-is: help text is runtime output).
    parser.add_argument('--logfile', action='store_true', default=False,
                        help="Log to a file on disk rather than standard out.\
                        A rotating set of {0} logs will be used, each capturing up to {1} bytes.\
                        File will written to {2}\
                        Default: \'%(default)s\'".format(
                            notary_logs.LOGGING_BACKUP_COUNT + 1,
                            notary_logs.LOGGING_MAXBYTES,
                            notary_logs.get_log_file(LOGFILE)))
    # --verbose and --quiet are mutually exclusive logging levels.
    loggroup = parser.add_mutually_exclusive_group()
    loggroup.add_argument('--verbose', '-v', default=False, action='store_true',
                          help="Verbose mode. Print more info about each scan.")
    loggroup.add_argument('--quiet', '-q', default=False, action='store_true',
                          help="Quiet mode. Only print system-critical problems.")
    return parser
def main(db, service_id_file, logfile=False, verbose=False, quiet=False, rate=DEFAULT_SCANS,
timeout_sec=DEFAULT_WAIT, sni=False):
"""
Run the main program.
Scan a list of services and update Observation records in the notary database.
"""
global stats
global results
stats = GlobalStats()
results = ResultStore()
notary_logs.setup_logs(logfile, LOGFILE, verbose=verbose, quiet=quiet)
start_time = time.time()
localtime = time.asctime(time.localtime(start_time))
# read all service names to start;
# otherwise the database can lock up
# if we're accepting data piped from another process
all_sids = [line.rstrip() for line in service_id_file]
print("Starting scan of %s service-ids at: %s" % (len(all_sids), localtime))
print("INFO: *** Timeout = %s sec Scans-per-second = %s" % \
(timeout_sec, rate))
db.report_metric('ServiceScanStart', "ServiceCount: " + str(len(all_sids)))
# create a thread to scan each service
# and record results as they come in
for sid in all_sids:
try:
# ignore non SSL services
# TODO: use a regex instead
if sid.split(",")[1] == notary_common.SSL_TYPE:
stats.num_started += 1
t = ScanThread(db, sid, stats, timeout_sec, sni)
t.start()
if (stats.num_started % rate) == 0:
time.sleep(1)
_record_observations_in_db(db, results.get())
|
chen2aaron/TangoWithDjangoProject | rango/urls.py | Python | gpl-2.0 | 646 | 0.008621 | # -*- coding:utf-8 -*-
from django.conf.urls import url, patterns
from rango import views
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    # NOTE(review): the pattern below is corrupted in this copy
    # ("r' | ^about/$'"); it should read r'^about/$'.
    url(r' | ^about/$', views.about, name='about'),
    # Match every letter, digit and hyphen before the trailing slash
    # (i.e. a-z, A-Z, 0-9 and "-"),
    # then pass that value to views.category() as category_name_slug.
    url(r'^category/(?P<category_name_slug>[\w\-]+)/$',views.category, name='category'),
    url(r'^add_category/$', views.add_category, name='add_category'),
    # NOTE(review): the line below is corrupted in this copy
    # ("name='add_page' | )"); it should end with name='add_page'),.
    url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$',views.add_page, name='add_page' | ),
)
|
blue-yonder/bonfire | bonfire/_version.py | Python | bsd-3-clause | 8,274 | 0 | #! -*- coding: utf-8 -*-
"""
Retrieval of version number
This file helps to compute a version number in source trees obtained from
git-archive tarball (such as those provided by githubs download-from-tag
feature). Distribution tarballs (built by setup.py sdist) and build
directories (produced by setup.py build) will contain a much shorter file
that just contains the computed version number.
This file was generated by PyScaffold.
"""
import inspect
import os
import re
import subprocess
import sys
# Directory containing this file; used below to locate the project root.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# these strings will be replaced by git during git-archive
# (an unexpanded "$Format...$" value means we are NOT inside a git archive)
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# general settings
tag_prefix = 'v' # tags are like v1.2.0
package = "bonfire"
# Optional namespace-package parts, e.g. ["ns"] for an "ns.bonfire" layout.
namespace = []
root_pkg = namespace[0] if namespace else package
if namespace:
    # Relative path from the repository root to the package directory.
    pkg_path = os.path.join(*namespace[-1].split('.') + [package])
else:
    pkg_path = package
class ShellCommand(object):
    """Callable wrapper around an external command.

    An instance remembers the base command plus subprocess options; calling
    the instance appends arguments, runs the command, and yields the lines
    of its combined stdout/stderr output.
    """

    def __init__(self, command, shell=True, cwd=None):
        # Stash the invocation options for every later call.
        self._command = command
        self._shell = shell
        self._cwd = cwd

    def __call__(self, *args):
        """Run the command with *args* appended; yield each output line."""
        full_cmd = " ".join([self._command, subprocess.list2cmdline(args)])
        captured = subprocess.check_output(full_cmd,
                                           shell=self._shell,
                                           cwd=self._cwd,
                                           stderr=subprocess.STDOUT,
                                           universal_newlines=True)
        return self._yield_output(captured)

    def _yield_output(self, msg):
        """Generator over the individual lines of *msg*."""
        for chunk in msg.splitlines():
            yield chunk
def get_git_cmd(**args):
    """Return a working git ShellCommand, or None if git is unavailable.

    On Windows both "git.cmd" and "git.exe" are tried; elsewhere plain
    "git".  Each candidate is probed with ``--version``.
    """
    if sys.platform == "win32":
        candidates = ["git.cmd", "git.exe"]
    else:
        candidates = ["git"]
    for name in candidates:
        git = ShellCommand(name, **args)
        try:
            git("--version")
        except (subprocess.CalledProcessError, OSError):
            continue
        return git
    return None
def version_from_git(tag_prefix, root, verbose=False):
    """Derive version info by running ``git describe`` inside *root*.

    Only reached when neither expanded git-archive keywords nor a rewritten
    _version.py supplied a version, i.e. inside a checked-out source tree.
    Returns ``{"version": ..., "full": ...}`` or None on any failure.
    """
    git = get_git_cmd(cwd=root)
    if not git:
        print("no git found")
        return None
    try:
        described = next(git("describe", "--tags", "--dirty", "--always"))
    except subprocess.CalledProcessError:
        return None
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '{}' doesn't start with prefix '{}'".format(described,
                                                                   tag_prefix))
        return None
    version = described[len(tag_prefix):]
    # Full revision id; mark it dirty when the working tree has local edits.
    revision = next(git("rev-parse", "HEAD")).strip()
    if version.endswith("-dirty"):
        revision += "-dirty"
    return {"version": version, "full": revision}
def get_keywords(versionfile_abs):
    """Extract the git_refnames / git_full keyword values from a file.

    Used from setup.py, where importing _version.py is undesirable, so the
    values are pulled out with a regexp instead.  Returns a dict with any
    keys found, or None when the file cannot be read.
    """
    extracted = dict()
    value_re = re.compile(r'=\s*"(.*)"')
    try:
        with open(versionfile_abs, "r") as fh:
            for raw_line in fh.readlines():
                stripped = raw_line.strip()
                if stripped.startswith("git_refnames ="):
                    match = value_re.search(raw_line)
                    if match:
                        extracted["refnames"] = match.group(1)
                elif stripped.startswith("git_full ="):
                    match = value_re.search(raw_line)
                    if match:
                        extracted["full"] = match.group(1)
    except EnvironmentError:
        return None
    return extracted
def version_from_keywords(keywords, tag_prefix, verbose=False):
    """Compute version info from expanded git-archive keywords.

    Returns ``{"version": ..., "full": ...}``, or None when *keywords* is
    missing or still contains the unexpanded ``$Format`` placeholders.
    """
    if not keywords:
        return None  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return None  # unexpanded, so not in an unpacked git-archive tarball
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0"; prefer those when present.
    TAG = "tag: "
    tags = {name[len(TAG):] for name in refs if name.startswith(TAG)}
    if not tags:
        # Older git (or genuinely no tags): fall back to the heuristic that
        # version tags contain a digit, which filters out branch names such
        # as "master", "release", "stabilization" and "HEAD".
        tags = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '{}', no digits".format(",".join(refs - tags)))
    if verbose:
        print("likely tags: {}".format(",".join(sorted(tags))))
    for candidate in sorted(tags):
        # Sorting prefers e.g. "2.0" over "2.0rc1".
        if candidate.startswith(tag_prefix):
            trimmed = candidate[len(tag_prefix):]
            if verbose:
                print("picking {}".format(trimmed))
            return {"version": trimmed,
                    "full": keywords["full"].strip()}
    # No tag carried the prefix: fall back to the full revision id.
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": keywords["full"].strip(),
            "full": keywords["full"].strip()}
def version_from_parentdir(parentdir_prefix, root, verbose=False):
    """Extract the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into "<package>-<version>".
    Returns ``{"version": ..., "full": ""}``, or None when *root*'s basename
    does not start with *parentdir_prefix*.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        version = dirname[len(parentdir_prefix):].split('-')[0]
        return {"version": version, "full": ""}
    if verbose:
        print("guessing rootdir is '{}', but '{}' doesn't start with "
              "prefix '{}'".format(root, dirname, parentdir_prefix))
    return None
def git2pep440(ver_str):
    """Convert a ``git describe`` style string into a PEP 440 version.

    Mapping by number of dash-separated pieces:
      "1.2"                   -> "1.2"
      "1.2-dirty"             -> "1.2+dirty"
      "1.2-N-gSHA"            -> "1.2.post0.devN+gSHA"
      "1.2-N-gSHA-dirty"      -> "1.2.post0.devN+gSHA.dirty"
    Anything with more dashes raises RuntimeError.
    """
    pieces = ver_str.split('-')
    if len(pieces) == 1:
        return ver_str
    if len(pieces) == 2:
        return pieces[0] + "+dirty"
    if len(pieces) == 3:
        tag, commits, sha1 = pieces
        return "{}.post0.dev{}+{}".format(tag, commits, sha1)
    if len(pieces) == 4:
        tag, commits, sha1, _ = pieces
        return "{}.post0.dev{}+{}.dirty".format(tag, commits, sha1)
    raise RuntimeError("Invalid version string")
def get_versions(verbose=False):
vcs_kwds = {"refnames": git_refnames, "full": git_full}
parentdir = package + '-'
root = __location__
# pkg_path is the relative path from the top of the source
# tree (where the .git directory might live) to this file.
# Invert this to find the root of our package.
for _ in pkg_path.split(os.sep):
root = os.path.dirname(root)
# different version retrieval methods as (method, args, comment)
ver_retrieval = [
(version_from_keywords, (vcs_kwds, tag_prefix, verbose),
'expanded keywords'),
(version_from_parentdir, (parentdir, root, verbose), 'parentdir'),
(version_from_git, (tag_prefix, root, verbose), 'git')
]
for method, args, comment in ver_retrieval:
ver = method(*args)
if ver:
if verbose:
print("got version from {}".format(comment))
break
else:
ver = {"ve |
haoyuchen1992/osf.io | website/addons/googledrive/serializer.py | Python | apache-2.0 | 1,189 | 0 | from website.addons.base.serializer import OAuthAddonSerializer
from website.addons.googledrive.exceptions import ExpiredAuthError
class GoogleDriveSerializer(OAuthAddonSerializer):
    # Serializer for the Google Drive addon's node settings.
    @property
    def addon_serialized_urls(self):
        # Map of addon endpoint names to URLs on the owning node.
        node = self.node_settings.owner
        return {
            'files': node.web_url_for('collect_file_trees'),
            'config': node.api_url_for('googledrive_config_put'),
            # NOTE(review): corrupted in this copy; the endpoint name should
            # read 'googledrive_remove_user_auth'.
            'deauthorize': node.api_url_for('googledrive_remove_us | er_auth'),
            'importAuth': node.api_url_for('googledrive_import_user_auth'),
            'folders': node.api_url_for('googledrive_folders'),
            'accounts': node.api_url_for('list_googledrive_user_accounts')
        }
    @property
    def serialized_node_settings(self):
        # Parent serialization plus a validCredentials flag: when an external
        # account is linked, a failing token refresh marks it invalid.
        result = super(GoogleDriveSerializer, self).serialized_node_settings
        # NOTE(review): the next line is corrupted in this copy; it should
        # read "valid_credentials = True" at method indentation.
| valid_credentials = True
        if self.node_settings.external_account is not None:
            try:
                self.node_settings.fetch_access_token()
            except ExpiredAuthError:
                valid_credentials = False
            result['validCredentials'] = valid_credentials
        return {'result': result}
|
kstaniek/fiblary | examples/info.py | Python | apache-2.0 | 1,435 | 0.000697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Klaudiusz Staniek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fiblary.client import Client
# Only critical problems are logged in this example.
logging.basicConfig(
    format='%(asctime)-15s %(levelname)s: %(module)s:%(funcName)s'
    ':%(lineno)d: %(message)s',
    level=logging.CRITICAL)
def main():
    # Example: connect to a Fibaro HC2 controller and dump a few resources.
    # NOTE(review): Python 2 script (print statement); the address and
    # admin/admin credentials are sample values for a local controller.
    hc2 = Client(
        'v3',
        'http://192.168.1.230/api/',
        'admin',
        'admin'
    )
    info = hc2.info.get()
    print info
    weather = hc2.weather.get()
    print weather
    login = hc2.login.get()
    print login
    devices = hc2.devices.get(1)
    print devices
    devices = hc2.devices.list(name='Ceiling Lamp')
    print devices
    print type(devices)
    # NOTE(review): the next two lines are corrupted in this copy; they
    # should read "for device in devices:" / "print device.name".
    for device in device | s:
| print device.name
    devices = hc2.devices.list(id=1)
    for device in devices:
        print device.name
if __name__ == '__main__':
    main()
|
anhstudios/swganh | data/scripts/templates/object/tangible/ship/components/engine/shared_eng_moncal_ifs32.py | Python | mit | 477 | 0.046122 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE | IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	# Auto-generated SWG object template factory for the MonCal IFS-32 engine.
	result = Tangible()
	result.template = "object/tangible/ship/components/engine/shared_eng_moncal_ifs32.iff"
	# NOTE(review): the next line is corrupted in this copy ("att | ribute");
	# it should read "result.attribute_template_id = 8".
	result.att | ribute_template_id = 8
	result.stfName("space/space_item","eng_moncal_ifs32_n")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
tanujs22/crowdsource-platform | crowdsourcing/viewsets/project.py | Python | mit | 18,897 | 0.005239 | from rest_framework import status, viewsets
from rest_framework.response import Response
from crowdsourcing.serializers.project import *
from crowdsourcing.serializers.task import TaskWorkerSerializer
from rest_framework.decorators import detail_route, list_route
from crowdsourcing.models import Module, Category, Project, Requester, ProjectRequester, \
ModuleReview, ModuleRating, BookmarkedProjects, Task, TaskWorker, WorkerRequesterRating, Worker
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.permissions.util import IsOwnerOrReadOnly
from crowdsourcing.permissions.project import IsReviewerOrRaterOrReadOnly
from rest_framework.permissions import IsAuthenticated
from rest_framework import mixins
from django.shortcuts import get_object_or_404
from django.db.models import Prefetch
from crowdsourcing.utils import get_model_or_none
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Category records that have not been soft-deleted."""
    queryset = Category.objects.filter(deleted=False)
    serializer_class = CategorySerializer

    @detail_route(methods=['post'])
    def update_category(self, request, id=None):
        """Validate the posted data and apply it to the addressed category.

        Returns a success message, or the serializer errors with HTTP 400
        when validation fails.
        """
        category_serializer = CategorySerializer(data=request.data)
        category = self.get_object()
        if category_serializer.is_valid():
            category_serializer.update(category, category_serializer.validated_data)
            return Response({'status': 'updated category'})
        return Response(category_serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """List all non-deleted categories; fall back to an empty list on error.

        Fix: the original used a bare ``except:``, which also swallows
        SystemExit/KeyboardInterrupt and hides programming errors.  Catching
        Exception keeps the best-effort empty-list contract while letting
        process-control exceptions propagate.
        """
        try:
            categories = self.queryset
            serialized = CategorySerializer(categories, many=True)
            return Response(serialized.data)
        except Exception:
            return Response([])

    def destroy(self, request, *args, **kwargs):
        """Delegate deletion of the addressed category to the serializer."""
        category_serializer = CategorySerializer()
        category = self.get_object()
        category_serializer.delete(category)
        return Response({'status': 'deleted category'})
class ProjectViewSet(viewsets.ModelViewSet):
    # ViewSet for Project records; the class continues beyond this excerpt.
    queryset = Project.objects.filter(deleted=False)
    serializer_class = ProjectSerializer
    permission_classes = [IsAuthenticated]
    @detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
    def update_project(self, request, pk=None):
        # Partial update: only the submitted fields are validated and applied.
        project_serializer = ProjectSerializer(data=request.data, partial=True)
        project = self.get_object()
        # NOTE(review): the next line is corrupted in this copy
        # ("if p | roject..."); it should read "if project_serializer.is_valid():".
        if p | roject_serializer.is_valid():
            project_serializer.update(project, project_serializer.validated_data)
            return Response({'status': 'updated project'})
        else:
            return Response(project_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
requester_id = -1
if hasattr(request.user.userprofile, 'requester'):
requester_id = request.user.userprofile.requester.id
try:
query = '''
SELECT p.id, p.name, p.description, Max(mod.relevant_requester_rating) FROM (
SELECT id, name, description, created_timestamp, last_updated, owner_id, project_id, imputed_rating,
CASE WHEN real_weight IS NULL AND average_requester_rating IS NOT NULL THEN average_requester_rating
WHEN real_weight IS NULL AND average_requester_rating IS NULL THEN 1.99
WHEN real_weight IS NOT NULL AND average_requester_rating IS NULL THEN real_weight
ELSE real_weight + 0.1 * average_requester_rating END relevant_requester_rating
FROM (
SELECT rnk.*, wrr.weight as real_weight, avg.average_requester_rating FROM (
--This fetches the modules according to cascading release
SELECT evr.*
FROM(
SELECT avgrat.*, CASE WHEN weight IS NULL
AND adj_average_worker_rating IS NOT NULL THEN adj_average_worker_rating
WHEN weight IS NULL AND adj_average_worker_rating IS NULL THEN 1.99
WHEN weight IS NOT NULL AND adj_average_worker_rating IS NULL THEN weight
ELSE weight + 0.1 * adj_average_worker_rating END worker_relevant_rating
FROM (
SELECT m.*, als.weight, als.adj_average_worker_rating, imputed_rating FROM crowdsourcing_module m
INNER JOIN crowdsourcing_requester r ON m.owner_id = r.id
INNER JOIN crowdsourcing_userprofile u ON r.profile_id = u.id
LEFT OUTER JOIN
(SELECT w.* FROM crowdsourcing_workerrequesterrating w
INNER JOIN(
SELECT origin_id, MAX(last_updated) AS max_date FROM crowdsourcing_workerrequesterrating
WHERE origin_type='requester' AND target_id = %(worker_profile)s GROUP BY origin_id) tb
ON w.origin_id = tb.origin_id AND w.last_updated = tb.max_date
AND w.origin_type='requester' AND w.target_id=%(worker_profile)s) w
ON u.id = w.origin_id
LEFT OUTER JOIN (
SELECT temp.origin_id, temp.target_id, temp.average_worker_rating, temp.count, temp.weight,
(temp.average_worker_rating * temp.count - temp.weight) /
(temp.count-1) as adj_average_worker_rating FROM
(SELECT w.*, average_worker_rating, count from crowdsourcing_workerrequesterrating w
INNER JOIN
(SELECT target_id, AVG(weight) AS average_worker_rating, COUNT(target_id) from
(SELECT wr.* FROM crowdsourcing_workerrequesterrating wr
INNER JOIN (
SELECT origin_id, target_id, MAX(last_updated) AS max_date
FROM crowdsourcing_workerrequesterrating
GROUP BY origin_id, target_id) fltr
ON fltr.origin_id=wr.origin_id AND fltr.target_id=wr.target_id AND
wr.last_updated=fltr.max_date AND wr.target_id=%(worker_profile)s AND wr.origin_type='requester') sult
GROUP BY target_id) avgreq
ON w.target_id=avgreq.target_id
INNER JOIN (
SELECT origin_id, target_id, MAX(last_updated) AS max_date
FROM crowdsourcing_workerrequesterrating
GROUP BY origin_id, target_id
) tmp ON w.origin_id = tmp.origin_id AND w.target_id = tmp.target_id AND
w.last_updated=tmp.max_date AND w.origin_type='requester') temp) als
ON owner_id=als.origin_id
INNER JOIN (
SELECT id, CASE WHEN elapsed_time > hard_deadline THEN 0
WHEN elapsed_time/hard_deadline > submitted_tasks/total_tasks THEN
min_rating * (1 - (elapsed_time/hard_deadline - submitted_tasks/total_tasks))
ELSE min_rating END imputed_rating
FROM (
SELECT m.*, COALESCE(submitted_tasks, 0) as submitted_tasks,
(num_tasks * m.repetition) AS total_tasks,
|
Yellowpal/django | life/bill/migrations/0002_auto_20161111_1113.py | Python | gpl-3.0 | 1,237 | 0.002425 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-11 03:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.delet | ion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('bill', '0001_initial'),
]
operations = [
m | igrations.CreateModel(
name='Type',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('status', models.IntegerField()),
('update_date', models.DateTimeField(auto_now=True)),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.AddField(
model_name='bill',
name='update_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='bill',
name='type_id',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='bill.Type'),
preserve_default=False,
),
]
|
kg-bot/SupyBot | plugins/UrbanDict/plugin.py | Python | gpl-3.0 | 4,100 | 0.000488 | ###
# Copyright (c) 2004-2005, Kevin Murphy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials | provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERC | HANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import SOAP
import supybot.utils as utils
from supybot.commands import *
import supybot.callbacks as callbacks
class UrbanDict(callbacks.Plugin):
    # Supybot plugin exposing UrbanDictionary.com lookups via its SOAP API.
    # NOTE(review): the command docstrings below double as user-facing supybot
    # help text at runtime, so they are intentionally left untouched.
    threaded = True
    # Class-level SOAP proxy shared by all commands of this plugin instance.
    server = SOAP.SOAPProxy('http://api.urbandictionary.com/soap')
    def _licenseCheck(self, irc):
        # Return the configured API license key, or abort the current command
        # with an error message (Raise=True stops further processing).
        license = self.registryValue('licenseKey')
        if not license:
            irc.error('You must have a free UrbanDictionary API license key '
                      'in order to use this command.  You can get one at '
                      '<http://www.urbandictionary.com/api.php>.  Once you '
                      'have one, you can set it with the command '
                      '"config supybot.plugins.UrbanDict.licenseKey <key>".',
                      Raise=True)
        return license
    def urbandict(self, irc, msg, args, words):
        """<phrase>
        Returns the definition and usage of <phrase> from UrbanDictionary.com.
        """
        license = self._licenseCheck(irc)
        # Join the words back into a single phrase for the SOAP lookup.
        definitions = self.server.lookup(license, ' '.join(words))
        if not len(definitions):
            irc.error('No definition found.', Raise=True)
        word = definitions[0].word
        # Render each hit as "definition (example)" and strip the HTML.
        definitions = ['%s (%s)' % (d.definition, d.example)
                       for d in definitions]
        irc.reply(utils.web.htmlToText('%s: %s' % (word,
                                                  '; '.join(definitions))))
    urbandict = wrap(urbandict, [many('something')])
    def _define(self, irc, getDefinition, license):
        # Shared helper for daily/random: fetch one definition and reply.
        definition = getDefinition(license)
        word = definition.word
        definitions = ['%s (%s)' % (definition.definition, definition.example)]
        irc.reply(utils.web.htmlToText('%s: %s' % (word,
                                                  '; '.join(definitions))))
    def daily(self, irc, msg, args):
        """takes no arguments
        Returns the definition and usage of the daily phrase from
        UrbanDictionary.com.
        """
        license = self._licenseCheck(irc)
        self._define(irc, self.server.get_daily_definition, license)
    daily = wrap(daily)
    def random(self, irc, msg, args):
        """takes no arguments
        Returns the definition and usage of a random phrase from
        UrbanDictionary.com.
        """
        license = self._licenseCheck(irc)
        self._define(irc, self.server.get_random_definition, license)
    random = wrap(random)
Class = UrbanDict
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/bsddb/test/test_compat.py | Python | gpl-2.0 | 3,862 | 0.002589 | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key'] |
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
    """Return the suite of compatibility tests."""
    return unittest.makeSuite(CompatibilityTestCase)

if __name__ == '__main__':
    # was "defaul | tTest" -- extraction artifact repaired
    unittest.main(defaultTest='test_suite')
|
def anagrams(A):
    """Group the 1-based indices of words in A that are anagrams of each other.

    Two words are anagrams when their sorted characters match.  Returns a
    list of index lists, one list per anagram group (insertion order).
    """
    groups = {}
    for index, word in enumerate(A):
        # Canonical key: characters sorted, so all anagrams collide.
        key = "".join(sorted(word))
        groups.setdefault(key, []).append(index + 1)
    return list(groups.values())

A = ["cat", "dog", "god", "tca"]
print(anagrams(A))
mattbasta/perfalator | tests/js/test_traversal.py | Python | bsd-3-clause | 1,900 | 0 | from js_helper import TestCase
class TestFunctionTraversal(TestCase):
    """
    Consider the following tree:
    - body
      |-function a()
      | |- foo
      | |- bar
      |-zip
      |-function b()
      | |-abc
      | |-def
      |-zap
    In the old traversal technique, it would be evaluated in the order:
    body a() foo bar zip b() abc def zap
    If the tree is considered as a graph, this would be prefix notation
    traversal.
    This is not optimal, however, as JS commonly uses callbacks which are set
    up before delegation code. The new traversal technique should access nodes
    in the following order:
    body zip zap a() foo bar b() abc def
    If the tree is considered a graph, this would be a custom prefix notation
    traversal where all non-function nodes are traversed before all function
    nodes.
    """
    def test_function_declaration_order(self):
        """Test that function declarations happen in the right time."""
        # The assignment inside test() must be evaluated AFTER `bar = foo`,
        # so bar sees "first" while foo ends up "second".
        self.run_script("""
        foo = "first";
        function test() {
            foo = "second";
        }
        bar = foo;
        """)
        self.assert_var_eq("bar", "first")
        self.assert_var_eq("foo", "second")
    def test_function_expression_order(self):
        """Test that function expressions happen in the right time."""
        # Same ordering guarantee for function expressions assigned to vars.
        self.run_script("""
        foo = "first"
        var x = function() {
            foo = "second";
        }
        bar = foo;
        """)
        self.assert_var_eq("bar", "first")
        self.assert_var_eq("foo", "second")
    def test_nested_functions(self):
        """Test that nested functions are considered in the right order."""
        # Inner function a() runs after outer body, so "second" wins over "third".
        self.run_script("""
        foo = "first"
        function test() {
            function a() {foo = "second"}
            foo = "third";
        }
        """)
        self.assert_var_eq("foo", "second")
|
Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Teamer/09_ExampleDoc.py | Python | mit | 228 | 0.039474 |
# Import modules
import ShareYourSystem as SYS

# Define a TeamDict with two initial items.
MyTeamDict = SYS.Teamer.TeamDict([('a', 1), ('b', 2)])

# Print
print('MyTeamDict is ')
SYS._print(MyTeamDict)

# Get values by positional index.
print(MyTeamDict.getValue(0))
print(MyTeamDict.getValue(1))
|
azumimuo/family-xbmc-addon | plugin.video.phstreams/resources/lib/resolvers/veehd.py | Python | gpl-2.0 | 1,792 | 0.006138 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 | of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources | .lib.modules import client
domains = ['veehd.com']
def resolve(url):
    # Resolve a veehd.com page URL to a direct media URL.
    # Returns the media URL with '|'-separated request headers appended
    # (Kodi-style header suffix), or None on any failure (best-effort).
    try:
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})
        result = client.request(url, close=False)
        result = result.replace('\n','')
        # Extract the player's load_download src target and follow it.
        url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
        url = urlparse.urljoin('http://veehd.com', url)
        result = client.request(url, close=False)
        # If the page embeds an iframe, fetch it first to set cookies,
        # then re-request the page for the final links.
        i = client.parseDOM(result, 'iframe', ret='src')
        if len(i) > 0:
            i = urlparse.urljoin('http://veehd.com', i[0])
            client.request(i, close=False)
            result = client.request(url)
        # Try direct file links first, then embedded src, then JSON "url".
        url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
        url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
        url += re.compile('"url" *: *"(.+?)"').findall(result)
        url = urllib.unquote(url[0])
        url += headers
        return url
    except:
        # Deliberate best-effort: any scrape failure yields None.
        return
|
google/clusterfuzz | src/clusterfuzz/_internal/tests/appengine/handlers/issue_redirector_test.py | Python | apache-2.0 | 2,102 | 0.002379 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apac | he.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is | distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the issue redirector handler."""
import unittest
import mock
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
class HandlerTest(unittest.TestCase):
  """Test Handler."""
  def setUp(self):
    # Patch the issue-tracker lookup helpers and the App Engine check so the
    # handler can be exercised without real datastore/tracker access.
    test_helpers.patch(self, [
        'libs.issue_management.issue_tracker_utils.get_issue_url',
        'libs.helpers.get_testcase',
        'clusterfuzz._internal.metrics.logs._is_running_on_app_engine',
    ])
    self.mock._is_running_on_app_engine.return_value = True  # pylint: disable=protected-access
    # Import deferred until after patching so server wiring sees the mocks.
    import server
    self.app = webtest.TestApp(server.app)
  def test_succeed(self):
    """Test redirection succeeds."""
    testcase = data_types.Testcase()
    testcase.bug_information = '456789'
    self.mock.get_testcase.return_value = testcase
    self.mock.get_issue_url.return_value = 'http://google.com/456789'
    response = self.app.get('/issue/12345')
    # Expect a 302 redirect to the issue tracker URL.
    self.assertEqual(302, response.status_int)
    self.assertEqual('http://google.com/456789', response.headers['Location'])
    self.mock.get_testcase.assert_has_calls([mock.call('12345')])
    self.mock.get_issue_url.assert_has_calls([mock.call(testcase)])
  def test_no_issue_url(self):
    """Test no issue url."""
    # No tracker URL for the testcase -> handler responds 404.
    self.mock.get_testcase.return_value = data_types.Testcase()
    self.mock.get_issue_url.return_value = ''
    response = self.app.get('/issue/12345', expect_errors=True)
    self.assertEqual(404, response.status_int)
|
seung-lab/cloud-volume | cloudvolume/datasource/precomputed/image/__init__.py | Python | bsd-3-clause | 18,157 | 0.009748 | """
The Precomputed format is a neuroscience imaging format
designed for cloud storage. The specification is located
here:
https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed
This datasource contains the code for manipulating images.
"""
from functools import reduce
import itertools
import operator
import uuid
import numpy as np
from tqdm import tqdm
from cloudfiles import CloudFiles, compression
from cloudvolume import lib, exceptions
from cloudvolume.lru import LRU
from ....lib import Bbox, Vec, sip, first, BboxLikeType
from .... import sharedmemory, chunks
from ... import autocropfn, readonlyguard, ImageSourceInterface
from .. import sharding
from .common import chunknames, gridpoints, compressed_morton_code
from . import tx, rx
class PrecomputedImageSource(ImageSourceInterface):
  def __init__(
    self, config, meta, cache,
    autocrop:bool = False,
    bounded:bool = True,
    non_aligned_writes:bool = False,
    fill_missing:bool = False,
    delete_black_uploads:bool = False,
    background_color:int = 0,
    readonly:bool = False,
    lru_bytes:int = 0,
  ):
    """Image source for the Precomputed format.

    config/meta/cache are collaborator objects (shared config, dataset
    metadata, and cache layer).  Flags control cropping, bounds checking,
    missing-chunk behavior, black-upload deletion, and read-only mode.
    lru_bytes sizes the in-memory chunk LRU (0 disables caching by size).
    """
    self.config = config
    self.meta = meta
    self.cache = cache
    self.autocrop = bool(autocrop)
    self.bounded = bool(bounded)
    self.fill_missing = bool(fill_missing)
    self.non_aligned_writes = bool(non_aligned_writes)
    self.readonly = bool(readonly)
    self.delete_black_uploads = bool(delete_black_uploads)
    self.background_color = background_color
    # Per-instance shared-memory namespace for parallel downloads.
    self.shared_memory_id = self.generate_shared_memory_location()
    self.lru = LRU(lru_bytes, size_in_bytes=True)
  def generate_shared_memory_location(self):
    """Return a fresh, collision-resistant shared-memory identifier."""
    return 'precomputed-shm-' + str(uuid.uuid4())
  def unlink_shared_memory(self):
    """Unlink the current shared memory location from the filesystem."""
    return sharedmemory.unlink(self.shared_memory_id)
  def grid_size(self, mip=None):
    """Number of chunks along each axis at `mip` (defaults to config.mip)."""
    mip = mip if mip is not None else self.config.mip
    # Ceil so a partial trailing chunk still counts as a grid cell.
    return np.ceil(self.meta.volume_size(mip) / self.meta.chunk_size(mip)).astype(np.int64)
  def check_bounded(self, bbox, mip):
    """Raise OutOfBoundsError if `bbox` exceeds dataset bounds (when bounded)."""
    if self.bounded and not self.meta.bounds(mip).contains_bbox(bbox):
      raise exceptions.OutOfBoundsError("""
        Requested cutout not contained within dataset bounds.
        Cloudpath: {}
        Requested: {}
        Bounds: {}
        Mip: {}
        Resolution: {}
        Set bounded=False to disable this warning.
      """.format(
          self.meta.cloudpath,
          bbox, self.meta.bounds(mip),
          mip, self.meta.resolution(mip)
        )
      )
  def has_data(self, mip=None):
    """
    Returns whether the specified mip appears to have data
    by testing whether the "folder" exists.
    Returns: bool
    The mip is the index into the returned list. If
    the entry is True, then the data appears to be there.
    If the entry is False, then the data is not there.
    """
    mip = mip if mip is not None else self.config.mip
    mip = self.meta.to_mip(mip)
    cf = CloudFiles(self.meta.cloudpath, secrets=self.config.secrets)
    key = self.meta.key(mip)
    # Listing with the mip's key prefix: any first result means data exists.
    return first(cf.list(prefix=key)) is not None
def download(
self, bbox, mip, parallel=1,
location=None, retain=False,
use_shared_memory=False, use_file=False,
order='F', renumber=False
):
"""
Download a cutout image from the dataset.
bbox: a Bbox object describing what region to download
mip: which resolution to fetch, 0 is the highest resolution
parallel: how many processes to use for downloading
location: if using shared memory or downloading to a file,
which file location should be used?
retain: don't delete the shared memory file after download
completes
use_shared_memory: download to a shared memory location.
This enables efficient inter-process communication and
efficient parallel operation. mutually exclusive with
use_file.
use_file: download image directly to a file named by location.
mutually exclusive with use_shared_memory.
order: The underlying shared memory or file buffer can use either
C or Fortran order for storing a multidimensional array.
renumber: dynamically rewrite downloaded segmentation into
a more compact data type. Only compatible with single-process
non-sharded download.
Returns:
if renumber:
(4d ndarray, remap dict)
else:
4d ndarray
"""
if self.autocrop:
bbox = Bbox.intersection(bbox, self.meta.bounds(mip))
self.check_bounded(bbox, mip)
if location is None:
location = self.shared_memory_id
if self.is_sharded(mip):
if renumb | er:
raise ValueError("renumber is only supported for non-shared volume | s.")
scale = self.meta.scale(mip)
spec = sharding.ShardingSpecification.from_dict(scale['sharding'])
return rx.download_sharded(
bbox, mip,
self.meta, self.cache, self.lru, spec,
compress=self.config.compress,
progress=self.config.progress,
fill_missing=self.fill_missing,
order=order,
background_color=int(self.background_color),
)
else:
return rx.download(
bbox, mip,
meta=self.meta,
cache=self.cache,
lru=self.lru,
parallel=parallel,
location=location,
retain=retain,
use_shared_memory=use_shared_memory,
use_file=use_file,
fill_missing=self.fill_missing,
progress=self.config.progress,
compress=self.config.compress,
order=order,
green=self.config.green,
secrets=self.config.secrets,
renumber=renumber,
background_color=int(self.background_color),
)
  def unique(self, bbox:BboxLikeType, mip:int) -> set:
    """Extract unique values in an efficient way.

    Downloads only what is needed to compute the set of distinct voxel
    values inside `bbox` at resolution `mip`, dispatching to the sharded
    or unsharded reader as appropriate.
    """
    bbox = Bbox.create(bbox, context=self.meta.bounds(mip))
    if self.autocrop:
      bbox = Bbox.intersection(bbox, self.meta.bounds(mip))
    self.check_bounded(bbox, mip)
    if self.is_sharded(mip):
      scale = self.meta.scale(mip)
      spec = sharding.ShardingSpecification.from_dict(scale['sharding'])
      return rx.unique_sharded(
        bbox, mip,
        self.meta, self.cache, self.lru, spec,
        compress=self.config.compress,
        progress=self.config.progress,
        fill_missing=self.fill_missing,
        background_color=int(self.background_color),
      )
    else:
      # NOTE(review): unsharded path is forced single-process (parallel=1).
      return rx.unique_unsharded(
        bbox, mip,
        meta=self.meta,
        cache=self.cache,
        lru=self.lru,
        parallel=1,
        fill_missing=self.fill_missing,
        progress=self.config.progress,
        compress=self.config.compress,
        green=self.config.green,
        secrets=self.config.secrets,
        background_color=int(self.background_color),
      )
@readonlyguard
def upload(
self,
image, offset, mip,
parallel=1,
location=None, location_bbox=None, order='F',
use_shared_memory=False, use_file=False
):
if mip in self.meta.locked_mips():
raise exceptions.ReadOnlyException(
"MIP {} is currently write locked. If this should not be the case, run vol.meta.unlock_mips({}).".format(
mip, mip
)
)
offset = Vec(*offset)
bbox = Bbox( offset, offset + Vec(*image.shape[:3]) )
self.check_bounded(bbox, mip)
if self.autocrop:
image, bbox = autocropfn(self.meta, image, bbox, mip)
offset = bbox.minpt
if location is None:
location = self.shared_memory_id
if self.is_sharded(mip):
(filename, shard) = self.make_shard(image, bbox, mip)
basepath = self.meta.join(self.meta.cloudpath, self.meta.key(mip))
CloudFiles(basepath, progress=self.config.progress, secrets=self.config.secrets).put(
filename, shard,
compress=self.config.compress,
cache_control=self.config.cdn_cache
)
return
return tx.upload(
self.meta, self.cache, self.lru,
image, offset, mip,
compress=self.config.compress,
compress_level=self.config.compress_level,
|
lichengshuang/createvhost | python/asher/getcmdbinfo/bin/getcmdbinfo.py | Python | apache-2.0 | 5,242 | 0.012292 | #!/usr/bin/python
#coding: utf-8
#auth: asher
#date: 20171027
#purpose: get usefulinfo from jsonfile
import ConfigParser
import time
import datetime
import requests
import fileinput
import sys
import os
import codecs
import json
import getWarranty
reload(sys)
sys.setdefaultencoding( "utf-8" )
def getConfig():
    """
    Centralize reading of the shared config data in one function so it
    does not have to be repeated in every function.
    """
    # Paths to the various JSON data files are published as module globals.
    global cmdbpath
    global idccontactinfoJson,iprangesJson,itemsJson,serverJson,dellserverjson
    fileName = os.path.abspath(__file__)
    binPath = os.path.dirname(os.path.realpath(__file__))
    basePath = os.path.dirname(binPath)
    confPath = basePath + '/config/'
    # print confPath
    conf = ConfigParser.ConfigParser()
    conf.read("%s/cmdb.ini" % confPath)
    #####
    cmdbpath = conf.get('getcmdbinfo','cmdbpath')
    # JsonFilesPath = basePath + '/files/'
    if not os.path.isdir(cmdbpath):
        os.mkdir(cmdbpath)
    # e.g. idccontactinfo = idccontactinfo.json
    idccontactinfoJson = cmdbpath + conf.get('getcmdbinfo','idccontactinfo')
    iprangesJson = cmdbpath + conf.get('getcmdbinfo','ipranges')
    itemsJson = cmdbpath + conf.get('getcmdbinfo','items')
    serverJson = cmdbpath + conf.get('getcmdbinfo','serverinfosforidcmaintain')
    dellserverjson = cmdbpath + conf.get('getcmdbinfo','dellserverjson')
def cmdbServer(stg):
    ## Given a service tag `stg`, return server-related info and IDC info
    ## as a dict (empty dict if the tag is unknown).
    newdict = {}
    getConfig()
    with open(serverJson,'r') as f:
        serverinfor = json.loads(f.read())
    if serverinfor.has_key(stg):
        dicts = serverinfor[stg]
        newdict['item_id'] = dicts['item_id']
        # hostname, e.g. HN-dl8
        newdict['hostname'] = dicts['hostname']
        # status, e.g. project-dedicated
        newdict['status'] = dicts['status']
        # idc_id, e.g. Haining
        newdict['idc_id'] = dicts['idc_id']
        # floor, e.g. 3
        newdict['floor'] = dicts['floor']
        # cabinet, e.g. K08
        newdict['cabinet'] = dicts['cabinet']
        # cabinet_pos, e.g. 10
        newdict['cabinet_pos'] = dicts['cabinet_pos']
    return newdict
def idcContact(stg):
    ## Return the IDC contact info (tel/address/name) for the server with
    ## service tag `stg`, or None on any failure.
    ## Usage:
    #iddc = idcContact(stg1)
    #for k,v in iddc.items():
    # print k,v
    idcnew = {}
    getConfig()
    stg1 = stg
    try:
        dicts = cmdbServer(stg1)
        idcid = u'%s' % dicts['idc_id'].encode('UTF-8')
        with open(idccontactinfoJson,'r') as f:
            #idcInf = json.loads(f.read(),encoding='utf-8')
            idcInf = json.loads(f.read())
        if idcInf.has_key(idcid):
            idcnew['tel'] = idcInf[idcid]['tel']
            idcnew['address'] = idcInf[idcid]['address']
            idcnew['name'] = idcInf[idcid]['name']
            #return idcInf[idcid]
            return idcnew
    except:
        # Deliberate best-effort: unknown tag or bad file yields None.
        pass
def _remainDays(enddate):
    """Return the day count remaining until `enddate` ('%Y-%m-%d %H:%M:%S')
    as a string (the part of the timedelta repr before 'days')."""
    nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    expire1 = datetime.datetime.strptime(enddate, "%Y-%m-%d %H:%M:%S")
    nowtime1 = datetime.datetime.strptime(nowtime, "%Y-%m-%d %H:%M:%S")
    return str(expire1 - nowtime1).split('days')[0]

def dellServerInfo(stg):
    """
    Look up Dell warranty info for service tag `stg` in the local JSON
    cache; on a cache miss, fetch it from the Dell site via getWarranty,
    append it to the cache file, and return it.  Returns a dict with
    MachineDescription/StartDate/EndDate/RemainDays/ServiceLevelDescription,
    or None on failure.
    """
    dells = {}
    getConfig()
    stg1 = stg
    with open(dellserverjson,'r') as f:
        dellInf = json.loads(f.read())
    if dellInf.has_key(stg1):
        entry = dellInf[stg1]
        dells['MachineDescription'] = entry['MachineDescription']
        dells['StartDate'] = entry['StartDate']
        dells['EndDate'] = entry['EndDate']
        dells['RemainDays'] = _remainDays(dells['EndDate'])
        dells['ServiceLevelDescription'] = entry['ServiceLevelDescription']
        return dells
    try:
        newinfos = getWarranty.getDellExpires(stg)
        dells['MachineDescription'] = newinfos['MachineDescription']
        dells['StartDate'] = newinfos['StartDate']
        dells['EndDate'] = newinfos['EndDate']
        dells['RemainDays'] = _remainDays(dells['EndDate'])
        dells['ServiceLevelDescription'] = newinfos['ServiceLevelDescription']
        bigdicts = {stg1: dells}
        getWarranty.writedict2json(bigdicts, dellserverjson)
        return dells
    except (TypeError, KeyError):
        # was `except NoneType:` -- NoneType is undefined here and would
        # itself raise NameError; best-effort lookup returns None instead.
        pass
#import getWarranty
if __name__ == '__main__':
#stg1 = 'H1LMKY1'
stg1 = 'JRQMKY1'
# stg1 = '6298JY1'
dic = cmdbServer(stg1)
#print dicts
if dic:
for k,v in dic.items():
print k,v
iddc = idcContact(stg1)
if iddc:
for k,v in iddc.items():
print k,v
dellcs = dellServerInfo(stg1)
if dellcs:
for k,v in dellcs.items():
print k,v
|
L34p/HeXA-CTF-2015 | gameboard/management/commands/gamestop.py | Python | mit | 513 | 0.013645 | from django.core.management import BaseCommand
from gameboard.models import Entries
#The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
    # Shown when the user types help for this management command.
    help = "Stop CTF! Disable all problem entries... (set is_active=False)"
    # A command must define handle()
    def handle(self, *args, **options):
        """Deactivate every problem entry to stop the CTF."""
        self.stdout.write("Stop CTF!")
        prob_entries = Entries.objects.all()
        for entry in prob_entries:
            entry.is_active = False
            # per-object save so model save() hooks/signals still fire
            entry.save()
|
NS2LPS/pyslave | test/pydata/test_datadict.py | Python | mit | 574 | 0.013937 | from pydata import Data, h5todata
import numpy as np
impo | rt os
import h5py
def test_Data(tmpdir):
    # Build a Data container with array and scalar fields, check both
    # attribute and item access, then exercise append and both save formats.
    o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
    assert o.b=='hh'
    assert o['a']==5
    o.append(np.ones(5),np.ones(5))
    o.save(os.path.join(tmpdir, 'test.txt'))
    o.save(os.path.join(tmpdir, 'test.h5'))
def test_h5todata(tmpdir):
    # Round-trip a Data object through HDF5 and reload it with h5todata.
    o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
    o.save(os.path.join(tmpdir, 'test.h5'))
    f = h5py.File(os.path.join(tmpdir, 'test.h5'),'r')
    d = h5todata(f['data0000'])
    assert d.a==5
    assert d.x[0]==1
srinathv/bokeh | bokeh/plot_object.py | Python | bsd-3-clause | 11,126 | 0.002696 | from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__file__)
from six import add_metaclass, iteritems
from .properties import Any, HasProps, List, MetaHasProps, Instance, String
from .query import find
from .exceptions import DataIntegrityException
from .util.serialization import dump, make_id
from .validation import check_integrity
class Viewable(MetaHasProps):
    """ Any plot object (Data Model) which has its own View Model in the
    persistence layer.
    One thing to keep in mind is that a Viewable should have a single
    unique representation in the persistence layer, but it might have
    multiple concurrent client-side Views looking at it.  Those may
    be from different machines altogether.
    """
    # Stores a mapping from subclass __view_model__ names to classes
    model_class_reverse_map = {}
    # Mmmm.. metaclass inheritance.  On the one hand, it seems a little
    # overkill. On the other hand, this is exactly the sort of thing
    # it's meant for.
    def __new__(cls, class_name, bases, class_dict):
        # Default the view-model name to the class name and expose get_class
        # on every subclass, then register the class in the reverse map.
        if "__view_model__" not in class_dict:
            class_dict["__view_model__"] = class_name
        class_dict["get_class"] = Viewable.get_class
        # Create the new class
        newcls = super(Viewable,cls).__new__(cls, class_name, bases, class_dict)
        entry = class_dict.get("__subtype__", class_dict["__view_model__"])
        # Add it to the reverse map, but check for duplicates first
        # (classes with __implementation__ are exempt from the check).
        if entry in Viewable.model_class_reverse_map and not hasattr(newcls, "__implementation__"):
            raise Warning("Duplicate __view_model__ or __subtype__ declaration of '%s' for " \
                          "class %s.  Previous definition: %s" % \
                          (entry, class_name,
                           Viewable.model_class_reverse_map[entry]))
        Viewable.model_class_reverse_map[entry] = newcls
        return newcls
    @classmethod
    def _preload_models(cls):
        # Import model modules for their registration side effects; the
        # trailing bare names just silence unused-import linters.
        from . import models; models
        from .crossfilter import models as crossfilter_models; crossfilter_models
        from .charts import Chart; Chart
    @classmethod
    def get_class(cls, view_model_name):
        """ Given a __view_model__ name, returns the corresponding class
        object
        """
        cls._preload_models()
        d = Viewable.model_class_reverse_map
        if view_model_name in d:
            return d[view_model_name]
        else:
            raise KeyError("View model name '%s' not found" % view_model_name)
@add_metaclass(Viewable)
class PlotObject(HasProps):
""" Base class for all plot-related objects """
session = Instance(".session.Session")
name = String()
tags = List(Any)
def __init__(self, **kwargs):
# Eventually should use our own memo instead of storing
# an attribute on the class
if "id" in kwargs:
self._id = kwargs.pop("id")
else:
self._id = make_id()
self._dirty = True
self._callbacks_dirty = False
self._callbacks = {}
self._callback_queue = []
self._block_callbacks = False
block_events = kwargs.pop('_block_events', False)
if not block_events:
super(PlotObject, self).__init__(**kwargs)
self.setup_events()
else:
self._block_callbacks = True
super(PlotObject, self).__init__(**kwargs)
@property
def ref(self):
if "__subtype__" in self.__class__.__dict__:
return {
'type': self.__view_model__,
'subtype': self.__subtype__,
'id': self._id,
}
else:
return {
'type': self.__view_model__,
'id': self._id,
}
def setup_events(self):
pass
def select(self, selector):
''' Query this object and all of its references for objects that
match the given selector.
Args:
selector (JSON-like) :
Returns:
seq[PlotObject]
'''
return find(self.references(), selector)
def select_one(self, selector):
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
PlotObject
'''
result = list(self.select(selector))
if len(result) > 1:
raise DataIntegrityException("found more than one object matching %s" % selector)
if len(result) == 0:
return None
return result[0]
def set_select(self, selector, updates):
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
@classmethod
def load_json(cls, attrs, instance=None):
"""Loads all json into a instance of cls, EXCEPT any references
which are handled in finalize
"""
if 'id' not in attrs:
raise RuntimeError("Unable to find 'id' attribute in JSON: %r" % attrs)
_id = attrs.pop('id')
if not instance:
instance = cls(id=_id, _block_events=True)
ref_props = {}
for p in instance.p | roperties_with_refs():
if p in attrs:
ref_props[p] = attrs.pop(p)
instan | ce._ref_props = ref_props
instance.update(**attrs)
return instance
def layout(self, side, plot):
try:
return self in getattr(plot, side)
except:
return []
def finalize(self, models):
"""Convert any references into instances
models is a dict of id->model mappings
"""
attrs = {}
for name, json in iteritems(getattr(self, "_ref_props", {})):
prop = self.__class__.lookup(name)
attrs[name] = prop.from_json(json, models=models)
return attrs
@classmethod
def collect_plot_objects(cls, *input_objs):
""" Iterate over ``input_objs`` and descend through their structure
collecting all nested ``PlotObjects`` on the go. The resulting list
is duplicate-free based on objects' identifiers.
"""
ids = set([])
objs = []
def descend_props(obj):
for attr in obj.properties_with_refs():
descend(getattr(obj, attr))
def descend(obj):
if isinstance(obj, PlotObject):
if obj._id not in ids:
ids.add(obj._id)
descend_props(obj)
objs.append(obj)
elif isinstance(obj, HasProps):
descend_props(obj)
elif isinstance(obj, (list, tuple)):
for item in obj:
descend(item)
elif isinstance(obj, dict):
for key, value in iteritems(obj):
descend(key); descend(value)
descend(input_objs)
return objs
def references(self):
"""Returns all ``PlotObjects`` that this object has references to. """
return set(self.collect_plot_objects(self))
#---------------------------------------------------------------------
# View Model connection methods
#
# Whereas a rich client rendering framework can maintain view state
# alongside model state, we need an explicit send/receive protocol for
# communicating with a set of view models that reside on the front end.
# Many of the calls one would expect in a rich client map instead to
# batched updates on the M-VM-V approach.
#---------------------------------------------------------------------
def vm_props(self, changed_only=True):
""" Returns the ViewModel-related proper |
samatdav/zulip | zerver/views/messages.py | Python | apache-2.0 | 50,615 | 0.003695 | from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from typing import Text
from typing import Any, AnyStr, Callable, Iterable, Optional, Tuple, Union
from zerver.lib.str_utils import force_bytes, force_text
from zerver.lib.html_diff import highlight_html_differences
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import (
generic_bulk_cached_fetch,
to_dict_cache_key_id,
)
from zerver.lib.message import (
access_message,
MessageDict,
extract_message_dict,
render_markdown,
stringify_message_dict,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Realm, RealmAlias, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, \
parse_usermessage_flags, \
email_to_domain, get_realm, get_active_streams, \
bulk_get_streams, get_user_profile_by_id
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, Select, ColumnElement, table
import re
import ujson
import datetime
from six.moves import map
import six
# Sentinel id larger than any plausible real message id; presumably used as
# an "after everything" anchor when narrowing/paginating -- confirm at call sites.
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000
class BadNarrowOperator(JsonableError):
    """Raised when a narrow term uses an operator the server does not know.

    Carries the offending operator description plus the HTTP status code
    (400 by default) used when the JSON error response is rendered.
    """

    def __init__(self, desc, status_code=400):
        # type: (str, int) -> None
        self.status_code = status_code
        self.desc = desc

    def to_json_error_msg(self):
        # type: () -> str
        message = _('Invalid narrow operator: {}')
        return message.format(self.desc)
Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
    def __init__(self, user_profile, msg_id_column):
        # type: (UserProfile, str) -> None
        """Build narrow conditions on behalf of *user_profile*.

        ``msg_id_column`` identifies the message-id column of the query
        being narrowed (declared ``str`` in the type comment above, though
        callers may pass a SQLAlchemy column -- TODO confirm at call sites).
        """
        self.user_profile = user_profile
        self.msg_id_column = msg_id_column
def add_term(self, query, term):
# type: (Query, Dict[str, Any]) -> Query
| # We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
| raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
    def by_is(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        """Narrow to private, starred, mentioned or alerted messages."""
        if operand == 'private':
            # Private messages have a recipient row whose type is PERSONAL
            # or HUDDLE, so join against zerver_recipient and test its type.
            query = query.select_from(join(query.froms[0], table("zerver_recipient"),
                                           column("recipient_id") ==
                                           literal_column("zerver_recipient.id")))
            cond = or_(column("type") == Recipient.PERSONAL,
                       column("type") == Recipient.HUDDLE)
            return query.where(maybe_negate(cond))
        elif operand == 'starred':
            # Per-user flags are stored as a bitmask on UserMessage.flags.
            cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == 'mentioned' or operand == 'alerted':
            # Both operands intentionally share the 'mentioned' flag bit.
            cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
            return query.where(maybe_negate(cond))
        raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
# type: (Text) -> Text
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
s[i] = '\0'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
    def by_stream(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        """Narrow to messages sent to the stream named by *operand*.

        Raises BadNarrowOperator when no such stream exists in the
        user's realm.
        """
        stream = get_stream(operand, self.user_profile.realm)
        if stream is None:
            raise BadNarrowOperator('unknown stream ' + operand)

        if self.user_profile.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
            # (unsocial, ununsocial, social.d, etc)
            m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
            # Since the regex has a `.+` in it and "" is invalid as a
            # stream name, this will always match
            assert(m is not None)
            base_stream_name = m.group(1)

            # Collect every active stream matching an (un)*name(.d)* variant
            # of the base name and accept any of their recipient ids.
            matching_streams = get_active_streams(self.user_profile.realm).filter(
                name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
            matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
            recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
            cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
            return query.where(maybe_negate(cond))

        # Normal realm: exactly one recipient row per stream.
        recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
        cond = column("recipient_id") == recipient.id
        return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrow |
ElDeveloper/qiita | qiita_db/support_files/patches/python_patches/75.py | Python | bsd-3-clause | 7,784 | 0 | from qiita_db.user import User
from json import loads, dumps
from qiita_core.qiita_settings import r_client
from qiita_db.sql_connection import TRN
from qiita_db.software import Software, Command
from qiita_db.exceptions import QiitaDBError, QiitaDBDuplicateError
from qiita_db.util import convert_to_id
# one of the main side issues raised by #2901 is that new users emails were not
# added to the redis database that keep track of emails and is used to
# autocomplete when sharing a study. In the next look we will find all `Users`
# in the `user` level, search them in the redis database, and keep those ones
# that are not found
# Collect every 'user'-level email that is absent from the redis
# autocomplete index (the zrangebylex probe returns an empty list for
# emails that were never added).
missing_emails = [u[0] for u in User.iter() if User(u[0]).level == 'user'
                  and not r_client.execute_command(
                      'zrangebylex', 'qiita-usernames',
                      '[%s' % u[0], u'[%s\xff' % u[0])]

# now just add them (score 0 keeps the sorted set ordered purely by member
# name, which is what zrangebylex-based autocomplete requires)
for email in missing_emails:
    r_client.zadd('qiita-usernames', {email: 0})
# adding new internal command for INSDC download
# note that create_command is a method that has been used in previous patches
def create_command(software, name, description, parameters, outputs=None,
                   analysis_only=False):
    r"""Replicates the Command.create code at the time the patch was written.

    Parameters
    ----------
    software : qiita_db.software.Software
        The software the new command belongs to
    name : str
        The command name (unique per software)
    description : str
        Human-readable description of the command
    parameters : dict of {str: [str, object]}
        ``{parameter_name: [parameter_type, default_value]}``
    outputs : dict of {str: str}, optional
        ``{output_name: artifact_type}``
    analysis_only : bool, optional
        Whether the command can only be run on analyses

    Returns
    -------
    qiita_db.software.Command
        The newly created command

    Raises
    ------
    QiitaDBError
        If no parameters are provided or a parameter entry is malformed
    QiitaDBDuplicateError
        If the software already has a command with this name

    Notes
    -----
    This copy also repairs two INSERT statements whose text had been
    corrupted by stray "|" characters.
    """
    # Perform some sanity checks in the parameters dictionary
    if not parameters:
        raise QiitaDBError(
            "Error creating command %s. At least one parameter should "
            "be provided." % name)
    sql_param_values = []
    sql_artifact_params = []
    for pname, vals in parameters.items():
        if len(vals) != 2:
            raise QiitaDBError(
                "Malformed parameters dictionary, the format should be "
                "{param_name: [parameter_type, default]}. Found: "
                "%s for parameter name %s" % (vals, pname))

        ptype, dflt = vals
        # Check that the type is one of the supported types
        supported_types = ['string', 'integer', 'float', 'reference',
                           'boolean', 'prep_template', 'analysis']
        if ptype not in supported_types and not ptype.startswith(
                ('choice', 'mchoice', 'artifact')):
            supported_types.extend(['choice', 'mchoice', 'artifact'])
            raise QiitaDBError(
                "Unsupported parameters type '%s' for parameter %s. "
                "Supported types are: %s"
                % (ptype, pname, ', '.join(supported_types)))

        if ptype.startswith(('choice', 'mchoice')) and dflt is not None:
            choices = set(loads(ptype.split(':')[1]))
            dflt_val = dflt
            if ptype.startswith('choice'):
                # In the choice case, the dflt value is a single string,
                # create a list with it the string on it to use the
                # issuperset call below
                dflt_val = [dflt_val]
            else:
                # jsonize the list to store it in the DB
                dflt = dumps(dflt)
            if not choices.issuperset(dflt_val):
                raise QiitaDBError(
                    "The default value '%s' for the parameter %s is not "
                    "listed in the available choices: %s"
                    % (dflt, pname, ', '.join(choices)))

        if ptype.startswith('artifact'):
            atypes = loads(ptype.split(':')[1])
            sql_artifact_params.append(
                [pname, 'artifact', atypes])
        else:
            if dflt is not None:
                sql_param_values.append([pname, ptype, False, dflt])
            else:
                sql_param_values.append([pname, ptype, True, None])

    with TRN:
        sql = """SELECT EXISTS(SELECT *
                               FROM qiita.software_command
                               WHERE software_id = %s AND name = %s)"""
        TRN.add(sql, [software.id, name])
        if TRN.execute_fetchlast():
            raise QiitaDBDuplicateError(
                "command", "software: %d, name: %s"
                % (software.id, name))
        # Add the command to the DB
        sql = """INSERT INTO qiita.software_command
                        (name, software_id, description, is_analysis)
                 VALUES (%s, %s, %s, %s)
                 RETURNING command_id"""
        sql_params = [name, software.id, description, analysis_only]
        TRN.add(sql, sql_params)
        c_id = TRN.execute_fetchlast()
        # Add the parameters to the DB
        sql = """INSERT INTO qiita.command_parameter
                    (command_id, parameter_name, parameter_type, required,
                     default_value)
                 VALUES (%s, %s, %s, %s, %s)
                 RETURNING command_parameter_id"""
        sql_params = [[c_id, pname, p_type, reqd, default]
                      for pname, p_type, reqd, default in sql_param_values]
        TRN.add(sql, sql_params, many=True)
        TRN.execute()
        # Add the artifact parameters
        sql_type = """INSERT INTO qiita.parameter_artifact_type
                        (command_parameter_id, artifact_type_id)
                      VALUES (%s, %s)"""
        supported_types = []
        for pname, p_type, atypes in sql_artifact_params:
            sql_params = [c_id, pname, p_type, True, None]
            TRN.add(sql, sql_params)
            pid = TRN.execute_fetchlast()
            sql_params = [[pid, convert_to_id(at, 'artifact_type')]
                          for at in atypes]
            TRN.add(sql_type, sql_params, many=True)
            supported_types.extend([atid for _, atid in sql_params])
        # If the software type is 'artifact definition', there are a couple
        # of extra steps
        if software.type == 'artifact definition':
            # If supported types is not empty, link the software with these
            # types
            if supported_types:
                sql = """INSERT INTO qiita.software_artifact_type
                            (software_id, artifact_type_id)
                         VALUES (%s, %s)"""
                sql_params = [[software.id, atid]
                              for atid in supported_types]
                TRN.add(sql, sql_params, many=True)
            # If this is the validate command, we need to add the
            # provenance and name parameters. These are used internally,
            # that's why we are adding them here
            if name == 'Validate':
                sql = """INSERT INTO qiita.command_parameter
                            (command_id, parameter_name, parameter_type,
                             required, default_value)
                         VALUES (%s, 'name', 'string', 'False',
                                 'dflt_name'),
                                (%s, 'provenance', 'string', 'False', NULL)
                         """
                TRN.add(sql, [c_id, c_id])

        # Add the outputs to the command
        if outputs:
            sql = """INSERT INTO qiita.command_output
                        (name, command_id, artifact_type_id)
                     VALUES (%s, %s, %s)"""
            sql_args = [[pname, c_id, convert_to_id(at, 'artifact_type')]
                        for pname, at in outputs.items()]
            TRN.add(sql, sql_args, many=True)
            TRN.execute()

    return Command(c_id)
# Register the new 'INSDC_download' command on the internal Qiita plugin.
with TRN:
    # Retrieve the Qiita plugin
    qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')

    # Create the INSDC download command
    parameters = {
        'download_source': ['choice:["EBI-ENA", "SRA"]', 'EBI-ENA'],
        'accession': ["string", 'None'],
    }
    create_command(qiita_plugin, "INSDC_download",
                   "Downloads an accession from a given INSDC", parameters)
|
immo/pyTOM | df/df_ontology.py | Python | gpl-3.0 | 3,602 | 0.028318 | # coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def make_keys1(s):
    """make_keys1(s) turn string s into a list of keys by magic

    Splits *s* into maximal runs of alphabetic characters and counts how
    often each run occurs.  All digits are folded into a single decimal
    number stored under the special key "#" (0 when s has no digits).
    """
    ckey = ""
    nkey = 0
    keys = {}
    # The appended "," forces the final pending run to be flushed.
    for c in s + ",":
        if c.isalpha():
            ckey += c
        else:
            if ckey:
                # dict.get replaces the Python-2-only dict.has_key call.
                keys[ckey] = keys.get(ckey, 0) + 1
                ckey = ""
            if c.isdigit():
                nkey = nkey * 10 + int(c)
    keys["#"] = nkey
    return keys
def make_keys2(s):
    """make_keys2(s) turn string s into a list of keys by magic

    Like make_keys1, but keeps digits attached to their word: *s* is split
    into maximal alphanumeric runs and each run's occurrences are counted.
    """
    ckey = ""
    keys = {}
    # The appended "," forces the final pending run to be flushed.
    for c in s + ",":
        if c.isalnum():
            ckey += c
        else:
            if ckey:
                # dict.get replaces the Python-2-only dict.has_key call.
                keys[ckey] = keys.get(ckey, 0) + 1
                ckey = ""
    return keys
def join_keys(k, l):
    """join_keys(k,l) join the keys lists k and l

    Adds the counts from *l* into *k* and returns it.  NOTE: the result
    aliases *k* (the first argument is mutated in place); callers such as
    make_keys only ever pass freshly built dicts.
    """
    j = k
    for x in l:
        # dict.get replaces the Python-2-only dict.has_key call.
        j[x] = j.get(x, 0) + l[x]
    return j
def filter_vals_with_key(v, key):
    """filter_vals_with_key(v, key) filter values from v with key

    Returns the sub-dict of *v* whose (dict-valued) entries contain *key*.
    """
    # `in` replaces the Python-2-only dict.has_key test.
    return {x: k for x, k in v.items() if key in k}
def filter_vals_with_any_key(v, keys):
    """filter_vals_with_any_key(v, keys) filter values from v with any key from keys

    Returns the sub-dict of *v* whose (dict-valued) entries contain at
    least one key from *keys*.
    """
    # any() + `in` replaces the Py2 has_key loop whose break statement had
    # also been corrupted by a stray "|" character.
    return {x: k for x, k in v.items() if any(key in k for key in keys)}
def meetleft(features, partners):
    """meetleft(features, partners) meet features with partners

    Returns the restriction of *features* to the keys present in
    *partners*.  (Also repairs the corrupted dict initializer "{ | }".)
    """
    return {x: features[x] for x in features if x in partners}
def joinleft(defaultfeatures, newfeatures):
    """joinleft(defaultfeatures, newfeatures) join features with partners

    Copies every entry of *defaultfeatures* into *newfeatures* (mutating
    and returning it); on key clashes the default value wins.
    """
    newfeatures.update(defaultfeatures)
    return newfeatures
def get_atomic_depends(vals, keys):
    """get_atomic_depends(vals, keys) get depends of vals and keys

    Starts by assuming every key depends on every key, then for each value
    intersects, for each key it contains, that key's dependency set with
    the keys co-occurring in the same value.
    """
    depends = {key: set(keys) for key in keys}
    for entry in vals:
        present = set(keys) & set(vals[entry])
        for key in present:
            depends[key] &= present
    return depends
def make_keys(s):
    """make_keys(s) make keys from s by joining both variants"""
    letter_runs = make_keys1(s)
    alnum_runs = make_keys2(s)
    return join_keys(letter_runs, alnum_runs)
def make_feature_list(vals, keys, infos):
    """make_feature_list(vals, keys, infos) turn vals and keys with infos into a feature_list structure

    Maps every key to the infos of those vals that carry the key.
    """
    flist = {}
    for key in keys:
        carriers = filter_vals_with_key(vals, key)
        flist[key] = {name: infos[name] for name in carriers if name in infos}
    return flist
|
michalbachowski/pycontentbits | test/factory_test.py | Python | bsd-3-clause | 5,366 | 0.000373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# python standard library
#
import unittest
from functools import partial
##
# test helper
#
from testutils import mock, IsA
##
# content bits modules
#
from contentbits.factory import CollectionFactory
class CollectionFactoryTestCase(unittest.TestCase):
    """Unit tests for contentbits.factory.CollectionFactory.

    The collection and item classes are replaced with MagicMock factories,
    so the tests assert how CollectionFactory drives them rather than any
    real collection behaviour.
    """

    def setUp(self):
        self.collection = mock.MagicMock()
        self.collection.return_value = self.collection
        self.collection.append = mock.MagicMock()
        # Restored from a corrupted source line that read "moc | k.MagicMock()".
        self.item = mock.MagicMock()
        self.cf = CollectionFactory(self.collection, self.item)

    def test_init_requires_no_arguments(self):
        err = False
        try:
            # Restored from a corrupted source line ("| CollectionFactory()").
            CollectionFactory()
        except TypeError:
            err = True
        self.assertFalse(err)

    def test_init_allows_2_arguments(self):
        err = False
        try:
            CollectionFactory(None)
            CollectionFactory(None, None)
        except TypeError:
            err = True
        self.assertFalse(err)

    # from dict
    def test_from_dict_expects_dict(self):
        d = mock.MagicMock()
        d.items = mock.MagicMock(return_value=[(0, 1)])
        CollectionFactory().from_dict(d)
        d.items.assert_called_once_with()

    def test_from_dict_uses_given_collection_class(self):
        self.cf.from_dict({'a': 1})
        self.collection.assert_called_once_with()
        self.collection.append.assert_called_once_with(IsA(mock.MagicMock))

    def test_from_dict_uses_given_item_class(self):
        self.cf.from_dict({'a': 1})
        self.item.assert_called_once_with(1, 'a')

    def test_from_dict_returns_collection(self):
        self.assertEqual(self.cf.from_dict({'a': 1}), self.collection)

    # from_list
    def test_from_list_expects_list(self):
        CollectionFactory().from_list([1])

    def test_from_list_uses_given_collection_class(self):
        self.cf.from_list([1])
        self.collection.assert_called_once_with()
        self.collection.append.assert_called_once_with(IsA(mock.MagicMock))

    def test_from_list_uses_given_item_class(self):
        self.cf.from_list([1])
        self.item.assert_called_once_with(1, None)

    def test_from_list_returns_collection(self):
        self.assertEqual(self.cf.from_list([1]), self.collection)

    # from_iterator
    def test_from_iterator_expects_iterator(self):
        CollectionFactory().from_iterator((i for i in [1]))

    def test_from_iterator_uses_given_collection_class(self):
        self.cf.from_iterator((i for i in [1]))
        self.collection.assert_called_once_with()
        self.collection.append.assert_called_once_with(IsA(mock.MagicMock))

    def test_from_iterator_uses_given_item_class(self):
        self.cf.from_iterator((i for i in [1]))
        self.item.assert_called_once_with(1, None)

    def test_from_iterator_returns_collection(self):
        self.assertEqual(self.cf.from_iterator([1]), self.collection)

    # from_iterator_with_tuples
    def test_from_iterator_with_tuples_expects_iterator_with_tuples(self):
        self.assertRaises(TypeError, partial(
            CollectionFactory().from_iterator_with_tuples,
            (i for i in [1])))

    def test_from_iterator_with_tuples_expects_iterator_with_tuples_1(self):
        CollectionFactory().from_iterator_with_tuples((i for i in [('a', 1)]))

    def test_from_iterator_with_tuples_uses_given_collection_class(self):
        self.cf.from_iterator_with_tuples((i for i in [('a', 1)]))
        self.collection.assert_called_once_with()
        self.collection.append.assert_called_once_with(IsA(mock.MagicMock))

    def test_from_iterator_with_tuples_uses_given_item_class(self):
        self.cf.from_iterator_with_tuples((i for i in [('a', 1)]))
        self.item.assert_called_once_with(1, 'a')

    def test_from_iterator_with_tuples_returns_collection(self):
        self.assertEqual(self.cf.from_iterator_with_tuples(
            (i for i in [('a', 1)])), self.collection)

    # discover
    def test_discover_requires_1_argument(self):
        self.assertRaises(TypeError, CollectionFactory().discover)

    def test_discover_uses_given_collection_class(self):
        self.cf.discover({'a:': 1})
        self.collection.assert_called_once_with()
        self.collection.append.assert_called_once_with(IsA(mock.MagicMock))

    def test_discover_uses_given_item_class(self):
        self.cf.discover([1])
        self.item.assert_called_once_with(1, None)

    def test_discover_returns_collection(self):
        self.assertEqual(self.cf.discover([1]), self.collection)

    def test_discover_accepts_dict(self):
        self.cf.discover({'b': 2})
        # NOTE(review): bare assert_called_once() only became a real mock
        # assertion in Python 3.6's unittest.mock; with older mock
        # libraries this line silently passes.  Confirm the mock in use.
        self.collection.assert_called_once()
        self.item.assert_called_once_with(2, 'b')

    def test_discover_accepts_list(self):
        self.cf.discover([1])
        self.collection.assert_called_once()
        self.item.assert_called_once_with(1, None)

    def test_discover_accepts_iterator(self):
        # NOTE(review): 'self.cf.foo = 1' looks like leftover debugging
        # state; nothing reads it.  Kept for fidelity -- confirm and drop.
        self.cf.foo = 1
        self.cf.discover((i for i in [1]))
        # NOTE(review): mock has no assert_called_trice(); this call is a
        # silent no-op rather than an assertion.  A call-count check was
        # probably intended.
        self.collection.assert_called_trice()
        self.item.assert_called_once_with(1, None)

    def test_discover_accepts_iterator_with_touple(self):
        self.cf.discover((i for i in [('a', 1)]))
        # NOTE(review): see above -- assert_called_trice() asserts nothing.
        self.collection.assert_called_trice()
        self.item.assert_called_once_with(1, 'a')
if "__main__" == __name__:
    # Allow running this module directly as a test script.
    unittest.main()
|
bhermanmit/openmc | openmc/mgxs/__init__.py | Python | mit | 147 | 0 | from openmc | .mgxs.groups import EnergyGroups
from openmc.mgxs.library import Library
from openmc.mgxs.mgxs import *
from openmc.mgxs.mdgxs imp | ort *
|
opencorato/sayit | speeches/utils/base32.py | Python | agpl-3.0 | 778 | 0 | digits = "0123456789abcdefghjkmnpqrstvwxyz"
class MistypedIDException(Exception):
pass
def int_to_base32(i):
"""Converts | an integer to a base32 string"""
enc = ''
while i >= 32:
i, mod = divmod(i, 32)
enc = digits[mod] + enc
enc = digits[i] + enc
return enc
def base32_to_int(s):
"""Convert a base 32 string to an integer"""
mistyped = False
if s.find('o') > -1 or s.find('i') > -1 or s.find('l') > -1:
s = s.replace('o', '0').replace('i', '1').replace('l', '1')
mistyped = True
decoded = 0
multi = 1
while len(s) > 0:
decoded += multi * digits.ind | ex(s[-1:])
multi = multi * 32
s = s[:-1]
if mistyped:
raise MistypedIDException(decoded)
return decoded
|
vericoin/vericoin-core | contrib/bitrpc/bitrpc.py | Python | mit | 9,384 | 0.002984 | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except Exception as inst:
print inst
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except Exception as inst:
print inst
elif cmd == "getaccount":
try:
addr = raw_input("Enter a VeriCoin address: ")
print access.getaccount(addr)
except Exception as inst:
print inst
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except Exception as inst:
print inst
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except Exception as inst:
print inst
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except Exception as inst:
print inst
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except Exception as inst:
print inst
elif cmd == "getblockcount":
try:
print access.getblockcount()
except Exception as inst:
print inst
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except Exception as inst:
print inst
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except Exception as inst:
print inst
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except Exception as inst:
print inst
elif cmd == "getgenerate":
try:
print access.getgenerate()
except Exception as inst:
print inst
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except Exception as inst:
print inst
elif cmd == "getinfo":
try:
print access.getinfo()
except Exception as inst:
print inst
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except Exception as inst:
print inst
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc) |
except:
print access.getreceivedbyaccount()
except Exception as inst:
print inst
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a VeriCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print | access.getreceivedbyaddress()
except Exception as inst:
print inst
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except Exception as inst:
print inst
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except Exception as inst:
print inst
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except Exception as inst:
print inst
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except Exception as inst:
print inst
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except Exception as inst:
print inst
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except Exception as inst:
print inst
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except Exception as inst:
print inst
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except Exception as inst:
print inst
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except Exception as inst:
print inst
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except Exception as inst:
print inst
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except Exception as inst:
print inst
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except Exception as inst:
print inst
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print acce |
amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_lsn_alg_ftp_stats.py | Python | apache-2.0 | 2,390 | 0.008787 | from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """Counter container for FTP ALG statistics.

    This class does not support CRUD operations; use the parent object.

    Counter attributes (8-byte counters):
        port_requests -- PORT Requests From Client (oid 1)
        eprt_requests -- EPRT Requests From Client (oid 2)
        lprt_requests -- LPRT Requests From Client (oid 3)
        pasv_replies  -- PASV Replies From Server (oid 4)
        epsv_replies  -- EPSV Replies From Server (oid 5)
        lpsv_replies  -- LPSV Replies From Server (oid 6)

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Initialize every counter attribute to the empty-string default.
        for counter in ("lpsv_replies", "port_requests", "epsv_replies",
                        "pasv_replies", "lprt_requests", "eprt_requests"):
            setattr(self, counter, "")

        # Any supplied keyword argument becomes an instance attribute.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Ftp(A10BaseClass):
    """Statistics endpoint for the object ftp.

    Class ftp supports CRUD operations and inherits from
    `common/A10BaseClass`.  This is the parent class for this module.

    URL for this object:
    `https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/alg/ftp/stats`.

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "ftp"
        self.a10_url = "/axapi/v3/cgnv6/lsn/alg/ftp/stats"
        self.DeviceProxy = ""
        self.stats = {}

        # Any supplied keyword argument becomes an instance attribute.
        for name, value in kwargs.items():
            setattr(self, name, value)
|
Kalvar/python-GreyTheory | greytheory/packages/models/grey_class.py | Python | mit | 2,945 | 0.008829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
import numpy as np
from ..libs.grey_lib import GreyLib
from ..libs.grey_math import GreyMath
from .grey_factory import GreyFactory
from .grey_forecast import GreyForecast
class GreyClass (object):
    """Base class shared by the grey-model classes.

    Holds the raw input patterns and their keys, the analysis artifacts
    (GreyFactory / GreyForecast results), and console-printing helpers.
    """

    # Tags distinguishing forecast entries in analyzed_results.
    _TAG_FORECAST_NEXT_MOMENT = "forecasted_next_moment"
    _TAG_FORECAST_HISTORY = "history"

    def __init__(self):
        self.tag = self.__class__.__name__
        self.patterns = []            # raw input series
        self.keys = []                # one key per stored pattern
        self.analyzed_results = []    # GreyFactory/GreyForecast objects
        self.influence_degrees = []   # ordered parameter keys
        self.grey_lib = GreyLib()
        self.grey_math = GreyMath()

    # Those outputs are the results of all patterns.
    def _add_outputs(self, outputs, pattern_key):
        # Outputs are pushed to the FRONT of the pattern list.
        self.patterns.insert(0, outputs)
        self.keys.append(pattern_key)

    # Those patterns are using in AGO generator.
    def _add_patterns(self, patterns, pattern_key):
        self.patterns.append(patterns)
        self.keys.append(pattern_key)

    def ago(self, patterns):
        # Accumulated Generating Operation, delegated to GreyLib.
        return self.grey_lib.ago(patterns)

    def remove_all_analysis(self):
        """Reset all analysis artifacts to empty lists."""
        # Deeply removing without others copied array.
        self.analyzed_results = []
        self.influence_degrees = []
        # NOTE(review): self.forecasts is reset here but never created in
        # __init__ -- confirm which subclass or method defines it.
        self.forecasts = []
        # Removing all reference links with others array.
        #del self.analyzed_results
        #del self.influence_degrees
        #del self.forecasts

    def print_self(self):
        print("%r" % self.__class__.__name__)

    def print_analyzed_results(self):
        """Print one line per analyzed GreyFactory result."""
        self.print_self()
        for factory in self.analyzed_results:
            print("Pattern key: %r, grey value: %r, ranking: %r" % (factory.name, factory.equation_value, factory.ranking))

    def print_influence_degrees(self):
        """Print the parameter keys ordered by influence degree."""
        self.print_self()
        string = " > ".join(self.influence_degrees)
        print("The keys of parameters their influence degrees (ordering): %r" % string)

    def print_forecasted_results(self):
        """Print history entries with error rates, then the forecasted
        next-moment value and the average error rate."""
        self.print_self()
        for forecast in self.analyzed_results:
            print("K = %r" % forecast.k)
            if forecast.tag == self._TAG_FORECAST_HISTORY:
                # History.
                print("From original value %r to forecasted value is %r" % (forecast.original_value, forecast.forecast_value))
                print("The error rate is %r" % forecast.error_rate)
            else:
                # Next moments.
                # NOTE(review): "Forcated" is a typo in user-facing output;
                # left untouched here since it is a runtime string.
                print("Forcated next moment value is %r" % forecast.forecast_value)

        # Last forecasted moment.
        last_moment = self.analyzed_results[-1]
        print("The average error rate %r" % last_moment.average_error_rate)

    def deepcopy(self):
        """Return a deep copy of this model instance."""
        return copy.deepcopy(self)

    @property
    def alpha(self):
        # Alpha is owned by the GreyLib helper, not stored locally.
        return self.grey_lib.alpha

    @alpha.setter
    def alpha(self, value = 0.5):
        self.grey_lib.alpha = value
pqpo/appiumn_auto_re-develope | common/basePickle.py | Python | apache-2.0 | 885 | 0.00339 | __author__ = "shikun"
import pickle
import os
from common import operateFile
from common.variable import Constants
def write_pickle(dict_data, path="data.pickle"):
    """Append *dict_data* to the list pickled at *path* (protocol 0),
    starting a fresh single-element list when the file is empty or
    missing."""
    existing = read_pickle(path)
    if existing:
        existing.append(dict_data)
        result = existing
    else:
        result = [dict_data]
    with open(path, 'wb') as handle:
        pickle.dump(result, handle, 0)
def read_pickle(path):
    """Load and return the pickled payload stored at *path*.

    Returns an empty dict when the file is missing or empty/truncated;
    otherwise returns the unpickled object (in this module: the list
    written by ``write_pickle``).  NOTE(review): the empty-case type ({})
    differs from the normal case (list); callers rely only on truthiness,
    but confirm before extending.
    """
    pickle_data = {}
    # OperateFile.check_file presumably tests for file existence -- confirm.
    if operateFile.OperateFile(path).check_file():
        with open(path, 'rb') as f:
            try:
                pickle_data = pickle.load(f)
            except EOFError:
                # Empty or truncated file: fall through with the default {}.
                pass
    return pickle_data
if __name__ == "__main__":
    # Manual smoke test: append one record to the crash-log pickle and
    # read it back.
    data = {"log":"132"}
    write_pickle(data, path=Constants.CRASH_LOG_PATH)
    read_pickle(path=Constants.CRASH_LOG_PATH)
    # operateFile.OperateFile(PATH("data.pickle")).remove_file()
|
googleapis/python-bigquery-storage | samples/generated_samples/bigquerystorage_v1beta2_generated_big_query_write_append_rows_sync.py | Python | apache-2.0 | 1,884 | 0.000531 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for AppendRows
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-storage
# [START bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync]
from google.cloud import bigquery_st | orage_v1beta2
def sample_append_rows():
    """Demonstrate BigQueryWrite.AppendRows by streaming a single request."""
    # Create a client
    client = bigquery_storage_v1beta2.BigQueryWriteClient()
    # Initialize request argument(s)
    request = bigquery_storage_v1beta2.AppendRowsRequest(
        write_stream="write_stream_value",
    )
    # This method expects an iterator which contains
    # 'bigquery_storage_v1beta2.AppendRowsRequest' objects
    # Here we create a generator that yields a single `request` for
    # demonstrative purposes.
    requests = [request]

    def request_generator():
        for request in requests:
            yield request

    # Make the request
    stream = client.append_rows(requests=request_generator())
    # Handle the response
    for response in stream:
        print(response)
# [END bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync]
|
heuer/segno | tests/test_png_plte.py | Python | bsd-3-clause | 1,978 | 0.002528 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2022 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Tests if the PNG serializer does not add more colors than needed.
See also issue <https://github.com/heuer/segno/issues/62>
"""
from __future__ import unicode_literals, absolute_import
import io
import pytest
import segno
def test_plte():
    """A redundant version_dark must not add extra PNG palette entries."""
    code = segno.make_qr('test')
    # Versions below 7 have no version information modules, so the extra
    # color can never be used.
    assert code.version < 7
    out_extra, out_plain = io.BytesIO(), io.BytesIO()
    code.save(out_extra, kind='png', dark='red', finder_dark='red', version_dark='green')
    code.save(out_plain, kind='png', dark='red')
    assert out_extra.getvalue() == out_plain.getvalue()
def test_plte2():
    """Saving with and without an unused version_dark yields identical PNGs."""
    qr_code = segno.make_qr('test')
    assert qr_code.version < 7
    with_version_color = io.BytesIO()
    without_version_color = io.BytesIO()
    qr_code.save(with_version_color, kind='png', dark='red', finder_dark='red', version_dark='green')
    qr_code.save(without_version_color, kind='png', dark='red')
    assert with_version_color.getvalue() == without_version_color.getvalue()
def test_plte3():
    """Palette stays minimal when version_dark cannot apply (version < 7)."""
    symbol = segno.make_qr('test')
    assert symbol.version < 7
    first = io.BytesIO()
    second = io.BytesIO()
    symbol.save(first, kind='png', dark='red', finder_dark='red', version_dark='green')
    symbol.save(second, kind='png', dark='red')
    assert first.getvalue() == second.getvalue()
def test_plte_micro():
    """Micro QR codes have no alignment patterns; alignment_dark is a no-op."""
    micro = segno.make_micro('RAIN')
    out_extra, out_plain = io.BytesIO(), io.BytesIO()
    micro.save(out_extra, kind='png', dark='red', finder_dark='red', alignment_dark='green')
    micro.save(out_plain, kind='png', dark='red')
    assert out_extra.getvalue() == out_plain.getvalue()
def test_plte_micro2():
    """Micro QR codes have no dark module; dark_module must not add a color."""
    micro = segno.make_micro('RAIN')
    with_dark_module = io.BytesIO()
    without_dark_module = io.BytesIO()
    micro.save(with_dark_module, kind='png', dark='red', finder_dark='red', dark_module='green')
    micro.save(without_dark_module, kind='png', dark='red')
    assert with_dark_module.getvalue() == without_dark_module.getvalue()
if __name__ == '__main__':
    # Allow running this test module directly without invoking pytest.
    pytest.main([__file__])
|
itbabu/saleor | saleor/dashboard/product/views.py | Python | bsd-3-clause | 18,025 | 0 | from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.utils.translation import pgettext_lazy
from django.views.decorators.http import require_http_methods
from . import forms
from ...core.utils import get_paginator_items
from ...product.models import (Product, ProductAttribute, ProductClass,
ProductImage, ProductVariant, Stock,
StockLocation)
from ..views import staff_member_required
@staff_member_required
def product_class_list(request):
    """List all product classes; a valid POST redirects to the add form."""
    classes = ProductClass.objects.all().prefetch_related(
        'product_attributes', 'variant_attributes')
    form = forms.ProductClassForm(request.POST or None)
    if form.is_valid():
        return redirect('dashboard:product-class-add')
    classes = get_paginator_items(classes, 30, request.GET.get('page'))
    # Flatten each class into a tuple, presumably so the template does not
    # trigger extra queries per row -- confirm against the template.
    classes.object_list = [
        (pc.pk, pc.name, pc.has_variants, pc.product_attributes.all(),
         pc.variant_attributes.all())
        for pc in classes.object_list]
    ctx = {'form': form, 'product_classes': classes}
    return TemplateResponse(request, 'dashboard/product/class_list.html', ctx)
@staff_member_required
def product_class_create(request):
    """Create a new product class and redirect to the list on success."""
    product_class = ProductClass()
    form = forms.ProductClassForm(request.POST or None,
                                  instance=product_class)
    if form.is_valid():
        product_class = form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Added product type %s') % product_class
        messages.success(request, msg)
        return redirect('dashboard:product-class-list')
    ctx = {'form': form, 'product_class': product_class}
    return TemplateResponse(
        request, 'dashboard/product/product_class_form.html', ctx)
@staff_member_required
def product_class_edit(request, pk):
    """Edit an existing product class; redirect back to its edit page on save."""
    product_class = get_object_or_404(
        ProductClass, pk=pk)
    form = forms.ProductClassForm(request.POST or None,
                                  instance=product_class)
    if form.is_valid():
        product_class = form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Updated product type %s') % product_class
        messages.success(request, msg)
        return redirect('dashboard:product-class-update', pk=pk)
    ctx = {'form': form, 'product_class': product_class}
    return TemplateResponse(
        request, 'dashboard/product/product_class_form.html', ctx)
@staff_member_required
def product_class_delete(request, pk):
    """Delete a product class after confirmation (GET shows a modal)."""
    product_class = get_object_or_404(ProductClass, pk=pk)
    # Snapshot product names for the confirmation dialog before deletion.
    products = [str(p) for p in product_class.products.all()]
    if request.method == 'POST':
        product_class.delete()
        messages.success(
            request,
            pgettext_lazy(
                'Dashboard message',
                'Deleted product type %s') % product_class)
        return redirect('dashboard:product-class-list')
    return TemplateResponse(
        request,
        'dashboard/product/modal_product_class_confirm_delete.html',
        {'product_class': product_class, 'products': products})
@staff_member_required
def product_list(request):
    """List products; a valid class selection redirects to the add form."""
    products = Product.objects.prefetch_related('images')
    product_classes = ProductClass.objects.all()
    form = forms.ProductClassSelectorForm(
        request.POST or None, product_classes=product_classes)
    if form.is_valid():
        return redirect('dashboard:product-add',
                        class_pk=form.cleaned_data['product_cls'])
    products = get_paginator_items(products, 30, request.GET.get('page'))
    ctx = {'form': form, 'products': products,
           'product_classes': product_classes}
    return TemplateResponse(request, 'dashboard/product/list.html', ctx)
@staff_member_required
def product_create(request, class_pk):
    """Create a product of the given class.

    For classes without variants a single implicit variant is created
    alongside the product, driven by a second (prefixed) form.
    """
    product_class = get_object_or_404(ProductClass, pk=class_pk)
    create_variant = not product_class.has_variants
    product = Product()
    product.product_class = product_class
    product_form = forms.ProductForm(request.POST or None, instance=product)
    if create_variant:
        variant = ProductVariant(product=product)
        variant_form = forms.ProductVariantForm(request.POST or None,
                                                instance=variant,
                                                prefix='variant')
        variant_errors = not variant_form.is_valid()
    else:
        variant_form = None
        variant_errors = False
    if product_form.is_valid() and not variant_errors:
        product = product_form.save()
        if create_variant:
            # Re-attach the saved product so the variant FK points at a
            # persisted row before saving.
            variant.product = product
            variant_form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Added product %s') % product
        messages.success(request, msg)
        return redirect('dashboard:product-update',
                        pk=product.pk)
    ctx = {'product_form': product_form, 'variant_form': variant_form,
           'product': product}
    return TemplateResponse(
        request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_edit(request, pk):
    """Edit a product, its implicit variant (when the class has no variants),
    and expose its images, variants and stock to the template."""
    product = get_object_or_404(
        Product.objects.prefetch_related(
            'images', 'variants'), pk=pk)
    edit_variant = not product.product_class.has_variants
    attributes = product.product_class.variant_attributes.prefetch_related(
        'values')
    images = product.images.all()
    variants = product.variants.all()
    stock_items = Stock.objects.filter(
        variant__in=variants).select_related('variant', 'location')
    form = forms.ProductForm(request.POST or None, instance=product)
    variants_delete_form = forms.VariantBulkDeleteForm()
    stock_delete_form = forms.StockBulkDeleteForm()
    if edit_variant:
        # Classes without variants carry exactly one implicit variant.
        variant = variants.first()
        variant_form = forms.ProductVariantForm(
            request.POST or None, instance=variant, prefix='variant')
        variant_errors = not variant_form.is_valid()
    else:
        variant_form = None
        variant_errors = False
    if form.is_valid() and not variant_errors:
        product = form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Updated product %s') % product
        messages.success(request, msg)
        return redirect('dashboard:product-update', pk=product.pk)
    ctx = {'attributes': attributes, 'images': images, 'product_form': form,
           'product': product, 'stock_delete_form': stock_delete_form,
           'stock_items': stock_items, 'variants': variants,
           'variants_delete_form': variants_delete_form,
           'variant_form': variant_form}
    return TemplateResponse(
        request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_delete(request, pk):
    """Delete a product after confirmation (GET shows a modal)."""
    product = get_object_or_404(Product, pk=pk)
    if request.method == 'POST':
        product.delete()
        messages.success(
            request,
            pgettext_lazy('Dashboard message', 'Deleted product %s') % product)
        return redirect('dashboard:product-list')
    return TemplateResponse(
        request, 'dashboard/product/modal_product_confirm_delete.html',
        {'product': product})
@staff_member_required
def stock_edit(request, product_pk, stock_pk=None):
product = get_object_or_404(Product, pk=product_pk)
if stock_pk:
stock = get_object_or_404(Stock, pk=stock_pk)
else:
stock = Stock()
form = forms.StockForm(request.POST or None, instance=stock,
product=product)
if form.is_valid():
form.save()
messages.success(
request, pgettext_lazy('Dashboard message', 'Saved stock'))
product_url = reverse(
'dashboard:product-update', kwargs={'pk': product_pk})
success_url = request.POST.get('success_url', product_url)
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'form': |
wateraccounting/SEBAL | interpolation/QGIS_.py | Python | apache-2.0 | 3,702 | 0.012426 | #!/usr/bin/env python
import os
import numpy as np
from scipy import interpolate
from osgeo import gdal, osr
#functions
def GetGeoInfo(filename, Subdataset = 0):
    """Gives geo-information derived from a georeferenced map

    filename: file to be scrutinized
    Subdataset: layer to be used in case of HDF4/netCDF format

    Returns (driver, no-data value, x size, y size, geotransform,
    projection) for the opened raster.
    """
    SourceDS = gdal.Open(filename, gdal.GA_ReadOnly)
    Type = SourceDS.GetDriver().ShortName
    if Type == 'HDF4' or Type == 'netCDF':
        # Container formats: re-open the requested subdataset.
        SourceDS = gdal.Open(SourceDS.GetSubDatasets()[Subdataset][0])
    NDV = SourceDS.GetRasterBand(1).GetNoDataValue()
    xsize = SourceDS.RasterXSize
    ysize = SourceDS.RasterYSize
    GeoT = SourceDS.GetGeoTransform()
    Projection = osr.SpatialReference()
    Projection.ImportFromWkt(SourceDS.GetProjectionRef())
    #DataType = SourceDS.GetRasterBand(1).DataType
    #DataType = gdal.GetDataTypeName(DataType)
    driver = gdal.GetDriverByName(Type)
    return driver, NDV, xsize, ysize, GeoT, Projection#, DataType
def OpenAsArray(filename, Bandnumber = 1, dtype = 'float32', nan_values = False):
    """Function opens a geo-map as an numpy array.

    filename: map to be opened
    Bandnumber: band to open as array (or layer in case of HDF4)
    dtype: datatype of output array
    nan_values: if True, the NDV values in the output array are substituted
    with Numpy NaN values. NOTE: dtype has to be float in that case.

    Missing/invalid cells are then filled by nearest-neighbour interpolation.
    NOTE(review): for driver types other than GTiff/HDF4, ``Subdataset`` and
    ``NDV`` are never assigned and this raises NameError -- confirm intended
    input formats.
    """
    datatypes = {"uint8": np.uint8, "int8": np.int8, "uint16": np.uint16, "int16": np.int16, "Int16": np.int16, "uint32": np.uint32,
    "int32": np.int32, "float32": np.float32, "float64": np.float64, "complex64": np.complex64, "complex128": np.complex128,
    "Int32": np.int32, "Float32": np.float32, "Float64": np.float64, "Complex64": np.complex64, "Complex128": np.complex128,}
    DataSet = gdal.Open(filename, gdal.GA_ReadOnly)
    Type = DataSet.GetDriver().ShortName
    if Type == 'GTiff':
        Subdataset = DataSet.GetRasterBand(Bandnumber)
        NDV = Subdataset.GetNoDataValue()
    if Type == 'HDF4':
        Subdataset = gdal.Open(DataSet.GetSubDatasets()[Bandnumber][0])
        NDV = int(Subdataset.GetMetadata()['_FillValue'])
    Array = Subdataset.ReadAsArray().astype(datatypes[dtype])
    if nan_values:
        Array[Array == NDV] = np.nan
    # missing value filling via nearest-neighbour interpolation
    x = np.arange(0, Array.shape[1])
    y = np.arange(0, Array.shape[0])
    # mask invalid values (NaN) so only observed cells feed the interpolator
    Array = np.ma.masked_invalid(Array)
    xx, yy = np.meshgrid(x, y)
    # get only the valid values
    x1 = xx[~Array.mask]
    y1 = yy[~Array.mask]
    newarr = Array[~Array.mask]
    Array = interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='nearest')
    return Array
def CreateGeoTiff(Name, Array, driver, NDV, xsize, ysize, GeoT, Projection, output_folder, subdir):
    """Function creates a geotiff file from a Numpy Array.

    Name: output file name (without extension)
    Array: numpy array to save; NaNs are replaced by the no-data value.
    driver/NDV/xsize/ysize/GeoT/Projection: as returned by GetGeoInfo.
    output_folder, subdir: destination directory parts (created on demand).
    """
    datatypes = {"uint8": 1, "int8": 1, "uint16": 2, "int16": 3, "Int16": 3, "uint32": 4,
    "int32": 5, "float32": 6, "float64": 7, "complex64": 10, "complex128": 11,
    "Int32": 5, "Float32": 6, "Float64": 7, "Complex64": 10, "Complex128": 11,}
    file_name = output_folder + subdir + Name + '.tif'
    if not os.path.isdir(output_folder + subdir):
        os.makedirs(output_folder + subdir)
    DataSet = driver.Create(file_name, xsize, ysize, 1, datatypes[Array.dtype.name])
    if NDV is None:
        # Fall back to the conventional -9999 sentinel when the source map
        # declared no no-data value.
        NDV = -9999
    Array[np.isnan(Array)] = NDV
    DataSet.GetRasterBand(1).SetNoDataValue(NDV)
    DataSet.SetGeoTransform(GeoT)
    DataSet.SetProjection(Projection.ExportToWkt())
    DataSet.GetRasterBand(1).WriteArray(Array)
    # Closing the dataset flushes it to disk.
    DataSet = None
deepaklukose/grpc | tools/run_tests/artifacts/package_targets.py | Python | apache-2.0 | 4,688 | 0.000213 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build distribution packages."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
                          dockerfile_dir,
                          shell_command,
                          environ=None,
                          flake_retries=0,
                          timeout_retries=0):
    """Creates jobspec for a task running under docker.

    Args:
      name: short job name used for reporting.
      dockerfile_dir: directory with the Dockerfile to build/run.
      shell_command: command executed inside the container.
      environ: optional mapping of extra environment variables.
      flake_retries/timeout_retries: retry budgets passed to JobSpec.
    """
    # Avoid the mutable-default-argument pitfall; copy so the caller's
    # dict is never modified (matches the previous environ.copy()).
    environ = dict(environ) if environ else {}
    environ['RUN_COMMAND'] = shell_command

    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
        'OUTPUT_DIR': 'artifacts'
    }
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        environ=docker_env,
        shortname='build_package.%s' % (name),
        timeout_seconds=30 * 60,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries)
    return jobspec
def create_jobspec(name,
                   cmdline,
                   environ=None,
                   cwd=None,
                   shell=False,
                   flake_retries=0,
                   timeout_retries=0):
    """Creates jobspec.

    Builds a plain (non-docker) JobSpec named ``build_package.<name>``
    with a 10 minute timeout.
    """
    jobspec = jobset.JobSpec(
        cmdline=cmdline,
        environ=environ,
        cwd=cwd,
        shortname='build_package.%s' % (name),
        timeout_seconds=10 * 60,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        shell=shell)
    return jobspec
class CSharpPackage:
    """Builds C# nuget packages."""

    def __init__(self, linux=False):
        # linux selects the dotnet-cli docker build; otherwise a Windows
        # batch build is used.
        self.linux = linux
        self.labels = ['package', 'csharp']
        if linux:
            self.name = 'csharp_package_dotnetcli_linux'
            self.labels += ['linux']
        else:
            self.name = 'csharp_package_dotnetcli_windows'
            self.labels += ['windows']

    def pre_build_jobspecs(self):
        # No pre-build steps needed for C#.
        return []

    def build_jobspec(self):
        if self.linux:
            return create_docker_jobspec(
                self.name, 'tools/dockerfile/test/csharp_jessie_x64',
                'src/csharp/build_packages_dotnetcli.sh')
        else:
            return create_jobspec(
                self.name, ['build_packages_dotnetcli.bat'],
                cwd='src\\csharp',
                shell=True)

    def __str__(self):
        return self.name
class RubyPackage:
    """Collects ruby gems created in the artifact phase"""

    def __init__(self):
        self.name = 'ruby_package'
        self.labels = ['package', 'ruby', 'linux']

    def pre_build_jobspecs(self):
        # No pre-build steps needed for Ruby.
        return []

    def build_jobspec(self):
        return create_docker_jobspec(
            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
            'tools/run_tests/artifacts/build_package_ruby.sh')
class PythonPackage:
    """Collects python eggs and wheels created in the artifact phase"""

    def __init__(self):
        self.name = 'python_package'
        self.labels = ['package', 'python', 'linux']

    def pre_build_jobspecs(self):
        # No pre-build steps needed for Python.
        return []

    def build_jobspec(self):
        return create_docker_jobspec(
            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
            'tools/run_tests/artifacts/build_package_python.sh')
class PHPPackage:
    """Copy PHP PECL package artifact"""

    def __init__(self):
        self.name = 'php_package'
        self.labels = ['package', 'php', 'linux']

    def pre_build_jobspecs(self):
        # No pre-build steps needed for PHP.
        return []

    def build_jobspec(self):
        return create_docker_jobspec(
            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
            'tools/run_tests/artifacts/build_package_php.sh')
def targets():
    """Return the list of supported package-build targets."""
    csharp_targets = [CSharpPackage(linux=False), CSharpPackage(linux=True)]
    other_targets = [RubyPackage(), PythonPackage(), PHPPackage()]
    return csharp_targets + other_targets
|
lightbase/LBSociam | lbsociam/model/crimes.py | Python | gpl-2.0 | 15,820 | 0.001517 | #!/usr/env python
# -*- coding: utf-8 -*-
__author__ = 'eduardo'
import logging
import datetime
import requests
import json
from requests.exceptions import HTTPError
from lbsociam import LBSociam
from liblightbase import lbrest
from liblightbase.lbbase.struct import Base, BaseMetadata
from liblightbase.lbbase.lbstruct.group import *
from liblightbase.lbbase.lbstruct.field import *
from liblightbase.lbbase.content import Content
from liblightbase.lbutils import conv
from liblightbase.lbsearch.search import *
from pyramid.response import Response
log = logging.getLogger()
class CrimesBase(LBSociam):
"""
Criminal data base
"""
def __init__(self):
    """
    Construct for social networks data

    Sets up the REST clients (base-level and document-level) pointing at
    the configured lbgenerator endpoint.
    """
    LBSociam.__init__(self)
    self.baserest = lbrest.BaseREST(
        rest_url=self.lbgenerator_rest_url,
        response_object=True
    )
    self.documentrest = lbrest.DocumentREST(
        rest_url=self.lbgenerator_rest_url,
        base=self.lbbase,
        response_object=False
    )
@property
def lbbase(self):
    """
    Generate LB Base object

    Declares every field of the 'crime' base (category name, tokens,
    totals, images, etc.) and wraps them in a Base with Elasticsearch
    index export enabled.
    """
    category_name = Field(**dict(
        name='category_name',
        description='category name',
        alias='category_name',
        datatype='Text',
        indices=['Ordenado', 'Unico'],
        multivalued=False,
        required=True
    ))
    category_pretty_name = Field(**dict(
        name='category_pretty_name',
        description='category pretty name',
        alias='category_pretty_name',
        datatype='Text',
        indices=['Ordenado'],
        multivalued=False,
        required=True
    ))
    description = Field(**dict(
        name='description',
        description='category description',
        alias='description',
        datatype='Text',
        indices=[],
        multivalued=False,
        required=False
    ))
    tokens = Field(**dict(
        name='tokens',
        description='Identified tokens for this category',
        alias='tokens',
        datatype='Text',
        indices=['Ordenado'],
        multivalued=True,
        required=False
    ))
    total = Field(**dict(
        name='total',
        description='Total criminal events',
        alias='total',
        datatype='Integer',
        indices=['Ordenado'],
        multivalued=False,
        required=False
    ))
    date = Field(**dict(
        name='date',
        description='Taxonomy last update',
        alias='date',
        datatype='DateTime',
        indices=['Ordenado'],
        multivalued=False,
        required=True
    ))
    images = Field(**dict(
        name='images',
        description='Taxonomy related images',
        alias='images',
        datatype='File',
        indices=[],
        multivalued=True,
        required=False
    ))
    default_token = Field(**dict(
        name='default_token',
        description='Default token for this category',
        alias='default_token',
        datatype='Text',
        indices=['Ordenado'],
        multivalued=False,
        required=False
    ))
    color = Field(**dict(
        name='color',
        description='Color to be shown on interface',
        alias='Color',
        datatype='Text',
        indices=[],
        multivalued=False,
        required=False
    ))
    # NOTE(review): hard-coded base password; consider moving to config.
    base_metadata = BaseMetadata(**dict(
        name='crime',
        description='Criminal data from social networks',
        password='123456',
        idx_exp=True,
        idx_exp_url=self.es_url + '/crime',
        idx_exp_time=300,
        file_ext=True,
        file_ext_time=300,
        color='#FFFFFF'
    ))
    content_list = Content()
    content_list.append(total)
    content_list.append(category_name)
    content_list.append(category_pretty_name)
    content_list.append(description)
    content_list.append(tokens)
    content_list.append(date)
    content_list.append(images)
    content_list.append(default_token)
    content_list.append(color)
    lbbase = Base(
        metadata=base_metadata,
        content=content_list
    )
    return lbbase
@property
def metaclass(self):
    """
    Return the metaclass for this base.
    """
    return self.lbbase.metaclass()
def create_base(self):
    """
    Create the crime base on Lightbase.

    :return: the LB Base object on HTTP 200, otherwise None
    """
    lbbase = self.lbbase
    response = self.baserest.create(lbbase)
    #print(response.crimes_code)
    if response.status_code == 200:
        return lbbase
    else:
        return None
def remove_base(self):
    """
    Remove this base from Lightbase.

    :return: True on HTTP 200; raises IOError otherwise
    """
    response = self.baserest.delete(self.lbbase)
    if response.status_code == 200:
        return True
    else:
        raise IOError('Error excluding base from LB')
def update_base(self):
    """
    Update the base structure on Lightbase.

    :return: True on HTTP 200; raises IOError otherwise
    """
    response = self.baserest.update(self.lbbase)
    if response.status_code == 200:
        return True
    else:
        raise IOError('Error updating LB Base structure')
def list(self):
    """
    List all documents in base

    Queries the REST endpoint with no limit, ordered by id_doc, and
    returns the decoded JSON payload.
    """
    orderby = OrderBy(['id_doc'])
    search = Search(
        limit=None,
        order_by=orderby
    )
    params = {
        '$$': search._asjson()
    }
    url = self.lbgenerator_rest_url + '/' + self.lbbase.metadata.name + '/doc'
    result = requests.get(
        url=url,
        params=params
    )
    results = result.json()
    return results
def get_document(self, id_doc):
    """
    Get document by ID on base

    :param id_doc: document identifier
    :return: decoded JSON document, or None on any HTTP status > 300
    """
    url = self.lbgenerator_rest_url + '/' + self.lbbase.metadata.name + '/doc/' + str(id_doc)
    response = requests.get(url)
    if response.status_code > 300:
        return None
    return response.json()
def update_document(self, id_doc, new_document):
    """
    Update document

    :param id_doc: Document ID
    :param new_document: JSON-serializable replacement document
    :return: result of the document REST update call
    """
    document = json.dumps(new_document)
    #print(document)
    return self.documentrest.update(id=id_doc, document=document)
def upload_file(self, fileobj):
    """
    Upload file on LB

    :param fileobj: object exposing ``.file`` (e.g. a cgi/webob upload);
        its content is posted to the base's file endpoint.
    :return: the raw requests.Response
    """
    url = self.lbgenerator_rest_url + "/" + self.lbbase._metadata.name + "/file"
    result = requests.post(
        url=url,
        files={
            'file': fileobj.file.read()
        }
    )
    return result
def update_file_document(self, id_doc, file_dict):
    """
    Insert file metadata into a document's ``images`` attribute.

    :param id_doc: target document ID (string)
    :param file_dict: file attributes to append, sent JSON-encoded
    :return: the raw requests.Response
    """
    url = self.lbgenerator_rest_url + "/" + self.lbbase._metadata.name + \
        "/doc/" + id_doc + '/images'
    log.debug("URL para insercao dos atributos da imagem %s", url)
    log.debug(file_dict)
    result = requests.post(
        url=url,
        data={
            'value': json.dumps(file_dict)
        }
    )
    return result
def remove_file(self, id_doc, id_file):
"""
Remove image from base
"""
# First gets document
document = self.get_document(id_doc)
# Now create a new image dict removing selected files
new_image = list()
for image in document.get('images'):
if image['id_file'] != id_file:
new_imag |
rlenglet/openfaucet | src/openfaucet/test_ofstats.py | Python | apache-2.0 | 12,672 | 0 | # Copyright 2011 Midokura KK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for ofstats.py.
"""
import struct
import unittest2
from openfaucet import buffer
from openfaucet import ofaction
from openfaucet import ofmatch
from openfaucet import ofstats
class TestDescriptionStats(unittest2.TestCase):
    """Round-trip serialize/deserialize tests for ofstats.DescriptionStats."""

    def setUp(self):
        self.buf = buffer.ReceiveBuffer()
        self.mfr_desc = 'Dummy Manufacturer Inc.'
        self.hw_desc = 'DummySwitch'
        self.sw_desc = 'DummyOS'
        self.serial_num = '0000000042'
        self.dp_desc = 'unittest switch'
        self.desc_stats = ofstats.DescriptionStats(
            mfr_desc=self.mfr_desc,
            hw_desc=self.hw_desc,
            sw_desc=self.sw_desc,
            serial_num=self.serial_num,
            dp_desc=self.dp_desc)

    def test_serialize(self):
        # Each field is NUL-padded to its fixed OpenFlow width
        # (256 bytes, except serial_num which is 32).
        self.assertEqual(
            self.mfr_desc + '\x00' * (256 - len(self.mfr_desc))
            + self.hw_desc + '\x00' * (256 - len(self.hw_desc))
            + self.sw_desc + '\x00' * (256 - len(self.sw_desc))
            + self.serial_num + '\x00' * (32 - len(self.serial_num))
            + self.dp_desc + '\x00' * (256 - len(self.dp_desc)),
            self.desc_stats.serialize())

    def test_deserialize(self):
        self.buf.append(self.mfr_desc + '\x00' * (256 - len(self.mfr_desc)))
        self.buf.append(self.hw_desc + '\x00' * (256 - len(self.hw_desc)))
        self.buf.append(self.sw_desc + '\x00' * (256 - len(self.sw_desc)))
        self.buf.append(self.serial_num + '\x00' * (32 - len(self.serial_num)))
        self.buf.append(self.dp_desc + '\x00' * (256 - len(self.dp_desc)))
        self.buf.set_message_boundaries(1056)
        self.assertTupleEqual(self.desc_stats,
                              ofstats.DescriptionStats.deserialize(self.buf))
class TestFlowStats(unittest2.TestCase):
    """Round-trip serialize/deserialize tests for ofstats.FlowStats."""

    def setUp(self):
        # Fully-specified match so every field participates in the round-trip.
        self.buf = buffer.ReceiveBuffer()
        self.match = ofmatch.Match(
            in_port=0x13, dl_src='\x13\x24\x35\x46\x57\x68',
            dl_dst='\x12\x23\x34\x45\x56\x67', dl_vlan=0x11, dl_vlan_pcp=0x22,
            dl_type=0x3344, nw_tos=0x80, nw_proto=0xcc,
            nw_src=('\xaa\xbb\xcc\xdd', 32), nw_dst=('\x21\x32\x43\x54', 32),
            tp_src=0x38, tp_dst=0x49)
        self.flow_stats = ofstats.FlowStats(
            0xac, self.match, 0x10203040, 0x11223344, 0x1002, 0x0136, 0x0247,
            0xffeeddccbbaa9988, 0x42, 0x0153, (
                ofaction.ActionOutput(port=0x1234, max_len=0x9abc),
                ofaction.ActionSetDlDst(dl_addr='\x12\x34\x56\x78\xab\xcd')))

    def _serialize_action(self, a):
        # Prepend the standard 4-byte OpenFlow action header (type, length).
        a_ser = a.serialize()
        header = struct.pack('!HH', a.type, 4 + len(a_ser))
        return (header, a_ser)

    def _deserialize_action(self, buf):
        # Inverse of _serialize_action: dispatch on the action type code.
        action_type, action_length = buf.unpack('!HH')
        action_class = ofaction.ACTION_CLASSES.get(action_type)
        return action_class.deserialize(buf)

    def test_serialize(self):
        self.assertEqual(
            '\x00\x70' '\xac\x00'
            + self.match.serialize()
            + '\x10\x20\x30\x40' '\x11\x22\x33\x44'
            '\x10\x02' '\x01\x36' '\x02\x47'
            '\x00\x00\x00\x00\x00\x00'
            '\xff\xee\xdd\xcc\xbb\xaa\x99\x88'
            '\x00\x00\x00\x00\x00\x00\x00\x42'
            '\x00\x00\x00\x00\x00\x00\x01\x53'
            '\x00\x00\x00\x08'
            '\x12\x34\x9a\xbc'
            '\x00\x05\x00\x10'
            '\x12\x34\x56\x78\xab\xcd\x00\x00\x00\x00\x00\x00',
            ''.join(self.flow_stats.serialize(self._serialize_action)))

    def test_deserialize(self):
        self.buf.append('\x00\x70' '\xac\x00')
        self.buf.append(self.match.serialize())
        self.buf.append(
            '\x10\x20\x30\x40' '\x11\x22\x33\x44'
            '\x10\x02' '\x01\x36' '\x02\x47'
            '\x00\x00\x00\x00\x00\x00'
            '\xff\xee\xdd\xcc\xbb\xaa\x99\x88'
            '\x00\x00\x00\x00\x00\x00\x00\x42'
            '\x00\x00\x00\x00\x00\x00\x01\x53'
            '\x00\x00\x00\x08'
            '\x12\x34\x9a\xbc'
            '\x00\x05\x00\x10'
            '\x12\x34\x56\x78\xab\xcd\x00\x00\x00\x00\x00\x00')
        self.buf.set_message_boundaries(112)
        self.assertTupleEqual(
            self.flow_stats,
            ofstats.FlowStats.deserialize(self.buf, self._deserialize_action))
class TestTableStats(unittest2.TestCase):
    """Round-trip serialize/deserialize tests for ofstats.TableStats."""

    def setUp(self):
        self.buf = buffer.ReceiveBuffer()
        self.wildcards = ofmatch.Wildcards(
            in_port=True, dl_src=True, dl_dst=True, dl_vlan=True,
            dl_vlan_pcp=True, dl_type=True, nw_tos=False, nw_proto=False,
            nw_src=0, nw_dst=0, tp_src=False, tp_dst=False)
        self.table_stats = ofstats.TableStats(
            0xac, 'eth_wildcards', self.wildcards, 0x100000, 0x1234, 0x5678,
            0x9abcd)

    def test_serialize(self):
        # Table name is NUL-padded to the fixed 32-byte OpenFlow width.
        self.assertEqual('\xac\x00\x00\x00'
                         'eth_wildcards' + '\x00' * (32 - len('eth_wildcards'))
                         + '\x00\x10\x00\x1f'
                         '\x00\x10\x00\x00' '\x00\x00\x12\x34'
                         '\x00\x00\x00\x00\x00\x00\x56\x78'
                         '\x00\x00\x00\x00\x00\x09\xab\xcd',
                         ''.join(self.table_stats.serialize()))

    def test_deserialize(self):
        self.buf.append('\xac\x00\x00\x00'
                        'eth_wildcards')
        self.buf.append('\x00' * (32 - len('eth_wildcards')))
        self.buf.append('\x00\x10\x00\x1f'
                        '\x00\x10\x00\x00' '\x00\x00\x12\x34'
                        '\x00\x00\x00\x00\x00\x00\x56\x78'
                        '\x00\x00\x00\x00\x00\x09\xab\xcd')
        self.buf.set_message_boundaries(64)
        self.assertTupleEqual(self.table_stats,
                              ofstats.TableStats.deserialize(self.buf))
class TestPortStats(unittest2.TestCase):
def setUp(self):
self.buf = buffer.ReceiveBuffer()
self.port_stats = ofstats.PortStats(
port_no=0xabcd, rx_packets=0x1234, tx_packets=0x5678,
rx_bytes=0x1324, tx_bytes=0x5768, rx_dropped=0x1a2b,
tx_dropped=0x3c4d, rx_errors=0xab12, tx_errors=0xcd34,
rx_frame_err=0x1432, rx_over_err=0x2543, rx_crc_err=0x3654,
collisions=0x4765)
def test_serialize(self):
self.assertEqual('\xab\xcd\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x12\x34'
'\x00\x00\x00\x00\x00\x00\x56\x78'
'\x00\x00\x00\x00\x00\x00\x13\x24'
'\x00\x00\x00\x00\x00\x00\x57\x68'
'\x00\x00\x00\x00\x00\x00\x1a\x2b'
'\x00\x00\x00\x00\x00\x00\x3c\x4d'
'\x00\x00\x00\x00\x00\x00\xab\x12'
'\x00\x00\x00\x00\x00\x00\xcd\x34'
'\x00\x00\x00\x00\x00\x00\x14\x32'
'\x00\x00\x00\x00\x00\x00\x25\x43'
'\x00\x00\x00\x00\x00\x00\x36\x54'
'\x00\x00\x00\x00\x00\x00\x47\x65',
self.port_stats.serialize())
def test_serialize_every_counter_unavailable(self):
index = 8
port_stats_ser = ('\xab\xcd\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x12\x34'
'\x00\x00\x00\x00\x00\x00\x56\x78'
'\x00\x00\x00\x00\x00\x00\x13\x24'
'\x00\x00\x00\x00\x00\x00\x57\x68'
'\x00\x00\x00\x00\x00\x00\x1a\x2b'
'\x00\x00\x00\x00\x00\x |
Hispar/pycraft | tests/test_configuration.py | Python | mit | 3,042 | 0.001315 | """
Unit tests for the configuration loader module.
"""
import os
import json
from pycraft.configuration import ConfigurationLoader
# Expected default configuration produced by ConfigurationLoader when no
# config file exists; keep in sync with pycraft.configuration's defaults.
DEFAULT_CONFIG = {
    "window": {
        "width": 800,
        "height": 600,
        "ticks_per_second": 60,
        "resizeable": True,
        "exclusive_mouse": True,
    },
    "controls": {
        "forward": "W",
        "backward": "S",
        "right": "D",
        "left": "A",
        "jump": "SPACE",
        "down": "LSHIFT",
        "fly": "TAB",
    },
    "world": {
        "gravity": 20.0,
        "player_height": 2,
        "max_jump_height": 2.0,
        "terminal_velocity": 50,
        "walking_speed": 5,
        "flying_speed": 15,
    },
}
def test_config_init():
    """
    The loader constructs successfully and reports the default configuration.
    """
    loader = ConfigurationLoader()
    # check an object was created
    assert loader, "No ConfigurationLoader object created!"
    # Get the config and check it is the default values
    assert loader.get_configurations() == DEFAULT_CONFIG, (
        "Config created didn't match default config")
def test_load_configuration_file():
    """
    Test that we can read and write a config file.

    First pass: no config file exists, so load_configuration_file() must
    write the defaults to disk and return them.  Second pass: a hand-written
    config is placed on disk and must be returned verbatim.
    """
    config = ConfigurationLoader()
    base_dir = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(base_dir, 'tmp-configuration.json')
    config.configuration_file_path = config_path
    # There shouldn't be a config file, so it will write one
    config_dict = config.load_configuration_file()
    assert os.path.exists(config_path), "Didn't write a config file!"
    assert config_dict == DEFAULT_CONFIG, "Config returned doesn't match default config"
    with open(config_path, 'r') as f:
        written_config = json.load(f)
    assert written_config == DEFAULT_CONFIG, "Written config doesn't match default config"
    # Now we want to do it all again writing our own config file
    new_config = {
        "window": {
            "width": 813,
            "height": 2450,
            "ticks_per_second": 63,
            "resizeable": False,
            "exclusive_mouse": False,
        },
        "controls": {
            "forward": "Z",
            "backward": "S",
            "right": "D",
            "left": "Q",
            "jump": "LSHIFT",
            "down": "TAB",
            "fly": "SPACE",
        },
        "world": {
            "gravity": 234.0,
            "player_height": 123,
            "max_jump_height": 34.1,
            "terminal_velocity": 431,
            "walking_speed": 12,
            "flying_speed": 34,
        },
    }
    with open(config_path, 'w') as f:
        json.dump(new_config, f, indent=4)
    opened_config = config.load_configuration_file()
    assert opened_config == new_config, "Loaded config doesn't match what we wrote"
    # Clean up after ourselves
    os.remove(config_path)
if __name__ == "__main__":
test_config_init()
test_load_configuration_file()
|
rg3915/orcamentos | orcamentos/core/management/commands/conclude_proposal.py | Python | mit | 1,080 | 0 | from django.core.management.base import BaseCommand
from optparse import make_option
from django.utils import timezone
from orcamentos.proposal.models import Proposal
class Command(BaseCommand):
    """Management command that marks a proposal as concluded with a final price."""

    help = ''' Conclui orçamento. '''

    option_list = BaseCommand.option_list + (
        make_option('--num', help='número do orçamento'),
        make_option('--price', help='preço'),
    )

    def handle(self, num, price, *args, **kwargs):
        proposal = Proposal.objects.get(num_prop=num)
        # If the status is 'approved', the proposal already became a
        # contract and must not be concluded again.
        if proposal.status == 'a':
            print('Este orçamento já virou contrato.')
        else:
            # The new price must be present and strictly positive.
            # BUG FIX: the original tested `float(price) is None`, which is
            # always False (float() never returns None, and float(None)
            # raises TypeError before the check) — test for None first.
            if price is None or float(price) <= 0:
                print('O valor deve ser positivo.')
            else:
                proposal.price = price
                proposal.status = 'co'
                proposal.date_conclusion = timezone.now()
                proposal.save()
                print('Orçamento concluído com sucesso.')
|
tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_tensor_value.py | Python | apache-2.0 | 4,366 | 0.006413 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Value for RaggedTensor."""
import numpy as np
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["ragged.RaggedTensorValue"])
@dispatch.register_dispatchable_type
class RaggedTensorValue:
  """Represents the value of a `RaggedTensor`.

  Warning: `RaggedTensorValue` should only be used in graph mode; in
  eager mode, the `tf.RaggedTensor` class contains its value directly.

  See `tf.RaggedTensor` for a description of ragged tensors.
  """

  def __init__(self, values, row_splits):
    """Creates a `RaggedTensorValue`.

    Args:
      values: A numpy array of any type and shape; or a RaggedTensorValue.
      row_splits: A 1-D int32 or int64 numpy array.
    """
    if not (isinstance(row_splits, (np.ndarray, np.generic)) and
            row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1):
      raise TypeError("row_splits must be a 1D int32 or int64 numpy array")
    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):
      raise TypeError("values must be a numpy array or a RaggedTensorValue")
    # Nested ragged values must use the same splits dtype at every level.
    if (isinstance(values, RaggedTensorValue) and
        row_splits.dtype != values.row_splits.dtype):
      raise ValueError("row_splits and values.row_splits must have "
                       "the same dtype")
    self._values = values
    self._row_splits = row_splits

  row_splits = property(
      lambda self: self._row_splits,
      doc="""The split indices for the ragged tensor value.""")
  values = property(
      lambda self: self._values,
      doc="""The concatenated values for all rows in this tensor.""")
  dtype = property(
      lambda self: self._values.dtype,
      doc="""The numpy dtype of values in this tensor.""")

  @property
  def flat_values(self):
    """The innermost `values` array for this ragged tensor value."""
    rt_values = self.values
    while isinstance(rt_values, RaggedTensorValue):
      rt_values = rt_values.values
    return rt_values

  @property
  def nested_row_splits(self):
    """The row_splits for all ragged dimensions in this ragged tensor value."""
    rt_nested_splits = [self.row_splits]
    rt_values = self.values
    while isinstance(rt_values, RaggedTensorValue):
      rt_nested_splits.append(rt_values.row_splits)
      rt_values = rt_values.values
    return tuple(rt_nested_splits)

  @property
  def ragged_rank(self):
    """The number of ragged dimensions in this ragged tensor value."""
    values_is_ragged = isinstance(self._values, RaggedTensorValue)
    return self._values.ragged_rank + 1 if values_is_ragged else 1

  @property
  def shape(self):
    """A tuple indicating the shape of this RaggedTensorValue."""
    # Outer dim is the row count; the ragged dim is unknown (None).
    return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:]

  @property
  def _nested_row_partitions(self):
    """The row_partitions representing this shape."""
    return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits]

  def __str__(self):
    return "<tf.RaggedTensorValue %s>" % self.to_list()

  def __repr__(self):
    return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values,
                                                               self._row_splits)

  def to_list(self):
    """Returns this ragged tensor value as a nested Python list."""
    if isinstance(self._values, RaggedTensorValue):
      values_as_list = self._values.to_list()
    else:
      values_as_list = self._values.tolist()
    return [
        values_as_list[self._row_splits[i]:self._row_splits[i + 1]]
        for i in range(len(self._row_splits) - 1)
    ]
|
sdispater/pendulum | tests/conftest.py | Python | mit | 2,211 | 0 | import pytest
import pendulum
@pytest.fixture(autouse=True)
def setup():
    # Applied to every test automatically: pin the local timezone so
    # timezone-dependent results are deterministic across machines.
    pendulum.set_local_timezone(pendulum.timezone("America/Toronto"))
    yield
    # Teardown: undo any global pendulum state a test may have mutated
    # (frozen "now", locale, local timezone, week boundaries).
    pendulum.set_test_now()
    pendulum.set_locale("en")
    pendulum.set_local_timezone()
    pendulum.week_starts_at(pendulum.MONDAY)
    pendulum.week_ends_at(pendulum.SUNDAY)
def assert_datetime(
    d, year, month, day, hour=None, minute=None, second=None, microsecond=None
):
    """Assert that datetime-like *d* matches the given fields.

    The date part is always checked; time-of-day fields are only checked
    when a non-None value is supplied.
    """
    assert year == d.year
    assert month == d.month
    assert day == d.day
    # Only touch time attributes that the caller actually asked about.
    for attr, expected in (
        ("hour", hour),
        ("minute", minute),
        ("second", second),
        ("microsecond", microsecond),
    ):
        if expected is not None:
            assert expected == getattr(d, attr)
def assert_date(d, year, month, day):
    """Assert that date-like *d* has exactly the given year, month and day."""
    assert (d.year, d.month, d.day) == (year, month, day)
def assert_time(t, hour, minute, second, microsecond=None):
    """Assert that time-like *t* matches hour/minute/second (and microsecond if given)."""
    assert (t.hour, t.minute, t.second) == (hour, minute, second)
    # Microseconds are optional — only compared when the caller supplies them.
    if microsecond is not None:
        assert microsecond == t.microsecond
def assert_duration(
    dur,
    years=None,
    months=None,
    weeks=None,
    days=None,
    hours=None,
    minutes=None,
    seconds=None,
    microseconds=None,
):
    """Assert that duration-like *dur* matches every field that is not None.

    ``days`` and ``seconds`` compare against ``remaining_days`` /
    ``remaining_seconds`` (the leftover after larger units are extracted).
    """
    # (keyword value, attribute read from dur); None keywords are skipped.
    # Fixes the original, which had the expected/actual dicts swapped and
    # was corrupted at the "months"/"hours" entries.
    checks = (
        (years, "years"),
        (months, "months"),
        (weeks, "weeks"),
        (days, "remaining_days"),
        (hours, "hours"),
        (minutes, "minutes"),
        (seconds, "remaining_seconds"),
        (microseconds, "microseconds"),
    )
    expected = {}
    actual = {}
    for value, attr in checks:
        if value is not None:
            expected[attr] = value
            actual[attr] = getattr(dur, attr)
    # Compare whole dicts so a failure reports every mismatched field at once.
    assert expected == actual
|
akbarpn136/inventaris-dj | reformasi/views.py | Python | apache-2.0 | 290 | 0 | from django.shortcuts import render
from .models import KumpulanDokumen
# Create your views here.
def index(request):
    """Render the list of all reform documents (KumpulanDokumen)."""
    data_dokumen = KumpulanDokumen.objects.all()
    data = {
        'dokumen': data_dokumen
    }
    return render(request, 'reformasi/daftar_reformasi.html', data)
|
tensorflow/lucid | lucid/recipes/image_interpolation_params.py | Python | apache-2.0 | 2,768 | 0.006142 | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from lucid.optvis.param import lowres_tensor
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128,
                              channels=3):
  """A parameterization for interpolating between each pair of N objectives.

  Sometimes you want to interpolate between optimizing a bunch of objectives,
  in a parameterization that encourages images to align.

  Args:
    n_objectives: number of objectives you want interpolate between
    n_interp_steps: number of interpolation steps
    width: width of interpolated images
    channels: number of image channels

  Returns:
    A [n_objectives, n_objectives, n_interp_steps, width, width, channels]
    shaped tensor, t, where the final [width, width, channels] should be
    seen as images, such that the following properties hold:

      t[a, b]    = t[b, a, ::-1]
      t[a, i, 0] = t[a, j, 0] for all i, j
      t[a, a, i] = t[a, a, j] for all i, j
      t[a, b, i] = t[b, a, -i] for all i
  """
  N, M, W, Ch = n_objectives, n_interp_steps, width, channels

  # Shared low-resolution constant term added to every image.
  const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
                    for k in [1, 2, 4, 8]])
  const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])

  # One endpoint interpolation per objective.
  example_interps = [
      sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
           for k in [1, 2, 4, 8]])
      for _ in range(N)]

  example_basis = []
  for n in range(N):
    col = []
    for m in range(N):
      interp = example_interps[n] + example_interps[m][::-1]
      col.append(interp)
    example_basis.append(col)

  interp_basis = []
  for n in range(N):
    # Reuse (time-reversed) entries from rows built on earlier iterations so
    # that t[a, b] and t[b, a] share parameters.
    # NOTE(review): the [N-n] column index looks suspicious — the symmetric
    # partner of row n, column m is row m, column n; confirm against
    # upstream lucid before relying on the symmetry property above.
    col = [interp_basis[m][N-n][::-1] for m in range(n)]
    col.append(tf.zeros([M, W, W, 3]))
    for m in range(n+1, N):
      interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
                    for k in [1, 2]])
      col.append(interp)
    interp_basis.append(col)

  basis = []
  for n in range(N):
    col_ex = tf.stack(example_basis[n])
    col_in = tf.stack(interp_basis[n])
    basis.append(col_ex + col_in)
  basis = tf.stack(basis)
  return basis + const_term
|
nuance/GoTools | gotools_oracle.py | Python | mit | 3,748 | 0.004536 | import sublime
import sublime_plugin
import os
from .gotools_util import Buffers
from .gotools_util import GoBuffers
from .gotools_util import Logger
from .gotools_util import ToolRunner
import golangconfig
class GotoolsOracleCommand(sublime_plugin.TextCommand):
  """Runs the Go 'oracle' static-analysis tool at the cursor position and
  shows its output in a Sublime Text output panel."""

  # Every oracle mode this command knows how to dispatch.
  SUPPORTED_MODES = ("callees", "callers", "callstack", "describe",
                     "freevars", "implements", "peers", "referrers")

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if self.is_enabled():
      self.oracle = ToolRunner.prepare(self.view, 'oracle')

  def is_enabled(self):
    # Only offer the command inside Go source buffers.
    return GoBuffers.is_go_source(self.view)

  def run(self, edit, command=None):
    if not command:
      Logger.log("command is required")
      return

    filename, row, col, offset, offset_end = Buffers.location_at_cursor(self.view)
    pos = filename + ":#" + str(offset)

    # Build up a package scope containing all packages the user might have
    # configured.
    # TODO: put into a utility
    package_scope = []
    project_pkg = golangconfig.setting_value('project_package')[0] or ""
    for setting in ('build_packages', 'test_packages', 'tagged_test_packages'):
      for p in golangconfig.setting_value(setting):
        if p:
          package_scope.append(os.path.join(project_pkg, p))

    sublime.active_window().run_command("hide_panel", {"panel": "output.gotools_oracle"})

    # freevars needs the full selection range, not just its start.
    if command == "freevars":
      pos = filename + ":#" + str(offset) + "," + "#" + str(offset_end)
    if command in self.SUPPORTED_MODES:
      sublime.set_timeout_async(
          lambda: self.do_plain_oracle(command, pos, package_scope), 0)

  def do_plain_oracle(self, mode, pos, package_scope=None,
                      regex=r"^(.*):(\d+)[.:](\d+)[:-](.*)$"):
    """Invoke oracle in *mode* at *pos* and display the result.

    Args:
      mode: oracle analysis mode (e.g. "callers").
      pos: file:#offset position string understood by oracle.
      package_scope: optional list of packages to restrict the analysis to.
      regex: result_file_regex for the output panel (raw string — the
        original non-raw literal relied on `\\d` being an unrecognized
        escape).
    """
    package_scope = package_scope or []
    Logger.status("running oracle " + mode + "...")
    args = ["-pos=" + pos, "-format=plain", mode]
    if len(package_scope) > 0:
      args = args + package_scope
    output, err, rc = ToolRunner.run_prepared(self.oracle, args, timeout=60)
    Logger.log("oracle " + mode + " output: " + output.rstrip())
    if rc != 0:
      Logger.status("oracle call failed (" + str(rc) + "): " + output.strip())
      return
    Logger.status("oracle " + mode + " finished")

    # Replace the panel contents with the new output and reveal it.
    panel = self.view.window().create_output_panel('gotools_oracle')
    panel.set_scratch(True)
    panel.settings().set("result_file_regex", regex)
    panel.run_command("select_all")
    panel.run_command("right_delete")
    panel.run_command('append', {'characters': output})
    self.view.window().run_command("show_panel", {"panel": "output.gotools_oracle"})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.