repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
auto-mat/stupneprovozu | tsk/models.py | Python | gpl-3.0 | 790 | 0.006345 | # -*- coding: utf-8 -*-
from django.db import models
class SkupinaLokaci(models.Model):
name = models.CharField(max_length=255)
lokace = models.ManyToManyField("Lokace")
class Lokace(models.Model):
name = models.CharField(max_length=255)
favourite = models.BooleanField(verbose_name="Oblíbená", default=False)
| class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Provoz(models.Model):
ident = models.CharField(max_length=255)
level = models.IntegerField()
location = models.ForeignKey(Lokace)
| level = models.IntegerField()
time_generated = models.DateTimeField()
time_start = models.DateTimeField()
time_stop = models.DateTimeField()
class Meta:
ordering = ['time_generated']
|
siconos/siconos | io/swig/tests/test_native_collision.py | Python | apache-2.0 | 6,228 | 0.000803 | #!/usr/bin/env python
#
# A circle with two disks inside under earth gravity
#
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner
import siconos.numerics as sn
import siconos.kernel as sk
import siconos
import numpy
from math import sqrt
siconos.io.mechanics_run.set_backend('native')
disk_radius = 2
circle_radius = 10
def make_input():
# Creation of the hdf5 file for input/output
with MechanicsHdf5Runner() as io:
# Definition of a disk radius 1
io.add_primitive_shape('DiskR', 'Disk', [disk_radius])
io.add_primitive_shape('CircleR', 'Circle', [circle_radius])
# Definition of the ground shape
io.add_primitive_shape('Ground', 'Line', (0, 30, 0))
# io.add_primitive_shape('Wall1', 'Line', (1, 0, -20))
# io.add_primitive_shape('Wall2', 'Line', (1, 0, 20))
# Definition of a non smooth law. As no group ids are specified it
| # is between contactors of group id 0.
io.add_Newton_impact_friction_nsl('contact', mu=0.3, e=0)
| # The disk object made with an unique Contactor : the Disk shape.
# As a mass is given, it is a dynamic system involved in contact
# detection and in the simulation. With no group id specified the
# Contactor belongs to group 0
io.add_object('disk0', [Contactor('DiskR')],
translation=[-(circle_radius-disk_radius), circle_radius],
orientation=[0], velocity=[0, 0, 0], mass=10)
io.add_object('disk1', [Contactor('DiskR')],
translation=[(circle_radius-disk_radius), circle_radius],
orientation=[0], velocity=[0, 0, 0], mass=10)
io.add_object('circle', [Contactor('CircleR')],
translation=(0, circle_radius),
orientation=[0], velocity=[0, 0, 0], mass=1)
# the ground object made with the ground shape. As the mass is
# not given, it is a static object only involved in contact
# detection.
io.add_object('ground', [Contactor('Ground')],
translation=[0, 0])
# io.add_object('wall1', [Contactor('Wall1')],
# translation=[0, 0])
# io.add_object('wall2', [Contactor('Wall2')],
# translation=[0, 0])
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
# LEMKE failure when mu=0
options = sk.solver_options_create(sn.SICONOS_FRICTION_2D_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100000
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-12
def run():
with MechanicsHdf5Runner(mode='r+') as io:
# By default earth gravity is applied and the units are those
# of the International System of Units.
# Because of fixed collision margins used in the collision detection,
# sizes of small objects may need to be expressed in cm or mm.
io.run(with_timer=False,
gravity_scale=1,
t0=0,
T=2,
h=0.005,
theta=0.50001,
Newton_max_iter=1000,
set_external_forces=None,
solver_options=options,
numerics_verbose=False,
output_contact_forces=True,
output_frequency=None)
def check():
with MechanicsHdf5Runner(mode='r') as io:
positions = io.dynamic_data()
velocities = io.velocities_data()
times, indices = numpy.unique(positions[:, 0], return_index=True)
t = 1.95
id_t = max(0, numpy.searchsorted(times, t, side='right') - 1)
if id_t < len(indices)-1:
id_t_m = list(range(indices[id_t],
indices[id_t+1]))
else:
id_t_m = [indices[id_t]]
disk1_vidx = numpy.argwhere(velocities[id_t_m, 1] == 1)
disk2_vidx = numpy.argwhere(velocities[id_t_m, 1] == 2)
circle_vidx = numpy.argwhere(velocities[:, 1] == 3)
circle_vidx= circle_vidx.reshape(circle_vidx.shape[0])
disk1_evx = velocities[id_t_m, 2][disk1_vidx]
disk1_evy = velocities[id_t_m, 3][disk1_vidx]
disk2_evx = velocities[id_t_m, 2][disk2_vidx]
disk2_evy = velocities[id_t_m, 3][disk2_vidx]
# disks velocities at the end are 0
assert disk2_evx**2 + disk2_evy**2 < 1e-10
assert disk1_evx**2 + disk1_evy**2 < 1e-10
circle_vx = velocities[circle_vidx, 2]
circle_vy = velocities[circle_vidx, 3]
# circle velocities should always be near 0
print('max circle velocity:', max(circle_vx**2 + circle_vy**2))
assert max(circle_vx**2 + circle_vy**2) < 1e-10
disk1_idx = numpy.argwhere(positions[:, 1] == 1)
disk2_idx = numpy.argwhere(positions[:, 1] == 2)
circle_idx = numpy.argwhere(positions[:, 1] == 3)
disk1_idx= disk1_idx.reshape(disk1_idx.shape[0])
disk2_idx= disk2_idx.reshape(disk2_idx.shape[0])
circle_idx= circle_idx.reshape(circle_idx.shape[0])
disk1_x = positions[disk1_idx, 2]
disk1_y = positions[disk1_idx, 3]
disk2_x = positions[disk2_idx, 2]
disk2_y = positions[disk2_idx, 3]
circle_x = positions[circle_idx, 2]
circle_y = positions[circle_idx, 3]
print('max disk1 dist:', sqrt(max((disk1_x-circle_x)**2 +
(disk1_y-circle_y)**2))
- (circle_radius - disk_radius))
print('max disk2 dist:', sqrt(max((disk2_x-circle_x)**2 +
(disk2_y-circle_y)**2))
- (circle_radius - disk_radius))
assert ((sqrt(max((disk1_x-circle_x)**2 + (disk1_y-circle_y)**2)) -
(circle_radius - disk_radius)) < 1e-4)
assert ((sqrt(max((disk2_x-circle_x)**2 + (disk2_y-circle_y)**2)) -
(circle_radius - disk_radius)**2) < 1e-4)
assert sqrt(max(circle_x**2 + (circle_y-circle_radius)**2)) < 1e-10
def test_native_collision():
make_input()
run()
check()
|
anu-ka/coding-problems | Python/to_lower.py | Python | mit | 660 | 0.001515 | # Implement function ToLowerCase() that has a string parameter str, an | d returns the same string in lowercase.
# https://leetcode.com/problems/to-lower-case/
import pytest
class Solution:
def toLowerCase(self, str: str) -> str:
lower = ""
for i in str:
ordVal = ord(i)
if ordVal >= 65 and ordVal <= 90:
lower += chr(or | dVal + 32)
else:
lower += i
return lower
@pytest.mark.parametrize(
("str", "expected"), [("aAbB", "aabb"), ("aabb", "aabb"), ("a1@B", "a1@b")]
)
def test_basic(str: str, expected: str):
assert expected == Solution().toLowerCase(str)
|
aspose-total/Aspose.Total-for-Cloud | Examples/Python/files/DownloadAParticularFileExample.py | Python | mit | 895 | 0.012291 | import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.ApiClient import ApiException
from asposestoragecloud.models import FileExistResponse
apiKey = "b125f13bf6b76ed81e | e990142d841195" # sepcify App Key
appSid = "78946fb4-3bd4-4d3e-b309-f9e2ff9ac6f9" # sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
| try:
# Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
# upload file to aspose cloud storage
response = storageApi.GetDownload("tester/test.pdf")
if response.Status == "OK":
print("Download File Done " + response.Status)
except ApiException as ex:
print ("ApiException:")
print ("Code:" + str(ex.code))
print ("Message:" +ex.message) |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/elastic_demo.py | Python | apache-2.0 | 927 | 0.004315 | # Copyright (c) 2021 PaddlePaddle A | uthors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www | .apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import time
sys.stderr.write("{}-DISTRIBUTED_TRAINER_ENDPOINTS={}\n".format(os.environ[
'PADDLE_TRAINER_ID'], os.environ['DISTRIBUTED_TRAINER_ENDPOINTS']))
sys.stderr.write("{}-PADDLE_TRAINERS={}\n".format(os.environ[
'PADDLE_TRAINER_ID'], os.environ['PADDLE_TRAINERS']))
time.sleep(600)
|
sajeeshcs/nested_projects_keystone | keystone/common/base64utils.py | Python | apache-2.0 | 13,107 | 0.000076 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Python provides the base64 module as a core module but this is mostly
limited to encoding and decoding base64 and it's variants. It is often
useful to be able to perform other operations on base64 text. This
module is meant to be used in conjunction with the core base64 module.
Standarized base64 is defined in
RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
This module provides the following base64 utility functionality:
* tests if text is valid base64
* filter formatting from base64
* convert base64 between different alphabets
* Handle padding issues
- test if base64 is padded
- removes padding
- restores padding
* wraps base64 text into formatted blocks
- via iterator
- return formatted string
"""
import re
import string
import six
from six.moves import urllib
from keystone.i18n import _
class InvalidBase64Error(ValueError):
pass
base64_alphabet_re = re.compile(r'^[^A-Za-z0-9+/=]+$')
base64url_alphabet_re = re.compile(r'^[^A-Za-z0-9---_=]+$')
base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
_strip_formatting_re = re.compile(r'\s+')
_base64_to_base64url_trans = string.maketrans('+/', '-_')
_base64url_to_base64_trans = string.maketrans('-_', '+/')
def is_valid_base64(text):
"""Test if input text can be base64 decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64, False otherwise
"""
text = filter_formatting(text)
if base64_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def is_valid_base64url(text):
"""Test if input text can be base64url decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64url,
False otherwise
"""
text = filter_formatting(text)
if base64url_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def filter_formatting(text):
"""Return base64 text without any formatting, just the base64.
Base64 text is often formatted with whitespace, line endings,
etc. This function strips out any formatting, the result will
contain only base64 characters.
Note, this function does not filter out all non-base64 alphabet
characters, it only removes characters used for formatting.
:param text: input text to filter
:type text: string
:returns: string -- filtered text without formatting
"""
return _strip_formatting_re.sub('', text)
def base64_to_base64url(text):
"""Convert base64 text to base64url text.
base64url text is designed to be safe for use in filenames and
URL's. It is defined in RFC-4648 Section 5.
base64url differs from base64 in the last two alphabet characters
at index 62 and 63, these are sometimes referred as the
altchars. The '+' character at index 62 is replaced by '-'
(hyphen) and the '/' character at index 63 is replaced by '_'
(underscore).
This function only translates the altchars, non-alphabet
characters are not filtered out.
| WARNING::
base64url continues to use the '=' pad character which is NOT URL
safe. RFC-4648 suggests two alternate methods to deal with this:
percent-encode
percent-encode the pad character (e.g. '=' becomes
'%3D'). This makes the base64url text fully safe. But
percent-encoding has the downside of requiring
percent-decoding prior to feeding the base64ur | l text into a
base64url decoder since most base64url decoders do not
recognize %3D as a pad character and most decoders require
correct padding.
no-padding
padding is not strictly necessary to decode base64 or
base64url text, the pad can be computed from the input text
length. However many decoders demand padding and will consider
non-padded text to be malformed. If one wants to omit the
trailing pad character(s) for use in URL's it can be added back
using the base64_assure_padding() function.
This function makes no decisions about which padding methodology to
use. One can either call base64_strip_padding() to remove any pad
characters (restoring later with base64_assure_padding()) or call
base64url_percent_encode() to percent-encode the pad characters.
:param text: input base64 text
:type text: string
:returns: string -- base64url text
"""
return text.translate(_base64_to_base64url_trans)
def base64url_to_base64(text):
"""Convert base64url text to base64 text.
See base64_to_base64url() for a description of base64url text and
it's issues.
This function does NOT handle percent-encoded pad characters, they
will be left intact. If the input base64url text is
percent-encoded you should call
:param text: text in base64url alphabet
:type text: string
:returns: string -- text in base64 alphabet
"""
return text.translate(_base64url_to_base64_trans)
def base64_is_padded(text, pad='='):
"""Test if the text is base64 padded.
The input text must be in a base64 alphabet. The pad must be a
single character. If the text has been percent-encoded (e.g. pad
is the string '%3D') you must convert the text back to a base64
alphabet (e.g. if percent-encoded use the function
base64url_percent_decode()).
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: bool -- True if padded, False otherwise
:raises: ValueError, InvalidBase64Error
"""
if len(pad) != 1:
raise ValueError(_('pad must be single character'))
text_len = len(text)
if text_len > 0 and text_len % 4 == 0:
pad_index = text.find(pad)
if pad_index >= 0 and pad_index < text_len - 2:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'2nd to last char') % pad)
if pad_index == text_len - 2 and text[-1] != pad:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'non-pad last char') % pad)
return True
if text.find(pad) >= 0:
raise InvalidBase64Error(_('text is not a multiple of 4, '
'but contains pad "%s"') % pad)
return False
def base64url_percent_encode(text):
"""Percent-encode base64url padding.
The input text should only contain base64url alphabet
characters. Any non-base64url alphabet characters will also be
subject to percent-encoding.
:param text: text containing ONLY characters in the base64url alphabet
:type text: string
:returns: string -- percent-encoded base64url text
:raises: InvalidBase64Error
"""
if len(text) % 4 != 0:
raise InvalidBase64Error(_('padded base64url text must be '
'multiple of 4 characters'))
return urllib.parse.quote(text)
def base6 |
blaze/dask | dask/dataframe/io/io.py | Python | bsd-3-clause | 22,680 | 0.000617 | import os
from math import ceil
from operator import getitem
from threading import Lock
import numpy as np
import pandas as pd
from tlz import merge
from ... import array as da
from ...base import tokenize
from ...dataframe.core import new_dd_object
from ...delayed import delayed
from ...highlevelgraph import HighLevelGraph
from ...utils import M, ensure_dict
from ..core import DataFrame, Index, Series, has_parallel_type, new_dd_object
from ..shuffle import set_partition
from ..utils import check_meta, insert_meta_param_description, is_series_like, make_meta
lock = Lock()
def _meta_from_array(x, columns=None, index=None, meta=None):
"""Create empty DataFrame or Series which has correct dtype"""
if x.ndim > 2:
raise ValueError(
"from_array does not input more than 2D array, got"
" array with shape %r" % (x.shape,)
)
if index is not None:
if not isinstance(index, Index):
raise ValueError("'index' must be an instance of dask.dataframe.Index")
index = index._meta
if meta is None:
meta = pd.DataFrame()
if getattr(x.dtype, "names", None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}")
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else "f8" for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return meta._constructor_sliced(
[], name=columns, dtype=x.dtype, index=index
)
elif len(columns) == 1:
return meta._constructor(
np.array([], dtype=x.dtype), columns=columns, index=index
)
raise ValueError(
"For a 1d array, columns must be a scalar or single element list"
)
else:
if np.isnan(x.shape[1]):
raise ValueError("Shape along axis 1 must be known")
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError(
"Number of column names must match width of the array. "
f"Got {len(columns)} names for {x.shape[1]} columns"
)
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return meta._constructor(data, columns=columns, index=index)
def from_array(x, chunksize=50000, columns=None, meta=None):
"""Read any sliceable array into a Dask Dataframe
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
Parameters
----------
x : array_like
chunksize : int, optional
The number of rows per partition to use.
columns : list or string, optional
list of column names if DataFrame, single string if Series
meta : object, optional
An optional `meta` parameter can be passed for dask
to specify the concrete dataframe type to use for partitions of
the Dask dataframe. By default, pandas DataFrame is used.
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns, meta=meta)
meta = _meta_from_array(x, columns, meta=meta)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = "from_array-" + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if is_series_like(meta):
dsk[name, i] = (type(meta), data, None, meta.dtype, meta.name)
else:
dsk[name, i] = (type(meta), data, None, meta.columns)
return new_dd_object(dsk, name, meta, divisions)
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""
Construct a Dask DataFrame from a Pandas DataFrame
This splits an in-memory Pandas dataframe into several parts and constructs
a dask.dataframe from those parts on which Dask.dataframe can operate in
parallel. By default, the input dataframe will be sorted by the index to
produce cleanly-divided partitions (with known divisions). To preserve the
input ordering, make sure the inp | ut index is monotonically-increasing. The
``sort=False`` option will also avoid reordering, but will not result in
known divisions.
Note that, despite parallelism, Dask.dataframe may not always be faster
than Pandas. We recommend that you stay with Pandas for as long as
possible before switching | to Dask.dataframe.
Parameters
----------
data : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a Dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create. Note that depending on
the size and index of the dataframe, the output may have fewer
partitions than requested.
chunksize : int, optional
The number of rows per index partition to use.
sort: bool
Sort the input by index first to obtain cleanly divided partitions
(with known divisions). If False, the input will not be sorted, and
all divisions will be set to None. Default is True.
name: string, optional
An optional keyname for the dataframe. Defaults to hashing the input
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> from dask.dataframe import from_pandas
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, "index", None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not has_parallel_type(data):
raise TypeError("Input must be a pandas DataFrame or Series")
if (npartitions is None) == (chunksize is None):
raise ValueError("Exactly one of npartitions and chunksize must be specified.")
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
name = name or ("from_pandas-" + tokenize(data, chunksize))
if not nrows:
return new_dd_object({(name, 0): data}, name, data, [None, None])
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(
data.in |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/aio/operations/_load_balancer_network_interfaces_operations.py | Python | mit | 5,642 | 0.004254 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations:
"""LoadBalancerNetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not i | n [200]:
map_error(status_code=response.status_code, | response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
|
YiqunPeng/Leetcode-pyq | solutions/353DesignSnakeGame.py | Python | gpl-3.0 | 2,009 | 0.008462 | class SnakeGame:
def __init__(self, width, height, food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.w, self.h = width, height
self.food = food
self.idx = 0
self.snake = collections.deque([(0, 0)])
self.snake_set = set([(0, 0)])
self.game_state = True
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
if not self.game_state: return -1
directions = {'U': (-1, 0), 'L': (0, -1), 'R': (0, 1), 'D': (1, 0)}
head = self.snake[0]
n_x = head[0] + directions[directi | on][0]
n_y = head[1] + directions[direction][1]
if not (0 <= n_x < self.h and 0 <= n_y < self.w):
self.game_state = False
return -1
if (n_x, n_y) != self.snake[-1] and (n_x, n_y) in self.snake_set:
self.game_state = False
| return -1
if self.idx < len(self.food) and self.food[self.idx] == [n_x, n_y]:
self.idx += 1
else:
self.snake_set.remove(self.snake[-1])
self.snake.pop()
self.snake.appendleft((n_x, n_y))
self.snake_set.add((n_x, n_y))
return self.idx
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) |
ArnaudBelcour/liasis | pbsea/pbsea.py | Python | gpl-3.0 | 28,386 | 0.004791 | #!/usr/bin/env python3
import logging
import csv
import math
import numpy as np
import os
import pandas as pa
import scipy.stats as stats
import six
from statsmodels.sandbox.stats.multicomp import multipletests
logging.basicConfig(filename='analysis.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class PandasBasedEnrichmentAnalysis():
'''
Performs an enrichment analysis using an hypergeometric test
(also known as Fisher's exact test) and multiple correction testing.
To do this you need to enter some values (using the set_something() function):
-file of interest : the name of your file (with the extension)
containing the occurrences of each objects from a sample you want to analyze.
-file of reference : the name of your file (with the extentions)
containing the occurrences of each objects from a population.
| -number of analyzed object of interest : the number of objects in your sample
(for example the number of differentially expressed genes in a list).
-num | ber of analyzed object of reference : the number of objects in your population
(for example the number of genes in the genome of your species).
-alpha : the alpha threshold also known as type I error.
-normal approximation threshold : the threshold separating the hypergeometric test
(which runs very slowly when using big numbers) and normal approximation.
'''
def __init__(self, dataframe, name_column_interest, name_column_reference,
number_of_object_of_interest, number_of_genes_in_reference,
alpha, threshold_normal_approximation):
self.dataframe = dataframe.copy()
self._output_columns = [name_column_interest, name_column_reference,
'PercentageInInterest', 'PercentageInReference',
'pvalue_hypergeometric', 'pValueBonferroni',
'pValueHolm', 'pValueBenjaminiHochberg', 'pValueBenjaminiYekutieli']
self._column_interest = name_column_interest
self._column_reference = name_column_reference
self._number_of_analyzed_object_of_interest = number_of_object_of_interest
self._number_of_analyzed_object_of_reference = number_of_genes_in_reference
self._alpha = alpha
self._normal_approximation_threshold = threshold_normal_approximation
self._statistic_method = ""
self.multiple_test_names = ['Sidak', 'Bonferroni', 'Holm', 'BenjaminiHochberg', 'BenjaminiYekutieli']
@property
def output_columns(self):
return self._output_columns
@output_columns.setter
def output_columns(self, index, column_name):
self._output_columns[index] = column_name
@property
def column_interest(self):
return self._column_interest
@column_interest.setter
def column_interest(self, column_name):
self._column_interest = column_name
@property
def column_reference(self):
return self._column_reference
@column_reference.setter
def column_reference(self, column_name):
self._column_reference = column_name
@property
def number_of_analyzed_object_of_interest(self):
return self._number_of_analyzed_object_of_interest
@number_of_analyzed_object_of_interest.setter
def number_of_analyzed_object_of_interest(self, value):
if value > self.number_of_analyzed_object_of_reference:
raise ValueError("The number of objects in your sample of interest is greater than the number of objects in the reference.")
else:
self._number_of_analyzed_object_of_interest = value
@property
def number_of_analyzed_object_of_reference(self):
return self._number_of_analyzed_object_of_reference
@number_of_analyzed_object_of_reference.setter
def number_of_analyzed_object_of_reference(self, value):
if value < self.number_of_analyzed_object_of_interest:
raise ValueError("The number of objects in the reference is smaller than the number of objects in your sample of interest.")
else:
self._number_of_analyzed_object_of_reference = value
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, value):
self._alpha = value
@property
def statistic_method(self):
return self._statistic_method
@statistic_method.setter
def statistic_method(self, method_name):
self._statistic_method = method_name
@property
def normal_approximation_threshold(self):
return self._normal_approximation_threshold
@normal_approximation_threshold.setter
def normal_approximation_threshold(self, value):
self._normal_approximation_threshold = value
def test_on_dataframe(self, df):
analyzed_objects_with_hypergeo_test_nan = []
approximation_threshold = self.normal_approximation_threshold
value_higher_threshold = all(df[self.column_interest] > approximation_threshold)
if value_higher_threshold == False:
self.statistic_method = "pvalue_hypergeometric"
df[self.statistic_method] = df.apply(self.compute_hypergeometric_test,
axis=1)
df = df.sort_values(self.statistic_method)
elif value_higher_threshold == True:
self.output_columns[4] = 'pvalue_normal_approximation'
self.statistic_method = 'pvalue_normal_approximation'
df[self.statistic_method] = df.apply(self.compute_normal_approximation,
axis=1)
df = df.sort_values(self.statistic_method)
return df
def compute_hypergeometric_test(self, row):
number_of_object_in_interest = row[self.column_interest]
number_of_object_in_reference = row[self.column_reference]
pvalue_hypergeo = stats.hypergeom.sf(number_of_object_in_interest - 1, self.number_of_analyzed_object_of_reference,
number_of_object_in_reference, self.number_of_analyzed_object_of_interest)
return pvalue_hypergeo
def compute_normal_approximation(self, row):
number_of_object_in_interest = row[self.column_interest]
number_of_object_in_reference = row[self.column_reference]
p = number_of_object_in_reference / self.number_of_analyzed_object_of_reference
q = 1 - p
t = self.number_of_analyzed_object_of_interest / self.number_of_analyzed_object_of_reference
mu = self.number_of_analyzed_object_of_interest * p
if 0 in [self.number_of_analyzed_object_of_interest, p, q, (1 - t)]:
return np.nan
sigma = math.sqrt(self.number_of_analyzed_object_of_interest * p * q * (1 - t))
pvalue_normal = stats.norm.sf(number_of_object_in_interest, loc=mu, scale=sigma)
return pvalue_normal
def multiple_testing_correction(self, df):
logger.info('-------------------------------------Multiple testing correction-------------------------------------')
df = df.sort_values([self.statistic_method])
df = self.correction_bonferroni(df)
df = self.correction_benjamini_hochberg(df)
df = self.correction_benjamini_yekutieli(df)
df = self.correction_holm(df)
significative_objects = {}
for multiple_test_name in self.multiple_test_names:
if multiple_test_name == 'Sidak':
error_rate = self.error_rate_adjustement_sidak(df)
elif multiple_test_name == 'Bonferroni':
error_rate = self.error_rate_adjustement_bonferroni(df)
if multiple_test_name in ['Sidak', 'Bonferroni']:
object_significatives = self.selection_object_with_adjusted_error_rate(error_rate, df)
elif multiple_test_name in ['Holm', 'BenjaminiHochberg', 'BenjaminiYekutieli']:
object_significatives = self.selection_object_with_adjusted_pvalue(multiple_test |
dontnod/weblate | weblate/trans/views/files.py | Python | gpl-3.0 | 5,817 | 0.000516 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.views.decorators.http import require_POST
from weblate.lang.models import Language
from weblate.trans.forms import DownloadForm, get_upload_form
from weblate.trans.models import ComponentList, Translation
from weblate.utils import messages
from weblate.utils.data import data_dir
from weblate.utils.errors import report_error
from weblate.utils.views import (
download_translation_file,
get_component,
get_project,
get_translation,
show_form_errors,
zip_download,
)
def download_multi(translations, fmt=None):
return zip_download(data_dir('vcs'), [t.get_filename() for t in translations])
def download_component_list(request, name):
obj = get_object_or_404(ComponentList, slug=name)
components = obj.components.filter(project__in=request.user.allowed_projects)
for component in components:
component.commit_pending("download", None)
return download_multi(
Translation.objects.filter(component__in=components),
request.GET.get('format')
)
def download_component(request, project, component):
obj = get_component(request, project, component)
obj.commit_pending("download", None)
return download_multi(
obj.translation_set.all(),
request.GET.get('format')
)
def download_project(request, project):
obj = get_project(request, project)
obj.commit_pending("download", None)
return download_multi(
Translation.objects.filter(component__project=obj),
request.GET.get('format')
)
def download_lang_project(request, lang, project):
obj = get_project(request, project)
obj.commit_pending("download", None)
langobj = get_object_or_404(Language, code=lang)
return download_multi(
Translation.objects.filter(component__project=obj, language=langobj),
request.GET.get('format')
)
def download_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
kwargs = {}
if 'format' in request.GET or 'type' in request.GET:
form = DownloadForm(request.GET)
if not form.is_valid():
show_form_errors(request, form)
return redirect(obj)
kwargs['units'] = obj.unit_set.search(
form.cleaned_data,
translation=obj,
)
kwargs['fmt'] = form.cleaned_data['format']
return download_translation_file(obj, **kwargs)
@require_POST
def upload_translation(request, project, component, lang):
"""Handling of translation uploads."""
obj = get_translation(request, project, component, lang)
if not request.user.has_perm('upload.perform', obj):
raise PermissionDenied()
# Check method and lock
if obj.component.locked:
messages.error(request, _('Access denied.'))
return redirect(obj)
# Get correct form handler based on permissions
form = get_upload_form(
request.user, obj,
request.POST, request.FILES
)
# Check form validity
if not form.is_valid():
messages.error(request, _('Please fix errors in the form.'))
show_form_errors(request, form)
return redirect(obj)
# Create author name
author_name = None
author_email = None
if request.user.has_perm('upload.authorship', obj):
author_name = form.cleaned_data['author_name']
author_email = form.cleaned_data['author_email']
# Check for overwriting
overwrite = False
if request.user.has_perm('upload.overwrite', obj):
overwrite = form.cleaned_data['upload_overwrite']
# Do actual import
try:
not_found, skipped, accepted, total = obj.merge_upload(
request,
request.FILES['file'],
overwrite,
author_name,
author_email,
method=form.cleaned_data['method'],
fuzzy=form.cleaned_data['fuzzy'],
)
if total == 0:
message = _('No strings were imported from the uploaded file.')
else:
message = ungettext(
'Processed {0} string from the uploaded files '
'(skipp | ed: {1}, no | t found: {2}, updated: {3}).',
'Processed {0} strings from the uploaded files '
'(skipped: {1}, not found: {2}, updated: {3}).',
total
).format(total, skipped, not_found, accepted)
if accepted == 0:
messages.warning(request, message)
else:
messages.success(request, message)
except Exception as error:
messages.error(request, _('File upload has failed: %s') % force_text(error))
report_error(error, request, prefix='Upload error')
return redirect(obj)
|
schwehr/gdal-autotest2 | python/alg/tps_test.py | Python | apache-2.0 | 3,423 | 0.004382 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright | notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""Test Thin Plate Spline transformer in alg/gdal_tps.cpp.
Rewrite of:
https://trac.osgeo.org/gdal/browser/trunk/autotest/alg/tps.py
"""
import unittest
from osgeo import gdal
from osgeo import osr
from autotest2.gcore import gcore_util
class TransformGeolocTest(unittest.TestCase):
def testGroundControlPoints(self):
# https://trac.osgeo.org/gdal/ticket/5586
driver = gdal.GetDriverByName('MEM')
filepath = 'tps.mem'
with gcore_util.GdalUnlinkWhenDone(filepath):
datasource = driver.Create('tps.mem', 2, 2)
# An set of ground control points that will generate an error.
gcp_list = [
gdal.GCP(0, 0, 0, 0, 0),
gdal.GCP(0, 50, 0, 0, 50),
gdal.GCP(50, 0, 0, 50, 0),
gdal.GCP(50, 50, 0, 50, 50),
gdal.GCP(0 * 25, 0 * 25, 0, 25, 25)
]
datasource.SetGCPs(gcp_list, osr.GetUserInputAsWKT('WGS84'))
utm_wkt = osr.GetUserInputAsWKT('+proj=utm +zone=11 +datum=WGS84')
with gcore_util.ErrorHandler('CPLQuietErrorHandler'):
transformer = gdal.Transformer(
datasource, None, ['DST_SRS=' + utm_wkt, 'METHOD=GCP_TPS'])
self.assertIsNotNone(transformer)
# TODO(schwehr): The error observed is 3 (CPLE_FileIO), but
# expected 1 (CPLE_AppDefined).
self.assertNotEqual(gdal.GetLastErrorType(), gdal.CPLE_None)
err_msg = gdal.GetLastErrorMsg()
self.assertIn('problem inverting', err_msg)
self.assertIn('interpolation matrix', err_msg)
if __name__ == '__main__':
unittest.main()
|
bitmazk/cmsplugin-blog-language-publish | cmsplugin_blog_language_publish/tests/south_settings.py | Python | mit | 599 | 0 | """
These setting | s are used by the ``manage.py`` command.
With normal tests we want to use the fastest possible way which is an
in-memory sqlite database bu | t if you want to create South migrations you
need a persistant database.
Unfortunately there seems to be an issue with either South or syncdb so that
defining two routers ("default" and "south") does not work.
"""
from cmsplugin_blog_language_publish.tests.test_settings import * # NOQA
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite',
}
}
INSTALLED_APPS.append('south', )
|
ywcui1990/nupic | examples/opf/experiments/classification/category_hub_TP_0/description.py | Python | agpl-3.0 | 1,688 | 0.005332 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# M | ERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# -------------------------------------------------------- | --------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'claEvalClassification': True,
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_hub_TP_0.csv'),
'modelParams': { 'clParams': { 'verbosity': 0},
'sensorParams': { 'encoders': { }, 'verbosity': 0},
'spParams': { },
'tmEnable': True,
'tmParams': { }}}
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
|
idealabasu/code_pynamics | python/pynamics_examples/triple_pendulum_maximal.py | Python | mit | 10,420 | 0.039923 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
from pynamics.constraint import AccelerationConstraint,KinematicConstraint
import pynamics.integration
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
import sympy
sympy.init_printing(pretty_print=False)
import math
system = System()
pynamics.set_system(__name__,system)
constrain_base=True
simplify_w = False
lA = Constant(1,'lA',system)
lB = Constant(1,'lB',system)
lC = Constant(1,'lC',system)
mA = Constant(1,'mA',system)
mB = Constant(1,'mB',system)
mC = Constant(1,'mC',system)
g = Constant(9.81,'g',system)
b = Constant(1e1,'b',system)
k = Constant(1e1,'k',system)
preload1 = Constant(0*pi/180,'preload1',system)
preload2 = Constant(0*pi/180,'preload2',system)
preload3 = Constant(0*pi/180,'preload3',system)
Ixx_A = Constant(1,'Ixx_A',system)
Iyy_A = Constant(1,'Iyy_A',system)
Izz_A = Constant(1,'Izz_A',system)
Ixx_B = Constant(1,'Ixx_B',system)
Iyy_B = Constant(1,'Iyy_B',system)
Izz_B = Constant(1,'Izz_B',system)
Ixx_C = Constant(1,'Ixx_C',system)
Iyy_C = Constant(1,'Iyy_C',system)
Izz_C = Constant(1,'Izz_C',system)
torque = Constant(0,'torque',system)
freq = Constant(3e0,'freq',system)
x,x_d,x_dd = Differentiable('x',system)
y,y_d,y_dd = Differentiable('y',system)
z,z_d,z_dd = Differentiable('z',system)
x2,x2_d,x2_dd = Differentiable('x2',system)
y2,y2_d,y2_dd = Differentiable('y2',system)
z2,z2_d,z2_dd = Differentiable('z2',system)
x3,x3_d,x3_dd = Differentiable('x3',system)
y3,y3_d,y3_dd = Differentiable('y3',system)
z3,z3_d,z3_dd = Differentiable('z3',system)
qA1,qA1_d,qA1_dd = Differentiable('qA1')
qA2,qA2_d,qA2_dd = Differentiable('qA2')
qA3,qA3_d,qA3_dd = Differentiable('qA3')
qB1,qB1_d,qB1_dd = Differentiable('qB1')
qB2,qB2_d,qB2_dd = Differentiable('qB2')
qB3,qB3_d,qB3_dd = Differentiable('qB3')
qC1,qC1_d,qC1_dd = Differentiable('qC1')
qC2,qC2_d,qC2_dd = Differentiable('qC2')
qC3,qC3_d,qC3_dd = Differentiable('qC3')
if simplify_w:
wAx,wAx_d = Differentiable('wAx',ii = 1)
wAy,wAy_d = Differentiable('wAy',ii = 1)
wAz,wAz_d = Differentiable('wAz',ii = 1)
wBx,wBx_d = Differentiable('wBx',ii = 1)
wBy,wBy_d = Differentiable('wBy',ii = 1)
wBz,wBz_d = Differentiable('wBz',ii = 1)
wCx,wCx_d = Differentiable('wCx',ii = 1)
wCy,wCy_d = Differentiable('wCy',ii = 1)
wCz,wCz_d = Differentiable('wCz',ii = 1)
initialvalues = {}
initialvalues[qA1]=0*pi/180
initialvalues[qA2]=0*pi/180
initialvalues[qA3]=0*pi/180
initialvalues[qB1]=0*pi/180
initialvalues[qB2]=0*pi/180
initialvalues[qB3]=0*pi/180
initialvalues[qC1]=0*pi/180
initialvalues[qC2]=0*pi/180
initialvalues[qC3]=0*pi/180
if not constrain_base:
initialvalues[x]=0
initialvalues[x_d]=0
initialvalues[y]=0
initialvalues[y_d]=0
initialvalues[z]=0
initialvalues[z_d]=0
if simplify_w:
initialvalues[wAx]=0
initialvalues[wAy]=0
initialvalues[wAz]=0
initialvalues[wBz]=0
initialvalues[wCz]=0
else:
initialvalues[qA1_d]=0
initialvalues[qA2_d]=0
initialvalues[qA3_d]=0
initialvalues[qB1_d]=0
initialvalues[qB2_d]=0
initialvalues[qB3_d]=0
initialvalues[qC1_d]=0
initialvalues[qC2_d]=0
initialvalues[qC3_d]=0
N = Frame('N',system)
A1 = Frame('A1',system)
A2 = Frame('A2',system)
A3 = Frame('A3',system)
B1 = Frame('B1',system)
B2 = Frame('B2',system)
B3 = Frame('B3',system)
C1 = Frame('C1',system)
C2 = Frame('C2',system)
C3 = Frame('C3',system)
# S = Frame('S',system)
system.set_newtonian(N)
A1.rotate_fixed_axis(N,[1,0,0],qA1,system)
A2.rotate_fixed_axis(A1,[0,1,0],qA2,system)
A3.rotate_fixed_axis(A2,[0,0,1],qA3,system)
B1.rotate_fixed_axis(N,[1,0,0],qB1,system)
B2.rotate_fixed_axis(B1,[0,1,0],qB2,system)
B3.rotate_fixed_axis(B2,[0,0,1],qB3,system)
C1.rotate_fixed_axis(N,[1,0,0],qC1,system)
C2.rotate_fixed_axis(C1,[0,1,0],qC2,system)
C3.rotate_fixed_axis(C2,[0,0,1],qC3,system)
if simplify_w:
wA1 = N.get_w_to(A3)
wA2 = wAx*A3.x + wAy*A3.y + wAz*A3.z
N.set_w(A3,wA2)
wB1 = N.get_w_to(B3)
wB2 = wBx*B3.x + wBy*B3.y + wBz*B3.z
N.set_w(B3,wB2)
wC1 = N.get_w_to(C3)
wC2 = wCx*C3.x + wCy*C3.y + wCz*C3.z
N.set_w(C3,wC2)
pAcm=x*A3.x+y*A3.y+z*A3.z
pBase = pAcm-lA/2*A3.x
pAB=pAcm+lA/2*A3.x
pBcm=x2*B3.x+y2*B3.y+z2*B3.z
pBA = pBcm - lB/2*B3.x
pBC = pBcm + lB/2*B3.x
pCcm=x3*C3.x+y3*C3.y+z3*C3.z
pCB = pCcm - lC/2*C3.x
pCtip=pCcm+lC/2*C3.x
wA3B3 = A3.get_w_to(B3)
wB3C3 = B3.get_w_to(C3)
wA3N = A3.get_w_to(N)
wNA3 = N.get_w_to(A3)
IA = Dyadic.build(A3,Ixx_A,Iyy_A,Izz_A)
IB = Dyadic.build(B3,Ixx_B,Iyy_B,Izz_B)
IC = Dyadic.build(C3,Ixx_C,Iyy_C,Izz_C)
# IS = Dyadic.build(S,Ixx_S,Iyy_S,Izz_S)
BodyA = Body('BodyA',A3,pAcm,mA,IA,system)
BodyB = Body('BodyB',B3,pBcm,mB,IB,system)
BodyC = Body('BodyC',C3,pCcm,mC,IC,system)
system.addforce(torque*sympy.sin(freq*2*sympy.pi*system.t)*A3.z,wNA3)
qNA = -sympy.atan2(N.x.dot(A3.y),N.x.dot(A3.x))
system.add_spring_force1(k,(qNA-preload1)*N.z,wNA3)
qAB = -sympy.atan2(A3.x.dot(B3.y),A3.x.dot(B3.x))
system.add_spring_force1(k,(qAB-preload2)*A3.z,wA3B3)
qBC = -sympy.atan2(B3.x.dot(C3.y),B3.x.dot(C3.x))
system.add_spring_force1(k,(qBC-preload3)*B3.z,wB3C3)
system.addforce(-b*wNA3,wNA3)
system.addforce(-b*wA3B3,wA3B3)
system.addforce(-b*wB3C3,wB3C3)
system.addforcegravity(-g*N.y)
eq = []
eq.append(A3.z-B3.z)
eq.append(pAB-pBA)
eq.append(B3.z-C3.z)
eq.append(pBC-pCB)
if constrain_base:
eq.append(pBase-0*N.x)
eq.append(A3.z-N.z)
eq_d = []
eq_d.extend([item.time_derivative() for item in eq])
if simplify_w:
eq_d.append(wA1-wA2)
eq_d.append(wB1-wB2)
eq_d.append(wC1-wC2)
eq_dd = [item.time_derivative() for item in eq_d]
eq_dd_scalar = []
eq_dd_scalar.append(eq_dd[0].dot(N.x))
eq_dd_scalar.append(eq_dd[0].dot(N.y))
eq_dd_scalar.append(eq_dd[1].dot(N.x))
eq_dd_scalar.append(eq_dd[1].dot(N.y))
eq_dd_scalar.append(eq_dd[1].dot(N.z))
eq_dd_scalar.append(eq_dd[2].dot(N.x))
eq_dd_scalar.append(eq_dd[2].dot(N.y))
eq_dd_scalar.append(eq_dd[3].dot(N.x))
eq_dd_scalar.append(eq_dd[3].dot(N.y))
eq_dd_scalar.append(eq_dd[3].dot(N.z))
ii=4
if constrain_base:
eq_dd_scalar.append(eq_dd[4].dot(N.x))
eq_dd_scalar.append(eq_dd[4].dot(N.y))
eq_dd_scalar.append(eq_dd[4].dot(N.z))
eq_dd_scalar.append(eq_dd[5].dot(N.x))
eq_dd_scalar.append(eq_dd[5].dot(N.y))
ii=6
if simplify_w:
eq_dd_scalar.append(eq_dd[ii+0].dot(A2.x))
eq_dd_scalar.append(eq_dd[ii+0].dot(A2.y))
eq_dd_scalar.append(eq_dd[ii+0].dot(A2.z))
eq_dd_scalar.append(eq_dd[ii+1].dot(B2.x))
eq_dd_scalar.append(eq_dd[ii+1].dot(B2.y))
eq_dd_ | scalar.append(eq_dd[ii+1].dot(B2.z))
eq_dd_scalar.append(eq_dd[ii+2].dot(C2.x))
eq_dd_scalar.append(eq_dd[ii+2].dot(C2.y))
eq_dd_scalar.append(eq_dd[ii+2].dot(C2.z))
system.add_constraint(AccelerationConstraint(eq_dd_scalar))
eq_d_scalar = []
eq_d_scalar.append(eq_d[0].dot(N.x))
eq_d_scalar.append(eq_d[0].dot(N.y))
eq_d_scalar.append(eq_d[1].dot(N.x))
eq_d_scalar.append(e | q_d[1].dot(N.y))
eq_d_scalar.append(eq_d[1].dot(N.z))
eq_d_scalar.append(eq_d[2].dot(N.x))
eq_d_scalar.append(eq_d[2].dot(N.y))
eq_d_scalar.append(eq_d[3].dot(N.x))
eq_d_scalar.append(eq_d[3].dot(N.y))
eq_d_scalar.append(eq_d[3].dot(N.z))
ii=4
if constrain_base:
eq_d_scalar.append(eq_d[4].dot(N.x))
eq_d_scalar.append(eq_d[4].dot(N.y))
eq_d_scalar.append(eq_d[4].dot(N.z))
eq_d_scalar.append(eq_d[5].dot(N.x))
eq_d_scalar.append(eq_d[5].dot(N.y))
ii=6
if simplify_w:
eq_d_scalar.append(eq_d[ii+0].dot(A2.x))
eq_d_scalar.append(eq_d[ii+0].dot(A2.y))
eq_d_scalar.append(eq_d[ii+0].dot(A2.z))
eq_d_scalar.append(eq_d[ii+1].dot(B2.x))
eq_d_scalar.append(eq_d[ii+1].dot(B2.y))
eq_d_scalar.append(eq_d[ii+1].dot(B2.z))
eq_d_scalar.append(eq_d[ii+2].dot(C2.x))
e |
kerneltask/micropython | tests/basics/bytearray_construct_array.py | Python | mit | 314 | 0 | # test construction of bytearray from different objects
try:
from uarray import array
except ImportError:
try:
from array import array
except ImportError:
print("SKIP")
raise SystemExit
# array | s
print(bytearray(array('b', [1, 2])))
print(bytearray(array('h', | [0x101, 0x202])))
|
hexpl0it/plugin.video.genesi-ita | resources/lib/resolvers/mrfile.py | Python | gpl-3.0 | 1,517 | 0.009888 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You shou | ld have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib
from resources.lib.libraries import client
def resolve(url):
try:
result = client.request(url)
pos | t = {}
f = client.parseDOM(result, 'Form', attrs = {'name': 'F1'})[-1]
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = client.request(url, post=post)
url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(result)[-1]
url = client.parseDOM(url, 'a', ret='href')[0]
return url
except:
return
|
rbbratta/virt-test | virttest/utils_libguestfs.py | Python | gpl-2.0 | 5,627 | 0.000711 | """
libguestfs tools test utility functions.
"""
import logging
from autotest.client import os_dep, utils
from autotest.client.shared import error
import propcan
class LibguestfsCmdError(Exception):
"""
Error of libguestfs-tool command.
"""
def __init__(self, details=''):
self.details = details
Exception.__init__(self)
def __str__(self):
return str(self.details)
def lgf_cmd_check(cmd):
"""
To check whether the cmd is supported on this host.
@param cmd: the cmd to use a libguest tool.
@return: None if the cmd is not exist, otherwise return its path.
"""
libguestfs_cmds = ['libguestfs_test_tool', 'guestfish', 'guestmount',
'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
'virt-copy-out', 'virt-df', 'virt-edit',
'virt-filesystems', 'virt-format', 'virt-inspector',
'virt-list-filesystems', 'virt-list-partitions',
'virt-ls', 'virt-make-fs', 'virt-rescue',
'virt-resize', 'virt-sparsify', 'virt-sysprep',
'virt-tar', 'virt-tar-in', 'virt-tar-out',
'virt-win-reg']
if not (cmd in libguestfs_cmds):
raise LibguestfsCmdError("Command %s is not supported by libguestfs yet." % cmd)
try:
return os_dep.command(cmd)
except ValueError:
logging.warning("You have not installed %s on this host.", cmd)
return None
def lgf_command(cmd, **dargs):
"""
Interface of libguestfs tools' commands.
@param cmd: Command line to execute.
@param dargs: standardized command keywords.
@return: CmdResult object.
@raise: LibguestfsCmdError if non-zero exit status
and ignore_status=False
"""
ignore | _status = dargs.get('ignore_status', True)
debug = dargs.get('debug', | False)
timeout = dargs.get('timeout', 60)
if debug:
logging.debug("Running command %s in debug mode.", cmd)
# Raise exception if ignore_status == False
try:
ret = utils.run(cmd, ignore_status=ignore_status,
verbose=debug, timeout=timeout)
except error.CmdError, detail:
raise LibguestfsCmdError(detail)
if debug:
logging.debug("status: %s", ret.exit_status)
logging.debug("stdout: %s", ret.stdout.strip())
logging.debug("stderr: %s", ret.stderr.strip())
# Return CmdResult instance when ignore_status is True
return ret
class LibguestfsBase(propcan.PropCanBase):
"""
Base class of libguestfs tools.
"""
__slots__ = ('ignore_status', 'debug', 'timeout')
def __init__(self, *args, **dargs):
init_dict = dict(*args, **dargs)
init_dict['ignore_status'] = init_dict.get('ignore_status', True)
init_dict['debug'] = init_dict.get('debug', False)
init_dict['timeout'] = init_dict.get('timeout', 60)
super(LibguestfsBase, self).__init__(init_dict)
def set_ignore_status(self, ignore_status):
"""
Enforce setting ignore_status as a boolean.
"""
if bool(ignore_status):
self.dict_set('ignore_status', True)
else:
self.dict_set('ignore_status', False)
def set_debug(self, debug):
"""
Accessor method for 'debug' property that logs message on change
"""
if not self.INITIALIZED:
self.dict_set('debug', debug)
else:
current_setting = self.dict_get('debug')
desired_setting = bool(debug)
if not current_setting and desired_setting:
self.dict_set('debug', True)
logging.debug("Libguestfs debugging enabled")
# current and desired could both be True
if current_setting and not desired_setting:
self.dict_set('debug', False)
logging.debug("Libguestfs debugging disabled")
def libguest_test_tool_cmd(qemuarg=None, qemudirarg=None,
timeoutarg=None, **dargs):
"""
Execute libguest-test-tool command.
@param qemuarg: the qemu option
@param qemudirarg: the qemudir option
@param timeoutarg: the timeout option
@return: a CmdResult object
@raise: raise LibguestfsCmdError
"""
cmd = "libguest-test-tool"
if qemuarg is not None:
cmd += " --qemu '%s'" % qemuarg
if qemudirarg is not None:
cmd += " --qemudir '%s'" % qemudirarg
if timeoutarg is not None:
cmd += " --timeout %s" % timeoutarg
# Allow to raise LibguestfsCmdError if ignore_status is False.
return lgf_command(cmd, **dargs)
def virt_edit_cmd(disk_or_domain, file_path, options=None,
extra=None, expr=None, **dargs):
"""
Execute virt-edit command to check whether it is ok.
Since virt-edit will need uses' interact, maintain and return
a session if there is no raise after command has been executed.
@param disk_or_domain: a img path or a domain name.
@param file_path: the file need to be edited in img file.
@param options: the options of virt-edit.
@param extra: additional suffix of command.
@return: a session of executing virt-edit command.
"""
# disk_or_domain and file_path are necessary parameters.
cmd = "virt-edit '%s' '%s'" % (disk_or_domain, file_path)
if options is not None:
cmd += " %s" % options
if extra is not None:
cmd += " %s" % extra
if expr is not None:
cmd += " -e '%s'" % expr
return lgf_command(cmd, **dargs)
|
openhatch/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_paragraphs.py | Python | agpl-3.0 | 1,329 | 0.009029 | #! /usr/bin/env python
# $Id: test_paragraphs.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['paragraphs'] = [
["""\
A paragraph.
""",
"""\
<document source="test data">
<paragraph>
A paragraph.
"""],
["""\
Paragraph 1.
Paragraph 2.
""",
"""\
<document source="test data">
<paragraph>
Paragraph 1.
<paragraph>
Paragraph 2.
"""],
["""\
Line 1.
Line 2.
Line 3.
""",
"""\
<document source="test data">
<paragraph>
Line 1.
Line 2.
Line 3.
"""],
["""\
Paragraph 1, Line 1.
Line 2.
Line 3.
Paragraph 2, Line 1.
Line 2.
Line 3.
""",
"""\
<document source="test data">
<paragraph>
Paragraph 1, Line 1.
Line 2.
Line 3.
<paragraph>
Paragraph 2, Line 1.
Line 2.
Line 3.
"""],
["""\
A. Einstein was a really
smart dude.
"" | ",
"""\
<document source="test data">
<paragraph>
A. Einstein was a really
smart dude.
"""],
]
if __name__ == '__main__':
import un | ittest
unittest.main(defaultTest='suite')
|
divergentdave/inspectors-general | inspectors/sigar.py | Python | cc0-1.0 | 5,811 | 0.008092 | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector
# https://www | .sigar.mil/
archive = 2008
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
SPOTLIGHT_REPORTS_URL = "https://www.sigar.mil/Newsroom/spotlight/spotlight.xml"
SPEECHES_REPORTS_URL = "https://www.sigar.mil/Newsroom/speeches/speeches.xml"
TESTIMONY_REPORTS_URL = "https://www.sigar | .mil/Newsroom/testimony/testimony.xml"
PRESS_RELEASES_URL = "https://www.sigar.mil/Newsroom/pressreleases/press-releases.xml"
REPORT_URLS = [
("other", SPOTLIGHT_REPORTS_URL),
("press", SPEECHES_REPORTS_URL),
("testimony", TESTIMONY_REPORTS_URL),
("press", PRESS_RELEASES_URL),
("audit", "https://www.sigar.mil/audits/auditreports/reports.xml"),
("inspection", "https://www.sigar.mil/audits/inspectionreports/inspection-reports.xml"),
("audit", "https://www.sigar.mil/audits/financialreports/Financial-Audits.xml"),
("other", "https://www.sigar.mil/SpecialProjects/projectreports/reports.xml"),
("other", "https://www.sigar.mil/Audits/alertandspecialreports/alert-special-reports.xml"),
("semiannual_report", "https://www.sigar.mil/quarterlyreports/index.xml"),
]
BASE_REPORT_URL = "https://www.sigar.mil/allreports/index.aspx"
def run(options):
    """Scrape every SIGAR report feed and save each report in the requested years.

    :param options: standard IG-scraper options; only the year range is read here.
    """
    year_range = inspector.year_range(options, archive)

    # Pull the reports: one RSS/XML feed per report type.
    for report_type, report_url in REPORT_URLS:
        doc = utils.beautifulsoup_from_url(report_url)
        results = doc.select("item")
        if not results:
            # An empty feed most likely means the site layout changed; fail loudly.
            raise inspector.NoReportsFoundError("SIGAR (%s)" % report_type)
        for result in results:
            # report_from() returns None for items that should be skipped.
            report = report_from(result, report_url, report_type, year_range)
            if report:
                inspector.save_report(report)
def report_from(result, landing_url, report_type, year_range):
    """Build a report dict from one feed <item>, or return None to skip it.

    :param result: a BeautifulSoup <item> element from one of the feeds
    :param landing_url: the feed URL, used to resolve the item's relative link
    :param report_type: category string stored on the saved report
    :param year_range: years requested on the command line
    :returns: a report dict, or None if the item is a duplicate or out of range
    """
    report_url = report_url_for_landing_page(result.find("link").next.strip(), landing_url)
    # Two financial-audit links contain an en dash; normalize it to a hyphen.
    # NOTE(review): the first literal has a space after "SIGAR" -- confirm it
    # matches the URL the feed actually serves.
    if report_url in ("https://www.sigar.mil/pdf/audits/Financial_Audits/SIGAR _14-15\u2013FA.pdf",
                      "https://www.sigar.mil/pdf/audits/Financial_Audits/SIGAR_14-14\u2013FA.pdf"):
        report_url = report_url.replace("\u2013", "-")
    # The report id is the linked file's name without its extension.
    report_filename = report_url.split("/")[-1]
    report_id, extension = os.path.splitext(report_filename)
    if result.title:
        title = result.title.text.strip()
    else:
        # Fall back to the id when the feed item carries no title.
        title = report_id
    published_on_text = result.find("pubdate").text.strip()
    published_on = parse_date(published_on_text)
    if report_id == "SIGAR-14-42-AL" and title == "SIGAR 14-42-AL":
        # this report is posted in both "spotlight" and "special reports"
        return
    if report_id == "SIGAR_CSIS_Speech" and published_on.year == 2017:
        # There are two speeches with the same file name
        report_id += "_2017"
    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return
    report = {
        'inspector': 'sigar',
        'inspector_url': "https://www.sigar.mil",
        'agency': 'sigar',
        'agency_name': "Special Inspector General for Afghanistan Reconstruction",
        'type': report_type,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    # Some items link to other agencies' sites; an extensionless URL there is
    # an HTML landing page rather than a downloadable document.
    if report_url.startswith(("https://www.justice.gov/",
                              "http://www.justice.gov/",
                              "https://www.fbi.gov/",
                              "http://www.fbi.gov/",
                              "https://www.usaid.gov/",
                              "http://www.usaid.gov/")):
        if not os.path.splitext(report_url)[1]:
            report['file_type'] = "html"
    return report
def report_url_for_landing_page(relative_url, landing_url):
    """Resolve a feed item's relative link against the report index page.

    Mirrors the URL-rewriting logic in https://www.sigar.mil/js/AllReports.js:
    the spotlight, speeches and testimony feeds all apply the exact same two
    rewrites (ReadFile.aspx moves into the newsroom directory, and a double
    parent reference collapses), while the press-release feed instead prefixes
    parent references with the newsroom directory.  The three identical
    branches of the original are collapsed into one membership test.

    :param relative_url: link text taken from the feed item
    :param landing_url: feed the item came from; selects the rewrite rules
    :returns: an absolute report URL
    """
    # Repair a UTF-8 en dash that arrives mis-decoded as Latin-1 bytes.
    relative_url = relative_url.replace("â\x80\x93", "–")
    if landing_url in (SPOTLIGHT_REPORTS_URL, SPEECHES_REPORTS_URL,
                       TESTIMONY_REPORTS_URL):
        # These three feeds share byte-identical rewrite rules.
        relative_url = relative_url.replace("../ReadFile.aspx", "../newsroom/ReadFile.aspx")
        relative_url = relative_url.replace("../../", "../")
    elif landing_url == PRESS_RELEASES_URL:
        # Order matters: the generic "../" rewrite runs before the
        # ReadFile.aspx rewrite, exactly as in AllReports.js.
        relative_url = relative_url.replace("../", "../newsroom/")
        relative_url = relative_url.replace("../ReadFile.aspx", "../newsroom/ReadFile.aspx")
    return urljoin(BASE_REPORT_URL, relative_url)
def parse_date(text):
    """Parse a feed date such as "Monday, July 1st, 2013" into a datetime.

    The feeds sometimes attach an ordinal suffix ("st", "nd", "rd", "th")
    to the day number, so each candidate suffix is tried in turn.

    :raises Exception: if no candidate format matches *text*.
    """
    for suffix in ('', 'st', 'nd', 'rd', 'th'):
        fmt = '%A, %B %d' + suffix + ', %Y'
        try:
            return datetime.datetime.strptime(text, fmt)
        except ValueError:
            continue
    raise Exception("Couldn't parse date from {}".format(text))
utils.run(run) if (__name__ == "__main__") else None
|
defm03/toraeru | test/loli_GUI.py | Python | gpl-3.0 | 1,353 | 0.011086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import kivy
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.image i | mport Image
from kivy.uix.button import Button
kivy.require('1.7.3')
text =""
class ShowLogo(Image):
def __init__(self,**kwargs):
super(Image, self).__init__(**kwargs)
class GetInfo(GridLayout):
def __init__(self,**kwargs):
super(GetInfo, self).__init__(**kwargs)
self.cols = 2
self.text=text
self.add_widget(Label(text='Gelbooru | limit'))
self.limit = TextInput(text="100",multiline=False)
self.add_widget(self.limit)
def on_text(instance,value):
print('widget: ',instance,' - val: ',value)
def on_enter(instance,value):
print('user pressed enter in: ',instance)
if self.limit.bind(on_text_validate=on_enter):
a = int(outnumb)
# self.printbutton = Button(text='Print')
# self.printbutton.bind(on_press=self.callback)
# self.add_widget(self.printbutton)
#def callback(self,evt=None):
# return self.add_widget(Label(text=self.text_input.text))
class AppMain(App):
def build(self):
img = Image(source="res/wikipe-tan.png")
return GetInfo()
AppMain().run() |
macknowak/simtools | simtools/tests/test_base.py | Python | gpl-3.0 | 1,438 | 0.00765 | # -*- coding: utf-8 -*-
"""Unit tests of assorted base data structures and common functions."""
import pytest
from simtools.base import Dict, is_iterable, is_string
def test_is_iterable():
    """is_iterable() accepts containers and strings but rejects scalars."""
    # Sequences of integers
    assert is_iterable([1, 2, 3])
    assert is_iterable((1, 2, 3))
    # Sequences of strings
    assert is_iterable(["abc", "def", "ghi"])
    assert is_iterable(("abc", "def", "ghi"))
    # Mappings are iterable (over their keys)
    assert is_iterable({1: "abc", 2: "def", 3: "ghi"})
    # A bare integer is not iterable
    assert not is_iterable(1)
    # Strings are iterable (character by character)
    assert is_iterable("abc")
def test_is_string():
    """is_string() accepts only strings, not containers of strings."""
    # A plain string
    assert is_string("abc")
    # An integer is not a string
    assert not is_string(1)
    # A list of strings is still not a string
    assert not is_string(["abc", "def", "ghi"])
    # Neither is a dict keyed by strings
    assert not is_string({"abc": 1, "def": 2, "ghi": 3})
def test_dict_attr_access():
    """Dict exposes item access and attribute access interchangeably."""
    d = Dict()
    d['a'] = 1
    assert d.a == 1
    d.b = 2
    assert d['b'] == 2
    # A missing key surfaces as AttributeError when read as an attribute.
    with pytest.raises(AttributeError):
        d.c
def test_dict_attr_deletion():
    """Deleting via attribute syntax removes the underlying dict key."""
    d = Dict()
    d['a'] = 1
    del d.a
    # Once deleted, the key is gone through both access styles.
    with pytest.raises(KeyError):
        d['a']
    with pytest.raises(AttributeError):
        d.a
|
poppogbr/genropy | gnrpy/gnr/web/gnrwebstruct.py | Python | lgpl-2.1 | 85,061 | 0.008911 | #-*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# package : GenroPy web - see LICENSE for details
# module gnrsqlclass : Genro Web structures implementation
# Copyright (c) : 2004 - 2007 Softwell sas - Milano
# Written by : Giovanni Porcari, Michele Bertoldi
# Saverio Porcari, Francesco Porcari , Francesco Cavazzana
#--------------------------------------------------------------------------
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#import weakref
from gnr.core.gnrbag import Bag,BagCbResolver,DirectoryResolver
from gnr.core.gnrstructures import GnrStructData
from gnr.core import gnrstring
from gnr.core.gnrdict import dictExtract
from gnr.core.gnrlang import extract_kwargs
from time import time
from copy import copy
class StructMethodError(Exception):
pass
def struct_method(func_or_name):
    """A decorator. Allow to register a new method (in a page or in a component)
    that will be available in the web structs::

        @struct_method
        def includedViewBox(self, bc, ...):
            pass

        def somewhereElse(self, bc):
            bc.includedViewBox(...)

    If the method name includes an underscore, only the part that follows the
    first underscore will be the struct method's name::

        @struct_method
        def iv_foo(self, bc, ...):
            pass

        def somewhereElse(self, bc):
            bc.foo(...)

    You can also pass a name explicitly::

        @struct_method('bar')
        def foo(self, bc, ...):
            pass

        def somewhereElse(self, bc):
            bc.bar(...)
    """
    def register(name, func):
        # Record the mapping from public struct name to the implementation
        # method's real name in the module-global registry on GnrDomSrc.
        func_name = func.__name__
        existing_name = GnrDomSrc._external_methods.get(name, None)
        if existing_name and (existing_name != func_name):
            # If you want to override a struct_method, be sure to call its implementation method in the same way as the original.
            # (Otherwise, the result would NOT be well defined due to uncertainty in the mixin process at runtime plus the fact that the GnrDomSrc is global)
            raise StructMethodError(
                "struct_method %s is already tied to implementation method %s" % (repr(name), repr(existing_name)))
        GnrDomSrc._external_methods[name] = func_name
    if isinstance(func_or_name, basestring):
        # Called as @struct_method('name'): return a decorator bound to name.
        # NOTE(review): basestring implies this module targets Python 2.
        name = func_or_name
        def decorate(func):
            register(name, func)
            return func
        return decorate
    else:
        # Called as a bare @struct_method: derive the public name from the
        # function name, dropping everything up to the first underscore.
        name = func_or_name.__name__
        if '_' in name:
            name = name.split('_', 1)[1]
        register(name, func_or_name)
        return func_or_name
class GnrDomSrcError(Exception):
pass
class GnrDomElem(object):
    """Callable proxy that creates a child of a fixed tag on a source node.

    Instances bind a structure node (*obj*) to a *tag* name; calling the
    instance delegates to ``obj.child(tag, ...)`` with all arguments
    forwarded unchanged.
    """

    def __init__(self, obj, tag):
        # The owning structure node and the tag this element will build.
        self.obj = obj
        self.tag = tag

    def __call__(self, *args, **kwargs):
        # Forward everything to the node's child() factory and hand back
        # whatever it produces.
        return self.obj.child(self.tag, *args, **kwargs)
class GnrDomSrc(GnrStructData):
"""GnrDomSrc class"""
_external_methods = dict()
def makeRoot(cls, page, source=None):
"""Build the root through the :meth:`gnr.core.gnrstructures.GnrStructData.makeRoot`
method and return it
:param cls: the structure class
:param page: the webpage instance
:param source: the filepath of the xml file
:returns: the root instance for the given class
"""
root = GnrStructData.makeRoot(source=source, protocls=cls)
#root._page=weakref.ref(page)
root._page = page
return root
makeRoot = classmethod(makeRoot)
def _get_page(self):
#return self.root._page()
return self.root._page
page = property(_get_page)
def _get_parentfb(self):
if hasattr(self, 'fbuilder'):
return self.fbuilder
elif self.parent:
return self.parent.parentfb
parentfb = property(_get_parentfb)
def __getattr__(self, fname):
fnamelower = fname.lower()
if (fname != fnamelower) and hasattr(self, fnamelower):
return getattr(self, fnamelower)
if fnamelower in self.genroNameSpace:
return GnrDomElem(self, '%s' % (self.genroNameSpace[fnamelower]))
if fname in self._external_methods:
handler = getattr(self.page, self._external_methods[fname])
return lambda *args, **kwargs: handler(self, *args,**kwargs)
for n in self._nodes:
if n.attr.get('_attachname') == fname:
return n._value
autoslots = self._parentNode.attr.get('autoslots')
if autoslots:
autoslots = autoslots.split(',')
if fname in autoslots:
return self.child('autoslot',name=fname,_attachname=fname)
parentTag = self._parentNode.attr.get('tag','').lower()
if parentTag and not fnamelower.startswith(parentTag):
subtag = ('%s_%s' %(parentTag,fname)).lower()
if hasattr(self,subtag):
return getattr(self,subtag)
raise AttributeError("object has no attribute '%s'" % fname)
def getAttach(self, attachname):
"""add???
:param attachname: add???
:returns: ``None``
"""
for n in self._nodes:
if n.attr.get('_attachname') == attachname:
return n._value
return None
def child(self, tag, name=None, envelope=None, **kwargs):
"""Set a new item of the ``tag`` type into the current structure through
the :meth:`gnr.core.gnrstructures.GnrStructData.child` and return it
:param tag: add???
:param name: add???. Default value is ``None``
:param envelope: add???. Default value is ``None``
:returns: a child
"""
if 'fld' in kwargs:
fld_dict = self.getField(kwargs.pop('fld'))
fld_dict.update(kwargs)
kwargs = fld_dict
t = kwargs.pop('tag', tag)
if tag == 'input':
tag = t
if hasattr(self, 'fbuilder'):
if not tag in (
'tr', 'data', 'script', 'func', 'connect', 'dataFormula', 'dataScript', 'dataRpc', 'dataRemote',
'dataRecord', 'dataSelection', 'dataController'):
if tag == 'br':
return self.fbuilder.br()
if not 'disabled' in kwargs:
kwargs['disabled'] = self.childrenDisabled
return self.fbuilder.place(tag=tag, name=name, **kwargs)
if envelope:
obj = GnrStructData.child(self, 'div', name='*_#', **envelope)
else:
obj = self
return GnrStructData.child(obj, tag, name=name, **kwargs)
def htmlChild(self, tag, content, value=None, **kwargs):
"""Create an html child and return it
:param tag: the html tag
:param content: the html content
:param value: add???. Default value is ``None``
:returns: the child
"""
if content :
kwargs['innerHTML'] = content
content = None
elif value:
kwargs['innerHTML'] = value
value = None
return self.child(tag, content=content, **kwargs)
def nodeById(self, id |
PinguinoIDE/pinguino-ide | cmd/pinguino-reset.py | Python | gpl-2.0 | 59 | 0 | #!/usr/bin/env python
from pi | nguin | o import pinguino_reset
|
vileopratama/vitech | src/addons/event/wizard/event_confirm.py | Python | mit | 441 | 0.002268 | # -*- coding: utf-8 -*-
# Part of | Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, api
class event_confirm(models.TransientModel):
    """Event Confirmation wizard: confirms the events passed via context."""
    _name = "event.confirm"

    @api.multi
    def confirm(self):
        # The caller supplies the target events through the 'event_ids'
        # context key; default to an empty list when absent.
        events = self.env['event.event'].browse(self._context.get('event_ids', []))
        events.do_confirm()
        # Close the wizard window once the events are confirmed.
        return {'type': 'ir.actions.act_window_close'}
|
manasgarg/qless-blinker-bridge | setup.py | Python | bsd-3-clause | 429 | 0.065268 | #!/usr/bin/env python
from distutils.core import setup
setup( name="qless_blinker",
version = "0.1",
d | escription = "A bridge between qless & blinker.",
author = "Manas Garg",
author_email = "manasgarg@gmail.com",
license = "BSD License",
url = "https://github.com/manasgarg/qless-blinker-bridge",
#data_files = ["LICENSE", "Readme.md"],
packages = ["qless_blinker"],
long_description | = ""
)
|
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/Pools/Frank/2_speakercraft_lms.py | Python | gpl-3.0 | 1,883 | 0.013808 | from ImageScripter import *
from elan import *
if Viewer.shudder.Exists() ==True:
Viewer.shudder.Click()
else:
pass
#Viewer.homeicon9.Click()
#Viewer.homeicon9.Click()
Viewer.media10items.Click()
try:
Viewer.audiogroup.Click()
except Exception as e:
print(e)
if Viewer.irtester.Exists() == True:
Viewer.homeicon10items.Click()
Viewer.media10items.Click()
Viewer.audiogroup.Click()
else:
Viewer.audiogroup.Click()
Viewer.mediazones.Click(xoffset=40,yoffset=100)
#####################################################
sleep(3)
Viewer.zoneoff2.ClickFast()
sleep(1)
Viewer.smalloffbottomleft.Click()
sleep(1)
###########################################################
for i in range(10):
try:
Viewer.speakercraftlms.Click()
except:
if i >= 9:
raise ValueError('Zone off not working')
else:
pass
Sleep(10)
Viewer.zoneoff2.ClickFast()
sleep(1)
Viewer.smalloffbottomleft.Click()
sleep(1)
Viewer.speakercraftlms.Click()
| break
Viewer.pandoraicon.Click()
Viewer.yourstations.Click()
Viewer.dickdaleradio.Click()
Viewer.bluearrow.Click(threshold = .92)
Viewer.lmsback.Click()
Viewer.lmsback.Click()
Viewer.pandoradickdaleradio.Wait()
Viewer.lmssettings.Click()
Viewer.tidal.Click(xoffset=525,yoffset=0)
Viewer.lmsback.Click()
Viewer.tidalicon.Wait()
Viewer.lmssettings.Click()
Viewer.tidal.Click(xoffset=525,yoffset=0)
Viewer.lmsback.Click()
Viewer.tidalicon.WaitVanish()
Viewer.lmsoffbig.ClickFast()
sleep(1)
Viewer.lmso | ffoff.ClickFast()
Viewer.shudder.Click(threshold = .92)
Viewer.homeicon9.Click(threshold = .92)
Viewer.homeicon10items.Wait(seconds = 30)
Viewer.homeicon10items.Click(threshold = .92)
#Viewer.shudder.Click(threshold = .92)
#Viewer.homeicon9.Click()
#Viewer.homeicon9.Click()
|
matthiaskramm/corepy | corepy/arch/spu/platform/linux_spufs/spre_linux_spu.py | Python | bsd-3-clause | 33,201 | 0.019698 | # Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# |
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, B | UT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__doc__="""
SPE for the Cell SPU
"""
import corepy.lib.extarray as extarray
import corepy.lib.allocator as allocator
import corepy.spre.spe as spe
import spu_exec
import corepy.arch.spu.isa as spu
import corepy.arch.spu.lib.util as util
try:
import cell_fb
except:
cell_fb = None
ExecParams = spu_exec.ExecParams
# ------------------------------
# Registers
# ------------------------------
class SPURegister(spe.Register):
    """SPU register, constructed from an integer index or an 'rN' name string.

    Sets ``self.reg`` (the integer register index) and ``self.name`` (the
    canonical ``"rN"`` string) regardless of which form was supplied.
    """
    def __init__(self, name):
        if isinstance(name, int):
            # Integer index: derive the canonical name from it.
            self.reg = name
            self.name = "r%d" % name
        elif isinstance(name, str):
            # Name string: must be 'r' followed by a decimal index.
            self.name = name
            try:
                self.reg = int(name[1:])
            except ValueError:
                raise Exception("Invalid register name %s" % name)
            if name[0] != 'r':
                raise Exception("Invalid register name %s" % name)
        else:
            raise Exception("Invalid register name %s" % str(name))

        #self.acquired = False
        return
# ------------------------------
# Constants
# ------------------------------
WORD_TYPE = 'I' # array type that corresponds to 1 word
WORD_SIZE = 4 # size in bytes of one word
WORD_BITS = WORD_SIZE * 8 # number of bits in a word
INT_SIZES = {'b':1, 'c':1, 'h':2, 'i':4, 'B':1, 'H':2, 'I':4}
# Parameters - (register, slot)
REG, SLOT = (0, 1)
spu_param_1 = (3, 1)
spu_param_2 = (3, 2)
spu_param_3 = (3, 3)
spu_param_4 = (4, 1)
spu_param_5 = (4, 2)
spu_param_6 = (4, 3)
spu_param_7 = (5, 0)
spu_param_8 = (5, 1)
spu_param_9 = (5, 2)
spu_param_10 = (5, 3)
N_SPUS = spu_exec.get_num_avail_spus()
# ------------------------------------------------------------
# Helpers
# ------------------------------------------------------------
def copy_param(code, target, source):
    """
    Copy a parameter from source reg to preferred slot in the target reg.

    For params in slot 0, this is just an add-immediate of zero.
    For params in other slots, the source is rotated left by slot*4 bytes.
    Note that other values in the source are copied, too.

    :param code: instruction stream the copy instructions are appended to
    :param target: destination register
    :param source: (register, slot) pair identifying the parameter
    """
    if source[SLOT] != 0:
        # Rotate the wanted word into the preferred (first) slot.
        code.add(spu.rotqbyi(target, source[REG], source[SLOT] * 4))
    else:
        # Already in the preferred slot: a plain register-to-register copy.
        code.add(spu.ai(target, source[REG], 0))
    return
#ALIGN_UP = 0
#ALIGN_DOWN = 1
#def align_addr(addr, align = 16, dir = ALIGN_DOWN):
# """
# Round an address to the nearest aligned address based on align.
# Round up or down based on dir.
# """
# if dir == ALIGN_DOWN:
# return addr - (addr % align)
# else:
# return addr + (align - addr % align)
# ------------------------------------------------------------
# InstructionStream
# ------------------------------------------------------------
class InstructionStream(spe.InstructionStream):
"""
SPU Instruction Stream.
Two assumptions:
o We have the processor untill we're done
o If we're prempted, the whole state is saved automagically
Based on these and the fact that we are a leaf node, no register
saves are attempted and only the raw instructions stream (no
prologue/epilogue) is used.
"""
def __init__(self, prgm, optimize = False):
if not isinstance(prgm, Program):
raise TypeError("ERROR: A Program must be passed to InstructionStream. Have you updated your code for the code composition changes?")
spe.InstructionStream.__init__(self, prgm)
self._optimize = optimize
self.r_zero = SPURegister("r0")
self.gp_return = SPURegister("r1")
self.fp_return = self.gp_return
return
# ------------------------------
# Execute/ABI support
# ------------------------------
# TODO - this is broken, how should it be fixed?
def debug_set(self, idx, inst):
self._prologue[idx] = inst.render()
self[idx] = inst
return
# def add(self, inst, optimize_override = False):
# if not optimize_override and self._optimize:
# # binary_string_inst = spu.DecToBin(inst)
# #op = 'nop'
# # if binary_string_inst[0:3] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:3]]
# # elif binary_string_inst[0:6] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:6]]
# # elif binary_string_inst[0:7] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:7]]
# # elif binary_string_inst[0:8] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:8]]
# # elif binary_string_inst[0:9] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:9]]
# # elif binary_string_inst[0:10] in spu.inst_opcodes:
# # op = spu.inst_opcodes[binary_string_inst[0:10]]
#
# # TODO - AWF - thanks to labels, this won't work quite right anymore
# pipeline = inst.cycles[0]
#
# if (len(self) % 2 == 0) and pipeline == 0:
# InstructionStream.add(self, inst)
#
# elif (len(self) % 2 == 1) and pipeline == 1:
# InstructionStream.add(self, inst)
# elif (len(self) % 2 == 0) and pipeline == 1:
# InstructionStream.add(self, spu.nop(0))
# InstructionStream.add(self, inst)
# elif (len(self) % 2 == 1) and pipeline == 0:
# InstructionStream.add(self, spu.lnop(0))
# InstructionStream.add(self, inst)
#
# else:
# spe.InstructionStream.add(self, inst)
#
# return len(self)
#class ParallelInstructionStream(InstructionStream):
#
# def __init__(self, optimize=False):
# InstructionStream.__init__(self, optimize)
#
# self.r_rank = self.acquire_register()
# self.r_size = self.acquire_register()
#
# self.r_block_size = None
# self.r_offset = None
#
# # All the params are stored in r_rank
# self.r_params = self.r_rank
#
# # User/library supplied data size, used by processor to determine
# # block and offset for an execution run. This value is in bytes.
# self.raw_data_size = None
#
# return
#
# def _synthesize_prologue(self):
# """
# Add raw_data_size/offest support code.
# """
#
# InstructionStream._synthesize_prologue(self)
#
# # Parallel parameters are passed in the prefered slot and |
TravisFSmith/SweetSecurity | sweetSecurity/client/spoof.py | Python | apache-2.0 | 1,267 | 0.049724 | import sqlite3
import logging
from time import sleep
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import sweet | SecurityDB
dbPath="/opt/sweetsecurity/client/SweetSecurity.db"
def convertMAC(mac):
    """Insert colons into a bare 12-hex-digit MAC string.

    E.g. ``"b827eb123456"`` becomes ``"b8:27:eb:12:34:56"``.  Like the
    character-by-character original, this indexes the first twelve
    characters directly, so a shorter input raises IndexError.
    """
    pairs = [mac[i] + mac[i + 1] for i in range(0, 12, 2)]
    return ":".join(pairs)
def getMac():
    """Return the hardware address of the first non-null interface.

    Iterates every interface scapy reports and returns the first MAC that
    is not the all-zero placeholder; falls through (returning None) when
    every interface reports zeros.
    """
    myMac = [get_if_hwaddr(i) for i in get_if_list()]
    for mac in myMac:
        # Loopback/virtual interfaces report an all-zero address; skip them.
        if(mac != "00:00:00:00:00:00"):
            return mac
def start():
    """ARP-spoof every active, non-ignored host against the default gateway.

    Runs forever: each second it re-reads the gateway and the host table
    from the local SQLite database and sends a pair of spoofed ARP
    who-has packets per host (one poisoning the gateway's view of the
    victim, one poisoning the victim's view of the gateway).
    """
    logger = logging.getLogger('SweetSecurityLogger')
    while 1:
        try:
            dfgwInfo=sweetSecurityDB.getDfgw()
            dfgw=dfgwInfo['dfgw']
            dfgwMAC=dfgwInfo['dfgwMAC']
            dfgwMAC=convertMAC(dfgwMAC)
            conn = sqlite3.connect(dbPath)
            c = conn.cursor()
            for row in c.execute('SELECT * FROM hosts where active = 1 and ignore = 0'):
                # row[2] is the host IP, row[3] its raw MAC string.
                logger.info("Spoofing Device: ip=%s, mac=%s",row[2],row[3])
                #Spoof the things...
                victimMac=convertMAC(row[3])
                packet = Ether()/ARP(op="who-has",hwdst=dfgwMAC,pdst=dfgw,psrc=row[2])
                sendp(packet)
                packet = Ether()/ARP(op="who-has",hwdst=victimMac,pdst=row[2],psrc=dfgw)
                sendp(packet)
            conn.close()
            sleep(1)
        # NOTE(review): Python 2 except syntax; any error (DB, network)
        # is logged and the loop simply retries on the next iteration.
        except Exception,e:
            logger.info("Error spoofing device: %s" % str(e))
EliotBerriot/savior | src/errors.py | Python | gpl-3.0 | 1,506 | 0.00664 | # Copyright (C) 2013 Eliot Berriot <contact@eliotberriot.com>
#
# This file is part of savior.
#
# Savior is free software: you can redist | ribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at | your option) any later version.
#
# Savior is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Savior. If not, see <http://www.gnu.org/licenses/>.
from utils import LoggerAware
class SaviorError(LoggerAware, Exception):
    """Base class for savior errors: logs its message at critical level
    as soon as it is constructed."""
    def __init__(self, message):
        # Subclasses customize the final message via set_message().
        self.set_message(message)
        self.get_logger()
        self.log(self.message, "critical")

    def set_message(self, message):
        # Hook point: subclasses override this to wrap the raw message.
        self.message = message

    def __str__(self):
        return self.message
class ParseConfigError(SaviorError):
    """Raised when a configuration file cannot be parsed."""
    def set_message(self, message):
        self.message = """Error while parsing config file : {0}""".format(message)
class CheckConfigError(SaviorError):
    """Raised when configuration validation or host connectivity checks fail."""
    def set_message(self, message):
        self.message = """Savior has met a critical error while checking configuration files and connection to hosts: {0}. Please double-check your hosts.ini and settings.ini files.""".format(message)
|
mayankjohri/LetsExplorePython | Section 2 - Advance Python/Chapter S2.11 Multiprocessing and Threading/code/4_threading.py | Python | gpl-3.0 | 651 | 0.013825 | import threading
import time
def worker():
    """Demo thread target: announce start, idle two seconds, announce exit."""
    # NOTE(review): currentThread()/getName() are the legacy spellings
    # (current_thread()/.name in modern Python); left unchanged here.
    print (threading.currentThread().getName() + 'Starting')
    time.sleep(2)
    print (threading.currentThread().getName()+'Exiting')
def | my_service():
print (threading.currentThread().getName()+ 'Starting')
time.sleep(3)
print (threading.currentThread().getName()+'Exiting')
t = threading.Thread(name='my_service', target=my_service)
w = threading.Th | read(name='worker bee', target=worker)
w2 = threading.Thread(target=worker) # use default name
w3 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
w3.start()
t.start() |
janies/dataleach | setup.py | Python | bsd-3-clause | 868 | 0.021889 | #!/usr/bin/env python
from setuptools import setup, find_packages
#try:
# import setuptools_git
#except ImportError:
# print "WARNING!"
# print "We need the setuptools-git package to be installed for"
# print "some of the setup.py targets to work correctly."
PACKAGE = 'dataleach'
VERSION = '0.1'
setup(
name = PACKAGE,
version = VERSION,
package_dir = {'': 'src'},
packages = find_packages('src', exclude=['dataleach.tests',
'dataleach.tests.*']),
install_requires = [ #"librsync",
| "requests",
"feedparser",
| "ijson",
#"pysync",
],
entry_points = {"console_scripts":
['leach = dataleach.leach:main']},
test_suite = "dataleach.tests",
)
|
Shemahmforash/thisdayinmusic.net | thisdayinmusic/settings.py | Python | mit | 4,625 | 0.00173 | """
Django settings for thisdayinmusic project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import raven
from decouple import config, Csv
from os.path import join, dirname
from spotipy import oauth2
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', 'secret_key')
SECURE_CONTENT_TYPE_NOSNIFF = config('SECURE_CONTENT_TYPE_NOSNIFF', default=False, cast=bool)
SECURE_BROWSER_XSS_FILTER = config('SECURE_BROWSER_XSS_FILTER', default=False, cast=bool)
SESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', default=False, cast=bool)
CSRF_COOKIE_SECURE = config('CSRF_COOKIE_SECURE', default=False, cast=bool)
SECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', default=False, cast=bool)
X_FRAME_OPTIONS = 'DENY'
API_BASE_ADDRESS = config("API_BASE_ADDRESS", 'http://localhost')
CLIENT_ID = config('SPOTIPY_CLIENT_ID', 'client id')
CLIENT_SECRET = config('SPOTIPY_CLIENT_SECRET', 'client secret')
REDIRECT_URI = config('SPOTIPY_REDIRECT_URI', 'http://127.0.0.1:8000/callback')
SPOTIFY_SCOPE = config('SPOTIFY_SCOPE', '')
SPOTIFY_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SPOTIFY_SCOPE)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ENVIRONMENT = config('ENVIRONMENT', default='PROD')
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv(), default='127.0.0.1')
CSRF_TRUSTED_ORIGINS = config('CSRF_TRUSTED_ORIGINS', cast=Csv(), default='')
STATIC_ROOT = config("STATIC_ROOT", default='/static')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'events',
'django_extensions',
]
if ENVIRONMENT == 'PROD':
INSTALLED_APPS.append('raven.contrib.django.raven_compat')
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware', |
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thisdayinmusic.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': | True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'thisdayinmusic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
if ENVIRONMENT == 'PROD':
RAVEN_CONFIG = {
'dsn': config('RAVEN_DSN', ''),
'release': raven.fetch_git_sha(os.path.abspath(os.curdir)),
}
|
F5Networks/f5-common-python | f5/bigip/tm/asm/policies/methods.py | Python | apache-2.0 | 1,405 | 0.001426 | # coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
class Methods_s(Collection):
"""BIG-IP® ASM Methods sub-collection."""
def __init__(self, policy):
super(Methods_s, self).__init__(policy)
self._meta_data['object_has_stats'] = False
self._meta_data['allowed_lazy_attributes'] = [Method]
self._meta_data['required_json_kind'] = 'tm:asm:policies:methods:methodcollectionstate'
self._meta_data['attribute_registry'] = {
'tm:asm:policies:methods:methodstate': Method
}
c | lass Method(AsmResource):
"""BIG-IP® ASM Methods Resource."""
def __init__(self, methods_s):
super(Method, self).__in | it__(methods_s)
self._meta_data['required_json_kind'] = 'tm:asm:policies:methods:methodstate'
|
horizon-institute/chariot | hub/emonhub/src/emonhub_setup.py | Python | mit | 4,819 | 0.005395 | """
This code is released under the GNU Affero General Public License.
OpenEnergyMonitor project:
http://openenergymonitor.org
"""
import time
import logging
from configobj import ConfigObj
"""class EmonHubSetup
User interface to setup the hub.
The settings attribute stores the settings of the hub. It is a
dictionary with the following keys:
'hub': a dictionary containing the hub settings
'interfacers': a dictionary containing the interfacers
'reporters': a dictionary containing the reporters
The hub settings are:
'loglevel': the logging level
interfacers and reporters are dictionaries with the following keys:
| 'Type': class name
'init_settings': dictionary with initialization settings
'runtimesettings': dictionary with runtime settings
Initialization and runtime settings depend on the interfacer and
| reporter type.
The run() method is supposed to be run regularly by the instantiater, to
perform regular communication tasks.
The check_settings() method is run regularly as well. It checks the settings
and returns True is settings were changed.
This almost empty class is meant to be inherited by subclasses specific to
each setup.
"""
class EmonHubSetup(object):
def __init__(self):
# Initialize logger
self._log = logging.getLogger("EmonHub")
# Initialize settings
self.settings = None
def run(self):
"""Run in background.
To be implemented in child class.
"""
pass
def check_settings(self):
"""Check settings
Update attribute settings and return True if modified.
To be implemented in child class.
"""
class EmonHubFileSetup(EmonHubSetup):
def __init__(self, filename):
# Initialization
super(EmonHubFileSetup, self).__init__()
# Initialize update timestamp
self._settings_update_timestamp = 0
self._retry_time_interval = 5
# create a timeout message if time out is set (>0)
if self._retry_time_interval > 0:
self.retry_msg = " Retry in " + str(self._retry_time_interval) + " seconds"
else:
self.retry_msg = ""
# Initialize attribute settings as a ConfigObj instance
try:
self.settings = ConfigObj(filename, file_error=True)
# Check the settings file sections
self.settings['hub']
self.settings['interfacers']
self.settings['reporters']
except IOError as e:
raise EmonHubSetupInitError(e)
except SyntaxError as e:
raise EmonHubSetupInitError(
'Error parsing config file \"%s\": ' % filename + str(e))
except KeyError as e:
raise EmonHubSetupInitError(
'Configuration file error - section: ' + str(e))
def check_settings(self):
"""Check settings
Update attribute settings and return True if modified.
"""
# Check settings only once per second
now = time.time()
if now - self._settings_update_timestamp < 0:
return
# Update timestamp
self._settings_update_timestamp = now
# Backup settings
settings = dict(self.settings)
# Get settings from file
try:
self.settings.reload()
except IOError as e:
self._log.warning('Could not get settings: ' + str(e) + self.retry_msg)
self._settings_update_timestamp = now + self._retry_time_interval
return
except SyntaxError as e:
self._log.warning('Could not get settings: ' +
'Error parsing config file: ' + str(e) + self.retry_msg)
self._settings_update_timestamp = now + self._retry_time_interval
return
except Exception:
import traceback
self._log.warning("Couldn't get settings, Exception: " +
traceback.format_exc() + self.retry_msg)
self._settings_update_timestamp = now + self._retry_time_interval
return
if self.settings != settings:
# Check the settings file sections
try:
self.settings['hub']
self.settings['interfacers']
self.settings['reporters']
except KeyError as e:
self._log.warning("Configuration file missing section: " + str(e))
else:
return True
"""class EmonHubSetupInitError
Raise this when init fails.
"""
class EmonHubSetupInitError(Exception):
pass
|
OCA/event | event_mail/models/event_mail.py | Python | agpl-3.0 | 1,566 | 0 | # Copyright 2017 Tecnativa - Sergio Teruel <sergio.teruel@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class EventMailSchedulerTemplate(models.Model):
_name = "event.mail.scheduler.template"
_inherit = "event.mail"
_description = "Event mail scheduler template"
event_id = fields.Many2one(required=False)
event_mail_template_id = fields.Many2one(
comodel_name="event.mail.template",
string="Event Mail Template",
required=True,
ondelete="cascade",
)
class EventMailTemplate(models.Model):
_name = "event.mail.template"
_description = "Scheduling templates for events"
@api.model
def _default_scheduler_template_ids(self):
return [
{
"notification_type": "mail",
| "interval_unit": "now",
"interval_type": "after_sub",
| "template_id": self.env.ref("event.event_subscription").id,
},
{
"notification_type": "mail",
"interval_nbr": 10,
"interval_unit": "days",
"interval_type": "before_event",
"template_id": self.env.ref("event.event_reminder").id,
},
]
name = fields.Char()
scheduler_template_ids = fields.One2many(
comodel_name="event.mail.scheduler.template",
inverse_name="event_mail_template_id",
string="Mail Schedule",
default=_default_scheduler_template_ids,
)
|
sarahn/ganeti | test/py/ganeti.utils.io_unittest.py | Python | gpl-2.0 | 36,321 | 0.00669 | #!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.utils.io"""
import os
import tempfile
import unittest
import shutil
import glob
import time
import signal
import stat
import errno
from ganeti import constants
from ganeti import utils
from ganeti import compat
from ganeti import errors
import testutils
class TestReadFile(testutils.GanetiTestCase):
def testReadAll(self):
data = utils.ReadFile(testutils.TestDataFilename("cert1.pem"))
self.assertEqual(len(data), 814)
h = compat.md5_hash()
h.update(data)
self.assertEqual(h.hexdigest(), "a491efb3efe56a0535f924d5f8680fd4")
def testReadSize(self):
data = utils.ReadFile(testutils.TestDataFilename("cert1.pem"),
size=100)
self.assertEqual(len(data), 100)
h = compat.md5_hash()
h.update(data)
self.assertEqual(h.hexdigest(), "893772354e4e690b9efd073eed433ce7")
def testCallback(self):
def _Cb(fh):
self.assertEqual(fh.tell(), 0)
data = utils.ReadFile(testutils.TestDataFilename("cert1.pem"), preread=_Cb)
self.assertEqual(len(data), 814)
def testError(self):
self.assertRaises(EnvironmentError, utils.ReadFile,
"/dev/null/does-not-exist")
class TestReadOneLineFile(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
def testDefault(self):
data = utils.ReadOneLineFile(testutils.TestDataFilename("cert1.pem"))
self.assertEqual(len(data), 27)
self.assertEqual(data, "-----BEGIN CERTIFICATE-----")
def testNotStrict(self):
data = utils.ReadOneLineFile(testutils.TestDataFilename("cert1.pem"),
strict=False)
self.assertEqual(len(data), 27)
self.assertEqual(data, "-----BEGIN CERTIFICATE-----")
def testStrictFailure(self):
self.assertRaises(errors.GenericError, utils.ReadOneLineFile,
testutils.TestDataFilename("cert1.pem"), strict=True)
def testLongLine(self):
dummydata = (1024 * "Hello World! ")
myfile = self._CreateTempFile()
utils.WriteFile(myfile, data=dummydata)
datastrict = utils.ReadOneLineFile(myfile, strict=True)
datalax = utils.ReadOneLineFile(myfile, strict=False)
self.assertEqual(dummydata, datastrict)
self.assertEqual(dummydata, datalax)
def testNewline(self):
myfile = self._CreateTempFile()
myline = "myline"
for nl in ["", "\n", "\r\n"]:
dummydata = "%s%s" % (myline, nl)
utils.WriteFile(myfil | e, data=dummydata)
datalax = utils.ReadOneLineFile(myfile, strict=False)
self.assertEqual(myline, datalax)
datastrict = utils.ReadOneLineFile(myfile, strict=True)
self.assertEqual(myline, datastrict)
def testWhitespaceAndMultipleLines(self):
myfile = self._CreateTempFile()
for nl in ["", "\n", "\r\n"]:
for ws in [" ", "\t", "\t\t \t", "\t "]:
dummydata = (1024 * ("Foo bar baz %s%s" % (ws, nl)))
utils.WriteFile(myfile | , data=dummydata)
datalax = utils.ReadOneLineFile(myfile, strict=False)
if nl:
self.assert_(set("\r\n") & set(dummydata))
self.assertRaises(errors.GenericError, utils.ReadOneLineFile,
myfile, strict=True)
explen = len("Foo bar baz ") + len(ws)
self.assertEqual(len(datalax), explen)
self.assertEqual(datalax, dummydata[:explen])
self.assertFalse(set("\r\n") & set(datalax))
else:
datastrict = utils.ReadOneLineFile(myfile, strict=True)
self.assertEqual(dummydata, datastrict)
self.assertEqual(dummydata, datalax)
def testEmptylines(self):
myfile = self._CreateTempFile()
myline = "myline"
for nl in ["\n", "\r\n"]:
for ol in ["", "otherline"]:
dummydata = "%s%s%s%s%s%s" % (nl, nl, myline, nl, ol, nl)
utils.WriteFile(myfile, data=dummydata)
self.assert_(set("\r\n") & set(dummydata))
datalax = utils.ReadOneLineFile(myfile, strict=False)
self.assertEqual(myline, datalax)
if ol:
self.assertRaises(errors.GenericError, utils.ReadOneLineFile,
myfile, strict=True)
else:
datastrict = utils.ReadOneLineFile(myfile, strict=True)
self.assertEqual(myline, datastrict)
def testEmptyfile(self):
myfile = self._CreateTempFile()
self.assertRaises(errors.GenericError, utils.ReadOneLineFile, myfile)
class TestTimestampForFilename(unittest.TestCase):
def test(self):
self.assert_("." not in utils.TimestampForFilename())
self.assert_(":" not in utils.TimestampForFilename())
class TestCreateBackup(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def testEmpty(self):
filename = utils.PathJoin(self.tmpdir, "config.data")
utils.WriteFile(filename, data="")
bname = utils.CreateBackup(filename)
self.assertFileContent(bname, "")
self.assertEqual(len(glob.glob("%s*" % filename)), 2)
utils.CreateBackup(filename)
self.assertEqual(len(glob.glob("%s*" % filename)), 3)
utils.CreateBackup(filename)
self.assertEqual(len(glob.glob("%s*" % filename)), 4)
fifoname = utils.PathJoin(self.tmpdir, "fifo")
os.mkfifo(fifoname)
self.assertRaises(errors.ProgrammerError, utils.CreateBackup, fifoname)
def testContent(self):
bkpcount = 0
for data in ["", "X", "Hello World!\n" * 100, "Binary data\0\x01\x02\n"]:
for rep in [1, 2, 10, 127]:
testdata = data * rep
filename = utils.PathJoin(self.tmpdir, "test.data_")
utils.WriteFile(filename, data=testdata)
self.assertFileContent(filename, testdata)
for _ in range(3):
bname = utils.CreateBackup(filename)
bkpcount += 1
self.assertFileContent(bname, testdata)
self.assertEqual(len(glob.glob("%s*" % filename)), 1 + bkpcount)
class TestListVisibleFiles(unittest.TestCase):
"""Test case for ListVisibleFiles"""
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path)
def _CreateFiles(self, files):
for name in files:
utils.WriteFile(os.path.join(self.path, name), data="test")
def _test(self, files, expected):
self._CreateFiles(files)
found = utils.ListVisibleFiles(self.path)
self.assertEqual(set(found), set(expected))
def testAllVisible(self):
files = ["a", "b", "c"]
expected = files
self._test(files, expected)
def testNoneVisible(self):
files = [".a", ".b", ".c"]
expected = []
self._test(files, expected)
def testSomeVisible(self):
files = ["a", "b", ".c"]
expected = ["a", "b"]
self._test(files, expected)
def testNonAbsolutePath(self):
self.failUnlessRaises(errors.ProgrammerError, utils.ListVisibleFiles,
"abc")
def testNonNormalizedPath(self):
self.failUnlessRaises(errors.ProgrammerError, utils.ListVisibleFiles,
"/bin/../tmp")
def testMountpoint(self):
lvfmp_fn = compat.partial(utils.ListVisibleFiles,
_is_mountpoint=lambda _: True)
self.assertEqual(lvfmp_fn(self.path), [])
# Create "lost+found" as a regular file
self._CreateFiles(["foo", "bar", ".baz", |
antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_12/ar_/test_artificial_1024_RelativeDifference_MovingAverage_12__0.py | Python | bsd-3-clause | 278 | 0.082734 | impo | rt pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "RelativeDifference", s | igma = 0.0, exog_count = 0, ar_order = 0); |
WorldException/v7py | v7/utils.py | Python | gpl-2.0 | 3,168 | 0.004786 | #!/usr/bin/env python
# -*-coding:utf8-*-
from __future__ import division
import re
import six
from six.moves import reprlib
re_unicode = re.compile(r'\\u[0-9a-f]{4}')
def fixunicode(str, encoding=''):
if six.PY2:
newstr = str.decode('unicode_escape')
if encoding:
return newstr.decode(encoding)
return newstr
else:
return bytes(str, 'utf-8').decode('unicode_escape')
class MyRepr(reprlib.Repr):
def __init__(self):
reprlib.Repr.__init__(self)
self.maxstring = 1000
self.maxlong = 1000
self.maxlist = 50
self.maxarray = 50
self.maxdict = 50
self.maxlevel=20
def repr_unicode(self, obj, level):
return obj
def repr_str(self, obj, level):
return obj
def repr_list(self, obj, level):
otstup = u' '*level
rstr = u'\n'+otstup+u'['
for i in obj:
rstr += repr(i)
rstr += u',\n'
rstr += otstup
rstr = rstr[:-(level+1)] + u']\n'
return rstr
def repr(self,data):
rstr = reprlib.Repr.repr(self, data)
return rstr
mrepr = MyRepr()
# конвертация между системами счисления
def baseN(num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((num == 0) and numerals[0]) or (baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def convert_n_to_m(x,n,m):
if n < 1:
return False
if m < 1 or m > 36:
return False
if type(x) == type(0):
#numeric
if m == 1:
d = '%d' % x
try:
d = int(d, n)
return '0' * d
except:
return False
else:
return baseN(x, n).upper()
elif type(x)==type(''):
#string
if m == 1:
try:
d = int(x, n)
return '0'*d
except:
return False
else:
try:
q = int(x, n)
return baseN(q, m).upper()
except:
return False
else:
return False
def convert_n_to_m2(x, n, m):
available_symbols = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if ((type(x) != str) and (type(x) != int)) or (n < 1) or (n > 36) or (m < 1) or (m > 36):
return False
else:
if type(x) == int:
x = str(x)
allowed_symbols = available_symbols[0: n]
if (False in map(lambda symbol: True if (symbol in allowed_symbols) else False, x)):
return False
else:
value = len(x) if (n == 1) else int(x, n)
result = ""
if m > 1:
while value > 0:
result + | = str(available_symbols[value % m])
value = int(value/m)
else:
result = '0' * value
return result[::-1]
def ID_36(value_10):
if six.PY2:
if type(value_10) is unicode:
value_10 = str(value_10)
else:
value_10 | = str(value_10)
return '{:^9}'.format(convert_n_to_m2(value_10, 10, 36))
|
MithileshCParab/HackerRank-10DaysOfStatistics | Problem Solving/Data Structure/Trees/square-ten_three.py | Python | apache-2.0 | 69 | 0.014493 | # Enter your code here. Read input | fr | om STDIN. Print output to STDOUT |
antoinecarme/pyaf | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_Lag1Trend_Seasonal_MonthOfYear_LSTM.py | Python | bsd-3-clause | 161 | 0.049689 | import tests.model_control.test_ozone_custom_models_enabled as tes | tmod
testmod.build_model( ['Fisher'] , ['Lag1Trend'] , ['Seasonal_MonthOfYear'] , ['LST | M'] ); |
Pitmairen/hamlish-jinja | tests/test_debug_output.py | Python | bsd-3-clause | 3,732 | 0.002413 | # -*- coding: utf-8 -*-
import unittest
from hamlish_jinja import Hamlish, Output
import testing_base
class TestDebugOutput(testing_base.TestCase):
def setUp(self):
self.hamlish = Hamlish(
Output(indent_string=' ', newline_string='\n', debug=True))
def test_html_tags(self):
s = self._h('''
%html
%head
%title << Test
%body
%p
Test
%p << Test
%p
Test
''')
r = '''
<html>
<head>
<title>Test</title></head>
<body>
<p>
Test</p>
<p>Test</p>
<p>
Test</p></body></html>
'''
self.assertEqual(s, r)
def test_jinja_tags(self):
s = self._h('''
-macro test(name):
%p << {{ name }}
-block content:
-for i in range(20):
-if i < 10:
=test(i)
-elif i < 15:
Test {{ i|safe }}
-if i == 10:
Test
-continue
-elif i == 11:
-break
Test
-else:
Test
-trans count=i:
There is {{ count }} object.
-pluralize
There is {{ count }} objects.
-else:
Test
''')
r = '''
{% macro test(name): %}
<p>{{ name }}</p>{% endmacro %}
{% block content: %}
{% for i in range(20): %}
{% if i < 10: %}
{{ test(i) }}
{% elif i < 15: %}
Test {{ i|safe }}
{% if i == 10: %}
Test
{% continue %}
{% elif i == 11: %}
{% break %}{% endif %}
Test
{% else: %}
Test{% endif %}
{% trans count=i: %}
There is {{ count }} object.
{% pluralize %}
There is {{ count }} objects.{% endtrans %}
{% else: %}
Test{% endfor %}{% endblock %}
'''
self.assertEqual(s, r)
def test_preformatted_lines(self):
s = self._h('''
%html
%pre
|def test(name):
| if True:
| print name
%p
Test
''')
r = '''
<html>
<pre>
def test(name):
if True:
print name</pre>
<p>
Test</p></html>
'''
self.assertEqual(s, r)
def test_self_closing_tag_with | _empty_lines_bellow(self):
s = self._h('''
%br
%span << test''')
r = '''
<br />
<span>test</span>
'''
self.assertEqual(s, r)
def test_nested_tags(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" << {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assert | Equal(s, r)
def test_nested_tags2(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" -> {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
def test_nested_tags3(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" -> =i
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
def test_nested_tags4(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}"
Test {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">
Test {{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
if __name__ == '__main__':
unittest.main() |
madarche/jack | jack_m3u.py | Python | gpl-2.0 | 1,306 | 0 | # jack_m3u: generate a m3u playl | ist - a module for
# jack - tag audio from a CD and enc | ode it using 3rd party software
# Copyright (C) 1999-2003 Arne Zellentin <zarne@users.sf.net>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from jack_globals import *
m3u = None
wavm3u = None
def init():
global m3u, wavm3u
m3u = []
wavm3u = []
def add(file):
m3u.append(file)
def add_wav(file):
wavm3u.append(file)
# fixeme - not written to a file yet
def write(file="jack.m3u"):
if m3u and cf['_write_m3u']:
f = open(file, "w")
for i in m3u:
f.write(i)
f.write("\n")
f.close()
|
civilian/competitive_programing | leetcode/0/88/best.py | Python | mit | 1,092 | 0.003663 | from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
'''
for i in range(n):
nums1[m + i] = nums2[i]
nums1.sort()
'''
'''
nums1copy = nums1[:m]
p = p1 = p2 = 0
while p1 < m and p2 < n:
if nums1copy[p1] < nums2[p2]:
nums1[p] = nums1copy[p1]
p += 1
p1 += | 1
else:
nums1[p] = nums2[p2]
p += 1
p2 += 1
if p1 < m:
nums1[p:] = nums1copy[p1:]
elif p2 < n:
nums1[p:] = nums2[p2:]
'''
p1 = m - 1
p2 = n - 1
for p in range(n + m - 1, -1, -1):
if p2 < 0:
break
if p1 >= 0 and nums1[p1] > nums2[p2]:
nums1[p] = nums1[p1]
p1 -= 1
else:
nums1 | [p] = nums2[p2]
p2 -= 1 |
michaelarnauts/home-assistant | homeassistant/components/media_player/cast.py | Python | mit | 8,631 | 0 | """
homeassistant.components.media_player.chromecast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with Cast devices on the network.
WARNING: This platform is currently not working due to a changed Cast API
"""
import logging
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_IDLE, STATE_OFF,
STATE_UNKNOWN, CONF_HOST)
from homeassistant.components.media_player import (
MediaPlayerDevice,
SUPPORT_PAUSE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_MUTE,
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_YOUTUBE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK,
MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO)
REQUIREMENTS = ['pychromecast==0.6.10']
CONF_IGNORE_CEC = 'ignore_cec'
CAST_SPLASH = 'https://home-assistant.io/images/cast/splash.png'
SUPPORT_CAST = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_YOUTUBE
KNOWN_HOSTS = []
# pylint: disable=invalid-name
cast = None
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the cast platform. """
global cast
import pychromecast
cast = pychromecast
logger = logging.getLogger(__name__)
# import CEC IGNORE attributes
ignore_cec = config.get(CONF_IGNORE_CEC, [])
if isinstance(ignore_cec, list):
cast.IGNORE_CEC += ignore_cec
else:
logger.error('Chromecast conig, %s must be a list.', CONF_IGNORE_CEC)
hosts = []
if discovery_info and discovery_info[0] not in KNOWN_HOSTS:
hosts = [discovery_info[0]]
elif CONF_HOST in config:
hosts = [config[CONF_HOST]]
else:
hosts = (host_port[0] for host_port
in cast.discover_chromecasts()
if host_port[0] not in KNOWN_HOSTS)
casts = []
for host in hosts:
try:
casts.append(CastDevice(host))
except cast.ChromecastConnectionError:
pass
else:
KNOWN_HOSTS.append(host)
add_devices(casts)
class CastDevice(MediaPlayerDevice):
""" Represents a Cast device on the network. """
# pylint: disable=too-many-public-methods
def __init__(self, host):
import pychromecast.controllers.youtube as youtube
self.cast = cast.Chromecast(host)
self.youtube = youtube.YouTubeController()
self.cast.register_handler(self.youtube)
self.cast.socket_client.receiver_controller.register_status_listener(
self)
self.cast.socket_client.media_controller.register_status_listener(self)
self.cast_status = self.cast.status
self.media_status = self.cast.media_controller.status
# Entity properties and methods
@property
def should_poll(self):
return False
@property
def name(self):
""" Returns the name of the device. """
return self.cast.device.friendly_name
# MediaPlayerDevice properties and methods
@property
def state(self):
""" State of the player. """
if self.media_status is None:
return STATE_UNKNOWN
elif self.media_status.player_is_playing:
return STATE_PLAYING
elif self.media_status.player_is_paused:
return STATE_PAUSED
elif self.media_status.player_is_idle:
return STATE_IDLE
elif self.cast.is_idle:
return STATE_OFF
else:
return STATE_UNKNOWN
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
return self.cast_status.volume_level if self.cast_status else None
@property
def is_volume_muted(self):
""" Boolean if volume is currently muted. """
return self.cast | _status.volume_muted if self.cast_status else None
| @property
def media_content_id(self):
""" Content ID of current playing media. """
return self.media_status.content_id if self.media_status else None
@property
def media_content_type(self):
""" Content type of current playing media. """
if self.media_status is None:
return None
elif self.media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
elif self.media_status.media_is_movie:
return MEDIA_TYPE_VIDEO
elif self.media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
""" Duration of current playing media in seconds. """
return self.media_status.duration if self.media_status else None
@property
def media_image_url(self):
""" Image url of current playing media. """
if self.media_status is None:
return None
images = self.media_status.images
return images[0].url if images else None
@property
def media_title(self):
""" Title of current playing media. """
return self.media_status.title if self.media_status else None
@property
def media_artist(self):
""" Artist of current playing media. (Music track only) """
return self.media_status.artist if self.media_status else None
@property
def media_album(self):
""" Album of current playing media. (Music track only) """
return self.media_status.album_name if self.media_status else None
@property
def media_album_artist(self):
""" Album arist of current playing media. (Music track only) """
return self.media_status.album_artist if self.media_status else None
@property
def media_track(self):
""" Track number of current playing media. (Music track only) """
return self.media_status.track if self.media_status else None
@property
def media_series_title(self):
""" Series title of current playing media. (TV Show only)"""
return self.media_status.series_title if self.media_status else None
@property
def media_season(self):
""" Season of current playing media. (TV Show only) """
return self.media_status.season if self.media_status else None
@property
def media_episode(self):
""" Episode of current playing media. (TV Show only) """
return self.media_status.episode if self.media_status else None
@property
def app_id(self):
""" ID of the current running app. """
return self.cast.app_id
@property
def app_name(self):
""" Name of the current running app. """
return self.cast.app_display_name
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_CAST
def turn_on(self):
""" Turns on the ChromeCast. """
# The only way we can turn the Chromecast is on is by launching an app
if not self.cast.status or not self.cast.status.is_active_input:
if self.cast.app_id:
self.cast.quit_app()
self.cast.play_media(
CAST_SPLASH, cast.STREAM_TYPE_BUFFERED)
def turn_off(self):
""" Turns Chromecast off. """
self.cast.quit_app()
def mute_volume(self, mute):
""" mute the volume. """
self.cast.set_volume_muted(mute)
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
self.cast.set_volume(volume)
def media_play(self):
""" Send play commmand. """
self.cast.media_controller.play()
def media_pause(self):
""" Send pause command. """
self.cast.media_controller.pause()
def media_previous_track(self):
""" Send previous track command. """
self.cast.media_controller.rewind()
def media_next_track(self):
""" Send next track command. """
self.cast.media_controller.skip()
def media_seek(self, position):
""" Seek the media to a specific location. """
self.cast.media_controller.seek(position)
def play_youtube(self, media_id):
""" Plays a Y |
mdboom/freetypy | docstrings/tt_postscript.py | Python | bsd-2-clause | 2,490 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; O | R BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future | __ import print_function, unicode_literals, absolute_import
TT_Postscript__init__ = """
A TrueType PostScript table.
"""
TT_Postscript_format_type = """
Format of this table.
"""
TT_Postscript_italic_angle = """
Italic angle in degrees.
"""
TT_Postscript_underline_position = """
Underline position.
"""
TT_Postscript_underline_thickness = """
Underline thickness.
"""
TT_Postscript_is_fixed_pitch = """
If `True`, the font is monospaced.
"""
TT_Postscript_min_mem_type42 = """
Minimum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_max_mem_type42 = """
Maximum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_min_mem_type1 = """
Minimum memory usage when the font is downloaded as a Type 1 font.
"""
TT_Postscript_max_mem_type1 = """
Maximum memory usage when the font is downloaded as a Type 1 font.
"""
|
AdamWill/anaconda | pyanaconda/simpleconfig.py | Python | gpl-2.0 | 8,078 | 0.001609 | #
# simpleconifg.py - representation of a simple configuration file (sh-like)
#
# Copyright (C) 1999-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import os
import shlex
import string # pylint: disable=deprecated-module
import tempfile
from pyanaconda.iutil import upperASCII
_SAFECHARS = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
def unquote(s):
    """Strip shell-style quoting from *s* and collapse runs of whitespace.

    Uses shlex to honor both single and double quotes; the resulting
    tokens are re-joined with single spaces.
    """
    tokens = shlex.split(s)
    return " ".join(tokens)
def quote(s, always=False):
    """Return *s* wrapped in double quotes, escaping embedded quotes.

    Unless *always* is set, a string consisting entirely of shell-safe
    characters (see _SAFECHARS) is returned unchanged.
    """
    if not always and all(ch in _SAFECHARS for ch in s):
        return s
    return '"' + s.replace('"', '\\"') + '"'
def find_comment(s):
    """Return the index of the first ``#`` outside any quoted string.

    Quote characters of one kind appearing inside the other kind of
    quote are ignored, as are ``#`` characters inside quotes.

    :param str s: line to scan for a comment
    :returns: index of the comment character, or None if there is none
    :rtype: int or None
    """
    open_quote = None
    for idx, ch in enumerate(s):
        if open_quote is None and ch == '#':
            return idx
        if ch in "'\"":
            if ch == open_quote:
                open_quote = None          # closing the current quote
            elif open_quote is None:
                open_quote = ch            # opening a new quote
    return None
def write_tmpfile(filename, data):
    """Atomically replace *filename* with *data*.

    The data is first written to a hidden temporary file created in the
    target directory (so both live on the same filesystem and the final
    rename is atomic), permissions are copied from any existing file
    (0644 for new files), and the temporary file is renamed into place.
    """
    directory = os.path.dirname(filename) or '.'
    hidden_prefix = "." + os.path.basename(filename)
    tmpf = tempfile.NamedTemporaryFile(mode="w", delete=False,
                                       dir=directory, prefix=hidden_prefix)
    tmpf.write(data)
    tmpf.close()
    # Mirror the permissions of the file being replaced (default 0644).
    mode = os.stat(filename).st_mode if os.path.exists(filename) else 0o0644
    os.chmod(tmpf.name, mode)
    os.rename(tmpf.name, filename)
class SimpleConfigFile(object):
    """ Edit values in a configuration file without changing comments.
        Supports KEY=VALUE lines and ignores everything else.
        Supports adding new keys.
        Supports deleting keys.
        Preserves comment, blank lines and comments on KEY lines
        Does not support duplicate key entries.
    """
    def __init__(self, filename=None, read_unquote=True, write_quote=True,
                 always_quote=False):
        # filename: default file used by read()/write() when none is passed
        # read_unquote: strip quoting from values while reading
        # write_quote / always_quote: control quoting of values on output
        self.filename = filename
        self.read_unquote = read_unquote
        self.write_quote = write_quote
        self.always_quote = always_quote
        self.reset()
    def reset(self):
        """Forget all lines and key/value pairs read so far."""
        self._lines = []
        self.info = {}
    def read(self, filename=None):
        """ passing filename will override the filename passed to init.
            save the lines into self._lines and the key/value pairs into
            self.info
        """
        filename = filename or self.filename
        with open(filename) as f:
            for line in f:
                self._lines.append(line)
                key, value, _comment = self._parseline(line)
                if key:
                    self.info[key] = value
    def write(self, filename=None, use_tmp=True):
        """ passing filename will override the filename passed to init.

            When use_tmp is True the file is replaced atomically via
            write_tmpfile(); otherwise it is rewritten in place.
        """
        filename = filename or self.filename
        if not filename:
            return None
        if use_tmp:
            write_tmpfile(filename, str(self))
        else:
            # write directly to the file
            with open(filename, "w") as fobj:
                fobj.write(str(self))
    def set(self, *args):
        """Set (key, value) pairs; keys are normalized to upper case."""
        for key, value in args:
            self.info[upperASCII(key)] = value
    def unset(self, *keys):
        """Remove the given keys (case-insensitive) if present."""
        for key in (upperASCII(k) for k in keys):
            if key in self.info:
                del self.info[key]
    def get(self, key):
        """Return the value for key (case-insensitive) or "" if unset."""
        return self.info.get(upperASCII(key), "")
    def _parseline(self, line):
        """ parse a line into a key, value and comment

            :param str line: Line to be parsed
            :returns: Tuple of key, value, comment
            :rtype: tuple

            Handle comments and optionally unquote quoted strings
            Returns (key, value, comment) or (None, None, comment)
            key is always UPPERCASE and comment may by "" if none was found.
        """
        s = line.strip()
        # Look for a # outside any quotes
        comment = ""
        comment_index = find_comment(s)
        if comment_index is not None:
            comment = s[comment_index:]
            s = s[:comment_index] # remove from comment to EOL
        key, eq, val = s.partition('=')
        key = key.strip()
        val = val.strip()
        if self.read_unquote:
            val = unquote(val)
        if key != '' and eq == '=':
            return (upperASCII(key), val, comment)
        else:
            return (None, None, comment)
    def _kvpair(self, key, comment=""):
        """Format one KEY=VALUE line (quoted as configured), keeping comment."""
        value = self.info[key]
        if self.write_quote or self.always_quote:
            value = quote(value, self.always_quote)
        if comment:
            comment = " " + comment
        return key + '=' + value + comment + "\n"
    def __str__(self):
        """ Return the file that was read, replacing existing keys with new values
            removing keys that have been deleted and adding new keys.
        """
        oldkeys = []
        s = ""
        for line in self._lines:
            key, _value, comment = self._parseline(line)
            if key is None:
                # Not a KEY=VALUE line (blank/comment): pass through verbatim.
                s += line
            else:
                if key not in self.info:
                    # Key was unset(): drop the line.
                    continue
                oldkeys.append(key)
                s += self._kvpair(key, comment)
        # Add new keys
        for key in self.info:
            if key not in oldkeys:
                s += self._kvpair(key)
        return s
def simple_replace(fname, keys, add=True, add_comment="# Added by Anaconda"):
    """ Replace lines in a file, optionally adding if missing.

        :param str fname: Filename to operate on
        :param list keys: List of (key, string) tuples to search and replace
        :param bool add: When True add strings that were not replaced
        :param str add_comment: Comment line written above appended strings

        This will read all the lines in a file, looking for ones that start
        with keys and replacing the line with the associated string. The string
        should be a COMPLETE replacement for the line, not just a value.

        When add is True any keys that haven't been found will be appended
        to the end of the file along with the add_comment.
    """
    # Helper to return the line or the first matching key's string
    def _replace(l):
        r = [s for k,s in keys if l.startswith(k)]
        if r:
            return r[0]
        else:
            return l
    # Replace lines that match any of the keys
    with open(fname, "r") as f:
        lines = [_replace(l.strip()) for l in f]
    # Add any strings that weren't already in the file
    if add:
        append = [s for k,s in keys if not any(l.startswith(k) for l in lines)]
        if append:
            lines += [add_comment]
            lines += append
    write_tmpfile(fname, "\n".join(lines)+"\n")
|
reminisce/mxnet | example/ssd/quantization.py | Python | apache-2.0 | 8,206 | 0.004996 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import argparse
import logging
import time
from symbol.symbol_factory import get_symbol
from symbol import symbol_builder
from mxnet.base import SymbolHandle, check_call, _LIB, mx_uint, c_str_array
import ctypes
from mxnet.contrib.quantization import *
def save_symbol(fname, sym, logger=None):
    """Serialize the symbol graph *sym* into *fname*.

    :param fname: destination path for the symbol file
    :param sym: symbol object exposing a ``save(fname)`` method
    :param logger: optional logger; the destination is logged when given
    """
    if logger is not None:
        logger.info('Saving symbol into file at %s' % fname)
    sym.save(fname)
def save_params(fname, arg_params, aux_params, logger=None):
    """Persist arg/aux parameter arrays into *fname* (MXNet ndarray file).

    Arrays are moved to CPU context first; keys are namespaced with the
    conventional ``arg:``/``aux:`` prefixes so load_checkpoint can split
    them back apart.
    """
    if logger is not None:
        logger.info('Saving params into file at %s' % fname)
    merged = {}
    for name, arr in arg_params.items():
        merged['arg:%s' % name] = arr.as_in_context(cpu())
    for name, arr in aux_params.items():
        merged['aux:%s' % name] = arr.as_in_context(cpu())
    mx.nd.save(fname, merged)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a calibrated quantized SSD model from a FP32 model')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--num-calib-batches', type=int, default=5,
help='number of batches for calibration')
parser.add_argument('--exclude-first-conv', action='store_true', default=False,
help='excluding quantizing the first conv layer since the'
' number of channels is usually not a multiple of 4 in that layer'
' which does not satisfy the requirement of cuDNN')
parser.add_argument('--shuffle-dataset', action='store_true', default=True,
help='shuffle the calibration dataset')
parser.add_argument('--shuffle-chunk-seed', type=int, default=3982304,
help='shuffling chunk seed, see'
' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--shuffle-seed', type=int, default=48564309,
help='shuffling seed, see'
' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--calib-mode', type=str, default='naive',
help='calibration mode used for generating calibration table for the quantized symbol; supports'
' 1. none: no calibration will be used. The thresholds for quantization will be calculated'
' on the fly. This will result in inference speed slowdown and loss of accuracy'
' in general.'
' 2. naive: simply take min and max values of layer outputs as thresholds for'
' quantization. In general, the inference accuracy worsens with more examples used in'
' calibration. It is recommended to use `entropy` mode as it produces more accurate'
' inference results.'
' 3. entropy: calculate KL divergence of the fp32 output and quantized output for optimal'
' thresholds. This mode is expected to produce the best inference accuracy of all three'
' kinds of quantized models if the calibration dataset is representative enough of the'
' inference dataset.')
parser.add_argument('--quantized-dtype', type=str, default='auto',
choices=['auto', 'int8', 'uint8'],
help='quantization destination data type for input data')
args = parser.parse_args()
ctx = mx.cpu(0)
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
logger.info('shuffle_dataset=%s' % args.shuffle_dataset)
calib_mode = args.calib_mode
logger.info('calibration mode set to %s' % calib_mode)
# load FP32 models
prefix, epoch = "./model/ssd_vgg16_reduced_300", 0
sym, arg_params, aux_params = mx.model.load_checkpoint("./model/ssd_vgg16_reduced_300", 0)
if not 'label' in sym.list_arguments():
label = mx.sym.Variable(name='label')
sym = mx.sym.Group([sym, label])
sym = sym.get_backend_symbol('MKLDNN_QUANTIZE')
# get batch size
batch_size = args.batch_size
logger.info('batch size = %d for calibration' % batch_size)
# get number of batches for calibration
num_calib_batches = args.num_calib_batches
if calib_mode != 'none':
logger.info('number of batches = %d for calibration' % num_calib_batches)
# get image shape
image_shape = '3,300,300'
# Quantization layer configs
exclude_first_conv = args.exclude_first_conv
excluded_sym_names = []
rgb_mean = '123,117,104'
if exclude_first_conv:
excluded_sym_names += ['conv1_1']
label_name = 'label'
logger.info('label_name = %s' % label_name)
data_shape = tuple([int(i) for i in image_shape.split(',')])
logger.info('Input data shape = %s' % str(data_shape))
logger.info('rgb_mean = %s' % rgb_mean)
rgb_mean = [float(i) for i in rgb_mean.split(',')]
mean_args = {'mean_r': rgb_mean[0], 'mean_g': rgb_mean[1], 'mean_b': rgb_mean[2]}
if calib_mode == 'none':
qsym, qarg_params, aux_params = quantize_model(sym=sym, arg_params=arg_params, aux_params=aux_params,
ctx=ctx, excluded_sym_names=excluded_sym_names,
calib_mode=calib_mode, quantized_dtype=args.quantized_dtype,
logger=logger)
sym_name = '%s-symbol.json' % ('./model | /qssd_vgg16_reduced_300')
param_name = '%s-%04d.params' % ('./model/qssd_vgg16_reduced_300', epoch)
save_symbol(sym_name, qsym, logger)
else:
logger.info('Creating ImageRecordIter for reading calibration dataset')
eval_iter = DetRecordIter(os.path.join(os.getcwd(), 'data', 'val.rec'),
batch_size, data_shape, mean_pixels=(123, 117, 104),
path_imglist="", **cfg.val | id)
qsym, qarg_params, aux_params = quantize_model(sym=sym, arg_params=arg_params, aux_params=aux_params,
ctx=ctx, excluded_sym_names=excluded_sym_names,
calib_mode=calib_mode, calib_data=eval_iter,
num_calib_examples=num_calib_batches * batch_size,
quantized_dtype=args.quantized_dtype,
label_names=(label_name,), logger=logger)
sym_name = '%s-symbol.json' % ('./model/cqssd_vgg16_reduced_300')
param_name = '%s-%04d.params' % ('./model/cqssd_vgg16_reduced_300', epoch)
qsym = qsym.get_backend_symbol('MKLDNN_QUANTIZE')
save_symbol(sym_name, qsym, logger)
save_params(param_name, qarg_pa |
dimagi/commcare-hq | corehq/apps/export/management/commands/delete_exports.py | Python | bsd-3-clause | 3,683 | 0.002715 | from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.export.models import ExportInstance
class Command(BaseCommand):
    """Interactively bulk-delete saved export configurations in one domain.

    Optionally restricts the deletion to exports that have not been
    accessed for a given number of days.  Nothing is removed without an
    explicit confirmation prompt.
    """
    help = "Delete exports in a domain"

    def add_arguments(self, parser):
        parser.add_argument(
            'domain',
            help="The domain to delete exports in"
        )
        parser.add_argument(
            '--days_inactive',
            # Without type=int argparse delivers a string, which would make
            # the `> 0` comparison in handle() raise TypeError on Python 3.
            type=int,
            default=0,
            help="Only delete exports that have been inactive for this many days"
        )

    def handle(self, domain, **options):
        db = ExportInstance.get_db()
        exports = db.view(
            'export_instances_by_domain/view',
            startkey=[domain],
            endkey=[domain, {}],
            include_docs=False,
            reduce=False,
        ).all()
        if not exports:
            print("No exports to delete here, exiting.")
            return

        if options['days_inactive'] > 0:
            import datetime
            inactive_since = datetime.datetime.today() - datetime.timedelta(days=int(options['days_inactive']))
            # Keep only exports whose last access predates the cutoff.
            inactive_exports = []
            for export in exports:
                e = ExportInstance.get(export['id'])
                if e.last_accessed and e.last_accessed <= inactive_since:
                    inactive_exports.append(export)
            if not inactive_exports:
                print("No exports have been inactive for more than {days_inactive} days, exiting.".format(
                    days_inactive=options['days_inactive'])
                )
                return
            confirm = input(
                "There are {total_exports} exports in {domain}. Are you sure you want to delete "
                "{total_inactive_exports} that are older than {days_inactive} days [y/N]?".format(
                    total_exports=len(exports),
                    total_inactive_exports=len(inactive_exports),
                    domain=domain,
                    days_inactive=int(options['days_inactive'])
                )
            )
            if confirm.lower() == 'y':
                exports = inactive_exports
            else:
                return

        def filter_exports(_type):
            # The couch view key embeds the doc class name,
            # e.g. 'FormExportInstance' / 'CaseExportInstance'.
            return [row for row in exports if _type in row['key']]

        form_exports = filter_exports('FormExportInstance')
        case_exports = filter_exports('CaseExportInstance')
        confirm = input(
            "There are {f_count} form exports, and {c_count} case exports. "
            "Are you sure you want to delete all these exports [y/N]?\n".format(
                f_count=len(form_exports),
                c_count=len(case_exports)
            )
        )
        to_delete = []
        if confirm.lower() == 'y':
            _type = input(
                "Enter 'case' to delete all case exports, "
                "'form' to delete all form exports, "
                "'all' to delete both form and case exports. "
                "Enter anything else to exit.\n"
            )
            if _type == 'form':
                to_delete = form_exports
                print("Deleting form exports")
            elif _type == 'case':
                to_delete = case_exports
                print("Deleting case exports")
            elif _type == 'all':
                to_delete = form_exports + case_exports
                print("Deleting all exports")
            else:
                print("Not deleting anything, exiting!")
                return
        total_count = iter_bulk_delete(db, [doc['id'] for doc in to_delete])
        print("Deleted total of {} exports successfully!".format(total_count))
|
PythonSanSebastian/epcon | conference/models.py | Python | bsd-2-clause | 49,377 | 0.005167 | # -*- coding: UTF-8 -*-
import datetime
import os
import os.path
import subprocess
from collections import defaultdict
from django.conf import settings as dsettings
from django.core import exceptions
from django.core.cache import cache
from django.db import connection
from django.db import models
from django.db import transaction
from django.db.models.query import QuerySet
from django.db.models.signals import post_save
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from django_urls import UrlMixin
import tagging
from tagging.fields import TagField
import conference
import conference.gmap
from conference import settings
from conference import signals
from taggit.models import TagBase, GenericTaggedItemBase, ItemBase
from taggit.managers import TaggableManager
import inspect
import traceback
import logging
log = logging.getLogger('conference.tags')
# ConferenceTag e ConferenceTaggedItem servono per creare un "namespace" per i
# tag relativi a conference. In questo modo non devo preocuparmi di altri
# utilizzi di taggit fatti da altre app.
class ConferenceTagManager(models.Manager):
    """Manager whose queryset can annotate and rank tags by usage count."""
    def get_query_set(self):
        return self._QuerySet(self.model)
    def __getattr__(self, name):
        # Proxy unknown attributes to the queryset so the custom methods
        # below can be called directly on the manager.
        return getattr(self.all(), name)
    class _QuerySet(QuerySet):
        def annotate_with_usage(self):
            """Annotate every tag with the number of tagged items using it."""
            return self\
                .annotate(usage=models.Count('conference_conferencetaggeditem_items'))
        def order_by_usage(self, asc=False):
            """Order tags by usage; most-used first unless *asc* is True."""
            key = 'usage' if asc else '-usage'
            return self.annotate_with_usage().order_by(key)
class ConferenceTag(TagBase):
    """Tag scoped to the conference app (kept in its own taggit namespace)."""
    objects = ConferenceTagManager()
    # Optional free-form grouping of tags.
    category = models.CharField(max_length=50, default='', blank=True)
    def save(self, **kw):
        if not self.pk:
            # Log a stack trace so accidental creation of new tags can be
            # traced back to the caller.
            frame = inspect.currentframe()
            stack_trace = traceback.format_stack(frame)
            log.debug(u'saving new tag {}'.format(self.name))
            log.debug(u''.join(stack_trace[:-1]))
            # Before saving this tag make sure another one differing only
            # by letter case does not already exist; if it does, reuse it
            # instead of creating a near-duplicate.
            try:
                c = ConferenceTag.objects.get(name__iexact=self.name)
            except ConferenceTag.DoesNotExist:
                pass
            else:
                self.pk = c.pk
                return
        return super(ConferenceTag, self).save(**kw)
class ConferenceTaggedItem(GenericTaggedItemBase, ItemBase):
    """Through-model linking a ConferenceTag to any object via a generic FK."""
    tag = models.ForeignKey(ConferenceTag, related_name="%(app_label)s_%(class)s_items")
    class Meta:
        verbose_name = _("Tagged Item")
        verbose_name_plural = _("Tagged Items")
class ConferenceManager(models.Manager):
    def current(self):
        """Return the Conference named in settings.CONFERENCE, cached for a week."""
        key = 'CONFERENCE_CURRENT'
        data = cache.get(key)
        if data is None:
            data = self.get(code=settings.CONFERENCE)
            # Cache long enough that this query stops being a concern;
            # post_save on Conference invalidates it via clear_cache below.
            cache.set(key, data, 60*60*24*7)
        return data
    @classmethod
    def clear_cache(cls, sender, **kwargs):
        # Connected to Conference's post_save signal (see module bottom).
        cache.delete('CONFERENCE_CURRENT')
class Conference(models.Model):
    """One edition of the conference with its CFP, voting and event windows."""
    code = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length=100)
    cfp_start = models.DateField(null=True, blank=True)
    cfp_end = models.DateField(null=True, blank=True)
    conference_start = models.DateField(null=True, blank=True)
    conference_end = models.DateField(null=True, blank=True)
    voting_start = models.DateField(null=True, blank=True)
    voting_end = models.DateField(null=True, blank=True)
    objects = ConferenceManager()
    def __unicode__(self):
        return self.code
    def days(self):
        """Return every date from conference_start to conference_end, inclusive."""
        output = []
        if self.conference_start and self.conference_end:
            d = self.conference_start
            step = datetime.timedelta(days=1)
            while d <= self.conference_end:
                output.append(d)
                d += step
        return output
    def clean(self):
        if self.conference_start and self.conference_end:
            if self.conference_start > self.conference_end:
                raise exceptions.ValidationError('Conference end must be > of conference start')
        if self.cfp_start and self.cfp_end:
            if self.cfp_start > self.cfp_end:
                raise exceptions.ValidationError('Cfp end must be > of cfp start')
        if self.voting_start and self.voting_end:
            if self.voting_start > self.voting_end:
                raise exceptions.ValidationError('Voting end must be > of voting start')
    def cfp(self):
        """True while the call-for-papers window is open."""
        today = datetime.date.today()
        try:
            return self.cfp_start <= today <= self.cfp_end
        except TypeError:
            # dates not set
            return False
    def voting(self):
        """True while the community voting window is open."""
        today = datetime.date.today()
        try:
            return self.voting_start <= today <= self.voting_end
        except TypeError:
            # dates not set
            return False
    def conference(self):
        """True while the conference itself is running."""
        today = datetime.date.today()
        try:
            return self.conference_start <= today <= self.conference_end
        except TypeError:
            # Dates not set: return False like cfp()/voting().  The original
            # code re-raised here (leftover debug `raise` made the return
            # below unreachable), which was inconsistent with its siblings.
            return False
post_save.connect(ConferenceManager.clear_cache, sender=Conference)
class DeadlineManager(models.Manager):
    def valid_news(self):
        """Return only deadlines falling today or later."""
        today = datetime.date.today()
        return self.all().filter(date__gte = today)
class Deadline(models.Model):
    """
    A dated deadline shown on the conference site, with per-language text.
    """
    date = models.DateField()
    objects = DeadlineManager()
    def __unicode__(self):
        return "deadline: %s" % (self.date, )
    class Meta:
        ordering = ['date']
    def isExpired(self):
        # Expired strictly after the deadline's day has passed.
        today = datetime.date.today()
        return today > self.date
    def content(self, lang, fallback=True):
        """
        Return the DeadlineContent for the given language.  If no content
        exists for that language and fallback is False, a
        DeadlineContent.DoesNotExist is raised; with fallback True the
        first available DeadlineContent is returned instead.
        """
        contents = dict((c.language, c) for c in self.deadlinecontent_set.exclude(body=''))
        if not contents:
            raise DeadlineContent.DoesNotExist()
        try:
            return contents[lang]
        except KeyError:
            if not fallback:
                raise DeadlineContent.DoesNotExist()
            # NOTE(review): indexing .values() is a Python 2 idiom; on
            # Python 3 this would need list(contents.values())[0].
            return contents.values()[0]
class DeadlineContent(models.Model):
    """
    Localized (per-language) headline and body text of a Deadline.
    """
    deadline = models.ForeignKey(Deadline)
    # ISO-style language code, e.g. 'en' or 'it'.
    language = models.CharField(max_length=3)
    headline = models.CharField(max_length=200)
    body = models.TextField()
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
class MultilingualContentManager(models.Manager):
def setContent(self, object, content, language, body):
if language is None:
language = dsettings.LANGUAGE_CODE.split('-', 1)[0]
object_type = ContentType.objects.get_for_model(object)
try:
mc = self.get(content_type=object_type, object_id=object.pk, content=content, language=language)
except MultilingualContent.DoesNotExist:
mc = MultilingualContent(content_object=object)
mc.content = content
mc.language = language
mc.body = body
mc.save()
def getContent(self, object, content, language):
if language is None:
language = dsettings.LANGUAGE_CODE.split('-', 1)[0]
object_type = ContentType.objects.get_for_model(object)
records = dict(
(x.language, x)
for x in self.exclude(body='').filter(content_type=object_type, object_id=object.pk, content=content)
)
try:
return records[language]
except KeyError:
if not records:
return None
else:
return records.get(dse |
samuelcolvin/pydantic | docs/examples/exporting_models_ujson.py | Python | mit | 308 | 0 | from dat | etime import datetime
import ujson
from pydantic import BaseModel
class | User(BaseModel):
id: int
name = 'John Doe'
signup_ts: datetime = None
class Config:
json_loads = ujson.loads
user = User.parse_raw('{"id": 123,"signup_ts":1234567890,"name":"John Doe"}')
print(user)
|
brianspeir/Vanilla | vendor/bootstrap-vz/base/__init__.py | Python | bsd-3-clause | 296 | 0.013514 | __all__ = ['Phase', 'Task', 'main']
from phase | import Phase
from task import Task
from main import main
def validate_manifest(data, validator, error):
    """Validate *data* against the base manifest schema next to this module.

    :param data: parsed manifest data to validate
    :param validator: callable invoked as validator(data, schema_path)
    :param error: error callback; unused here but kept so all plugins
        share the same validate_manifest signature
    """
    import os.path
    # The original line's string literal was corrupted ('manifest- | schema.json');
    # restored to the schema file shipped alongside this package.
    schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json'))
    validator(data, schema_path)
|
skeenp/Roam | src/roam/errors.py | Python | gpl-2.0 | 854 | 0.003513 | """
Module to handle sending error reports.
"""
import roam
import roam.config
import roam.utils
errorreporting = False
try:
from raven import Client
errorreporting = True
except ImportError:
errorreporting = False
roam.utils.warning("Error reporting disabled due to import error")
def can_send():
    """
    Return True if allowed to send error reports to the online error service.

    :return: True if allowed
    """
    # Opt-in flag from the Roam settings file; defaults to disabled.
    return roam.config.settings.get("online_error_reporting", False)
def send_exception(exinfo):
    """Send the given exception info to the Sentry server.

    Does nothing unless the user has opted in (can_send) and the raven
    client library imported successfully at module load.

    :param exinfo: exc_info tuple to report
    """
    if can_send() and errorreporting:
        client = Client(
            dsn='http://681cb73fc39247d0bfa03437a9b53b61:114be99c3a8842188ae7e9381d30374a@sentry.kartoza.com/17',
            release=roam.__version__
        )
        roam.utils.info("Sending error report.")
        client.captureException(exinfo)
|
linglung/ytdl | youtube_dl/extractor/senateisvp.py | Python | unlicense | 6,273 | 0.001594 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unsmuggle_url,
)
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
['ag', '76440', 'http://ag-f.akamaihd.net'],
['aging', '76442', 'http://aging-f.akamaihd.net'],
['approps', '76441', 'http://approps-f.akamaihd.net'],
['armed', '76445', 'http://armed-f.akamaihd.net'],
['banking', '76446', 'http://banking-f.akamaihd.net'],
['budget', '76447', 'http://budget-f.akamaihd.net'],
['cecc', '76486', 'http://srs-f.akamaihd.net'],
['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
['csce', '75229', 'http://srs-f.akamaihd.net'],
['dpc', '76590', 'http://dpc-f.akamaihd.net'],
['energy', '76448', 'http://energy-f.akamaihd.net'],
['epw', '76478', 'http://epw-f.akamaihd.net'],
['ethics', '76449', 'http://ethics-f.akamaihd.net'],
['finance', '76450', 'http://finance-f.akamaihd.net'],
['foreign', '76451', 'http://foreign-f.akamaihd.net'],
['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
['help', '76452', 'http://help-f.akamaihd.net'],
['indian', '76455', 'http://indian-f.akamaihd.net'],
['intel', '76456', 'http://intel-f.akamaihd.net'],
['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
['jccic', '85180', 'http://jccic-f.akamaihd.net'],
['jec', '76458', 'http://jec-f.akamaihd.net'],
['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
['rpc', '76591', 'http://rpc-f.akamaihd.net'],
['rules', '76460', 'http://rules-f.akamaihd.net'],
['saa', '76489', 'http://srs-f.akamaihd.net'],
['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
['srs', '75229', 'http://srs-f.akamaihd.net'],
['uscc', '76487', 'http://srs-f.akamaihd.net'],
['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
'info_dict': {
'id': 'judiciary031715',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
'info_dict': {
'id': 'commerce011514',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
# checksum differs each time
'info_dict': {
'id': 'intel090613',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
}
}, {
# From http://www.c-span.org/video/?96791-1
'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715',
'only_matching': True,
}]
    @staticmethod
    def _search_iframe_url(webpage):
        """Return the senate.gov ISVP iframe URL embedded in *webpage*, or None."""
        mobj = re.search(
            r"<iframe[^>]+src=['\"](?P<url>http://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]",
            webpage)
        if mobj:
            return mobj.group('url')
def _get_info_for_comm(self, committee):
for entry in self._COMM_MAP:
| if entry[0] == committee:
return entry[1:]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
qs = compat_parse_qs(re.match(self._VALID_URL, ur | l).group('qs'))
if not qs.get('filename') or not qs.get('type') or not qs.get('comm'):
raise ExtractorError('Invalid URL', expected=True)
video_id = re.sub(r'.mp4$', '', qs['filename'][0])
webpage = self._download_webpage(url, video_id)
if smuggled_data.get('force_title'):
title = smuggled_data['force_title']
else:
title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id)
poster = qs.get('poster')
thumbnail = poster[0] if poster else None
video_type = qs['type'][0]
committee = video_type if video_type == 'arch' else qs['comm'][0]
stream_num, domain = self._get_info_for_comm(committee)
formats = []
if video_type == 'arch':
filename = video_id if '.' in video_id else video_id + '.mp4'
formats = [{
# All parameters in the query string are necessary to prevent a 403 error
'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
}]
else:
hdcore_sign = 'hdcore=3.1.0'
url_params = (domain, video_id, stream_num)
f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
# URLs without the extra param induce an 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'):
mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url'])
if mobj:
entry['format_id'] += mobj.group('tag')
formats.append(entry)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
|
beiko-lab/gengis | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/build/build_options.py | Python | gpl-3.0 | 160 | 0.06875 |
# wxPython build-time configuration flags recorded when the distribution
# was built (consumed by the wx.build machinery).
UNICODE=1
UNDEF_NDEBUG=1
INSTALL_MULTIVERSION=1
FLAVOUR=""
EP_ADD_OPTS=1
EP_FULL_VER=0
WX_CONFIG="None"
WXPORT="msw"
MONOLITHIC=0
FINAL=0
HYBRID=1
hrayr-artunyan/shuup | shuup_tests/core/test_order_price_display.py | Python | agpl-3.0 | 3,010 | 0.001661 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import decimal
import pytest
from shuup.core.templatetags.shuup_common import money
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
@pytest.mark.django_db
def test_order_price_display():
    """Formatted line totals must sum to the formatted order totals."""
    shop = get_default_shop()
    supplier = get_default_supplier()
    order = _get_order(shop, supplier)
    # Strip the euro sign before converting each formatted price to Decimal.
    taxful_total = decimal.Decimal("0.00")
    taxless_total = decimal.Decimal("0.00")
    for line in order.lines.all():
        taxful_total += decimal.Decimal(money(line.taxful_price).strip("\u20ac"))
        taxless_total += decimal.Decimal(money(line.taxless_price).strip("\u20ac"))
    assert decimal.Decimal(money(order.taxful_total_price).strip("\u20ac")) == taxful_total
    assert decimal.Decimal(money(order.taxless_total_price).strip("\u20ac")) == taxless_total
def _get_order(shop, supplier):
    """Create and save an order holding one line per fixture product."""
    order = create_empty_order(shop=shop)
    order.full_clean()
    order.save()
    for product_data in _get_product_data():
        quantity = product_data.pop("quantity")
        tax_rate = product_data.pop("tax_rate")
        product = create_product(
            sku=product_data.pop("sku"),
            shop=shop,
            supplier=supplier,
            **product_data)
        add_product_to_order(
            order, supplier, product, quantity=quantity,
            taxless_base_unit_price=product_data["default_price"], tax_rate=tax_rate)
    order.cache_prices()
    order.check_all_verified()
    order.save()
    return order
def _get_product_data():
return [
{
"sku": "sku1234",
"default_price": decimal.Decimal("14.756"),
"quantity": decimal.Decimal("1"),
"tax_rate": decimal.Decimal("0.24")
},
{
"sku": "sku12345",
"default_price": decimal.Decimal("10"),
"quantity": decimal.Decimal("5"),
"tax_rate": decimal.Decimal("0.24")
},
{
"sku": "sku123456",
"default_price": decimal.Decimal("14.756"),
"quantity": decimal.Decimal("2"),
"tax_rate": decimal.Decimal("0.24")
},
{
"sku": "sku1234567",
"default_price": decimal.Decimal("8.8164"),
"quantity": decimal.Decimal("1"),
"tax_rate": decimal.Decimal("0.14")
},
{
"sku": "sku12345678",
"default_price": decimal.Decimal("17.6328"),
"quantity": decimal.Decimal("4"),
"tax_rate": decimal.Decimal("0.00")
}
]
|
getting-things-gnome/gtg | tests/core/test_search_filter.py | Python | gpl-3.0 | 9,498 | 0 | # -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2014 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
from GTG.core.search import search_filter
from GTG.core.dates import Date
# Short module-level alias: the tests below build Date objects constantly.
d = Date.parse
class FakeTask():
    """Minimal stand-in for a GTG task.

    Exposes only the accessors that search_filter() consults: title,
    excerpt (body), tag names and due date.
    """

    def __init__(self, title="", body="", tags=None, due_date=""):
        # The original signature used a mutable default (tags=[]), which
        # is shared between every instance created without tags and lets
        # one test's mutations leak into another.  Default to None and
        # create a fresh list per instance instead.
        self.title = title
        self.body = body
        self.tags = tags if tags is not None else []
        self.due_date = Date.parse(due_date)

    def get_title(self):
        """Return the task title."""
        return self.title

    def get_excerpt(self, strip_tags=False):
        """Return the task body; strip_tags is accepted for API
        compatibility with real tasks and ignored here."""
        return self.body

    def get_tags_name(self):
        """Return the list of tag names attached to this task."""
        return self.tags

    def get_due_date(self):
        """Return the parsed due date."""
        return self.due_date
class TestSearchFilter(TestCase):
def test_empty(self):
self.assertFalse(search_filter(FakeTask()))
def test_single_tag(self):
task = FakeTask(tags=['@a'])
self.assertTrue(search_filter(task, {"q": [("tag", True, "@a")]}))
self.assertFalse(search_filter(task, {"q": [("tag", True, "@b")]}))
self.assertFalse(search_filter(task, {"q": [("tag", True, "@n")]}))
def test_double_tag(self):
p = {"q": [("tag", True, "@a"), ("tag", True, "@b")]}
self.assertTrue(search_filter(FakeTask(tags=['@a', '@b']), p))
self.assertTrue(search_filter(FakeTask(tags=['@b', '@a']), p))
self.assertTrue(search_filter(FakeTask(tags=['@b', '@a', '@a']), p))
self.assertTrue(search_filter(
FakeTask(tags=['@b', '@a', '@c', '@d']), p))
self.assertTrue(search_filter(
FakeTask(tags=['@b', 'search', '@a']), p))
self.assertTrue(search_filter(
FakeTask(tags=['gtg-tags-all', '@b', 'search', '@a']), p))
self.assertTrue(search_filter(FakeTask(tags=['gtg-tags-all',
'gtg-tags-none',
'@b', 'search',
'@a']), p))
self.assertFalse(search_filter(FakeTask(tags=['@n', '@b']), p))
self.assertFalse(search_filter(FakeTask(tags=['@b', '@n']), p))
self.assertFalse(search_filter(FakeTask(tags=['@a']), p))
self.assertFalse(search_filter(FakeTask(tags=['@b']), p))
self.assertFalse(search_filter(
FakeTask(tags=['@b', '@b', '@c', '@d']), p))
self.assertFalse(search_filter(
FakeTask(tags=['@b', 'search', '@d']), p))
self.assertFalse(search_filter(
FakeTask(tags=['gtg-tags-all', '@g', 'search', '@a']), p))
self.assertFalse(search_filter(FakeTask(tags=['gtg-tags-all',
'gtg-tags-none',
'@@b',
| 'search', '@ | a']), p))
def test_simple_tag_or(self):
task = FakeTask(tags=['@a', '@b'])
self.assertTrue(search_filter(
task,
{"q":
[('or', True, [("tag", True, "@a"), ("tag", True, "@b")])]}))
self.assertTrue(search_filter(
task,
{"q":
[('or', True, [("tag", True, "@a"), ("tag", True, "@n")])]}))
self.assertTrue(search_filter(
task,
{"q":
[('or', True, [("tag", True, "@n"), ("tag", True, "@b")])]}))
self.assertFalse(search_filter(
task,
{"q":
[('or', True, [("tag", True, "@n"), ("tag", True, "@n")])]}))
def test_simple_word_in_title(self):
task = FakeTask(title="GTG is the best ToDo manager for GNOME")
# Test the lowercasing
self.assertTrue(search_filter(task, {'q': [("word", True, 'GTG')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'gtg')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'GtG')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'Gtg')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'gTg')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'GTG')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'is')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'the')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'best')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'todo')]}))
self.assertTrue(search_filter(task,
{'q': [("word", True, 'manager')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'for')]}))
self.assertTrue(search_filter(task, {'q': [("word", True, 'GNOME')]}))
# test literals
self.assertTrue(search_filter(task, {'q': [("word", True, 'GTG is')]})
)
self.assertTrue(search_filter(task,
{'q': [("word", True, 'for GNOME')]}))
self.assertTrue(search_filter(task,
{'q': [("word", False, 'GTG for GNOME')]
}))
self.assertFalse(search_filter(task,
{'q': [("word", True, 'GTG for GNOME')]
}))
def test_simple_before(self):
v = FakeTask(due_date="2012-02-14")
self.assertTrue(search_filter(v,
{'q': [("before", True, d('2022-01-01'))
]}))
self.assertTrue(search_filter(v,
{'q': [("before", True, d('2012-03-01'))
]}))
self.assertTrue(search_filter(v,
{'q': [("before", True, d('2012-02-20'))
]}))
self.assertTrue(search_filter(v,
{'q': [("before", True, d('2012-02-15'))
]}))
self.assertFalse(search_filter(v,
{'q': [("before", True, d('2012-02-14'))
]}))
self.assertFalse(search_filter(v,
{'q': [("before", True, d('2012-02-13'))
]}))
self.assertFalse(search_filter(v,
{'q': [("before", True, d('2000-01-01')
)]}))
self.assertFalse(search_filter(v,
{'q': [("before", False, d('2012-03-01'
))]}))
self.assertTrue(search_filter(v,
{'q': [("before", False, d('2012-02-14')
)]}))
self.assertTrue(search_filter(v,
{'q': [("before", False, d('2002-02-20')
)]}))
def test_simple_after(self):
t = FakeTask(due_date="2012-06-01")
self.assertTrue(search_filter(t,
{'q': [("after", True, d('2002-01-01'))]
}))
|
chrislit/abydos | abydos/distance/_tarantula.py | Python | gpl-3.0 | 4,524 | 0 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._tarantula.
Tarantula similarity
"""
from typing import Any, Counter as TCounter, O | ptional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Tarantula']
class Tarantula(_TokenDistance):
    r"""Tarantula similarity.

    For two sets X and Y and a population N, Tarantula similarity
    :cite:`Jones:2005` is

        .. math::

            sim_{Tarantula}(X, Y) =
            \frac{\frac{|X \cap Y|}{|X \cap Y| + |X \setminus Y|}}
            {\frac{|X \cap Y|}{|X \cap Y| + |X \setminus Y|} +
            \frac{|Y \setminus X|}
            {|Y \setminus X| + |(N \setminus X) \setminus Y|}}

    In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
    this is

        .. math::

            sim_{Tarantula} =
            \frac{\frac{a}{a+b}}{\frac{a}{a+b} + \frac{c}{c+d}}

    .. versionadded:: 0.4.0
    """

    def __init__(
        self,
        alphabet: Optional[
            Union[TCounter[str], Sequence[str], Set[str], int]
        ] = None,
        tokenizer: Optional[_Tokenizer] = None,
        intersection_type: str = 'crisp',
        **kwargs: Any
    ) -> None:
        """Initialize Tarantula instance.

        Parameters
        ----------
        alphabet : Counter, collection, int, or None
            This represents the alphabet of possible tokens.
            See :ref:`alphabet <alphabet>` description in
            :py:class:`_TokenDistance` for details.
        tokenizer : _Tokenizer
            A tokenizer instance from the :py:mod:`abydos.tokenizer` package
        intersection_type : str
            Specifies the intersection type, and set type as a result:
            See :ref:`intersection_type <intersection_type>` description in
            :py:class:`_TokenDistance` for details.
        **kwargs
            Arbitrary keyword arguments

        Other Parameters
        ----------------
        qval : int
            The length of each q-gram. Using this parameter and
            tokenizer=None will cause the instance to use the QGram
            tokenizer with this q value.
        metric : _Distance
            A string distance measure class for use in the ``soft`` and
            ``fuzzy`` variants.
        threshold : float
            A threshold value, similarities above which are counted as
            members of the intersection for the ``fuzzy`` variant.

        .. versionadded:: 0.4.0
        """
        super().__init__(
            alphabet=alphabet,
            tokenizer=tokenizer,
            intersection_type=intersection_type,
            **kwargs
        )

    def sim(self, src: str, tar: str) -> float:
        """Return the Tarantula similarity of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Tarantula similarity

        Examples
        --------
        >>> cmp = Tarantula()
        >>> cmp.sim('cat', 'hat')
        0.9948979591836735
        >>> cmp.sim('Niall', 'Neil')
        0.98856416772554
        >>> cmp.sim('aluminum', 'Catalan')
        0.9249106078665077
        >>> cmp.sim('ATCG', 'TAGC')
        0.0

        .. versionadded:: 0.4.0
        """
        # Identical strings are maximally similar by definition.
        if src == tar:
            return 1.0

        self._tokenize(src, tar)

        # Confusion-table cardinalities: a, b, c, d respectively.
        both = self._intersection_card()
        src_only = self._src_only_card()
        tar_only = self._tar_only_card()
        neither = self._total_complement_card()

        # The closed form of a/(a+b) / (a/(a+b) + c/(c+d)).
        numerator = both * (tar_only + neither)
        if not numerator:
            return 0.0
        return numerator / (
            both * (2 * tar_only + neither) + src_only * tar_only
        )
if __name__ == '__main__':
    # Run the docstring examples above as doctests when executed directly.
    import doctest

    doctest.testmod()
|
plotly/plotly.py | packages/python/plotly/plotly/validators/box/marker/_symbol.py | Python | mit | 14,526 | 0.000069 | import _plotly_utils.basevalidators
class SymbolValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="symbol", parent_name="box.marker", **kwargs):
super(SymbolValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", False),
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop(
"values",
[
0,
"0",
"circle",
100,
"100",
"circle-open",
200,
"200",
"circle-dot",
300,
"300",
"circle-open-dot",
1,
"1",
"square",
101,
"101",
"square-open",
201,
"201",
"square-dot",
301,
"301",
"square-open-dot",
2,
"2",
"diamond",
102,
"102",
"diamond-open",
202,
"202",
"diamond-dot",
302,
"302",
"diamond-open-dot",
3,
"3",
"cross",
103,
"103",
"cross-open",
203,
"203",
"cross-dot",
303,
"303",
"cross-open-dot",
4,
"4",
"x",
104,
"104",
"x-open",
204,
"204",
"x-dot",
304,
"304",
"x-open-dot",
5,
"5",
"triangle-up",
105,
"105",
"triangle-up-open",
205,
"205",
"triangle-up-dot",
305,
"305",
"triangle-up-open-dot",
6,
"6",
"triangle-down",
106,
"106",
"triangle-down-open",
206,
"206",
"triangle-down-dot",
306,
"306",
"triangle-down-open-dot",
7,
"7",
"triangle-left",
107,
"107",
"triangle-left-open",
207,
"207",
"triangle-left-dot",
307,
"307",
"triangle-left-open-dot",
8,
"8",
"triangle-right",
108,
"108",
"triangle-right-open",
208,
"208",
"triangle-right-dot",
308,
"308",
"triangle-right-open-dot",
9,
"9",
"triangle-ne",
109,
"109",
"triangle-ne-open",
209,
"209",
"triangle-ne-dot",
309,
"309",
"triangle-ne-open-dot",
10,
"10",
"triangle-se",
110,
"110",
"triangle-se-open",
210,
"210",
"triangle-se-dot",
310,
"310",
"triangle-se-open-dot",
11,
"11",
"triangle-sw",
111,
"111",
"triangle-sw-open",
211,
"211",
"triangle-sw-dot",
311,
"311",
"triangle-sw-open-dot",
12,
"12",
"triangle-nw",
112,
"112",
"triangle-nw-open",
212,
"212",
"triangle-nw-dot",
312,
"312",
"triangle-nw-open-dot",
13,
"13",
"pentagon",
113,
"113",
"pentagon-open",
| 213,
"213",
"pentagon-dot",
313,
"313",
"pentagon-open-dot",
14,
"14",
| "hexagon",
114,
"114",
"hexagon-open",
214,
"214",
"hexagon-dot",
314,
"314",
"hexagon-open-dot",
15,
"15",
"hexagon2",
115,
"115",
"hexagon2-open",
215,
"215",
"hexagon2-dot",
315,
"315",
"hexagon2-open-dot",
16,
"16",
"octagon",
116,
"116",
"octagon-open",
216,
"216",
"octagon-dot",
316,
"316",
"octagon-open-dot",
17,
"17",
"star",
117,
"117",
"star-open",
217,
"217",
"star-dot",
317,
"317",
"star-open-dot",
18,
"18",
"hexagram",
118,
"118",
"hexagram-open",
218,
"218",
"hexagram-dot",
318,
"318",
"hexagram-open-dot",
19,
"19",
"star-triangle-up",
119,
"119",
"star-triangle-up-open",
219,
"219",
"star-triangle-up-dot",
319,
"319",
"star-triangle-up-open-dot",
20,
"20",
"star-triangle-down",
120,
"120",
"star-triangle-down-open",
220,
"220",
"star-triangle-down-dot",
320,
"320",
"star-triangle-down-open-dot",
21,
"21",
"star-square",
121,
"121",
"star-square-open",
221 |
moto-timo/robotframework | src/robot/output/xmllogger.py | Python | apache-2.0 | 5,678 | 0.000176 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.utils import XmlWriter, NullMarkupWriter, get_timestamp, unic
from robot.version import get_full_version
from robot.result.visitor import ResultVisitor
from .loggerhelper import IsLogged
class XmlLogger(ResultVisitor):
    """Result visitor that serializes execution results into output.xml.

    Suite, test and keyword elements are written incrementally as the
    visitor callbacks fire.  Log messages are filtered by ``log_level``;
    error-level messages are buffered and flushed on :meth:`close`.

    This revision also restores two statements that had been corrupted
    by extraction artifacts: ``kw.assign`` in :meth:`start_keyword` and
    the ``'tag'`` argument list in :meth:`end_test`.
    """

    def __init__(self, path, log_level='TRACE', generator='Robot'):
        self._log_message_is_logged = IsLogged(log_level)
        # Messages at WARN or above are always collected as errors.
        self._error_message_is_logged = IsLogged('WARN')
        self._writer = self._get_writer(path, generator)
        self._errors = []

    def _get_writer(self, path, generator):
        """Open an XML writer for ``path``; no-op writer when path is falsy."""
        if not path:
            return NullMarkupWriter()
        try:
            writer = XmlWriter(path, write_empty=False)
        except EnvironmentError as err:
            raise DataError("Opening output file '%s' failed: %s" %
                            (path, err.strerror))
        writer.start('robot', {'generator': get_full_version(generator),
                               'generated': get_timestamp()})
        return writer

    def close(self):
        """Write buffered errors, close the root element and the file."""
        self.start_errors()
        for msg in self._errors:
            self._write_message(msg)
        self.end_errors()
        self._writer.end('robot')
        self._writer.close()

    def set_log_level(self, level):
        # Returns the previous threshold so callers can restore it later.
        return self._log_message_is_logged.set_level(level)

    def message(self, msg):
        """Buffer ``msg`` for the <errors> section if it is WARN or above."""
        if self._error_message_is_logged(msg.level):
            self._errors.append(msg)

    def log_message(self, msg):
        """Write ``msg`` immediately if it passes the configured log level."""
        if self._log_message_is_logged(msg.level):
            self._write_message(msg)

    def _write_message(self, msg):
        attrs = {'timestamp': msg.timestamp or 'N/A', 'level': msg.level}
        if msg.html:
            attrs['html'] = 'yes'
        self._writer.element('msg', msg.message, attrs)

    def start_keyword(self, kw):
        attrs = {'name': kw.kwname, 'library': kw.libname}
        # Only non-default keyword types (setup/teardown/for/...) are marked.
        if kw.type != 'kw':
            attrs['type'] = kw.type
        if kw.timeout:
            attrs['timeout'] = unic(kw.timeout)
        self._writer.start('kw', attrs)
        self._write_list('tags', 'tag', [unic(t) for t in kw.tags])
        self._writer.element('doc', kw.doc)
        self._write_list('arguments', 'arg', [unic(a) for a in kw.args])
        self._write_list('assign', 'var', kw.assign)

    def end_keyword(self, kw):
        self._write_status(kw)
        self._writer.end('kw')

    def start_test(self, test):
        attrs = {'id': test.id, 'name': test.name}
        if test.timeout:
            attrs['timeout'] = unic(test.timeout)
        self._writer.start('test', attrs)

    def end_test(self, test):
        self._writer.element('doc', test.doc)
        self._write_list('tags', 'tag', test.tags)
        self._write_status(test, {'critical': 'yes' if test.critical else 'no'})
        self._writer.end('test')

    def start_suite(self, suite):
        attrs = {'id': suite.id, 'name': suite.name, 'source': suite.source}
        self._writer.start('suite', attrs)

    def end_suite(self, suite):
        self._writer.element('doc', suite.doc)
        if suite.metadata:
            self._write_metadata(suite.metadata)
        self._write_status(suite)
        self._writer.end('suite')

    def _write_metadata(self, metadata):
        self._writer.start('metadata')
        for name, value in metadata.items():
            self._writer.element('item', value, {'name': name})
        self._writer.end('metadata')

    # The statistics and errors callbacks only open/close container
    # elements; individual values are written by visit_stat below.

    def start_statistics(self, stats):
        self._writer.start('statistics')

    def end_statistics(self, stats):
        self._writer.end('statistics')

    def start_total_statistics(self, total_stats):
        self._writer.start('total')

    def end_total_statistics(self, total_stats):
        self._writer.end('total')

    def start_tag_statistics(self, tag_stats):
        self._writer.start('tag')

    def end_tag_statistics(self, tag_stats):
        self._writer.end('tag')

    def start_suite_statistics(self, tag_stats):
        self._writer.start('suite')

    def end_suite_statistics(self, tag_stats):
        self._writer.end('suite')

    def visit_stat(self, stat):
        self._writer.element('stat', stat.name,
                             stat.get_attributes(values_as_strings=True))

    def start_errors(self, errors=None):
        self._writer.start('errors')

    def end_errors(self, errors=None):
        self._writer.end('errors')

    def _write_list(self, container_tag, item_tag, items):
        # The container element is omitted entirely when there are no items.
        if items:
            self._writer.start(container_tag)
            for item in items:
                self._writer.element(item_tag, item)
            self._writer.end(container_tag)

    def _write_status(self, item, extra_attrs=None):
        attrs = {'status': item.status, 'starttime': item.starttime or 'N/A',
                 'endtime': item.endtime or 'N/A'}
        # elapsedtime is written explicitly only when it cannot be
        # recomputed from the two timestamps.
        if not (item.starttime and item.endtime):
            attrs['elapsedtime'] = str(item.elapsedtime)
        if extra_attrs:
            attrs.update(extra_attrs)
        self._writer.element('status', item.message, attrs)
|
eayunstack/neutron | neutron/plugins/ml2/extensions/dns_integration.py | Python | apache-2.0 | 23,051 | 0.00026 | # Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import dns as dns_apidef
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_const
from neutron_lib.exceptions import dns as dns_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from oslo_log import log as logging
from neutron.db import segments_db
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
from neutron.plugins.common import utils as plugin_utils
from neutron.services.externaldns import driver
LOG = logging.getLogger(__name__)
class DNSExtensionDriver(api.ExtensionDriver):
_supported_extension_alias = dns_apidef.ALIAS
@property
def extension_alias(self):
return self._supported_extension_alias
def process_create_network(self, plugin_context, request_data, db_data):
dns_domain = request_data.get(dns_apidef.DNSDOMAIN)
if not validators.is_attr_set(dns_domain):
return
if dns_domain:
net_obj.NetworkDNSDomain(plugin_context,
network_id=db_data['id'],
dns_domain=dns_domain).create()
db_data[dns_apidef.DNSDOMAIN] = dns_domain
def process_update_network(self, plugin_context, request_data, db_data):
new_value = request_data.get(dns_apidef.DNSDOMAIN)
if not validators.is_attr_set(new_value):
return
current_dns_domain = db_data.get(dns_apidef.DNSDOMAIN)
if current_dns_domain == new_value:
return
net_id = db_data['id']
if current_dns_domain:
net_dns_domain = net_obj.NetworkDNSDomain.get_object(
plugin_context,
network_id=net_id)
if new_value:
net_dns_domain['dns_domain'] = new_value
db_data[dns_apidef.DNSDOMAIN] = new_value
net_dns_domain.update()
else:
net_dns_domain.delete()
db_data[dns_apidef.DNSDOMAIN] = ''
elif new_value:
net_obj.NetworkDNSDomain(plugin_context,
network_id=net_id,
dns_domain=new_value).create()
db_data[dns_apidef.DNSDOMAIN] = new_value
def process_create_port(self, plugin_context, request_data, db_data):
if not (request_data.get(dns_apidef.DNSNAME) or
request_data.get(dns_apidef.DNSDOMAIN)):
return
dns_name, is_dns_domain_default = self._get_request_dns_name(
request_data)
if is_dns_domain_default:
return
network = self._get_network(plugin_context, db_data['network_id'])
self._create_port_dns_record(plugin_context, request_data, db_data,
network, dns_name)
def _create_port_dns_record(self, plugin_context, request_data, db_data,
network, dns_name):
external_dns_domain = (request_data.get(dns_apidef.DNSDOMAIN) or
network.get(dns_apidef.DNSDOMAIN))
current_dns_name, current_dns_domain = (
self._calculate_current_dns_name_and_domain(
dns_name, external_dns_domain,
self.external_dns_not_needed(plugin_context, network)))
dns_data_obj = port_obj.PortDNS(
plugin_context,
port_id=db_data['id'],
current_dns_name=current_dns_name,
current_dns_domain=current_dns_domain,
previous_dns_name='',
previous_dns_domain='',
dns_name=dns_name,
dns_domain=request_data.get(dns_apidef.DNSDOMAIN, ''))
dns_data_obj.create()
return dns_data_obj
def _calculate_current_dns_name_and_domain(self, dns_name,
external_dns_domain,
no_external_dns_service):
# When creating a new PortDNS object, the current_dns_name and
# current_dns_domain fields hold the data that the integration driver
# will send to the external DNS service. They are set to non-blank
# values only if all the following conditions are met:
# 1) There is an external DNS integration driver configured
# 2) The user request contains a valid non-blank value for the port's
# dns_name
# 3) The user request contains a valid non-blank value for the port's
# dns_domain or the port's network has a non-blank value in its
# dns_domain attribute
are_both_dns_attributes_set = dns_name and external_dns_domain
if no_external_dns_service or not are_both_dns_attributes_set:
return '', ''
return dns_name, external_dns_domain
def _update_dns_db(self, plugin_context, request_data, db_data, network):
dns_name = request_data.get(dns_apidef.DNSNAME)
dns_domain = request_data.get(dns_apidef.DNSDOMAIN)
has_fixed_ips = 'fixed_ips' in request_data
dns_data_db = port_obj.PortDNS.get_object(
plugin_context,
port_id=db_data['id'])
if dns_data_db:
is_dns_name_changed = (dns_name is not None and
dns_data_db[dns_apidef.DNSNAME] != dns_name)
is_dns_domain_changed = (dns_domain is not None and
dns_data_db[dns_apidef.DNSDOMAIN] != dns_domain)
if (is_dns_name_changed or is_dns_domain_changed or
(has_fixed_ips and dns_data_db['current_dns_name'])):
dns_data_db = self._populate_previous_external_dns_data(
dns_data_db)
dns_data_db = self._populate_current_external_dns_data(
request_data, network, dns_data_db, dns_name, dns_domain,
is_dns_name_changed, is_dns_domain_changed)
elif not dns_data_db['current_dns_name']:
# If port was removed from external DNS service in previous
# update, make sure we don't attempt removal again
dns_data_db['previous_dns_name'] = ''
dns_data_db['previous_dns_domain'] = ''
dns_data_db.update()
return dns_data_db
if dns_name or dns_domain:
dns_data_db = self._create_port_dns_reco | rd(plugin_context,
request_da | ta, db_data, network, dns_name or '')
return dns_data_db
def _populate_previous_external_dns_data(self, dns_data_db):
dns_data_db['previous_dns_name'] = (
dns_data_db['current_dns_name'])
dns_data_db['previous_dns_domain'] = (
dns_data_db['current_dns_domain'])
return dns_data_db
def _populate_current_external_dns_data(self, request_data, network,
dns_data_db, dns_name, dns_domain,
is_dns_name_changed,
is_dns_domain_changed):
if is_dns_name_changed or is_dns_domain_changed:
if is_dns_name_changed:
dns_data_db[dns_apidef.DNSNAME] = dns_name
external_dns_domain = (dns_data_db[dns_apidef.DNSDOMAIN] or
network.get(dns_apidef.DNSDOMAIN))
if is |
OpenSpaceProgram/pyOSP | library/sensors/Raspistill.py | Python | mit | 2,060 | 0.001942 | # -*- coding: utf-8 -*-
import sys
import time
from subprocess import call
#add the project folder to pythpath
sys.path.append('../../')
from library.components.SensorModule import SensorModule as Sensor
from library.components.MetaData import MetaData as MetaData
class Raspistill(Sensor):
    """Sensor module that takes photos with the Raspberry Pi camera.

    One metadata entry is registered per supported ISO setting; reading
    an entry's value shells out to the ``raspistill`` command and
    returns the file name of the captured photo.
    """

    # ISO settings exposed as individual metadata entries.
    _ISO_VALUES = (100, 200, 400, 800)

    def __init__(self):
        super(Raspistill, self).__init__()
        # The original built four identical MetaData blocks by hand;
        # register them in a loop over the supported ISO values instead.
        for iso in self._ISO_VALUES:
            meta = MetaData('ISO%d' % iso)
            meta.setValueCallback(getattr(self, 'getIso%d' % iso))
            meta.setUnitCallback(self.getUnit)
            self.addMetaData(meta)

    def _capture(self, iso):
        """Take a photo at the given ISO and return its file name."""
        filename = "photos/" + str(time.time()) + ("-iso%d.jpg" % iso)
        call(["raspistill", "--ISO", str(iso), "-o", filename])
        return str(filename)

    def getIso100(self):
        """Capture a photo at ISO 100 and return its file name."""
        return self._capture(100)

    def getIso200(self):
        """Capture a photo at ISO 200 and return its file name."""
        return self._capture(200)

    def getIso400(self):
        """Capture a photo at ISO 400 and return its file name."""
        return self._capture(400)

    def getIso800(self):
        """Capture a photo at ISO 800 and return its file name."""
        return self._capture(800)

    def getUnit(self):
        """Return the unit label shown next to metadata values."""
        return " Photo"

    def getMetaData(self):
        """Return the metadata entries registered on this sensor."""
        return super(Raspistill, self).getMetaData()
felipevolpone/alabama_orm | tests/test_property.py | Python | mit | 1,389 | 0 | import unittest
from mock import Person, Gender
from models import BaseProperty
class TestProperty(unittest.TestCase):
    """Exercise validation and per-instance storage of model properties."""

    def test_enum_property(self):
        """Enum properties accept declared members and reject other values."""
        model = Person()
        model.gender = Gender.male
        # assertEqual rather than the deprecated assertEquals alias, for
        # consistency with the other tests in this class.
        self.assertEqual(model.gender, Gender.male)
        with self.assertRaises(ValueError):
            a = Person()
            a.gender = "invalid valid"

    def test_property(self):
        """Typed properties reject values of the wrong type and keep
        values of the right one."""
        with self.assertRaises(ValueError):
            obj = Person()
            obj.name = 3
        with self.assertRaises(ValueError):
            obj = Person()
            obj.age = "a"
        obj = Person()
        obj.name = "string"
        obj.age = 1
        self.assertEqual(obj.name, "string")
        self.assertEqual(obj.age, 1)

    def test_different_object(self):
        """Property values are stored per instance, not on the class."""
        obj1 = Person()
        obj1.name = "string"
        obj1.age = 1
        self.assertEqual(obj1.name, "string")
        self.assertEqual(obj1.age, 1)
        obj2 = Person()
        obj2.name = "new"
        obj2.age = 2
        self.assertEqual(obj2.name, "new")
        self.assertEqual(obj2.age, 2)
        # Setting obj2 must not have clobbered obj1's values.
        self.assertEqual(obj1.name, "string")
        self.assertEqual(obj1.age, 1)

    def test_wrong_implementation_of_property(self):
        """Subclasses that do not override _type must raise."""
        class WrongProp(BaseProperty):
            pass

        with self.assertRaises(NotImplementedError):
            WrongProp()._type()
|
magvugr/AT | AppAdiccionTic/models.py | Python | gpl-3.0 | 1,704 | 0.033451 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Noticia(models.Model):
    """A news article that can be published or kept as a draft.

    Restores the CHOICES line, which had a trailing extraction artifact
    (' |') that made the class definition a syntax error.
    """

    # Workflow states for an article.
    Publicado = 'Publicado'
    Borrador = 'Borrador'

    Titulo = models.CharField(max_length=30)
    Subtitulo = models.CharField(max_length=50)
    Imagen = models.FileField(blank=True, upload_to='media/fotos/noticias')
    SubtituloImag = models.CharField(max_length=30)
    Cuerpo = models.TextField(max_length=500)
    # Creation and last-modification timestamps, maintained automatically.
    Timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    Actualizado = models.DateTimeField(auto_now_add=False, auto_now=True)
    CHOICES = [(Publicado, 'Publicado'), (Borrador, 'Borrador')]
    Estado = models.CharField(max_length=9, choices=CHOICES, default=Borrador)
    # Optional embedded video (e.g. an external player/embed code).
    IncluirVideo = models.BooleanField()
    CodVideo = models.CharField(max_length=200)
    Tags = models.CharField(max_length=30)
    usuario = models.ForeignKey(User)

    def __str__(self):
        return self.Titulo + ' - ' + self.Subtitulo
class Evento(models.Model):
    """An event announcement (talk, workshop, ...).

    Restores the Imagen field, which had an extraction artifact
    ('Imagen | =') that made the class definition a syntax error.
    """

    Titulo = models.CharField(max_length=30)
    Subtitulo = models.CharField(max_length=50)
    Imagen = models.FileField(blank=True, upload_to='media/fotos/noticias')
    SubtituloImag = models.CharField(max_length=30)
    Cuerpo = models.CharField(max_length=500)
    # Creation and last-modification timestamps, maintained automatically.
    Timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    Actualizado = models.DateTimeField(auto_now_add=False, auto_now=True)
    Lugar = models.CharField(max_length=50)
    # When the event takes place (set explicitly, not on creation).
    Fecha = models.DateTimeField(auto_now_add=False)
    Organizadores = models.CharField(max_length=30)
    Ponente = models.CharField(max_length=30)
    Tags = models.CharField(max_length=30)

    def __str__(self):
        return self.Titulo + ' - ' + self.Subtitulo
|
makfire/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/task.py | Python | agpl-3.0 | 37,929 | 0.019774 | import datetime
import json
from django.http import HttpResponseRedirect
from django.http import StreamingHttpResponse
from django.http import HttpResponse
from django.shortcuts import render_to_response #render uncomment when to use
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils import simplejson
from online_status.utils import encode_json
from mongokit import paginator
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.models import Node, GSystemType
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.models import GSystemType, Node
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, get_file_node,get_execution_time,get_group_name_id
from gnowsys_ndf.ndf.views.methods import parse_template_data, create_gattribute, create_grelation
from gnowsys_ndf.ndf.views.notify import set_notif_val
# First configured Django Site, or "" when the sites table is empty.
sitename=Site.objects.all()
# GSystemType node for the Task app; used as appId in template contexts.
app = node_collection.one({'_type': "GSystemType", 'name': 'Task'})
if sitename :
    sitename = sitename[0]
else :
    sitename = ""
@get_execution_time
def task(request, group_name, task_id=None):
    """Render the list of all Task nodes available within the group.

    ``group_name`` may be either a group name or an ObjectId string;
    ``get_group_name_id`` normalizes it to a (name, id) pair, which
    replaces the commented-out resolution logic the original carried.
    ``task_id`` is accepted for URL-pattern compatibility but unused.
    """
    group_name, group_id = get_group_name_id(group_name)
    GST_TASK = node_collection.one({'_type': "GSystemType", 'name': 'Task'})
    title = "Task"
    # All Task instances that belong to the requested group.
    TASK_inst = node_collection.find(
        {'member_of': {'$all': [GST_TASK._id]},
         'group_set': {'$all': [ObjectId(group_id)]}})
    template = "ndf/task.html"
    variable = RequestContext(request, {'title': title,
                                        'appId': app._id,
                                        'TASK_inst': TASK_inst,
                                        'group_id': group_id,
                                        'groupid': group_id,
                                        'group_name': group_name})
    return render_to_response(template, variable)
@login_required
@get_execution_time
def task_details(request, group_name, task_id):
"""Renders given task's details.
"""
group_id = None
if ObjectId.is_valid(group_name) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_name})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
elif ObjectId.is_valid(group_name) is True:
group_ins = node_collection.find_one({'_type': "Group", "_id": ObjectId(group_name)})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
task_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(task_id)})
at_list = ["Status", "start_time", "Priority", "end_time", "Assignee", "Estimated_time","Upload_Task"]
blank_dict = {}
| history = []
subtask = []
for each in at_list:
attributetype_key = node_collection.find_one({"_type": 'AttributeType', 'name': | each})
attr = triple_collection.find_one({"_type": "GAttribute", "subject": task_node._id, "attribute_type.$id": attributetype_key._id})
if attr:
if attributetype_key.name == "Assignee":
u_list = []
for each_id in attr.object_value:
u = User.objects.get(id=each_id)
if u:
if u.username not in u_list:
u_list.append(u.username)
blank_dict[each] = u_list
else:
blank_dict[each] = attr.object_value
if task_node.prior_node:
blank_dict['parent'] = node_collection.one({'_id': task_node.prior_node[0]}).name
if task_node.post_node:
for each_postnode in task_node.post_node:
sys_each_postnode = node_collection.find_one({'_id': each_postnode})
sys_each_postnode_user = User.objects.get(id=sys_each_postnode.created_by)
member_of_name = node_collection.find_one({'_id': sys_each_postnode.member_of[0]}).name
if member_of_name == "Task" :
subtask.append({
'id':str(sys_each_postnode._id),
'name':sys_each_postnode.name,
'created_by':sys_each_postnode_user.username,
'created_at':sys_each_postnode.created_at
})
if member_of_name == "task_update_history":
if sys_each_postnode.altnames == None:
postnode_task = '[]'
else :
postnode_task = sys_each_postnode.altnames
history.append({
'id':str(sys_each_postnode._id),
'name':sys_each_postnode.name,
'created_by':sys_each_postnode_user.username,
'created_at':sys_each_postnode.created_at,
'altnames':eval(postnode_task),
'content':sys_each_postnode.content
})
if task_node.collection_set:
blank_dict['collection']='True'
# Appending TaskType to blank_dict, i.e. "has_type" relationship
if task_node.relation_set:
for rel in task_node.relation_set:
if "has_type" in rel and rel["has_type"]:
task_type = node_collection.one({'_id': rel["has_type"][0]}, {'name': 1})
if task_type:
blank_dict["has_type"] = task_type["name"]
break
# Appending Watchers to blank_dict, i.e. values of node's author_set field
if task_node.author_set:
watchers_list = []
for eachid in task_node.author_set:
if eachid not in watchers_list:
watchers_list.append(eachid)
blank_dict["Watchers"] = watchers_list
history.reverse()
var = {
'title': task_node.name,
'group_id': group_id, 'appId': app._id, 'groupid': group_id, 'group_name': group_name,
'node': task_node, 'history':history, 'subtask': subtask
}
var.update(blank_dict)
variables = RequestContext(request, var)
template = "ndf/task_details.html"
return render_to_response(template, variables)
@get_execution_time
def save_image(request, group_name, app_id=None, app_name=None, app_set_id=None, slug=None):
if request.method == "POST" :
#here group_name contains the object id of the group insted of name sent from
#task template
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_name) is False :
group_object = node_collection.one({'_type':{'$in':['Group','Author']}, 'name': unicode(group_name)})
group_object = group_object._id
else:
group_object = group_name
for index, each in enumerate(request.FILES.getlist("doc[]", "")):
title = each.name
userid = request.POST.get("user", "")
content_org = request.POST.get('content_org', '')
tags = request.POST.get('tags', "")
img_type = request.POST.get("type", "")
language = request.POST.get("lan", "")
usrname = request.user.username
page_url = request.POST.get("page_url", "")
access_policy = request.POST.get("login-mode", '') # To add access policy(public or private) to file object
# for storing location in the file
# location = []
# location.append(json.loads(request.POST.get("location", "{}")))
# obs_image = save_file(each,title,userid,group_id, content_org, tags, img_type, language, usrname, access_policy, oid=True, location=location)
obs_image = sav |
gnowledge/ncert_nroer | gstudio/tests/url_shortener.py | Python | agpl-3.0 | 5,124 | 0.000585 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICU | LAR PURPOSE. See the
# GNU Affero General Pu | blic License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test cases for Gstudio's url_shortener"""
from __future__ import with_statement
import warnings
from django.test import TestCase
from gstudio.url_shortener import get_url_shortener
from gstudio import url_shortener as us_settings
from gstudio.url_shortener.backends.default import backend as default_backend
class URLShortenerTestCase(TestCase):
"""Test cases for gstudio.url_shortener"""
def setUp(self):
self.original_backend = us_settings.URL_SHORTENER_BACKEND
def tearDown(self):
us_settings.URL_SHORTENER_BACKEND = self.original_backend
def test_get_url_shortener(self):
us_settings.URL_SHORTENER_BACKEND = 'mymodule.myclass'
try:
with warnings.catch_warnings(record=True) as w:
self.assertEquals(get_url_shortener(), default_backend)
self.assertTrue(issubclass(w[-1].metatype, RuntimeWarning))
self.assertEquals(
str(w[-1].message),
'mymodule.myclass backend cannot be imported')
except AttributeError:
# Fail under Python2.5, because of'warnings.catch_warnings'
pass
us_settings.URL_SHORTENER_BACKEND = 'gstudio.tests.custom_url_shortener'
try:
with warnings.catch_warnings(record=True) as w:
self.assertEquals(get_url_shortener(), default_backend)
self.assertTrue(issubclass(w[-1].metatype, RuntimeWarning))
self.assertEquals(
str(w[-1].message),
'This backend only exists for testing')
except AttributeError:
# Fail under Python2.5, because of'warnings.catch_warnings'
pass
us_settings.URL_SHORTENER_BACKEND = 'gstudio.url_shortener'\
'.backends.default'
self.assertEquals(get_url_shortener(), default_backend)
|
bartoldeman/easybuild-easyblocks | easybuild/easyblocks/s/snphylo.py | Python | gpl-2.0 | 3,939 | 0.002793 | ##
# Copyright 2009-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for SNPyhlo, implemented as an easyblock
@authors: Ewan Higgs (HPC-UGent)
@authors: Kenneth Hoste (HPC-UGent)
"""
import os
import re
import shutil
import stat
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_SNPhylo(EasyBlock):
"""Support for building and installing SNPhylo."""
def configure_step(self):
"""No configure step for SNPhylo."""
pass
def build_step(self):
"""No build step for SNPhylo."""
# check for required dependencies
for dep in ['MUSCLE', 'PHYLIP', 'Python', 'R']:
if not get_software_root(dep):
raise EasyBuildError("Required dependency '%s' not loaded", dep)
# check for required R libraries
rver = get_software_version('R')
r_libs, _ = run_cmd("R --vanilla --no-save --slave -e 'print(installed.packages())'", simple=False)
for rpkg in ['gdsfmt', 'getopt', 'SNPRelate', 'phangorn']:
if not re.search(r'^%s\s.*%s' % (rpkg, rver), r_libs, re.M):
raise EasyBuildError("Required R package '%s' not installed", rpkg)
# run setup.sh, and send a bunch of newlines as stdin to 'answer' the Q&A;
# all questions can be answered with the default answer (if the dependencies are | specified correctly);
# use run_cmd_qa doesn not work because of buffering issues (questions are not coming through)
adjust_permissions('setup.sh', stat.S_IXUSR, add=True)
(out, _) = run_cmd('bash ./setup.sh', inp='\n' * 10, simple=False)
success_msg = "SNPHYLO is successfully installed!!!"
if success_msg not in out:
raise EasyBuildError("Success message '%s' not found in setup.sh output: %s", success_msg, out)
def install_step(se | lf):
"""Install by copying files/directories."""
bindir = os.path.join(self.installdir, 'bin')
binfiles = ['snphylo.sh', 'snphylo.cfg', 'snphylo.template']
try:
mkdir(bindir, parents=True)
for binfile in binfiles:
shutil.copy2(os.path.join(self.builddir, binfile), bindir)
shutil.copytree(os.path.join(self.builddir, 'scripts'), os.path.join(self.installdir, 'scripts'))
except OSError as err:
raise EasyBuildError("Failed to copy SNPhylo files/dirs: %s", err)
def sanity_check_step(self):
"""Custom sanity check for SNPhylo."""
custom_paths = {
'files': ['bin/snphylo.sh', 'bin/snphylo.cfg', 'bin/snphylo.template'],
'dirs': ['scripts'],
}
super(EB_SNPhylo, self).sanity_check_step(custom_paths=custom_paths)
|
RevansChen/online-judge | Codewars/8kyu/training-js-number-7-if-dot-else-and-ternary-operator/Python/solution1.py | Python | mit | 153 | 0.006536 | # Python - 2.7.6
def sale_hotdogs(n):
if n < 5:
return n * 100
elif (5 <= | n < 10):
return n * | 95
else:
return n * 90 |
zedr/boxing-clock | src/main.py | Python | bsd-3-clause | 209 | 0 | try:
from boxe_c | lock.apps.android_app import BoxingApp
except ImportError:
from boxe_clock.apps.generic_app import BoxingApp
def main():
BoxingApp().run()
if __name__ = | = "__main__":
main()
|
ina-foss/ID-Fits | lib/datasets/landmarks_file.py | Python | lgpl-3.0 | 1,121 | 0.005352 | # ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the Lic | ense, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import numpy as np
def readPtsL | andmarkFile(filename, landmarks_number):
f = open(filename)
# Skip first 3 lines
for i in range(3):
f.readline()
# Read landmarks position
landmarks = np.empty((landmarks_number, 2), dtype=np.float)
for i in range(landmarks_number):
landmarks[i] = np.array([float(x) for x in f.readline().split()])
return landmarks
|
CCI-MOC/GUI-Backend | scripts/import_tags.py | Python | apache-2.0 | 2,440 | 0.00041 | #!/usr/bin/env python
import json
import logging
from optparse import OptionParser
from service_old.models import Instance
import django
django.setup()
def export_instance_tags():
instance_tags = []
instances = Instance.objects.all()
added = 0
for i in instances:
if i.instance_tags:
tag_json = []
tag_list = i.instance_tags.split(',')
for tag in tag_list:
tag_json.append({'name': tag, 'description': ''})
instance_tags.append({'instance': i.instance_id, 'tags': tag_json})
added = added + 1
logging.info('%s records exported' % added)
return json.dumps(instance_tags)
def import_instance_tags(instance_tags_json):
| instance_tags = json.loads(instance_tags_json)
added = 0
skipped = 0
for instance_tag in instance_tags:
try:
instance = Instance.objects.get(
instance_id=instance_tag['instance'])
instance.instance_tags = ','.join(
[tag['name'] for tag in instance_tag['tags']])
instance.save()
added = added + 1
except In | stance.DoesNotExist as dne:
logging.warn(
'Could not import tags for instance <%s> - DB Record does not exist' %
instance_tag['instance'])
skipped = skipped + 1
total = added + skipped
logging.info(
'%s Records imported. %s Records added, %s Records skipped' %
(total, added, skipped))
return
def main():
(options, filenames) = parser.parse_args()
if not filenames or len(filenames) == 0:
print 'Missing filename'
parser.print_help()
return 1
filename = filenames[0]
if options.export:
f = open(filename, 'w')
json_data = export_instance_tags()
f.write(json_data)
else:
f = open(filename, 'r')
json_data = f.read()
import_instance_tags(json_data)
f.close()
return
usage = "usage: %prog [command] filename"
parser = OptionParser(usage=usage)
parser.add_option(
"--import",
action="store_false",
dest="export",
help="Override the current DB with the Instance Tag JSON file provided")
parser.add_option(
"--export",
action="store_true",
dest="export",
default=True,
help="Export the current DB instance tags to empty file provided")
if __name__ == '__main__':
main()
|
Loisel/colorview2d | colorview2d/mods/Scale.py | Python | bsd-2-clause | 455 | 0.002198 | """A mod to scale the data."""
from co | lorview2d import imod
class Scale(imod.IMod):
"""
The mod class to scale the values in the 2d data array
according to the value entered in the widget:
args (float): The float that is multiplied with the data array.
"""
def __init__(self):
imod.IMod.__init__(self)
self.args = self.default_args = 1.
def do_apply(self, data, args):
data.zdata = data.zdata * | args
|
lewfish/django-social-news | social_news/urls.py | Python | mit | 1,105 | 0.00724 | from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import | login_required
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from . import views
urlpatterns = patterns('',
url(r'^$', views.all, name='all'),
url(r'^submit/$', login_required(views.EntryCreateView.as_view()), name='submit'),
| url(r'^vote/$', login_required(views.vote), name='vote'),
url(r'^login/$', 'django.contrib.auth.views.login',
{'template_name': 'social_news/login.html'},
name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'template_name': 'social_news/logout.html'},
name='logout'),
url(r'^signup/$', CreateView.as_view(template_name='social_news/signup.html', form_class=UserCreationForm, success_url='/'), name = 'signup'),
)
|
p1c2u/openapi-core | openapi_core/validation/request/shortcuts.py | Python | bsd-3-clause | 1,401 | 0 | """OpenAPI core validation request shortcuts module"""
fro | m functools import partial
from openapi_core.validation.request.validators import RequestBodyValidator
from openapi | _core.validation.request.validators import (
RequestParametersValidator,
)
from openapi_core.validation.request.validators import RequestSecurityValidator
from openapi_core.validation.request.validators import RequestValidator
def validate_request(validator, request):
result = validator.validate(request)
result.raise_for_errors()
return result
def spec_validate_request(
spec,
request,
request_factory=None,
validator_class=RequestValidator,
result_attribute=None,
):
if request_factory is not None:
request = request_factory(request)
validator = validator_class(spec)
result = validator.validate(request)
result.raise_for_errors()
if result_attribute is None:
return result
return getattr(result, result_attribute)
spec_validate_parameters = partial(
spec_validate_request,
validator_class=RequestParametersValidator,
result_attribute="parameters",
)
spec_validate_body = partial(
spec_validate_request,
validator_class=RequestBodyValidator,
result_attribute="body",
)
spec_validate_security = partial(
spec_validate_request,
validator_class=RequestSecurityValidator,
result_attribute="security",
)
|
vincentchevrier/dataquick | dataquick/plugins/visualizations/ui/psd.py | Python | mit | 1,775 | 0.002817 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Coding\Python\PythonPackageLinks\dataquick\plugins\visualizations\ui\psd.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes mad | e in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PSD(object):
def setupUi(self, PSD):
PSD.setObjectName("PSD")
PSD.resize(1000, 561)
self.verticalLayout = QtWidgets.QVBoxLayout(PSD)
self.verticalLayout.setContentsMargins(3, 3, 3, 3)
self.verticalLayout.setObjectName("verticalLayout")
self.splitter = QtWidgets.QSplitter(PSD)
self.splitter.setOrientat | ion(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_left = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_left.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_left.setObjectName("verticalLayout_left")
self.listView_datasets = DQFListView(self.layoutWidget)
self.listView_datasets.setObjectName("listView_datasets")
self.verticalLayout_left.addWidget(self.listView_datasets)
self.plotWidget = PlotWidget(self.splitter)
self.plotWidget.setObjectName("plotWidget")
self.verticalLayout.addWidget(self.splitter)
self.retranslateUi(PSD)
QtCore.QMetaObject.connectSlotsByName(PSD)
def retranslateUi(self, PSD):
_translate = QtCore.QCoreApplication.translate
PSD.setWindowTitle(_translate("PSD", "Particle Size Distribution"))
from dataquick.qt.plotlist import DQFListView
from dataquick.qt.plotwidget import PlotWidget
|
sjzabel/snail | tests/test-vlq.py | Python | bsd-3-clause | 271 | 0.00369 | import unittest
from unittest.mock import MagicMock
import io
from snail import vlq
clas | s TestVlq(unittest.TestCase):
def setup(self):
pass
def teardown(self):
pass
def test_re | ad(self):
pass
def test_write(self):
pass
|
yungyuc/solvcon | examples/misc/elas3d/elastic.py | Python | bsd-3-clause | 26,514 | 0.01041 | # -*- coding: UTF-8 -*-
#
# Copyright (C) 2010-2011 Yung-Yu Chen <yyc@solvcon.net>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from solvcon.gendata import SingleAssignDict, AttributeDict
from solvcon.anchor import Anchor
from solvcon.hook import BlockHook
#from .cese import CeseSolver
#from .cese import CeseCase
#from .cese import CeseBC
from solvcon.kerpak.cese import CeseSolver, CeseCase, CeseBC
def getcdll(libname):
"""
Load shared objects at the default location.
@param libname: main basename of library without sc_ prefix.
@type libname: str
@return: ctypes library.
@rtype: ctypes.CDLL
"""
from solvcon.dependency import loadcdll
return loadcdll('.', 'sc_'+libname)
###############################################################################
# Metadata for materials.
###############################################################################
class MtrlTypeRegistry(SingleAssignDict, AttributeDict):
"""
BC type registry class, and its instance holds BC type classes, which can
be indexed by BC type name and BC type number.
In current design, there should exist only one registry singleton in
package.
BC classes in registry should not be altered, in any circumstances.
"""
def register(self, bctype):
name = bctype.__name__
self[name] = bctype
return bctype
mltregy = MtrlTypeRegistry() # registry singleton.
class MaterialMeta(type):
"""
Meta class for material class.
"""
def __new__(cls, name, bases, namespace):
newcls = super(MaterialMeta, cls).__new__(cls, name, bases, namespace)
# register.
mltregy.register(newcls)
return newcls
###############################################################################
| # Solver | .
###############################################################################
class ElasticSolver(CeseSolver):
"""
Basic elastic solver.
@ivar cfldt: the time_increment for CFL calculation at boundcond.
@itype cfldt: float
@ivar cflmax: the maximum CFL number.
@itype cflmax: float
@ivar mtrldict: map from names to material objects.
@itype mtrldict: dict
@ivar mtrllist: list of all material objects.
@itype mtrllist: list
"""
#from solvcon.dependency import getcdll
__clib_elastic = {
2: getcdll('elastic2d'),
3: getcdll('elastic3d'),
}
#del getcdll
@property
def _clib_elastic(self):
return self.__clib_elastic[self.ndim]
@property
def _gdlen_(self):
return 9 * 9 * self.ndim
@property
def _jacofunc_(self):
return self._clib_elastic.calc_jaco
def __init__(self, *args, **kw):
self.cfldt = kw.pop('cfldt', None)
self.cflmax = 0.0
self.mtrldict = kw.pop('mtrldict', {})
self.mtrllist = None
super(ElasticSolver, self).__init__(*args, **kw)
@staticmethod
def _build_mtrllist(grpnames, mtrldict):
"""
Build the material list out of the mapping dict.
@param grpnames: sequence of group names.
@type grpnames: list
@param mtrldict: the map from names to material objects.
@type mtrldict: dict
@return: the list of material object.
@rtype: Material
"""
mtrllist = list()
default_mtuple = mtrldict.get(None, None)
for grpname in grpnames:
try:
mtrl = mtrldict.get(grpname, default_mtuple)
except KeyError, e:
args = e.args[:]
args.append('no material named %s in mtrldict'%grpname)
e.args = args
raise
mtrllist.append(mtrl)
return mtrllist
def provide(self):
from ctypes import byref, c_int
# build material list.
self.mtrllist = self._build_mtrllist(self.grpnames, self.mtrldict)
for igrp in range(len(self.grpnames)):
mtrl = self.mtrllist[igrp]
jaco = self.grpda[igrp].reshape(self.neq, self.neq, self.ndim)
jaco[:,:,0] = mtrl.jacox
jaco[:,:,1] = mtrl.jacoy
if self.ndim == 3:
jaco[:,:,2] = mtrl.jacoz
# pre-calculate CFL.
self._set_time(self.time, self.cfldt)
self._clib_elastic.calc_cfl(
byref(self.exd), c_int(0), c_int(self.ncell))
self.cflmax = self.cfl.max()
# super method.
super(ElasticSolver, self).provide()
def calccfl(self, worker=None):
self.marchret.setdefault('cfl', [0.0, 0.0, 0, 0])
self.marchret['cfl'][0] = self.cflmax
self.marchret['cfl'][1] = self.cflmax
self.marchret['cfl'][2] = 0
self.marchret['cfl'][3] = 0
return self.marchret
###############################################################################
# Case.
###############################################################################
class ElasticCase(CeseCase):
"""
Case for anisotropic elastic solids.
"""
from solvcon.domain import Domain
defdict = {
'execution.neq': 9,
'solver.solvertype': ElasticSolver,
'solver.domaintype': Domain,
'solver.alpha': 0,
'solver.cfldt': None,
'solver.mtrldict': dict,
}
del Domain
def make_solver_keywords(self):
kw = super(ElasticCase, self).make_solver_keywords()
# setup delta t for CFL calculation.
cfldt = self.solver.cfldt
cfldt = self.execution.time_increment if cfldt is None else cfldt
kw['cfldt'] = cfldt
# setup material mapper.
kw['mtrldict'] = self.solver.mtrldict
return kw
###############################################################################
# Boundary conditions.
###############################################################################
class ElasticBC(CeseBC):
"""
Basic BC class for elastic problems.
"""
typn = -10200
#from solvcon.dependency import getcdll
__clib_elasticb = {
2: getcdll('elasticb2d'),
3: getcdll('elasticb3d'),
}
#del getcdll
@property
def _clib_elasticb(self):
return self.__clib_elasticb[self.svr.ndim]
class ElasticTraction(ElasticBC):
typn = 10201
vnames = [
'bfcsys', 'tau1', 'tau2', 'tau3', 'freq', 'phase',
]
vdefaults = {
'bfcsys': 0.0,
'tau1': 0.0, 'tau2': 0.0, 'tau3': 0.0, 'freq': 0.0, 'phase': 0.0,
}
_ghostgeom_ = 'compress'
def soln(self):
from solvcon.dependency import intptr
from ctypes import byref, c_int
self._clib_boundcond.bound_traction_soln(
byref(self.svr.exd),
c_int(self.facn.shape[0]),
self.facn.ctypes.data_as(intptr),
c_int(self.value.shape[1]),
self.value.ctypes.data_as(self.fpptr),
)
def dsoln(self):
from solvcon.dependency import intptr
from ctypes import byref, c_int
self._clib_boundcond.bound_traction_dsoln(
byref(self.svr.exd),
c_int(self.facn.shape[0]),
self.facn.ctypes.data_as(intptr),
)
class ElasticTractionFree(ElasticBC):
typn = 10202
_ghostgeom_ = 'mirror'
def soln(self):
from solvcon.dependency import intptr
from ctypes import byref, c_int
self._clib_boundcond.bound_traction_free_soln(
byref(self.svr.exd),
c_int(self.facn.shape[0]),
self.facn.ctypes.dat |
tipsybear/ormbad | ormbad/version.py | Python | apache-2.0 | 1,161 | 0.000861 | # or | mbad.version
# Helper module for ORMBad version information
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Aug 13 12:38:42 2015 -0400
#
# Copyright (C) 2015 Tipsy Bear Studios
# For license information, see LICENSE.txt
#
# ID: version.py [] benjamin@bengfort.com $
"""
Helper module for ORMBad version information.
"""
##########################################################################
## Versioning
######################################################################### | #
__version_info__ = {
'major': 0,
'minor': 1,
'micro': 0,
'releaselevel': 'final',
'serial': 0,
}
def get_version(short=False):
"""
Returns the version from the version info.
"""
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0],
__version_info__['serial']))
return ''.join(vers)
|
ajhager/copycat | lib/pyglet/window/__init__.py | Python | gpl-2.0 | 65,226 | 0.000767 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# de | rived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICU | LAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Windowing and user-interface events.
This module allows applications to create and display windows with an
OpenGL context. Windows can be created with a variety of border styles
or set fullscreen.
You can register event handlers for keyboard, mouse and window events.
For games and kiosks you can also restrict the input to your windows,
for example disabling users from switching away from the application
with certain key combinations or capturing and hiding the mouse.
Getting started
---------------
Call the Window constructor to create a new window::
from pyglet.window import Window
win = Window(width=640, height=480)
Attach your own event handlers::
@win.event
def on_key_press(symbol, modifiers):
# ... handle this event ...
Place drawing code for the window within the `Window.on_draw` event handler::
@win.event
def on_draw():
# ... drawing code ...
Call `pyglet.app.run` to enter the main event loop (by default, this
returns when all open windows are closed)::
from pyglet import app
app.run()
Creating a game window
----------------------
Use `Window.set_exclusive_mouse` to hide the mouse cursor and receive relative
mouse movement events. Specify ``fullscreen=True`` as a keyword argument to
the `Window` constructor to render to the entire screen rather than opening a
window::
win = Window(fullscreen=True)
win.set_exclusive_mouse()
Working with multiple screens
-----------------------------
By default, fullscreen windows are opened on the primary display (typically
set by the user in their operating system settings). You can retrieve a list
of attached screens and select one manually if you prefer. This is useful for
opening a fullscreen window on each screen::
display = window.get_platform().get_default_display()
screens = display.get_screens()
windows = []
for screen in screens:
windows.append(window.Window(fullscreen=True, screen=screen))
Specifying a screen has no effect if the window is not fullscreen.
Specifying the OpenGL context properties
----------------------------------------
Each window has its own context which is created when the window is created.
You can specify the properties of the context before it is created
by creating a "template" configuration::
from pyglet import gl
# Create template config
config = gl.Config()
config.stencil_size = 8
config.aux_buffers = 4
# Create a window using this config
win = window.Window(config=config)
To determine if a given configuration is supported, query the screen (see
above, "Working with multiple screens")::
configs = screen.get_matching_configs(config)
if not configs:
# ... config is not supported
else:
win = window.Window(config=configs[0])
"""
from __future__ import division
from builtins import object
from future.utils import with_metaclass
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import pyglet
from pyglet import gl
from pyglet.event import EventDispatcher
import pyglet.window.key
import pyglet.window.event
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
class WindowException(Exception):
"""The root exception for all window-related errors."""
pass
class NoSuchDisplayException(WindowException):
"""An exception indicating the requested display is not available."""
pass
class NoSuchConfigException(WindowException):
"""An exception indicating the requested configuration is not
available."""
pass
class NoSuchScreenModeException(WindowException):
"""An exception indicating the requested screen resolution could not be
met."""
pass
class MouseCursorException(WindowException):
"""The root exception for all mouse cursor-related errors."""
pass
class MouseCursor(object):
"""An abstract mouse cursor."""
#: Indicates if the cursor is drawn using OpenGL. This is True
#: for all mouse cursors except system cursors.
drawable = True
def draw(self, x, y):
"""Abstract render method.
The cursor should be drawn with the "hot" spot at the given
coordinates. The projection is set to the pyglet default (i.e.,
orthographic in window-space), however no other aspects of the
state can be assumed.
:Parameters:
`x` : int
X coordinate of the mouse pointer's hot spot.
`y` : int
Y coordinate of the mouse pointer's hot spot.
"""
raise NotImplementedError('abstract')
class DefaultMouseCursor(MouseCursor):
"""The default mouse cursor used by the operating system."""
drawable = False
class ImageMouseCursor(MouseCursor):
"""A user-defined mouse cursor created from an image.
Use this class to create your own mouse cursors and assign them
to windows. There are no constraints on the image size or format.
"""
drawable = True
def __init__(self, image, hot_x=0, hot_y=0):
"""Create a mouse cursor from an image.
:Parameters:
`image` : `pyglet.image.AbstractImage`
Image to use for the mouse cursor. It must have a
valid ``texture`` attribute.
`hot_x` : int
X coordinate of the "hot" spot in the image relative to the
image's anchor.
`hot_y` : int
Y coordinate of the "hot" spot in the image, relative to the
image's anchor.
"""
self.texture = image.get_texture()
self.hot_x = hot_x
self.hot_y = hot_y
def draw(self, x, y):
gl.glPushAttrib(gl.GL_ENABLE_BIT | gl.GL_CURRENT_BIT)
gl.glColor4f(1, 1, 1, 1)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
self.texture.blit(x - self.hot_x, y - self.hot_y, 0)
gl.glPopAttrib()
def _PlatformEventHandler(data):
"""Decorator for platform event handlers.
Apply giving the platform-specific data needed by the window to associate
the method with an event. See platform-specific subclasses of this
decorator for examples.
The following attributes are set on the function, which is returned
otherwise unchanged:
_platform_event
True
_platform_event_data
List of data applied to the function (permitting multiple decorators
on the |
gofore/aws-emr | src/streaming-programs/02-xml-parse-test_map.py | Python | mit | 765 | 0.005229 | #!/usr/bin/python
import sys
import xml.etree.ElementTree as ET
# XML parsing test
# See https://hadoop.apache.org/docs/current/api/org/apache/hadoop/mapreduce/lib/aggregate/package-tree.html
def m | ain(argv):
root = ET.parse(sys.stdin).getroot()
period_start = root.attrib.get('periodstart')
for road_link in root.iter('{http://FTT.arstraffic.com/schemas/IndividualTT/}link'):
road_link_id = road_link.attrib.get('id')
road_link_times = [int(car.attrib.get('t | t')) for car in road_link]
number_of_cars = len(road_link_times)
average_travel_time = sum(road_link_times)/number_of_cars
print "{0}\t{1} {2}".format(road_link_id, average_travel_time, number_of_cars)
if __name__ == "__main__":
main(sys.argv)
|
appleseedhq/cortex | python/IECoreMaya/FileSequenceParameterUI.py | Python | bsd-3-clause | 3,597 | 0.033083 | ################################# | #########################################
#
# Copyright (c) 2010, Image En | gine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os.path
import IECore
import IECoreMaya
class FileSequenceParameterUI( IECoreMaya.PathParameterUI ) :
def __init__( self, node, parameter, **kw ):
IECoreMaya.PathParameterUI.__init__( self, node, parameter, **kw )
def _fileDialog( self ) :
tools = FileSequenceParameterUI.FileSequenceFilter( self.parameter.extensions )
IECoreMaya.PathParameterUI._fileDialog( self,
filter = tools.filter,
validate = tools.validate,
)
class FileSequenceFilter :
def __init__( self, extensions=None ) :
if extensions:
self.__extensions = IECore.StringVectorData( extensions )
else:
self.__extensions = IECore.StringVectorData()
def filter( self, path, items ) :
fsOp = IECore.SequenceLsOp()
oldItems = list( items )
del items[:]
for i in oldItems:
if os.path.isdir( i["path"] ) :
items.append( i )
sequences = fsOp(
dir=path,
type="files",
resultType="stringVector",
format="<PREFIX><#PADDING><SUFFIX> <FRAMES>",
extensions=self.__extensions,
)
for s in sequences :
firstFrame = IECore.FileSequence( s ).fileNames()[0]
stat = os.stat( firstFrame )
seqItem = {
"path" : s,
"name" : s.replace( "%s/" % path, "" ),
"mode" : stat[0],
"uid" : stat[4],
"gid" : stat[5],
"size" : stat[6],
"atime" : stat[7],
"mtime" : stat[8],
"ctime" : stat[9],
}
items.append( seqItem )
# FileExtensionFilter will get confused by the extra info on
# the end of the sequence string.
def validate( self, path, items ):
if not items:
return False
for i in items:
if os.path.isdir( "%s/%s" % ( path, i["name"] ) ) :
return False
return True
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.FileSequenceParameter, FileSequenceParameterUI )
|
orchidinfosys/odoo | addons/payment/models/res_config.py | Python | gpl-3.0 | 1,197 | 0 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class AccountPaymentConfig(osv.TransientModel):
_inherit = 'account.config.settings'
_columns = {
'module_payment_transfer': fields.boolean(
'Wire Transfer',
help='-This installs the module payment_transfer.'),
'module_payment_paypal': fields.boolean(
'Paypal',
help='-This installs the module payment_paypal.'),
'module_payment_ogone': fields.boolean(
'Ogone',
help='-This installs the module payment_ogone.'),
'module_payment_adyen': fields.boolean(
'Adyen',
help='-This installs the module payment_adyen.' | ),
'module_payment_buckaroo': fields.boolean(
| 'Buckaroo',
help='-This installs the module payment_buckaroo.'),
'module_payment_authorize': fields.boolean(
'Authorize.Net',
help='-This installs the module payment_authorize.'),
'module_payment_sips': fields.boolean(
'Sips',
help='-This installs the module payment_sips.'),
}
_defaults = {
'module_payment_transfer': True
}
|
laudaa/bitcoin | test/functional/fundrawtransaction.py | Python | mit | 32,075 | 0.009602 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin' | ][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundr | awtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'] |
racker/python-raxcli | raxcli/concurrency/__init__.py | Python | apache-2.0 | 1,568 | 0 | # Copyright 2013 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY | KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'get_pool',
'run_function',
'join_pool'
]
import os
import sys
tr | y:
import gevent
gevent
gevent_available = True
except ImportError:
gevent_available = False
DEFAULT_BACKEND = 'noop'
BACKEND = DEFAULT_BACKEND
USE_GEVENT = os.getenv('RAXCLI_USE_GEVENT')
if USE_GEVENT and gevent_available:
BACKEND = 'gevent'
module_name = 'raxcli.concurrency.backends.%s_backend' % (BACKEND)
current_module = sys.modules[__name__]
backend_module = __import__(module_name,
fromlist=['raxcli.concurrency.backends'])
for key in __all__:
func = getattr(backend_module, key)
setattr(current_module, key, func)
backend_initialize = getattr(backend_module, 'initialize')
backend_initialize()
|
magfest/ubersystem | uber/site_sections/schedule.py | Python | agpl-3.0 | 16,602 | 0.003253 | import json
import ics
from collections import defaultdict
from datetime import datetime, time, timedelta
from time import mktime
import cherrypy
from pockets import listify
from sqlalchemy.orm import joinedload
from uber.config import c
from uber.decorators import ajax, all_renderable, cached, csrf_protected, csv_file, render, schedule_view
from uber.errors import HTTPRedirect
from uber.models import AdminAccount, AssignedPanelist, Attendee, Event, PanelApplication
from uber.utils import check, localized_now, normalize_newlines
def get_schedule_data(session, message):
schedule = defaultdict(lambda: defaultdict(list))
for event in session.query(Event).all():
schedule[event.start_time_local][event.location].append(event)
for i in range(1, event.duration):
half_hour = event.start_time_local + timedelta(minutes=30 * i)
schedule[half_hour][event.location].append(c.EVENT_BOOKED)
max_simul = {}
for id, name in c.EVENT_LOCATION_OPTS:
max_events = 1
for i in range(c.PANEL_SCHEDULE_LENGTH):
half_hour = c.EPOCH + timedelta(minutes=30 * i)
max_events = max(max_events, len(schedule[half_hour][id]))
max_simul[id] = max_events
for half_hour in schedule:
for location in schedule[half_hour]:
for event in schedule[half_hour][location]:
if isinstance(event, Event):
simul = max(len(schedule[half_hour][event.location]) for half_hour in event.half_hours)
event.colspan = 1 if simul > 1 else max_simul[event.location]
for i in range(1, event.duration):
schedule[half_hour + timedelta(minutes=30 * i)][event.location].remove(c.EVENT_BOOKED)
schedule[half_hour + timedelta(minutes=30 * i)][event.location].append(event.colspan)
for half_hour in schedule:
for id, name in c.EVENT_LOCATION_OPTS:
span_sum = sum(getattr(e, 'colspan', e) for e in schedule[half_hour][id])
for i in range(max_simul[id] - span_sum):
schedule[half_hour][id].append(c.EVENT_OPEN)
schedule[half_hour] = sorted(
schedule[half_hour].items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))
max_simul = [(id, c.EVENT_LOCATIONS[id], colspan) for id, colspan in max_simul.items()]
return {
'message': message,
'schedule': sorted(schedule.items()),
'max_simul': sorted(max_simul, key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))
}
@all_renderable()
class Root:
@cached
@schedule_view
def index(self, session, message=''):
if c.ALT_SCHEDULE_URL:
raise HTTPRedirect(c.ALT_SCHEDULE_URL)
else:
# external view attendees can look at with no admin menus/etc
| # we cache this view because it takes a while to generate
return get_schedule_data(session, message)
@schedule_view
@csv_file
def time_ordered(self, out, session):
for event in session.query(Event).order_by('start_time', 'duration', 'location').all():
out.wri | terow([event.timespan(30), event.name, event.location_label])
@schedule_view
def xml(self, session):
cherrypy.response.headers['Content-type'] = 'text/xml'
schedule = defaultdict(list)
for event in session.query(Event).order_by('start_time').all():
schedule[event.location_label].append(event)
return render('schedule/schedule.xml', {
'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0].location))
})
@schedule_view
def schedule_tsv(self, session):
cherrypy.response.headers['Content-Type'] = 'text/tsv'
cherrypy.response.headers['Content-Disposition'] = 'attachment;filename=Schedule-{}.tsv'.format(
int(localized_now().timestamp()))
schedule = defaultdict(list)
for event in session.query(Event).order_by('start_time').all():
schedule[event.location_label].append(dict(event.to_dict(), **{
'date': event.start_time_local.strftime('%m/%d/%Y'),
'start_time': event.start_time_local.strftime('%I:%M:%S %p'),
'end_time': (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),
'description': normalize_newlines(event.description).replace('\n', ' ')
}))
return render('schedule/schedule.tsv', {
'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0]['location']))
})
def ical(self, session, **params):
icalendar = ics.Calendar()
if 'locations' not in params or not params['locations']:
locations = [id for id, name in c.EVENT_LOCATION_OPTS]
calname = "full"
else:
locations = json.loads(params['locations'])
if len(locations) > 3:
calname = "partial"
else:
calname = "_".join([name for id, name in c.EVENT_LOCATION_OPTS
if str(id) in locations])
calname = '{}_{}_schedule'.format(c.EVENT_NAME, calname).lower().replace(' ', '_')
for location in locations:
for event in session.query(Event)\
.filter_by(location=int(location))\
.order_by('start_time').all():
icalendar.events.add(ics.Event(
name=event.name,
begin=event.start_time,
end=(event.start_time + timedelta(minutes=event.minutes)),
description=normalize_newlines(event.description),
created=event.created.when,
location=event.location_label))
cherrypy.response.headers['Content-Type'] = \
'text/calendar; charset=utf-8'
cherrypy.response.headers['Content-Disposition'] = \
'attachment; filename="{}.ics"'.format(calname)
return icalendar
if not c.HIDE_SCHEDULE:
ical.restricted = False
@csv_file
def csv(self, out, session):
out.writerow(['Session Title', 'Date', 'Time Start', 'Time End', 'Room/Location',
'Schedule Track (Optional)', 'Description (Optional)', 'Allow Checkin (Optional)',
'Checkin Begin (Optional)', 'Limit Spaces? (Optional)', 'Allow Waitlist (Optional)'])
rows = []
for event in session.query(Event).order_by('start_time').all():
rows.append([
event.name,
event.start_time_local.strftime('%m/%d/%Y'),
event.start_time_local.strftime('%I:%M:%S %p'),
(event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),
event.location_label,
'',
normalize_newlines(event.description).replace('\n', ' '),
'', '', '', ''
])
for r in sorted(rows, key=lambda tup: tup[4]):
out.writerow(r)
@csv_file
def panels(self, out, session):
out.writerow(['Panel', 'Time', 'Duration', 'Room', 'Description', 'Panelists'])
for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label]):
if 'Panel' in event.location_label or 'Autograph' in event.location_label:
panelist_names = ' / '.join(ap.attendee.full_name for ap in sorted(
event.assigned_panelists, key=lambda ap: ap.attendee.full_name))
out.writerow([
event.name,
event.start_time_local.strftime('%I%p %a').lstrip('0'),
'{} minutes'.format(event.minutes),
event.location_label,
event.description,
panelist_names])
@schedule_view
def panels_json(self, session):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps([
{
'name': event.name,
|
cleberzavadniak/eolo-app-db | eolo_db/__main__.py | Python | gpl-2.0 | 934 | 0 | #!env python
import optparse
import logging
from . import wamp
from . import db
from . import rpcs
logging.basicConfig(level=logging.INFO)
# Options parsing:
parser = optparse.OptionParser()
parser.add_option('-W', '--router',
help="URL to WAMP router",
default='ws://localhost:1080/ws')
parser.add_option('-R', '--realm',
help="Realm of the WAMP router to co | nnect to",
default='eolo')
# TODO: add options related to the database backend.
(opts, args) = parser.parse_args()
# The Collections Manager:
db_manager = db.DatabaseManager()
# The RPCs Manager:
rpc_manager = rpcs.RPCManager(db_manager)
# Starts a WAMP session:
wamp_session = wamp.WAMP_Session(
r | pc_manager,
'database', # Client type
'eolo.db.info', # Info topic
opts.realm # Realm name
)
wamp_session.start(opts.router)
# Starts the main loop:
wamp_session.run_loop()
|
funbaker/astropy | astropy/utils/iers/iers.py | Python | bsd-3-clause | 30,484 | 0.001083 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
from warnings import warn
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import numpy as np
from ... import config as _config
from ... import units | as u
from ...table import Table, QTable
from ...utils.data import get_pkg_data_filename, clear_download_cache
from ... import utils
from ...utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
| 'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'http://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
conf = Conf()
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or Time
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
|
yujikato/DIRAC | src/DIRAC/TransformationSystem/Agent/MCExtensionAgent.py | Python | gpl-3.0 | 5,092 | 0.008445 | """ Agent to extend the number of tasks given the Transformation definition
The following options can be set for the MCExtensionAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN MCExtensionAgent
:end-before: ##END
:dedent: 2
:caption: MCExtensionAgent options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/MCExtensionAgent'
class MCExtensionAgent(AgentModule):
def __init__(self, *args, **kwargs):
''' c'tor
'''
AgentModule.__init__(self, *args, **kwargs)
self.transClient = TransformationClient()
agentTSTypes = self.am_getOption('TransformationTypes', [])
if agentTSTypes:
self.transformationTypes = sorted(agentTSTypes)
else:
self.transformationTypes = sorted(Operations().getValue('Transformations/ExtendableTransfTypes',
['MCSimulation', 'Simulation']))
self.maxIterationTasks = self.am_getOption('TasksPerIteration', 50)
self.maxFailRate = self.am_getOption('MaxFailureRate', 30)
self.maxWaitingJobs = self.am_getOption('MaxWaitingJobs', 1000)
#############################################################################
def initialize(self):
'''Sets defaults
'''
gLogger.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
gLogger.info("Will create a maximum of %s tasks per iteration" % self.maxIterationTasks)
gLogger.info("Will not submit tasks for transformations with failure rate greater than %s%%" % (self.maxFailRate))
gLogger.info("Will not submit tasks for transformations with more than %d waiting jobs" % self.maxWaitingJobs)
return S_OK()
#############################################################################
def execute(self):
''' The MCExtensionAgent execution method.
'''
self.enableFlag = self.am_getOption('EnableFlag', 'True')
if not self.enableFlag == 'True':
self.log.info('MCExtensionAgent is disabled by configuration option EnableFlag')
return S_OK('Disabled via CS flag')
# Obtain the transformations in Cleaning status and remove any mention of the jobs/files
res = self.transClient.getTransformations({'Status': 'Active', 'Type': self.transformationTypes})
if res['OK']:
for transDict in res['Value']:
transID = transDict['TransformationID']
maxTasks = transDict['MaxNumberOfTasks']
self.extendTransformation(transID, maxTasks)
return S_OK()
def extendTransformation(self, transID, maxTasks):
gLogger.info("Considering extension of transformation %d" % transID)
# Get the current count of tasks submitted for this transformation
res = self.transClient.getTransformationTaskStats(transID)
if not res['OK']:
if res['Message'] != 'No records found':
gLogger.error("Failed to get task statistics", "%s %s" % (transID, res['Message']))
return res
else:
statusDict = {}
else:
statusDict = res['Value']
gLogger.verbose("Current task count for transformation %d" % transID)
for status in sorted(statusDict):
statusCount = statusDict[status]
gLogger.verbose("%s : %s" % (status.ljust(20), str(statusCount).rjust(8))) | # Determine the number of tasks to be created
numberOfTasks = self._calculateTaskNumber(maxTasks, statusDict)
if not numberOfTasks:
gLogger.info("No tasks required for transformation %d" % transID)
return S_OK()
# Extend the transformation by the determined number of tasks
res = self.transClient.extendTransformation(transID, numberOfTasks)
if not res['OK']:
gLogger.error("Failed to extend transformation", "%s %s" % (transID, res['Message']))
return res
gLogger.info("Successfully extended transformation %d by %d tasks" % (transID, numberOfTasks))
return S_OK()
def _calculateTaskNumber(self, maxTasks, statusDict):
''' Utility function
'''
done = statusDict.get('Done', 0)
failed = statusDict.get('Failed', 0)
waiting = statusDict.get('Waiting', 0)
total = statusDict.get('TotalCreated', 0)
# If the failure rate is higher than acceptable
if (total != 0) and ((100.0 * float(failed) / float(total)) > self.maxFailRate):
return 0
# If we already have enough completed jobs
if done >= maxTasks:
return 0
if waiting > self.maxWaitingJobs:
return 0
numberOfTasks = maxTasks - (total - failed)
if numberOfTasks < 0:
# this happens when people extend the transformation manually instead of increasing MaxNumberOfTasks
return 0
if numberOfTasks > self.maxIterationTasks:
numberOfTasks = self.maxIterationTasks
return numberOfTasks
| |
MattyO/start-cms | reverse.py | Python | gpl-2.0 | 1,062 | 0.001883 | import fnmatch
import os
im | port json
import argparse
import re
parser = argparse.ArgumentParser()
parser.add_argument("template_data")
args = parser.parse_args()
template_data = json.loads(args.template_data)
folder = template_data['app_name']
template_data = json.loads(args.template_data)
for (dirpath, dirnames, filenames) in os.walk('./' + folder, topdown=False):
for dirname in dirnames:
if fnmatch.fnmatch(dirname, template_data['app_name'] ):
os.rename(dirpath+"/"+dirname, dirpath+"/"+"{{app_name}}")
|
for filename in filenames:
if filename.endswith(".pyc"):
continue
new_file_contents = ""
with open(dirpath + "/" + filename, 'r') as f:
for line in f:
line, times = re.subn(template_data['app_name'], '{{app_name}}', line)
new_file_contents += line
with open(dirpath + "/" + filename, 'w') as f:
f.write(new_file_contents)
if fnmatch.fnmatch(folder, template_data['app_name']):
os.rename(folder, "{{app_name}}")
|
Tsiems/mobile-sensing-apps | tornado_bare/mongodb_example.py | Python | mit | 174 | 0.017241 | #!/usr/bin/python
from pymongo import MongoClient
client = MongoClient()
db=client.exampledatabase
collect1 = db.queries
for document in collect1.find():
print docu | ment | |
openprocurement/openprocurement.auctions.dgf | openprocurement/auctions/dgf/views/financial/bid_document.py | Python | apache-2.0 | 623 | 0.001605 | # -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
opres | ource,
)
from openprocurement.auctions.dgf.views.other.bid_document import (
AuctionBidDocumentResource,
)
@opresource(name='dgfFinancialAssets:Auction Bid Documents',
collection_path='/auctions/{auction_id}/bids/{bid_id}/documents',
path='/auctions/{auction_id}/bids/{bid_id}/documents/{document_id}',
auctionsprocurementMethodType="dgfFinancialAssets",
description="Financial auction bidder documents")
c | lass FinancialAuctionBidDocumentResource(AuctionBidDocumentResource):
pass
|
Tjorriemorrie/pokeraide | tui/demos/demo.py | Python | gpl-2.0 | 495 | 0.00202 | from urwid import MainLoop, ExitMainLoop, Text, Filler, | AttrMap
PALETTE = [
('banner', 'black', 'light gray'),
('streak', 'black', 'dark red'),
('bg', 'black', 'dark blue'),
]
def exit_on_q(key):
if key == 'Q':
raise ExitMainLoop()
txt = Text(('banner', u"Hello World"), align='center')
map1 = AttrMap(txt, 'streak')
fill = Filler(map1)
map2 = AttrMap(fill, 'bg')
loop = MainLoop(map2, PALETTE, unhandled_input=exit_on_q)
if __name__ == '__ma | in__':
loop.run()
|
tgalal/inception | inception/argparsers/makers/submakers/submaker_updatezip.py | Python | gpl-3.0 | 1,747 | 0.005724 | from .submaker import Submaker
from inception.tools.signapk import SignApk
import shutil
import os
from inception.constants import InceptionConstants
class UpdatezipSubmaker(Submaker):
def make(self, updatePkgDir):
keys_name = self.getValue("keys")
signingKeys = self.getMaker().getConfig().getKeyConfig(keys_name) if keys_name else None
updateBinaryKey, updateBinary = self.getTargetBinary("update-binary")
assert updateBinary, "%s is not set" % updateBinaryKey
if keys_name:
assert signingKeys, "update.keys is '%s' but __config__.host.keys.%s is not set" % (keys_name, keys_name)
signingKeys = signingKeys["private"], signingKeys["public"]
shutil.copy(updateBinary, os.path.join(updatePkgDir, "META-INF/com/google/android/update-binary"))
updateZipPath = updatePkgDir + "/../"
updateZipPath += "update_unsigned" if signingKeys else "update"
shutil.make_archive(updateZipPath, "zip", updatePkgDir)
updateZipPath += ".zip"
if signingKeys:
javaKey, javaPath = self.getHostBinary("java")
signApkKey, signApkPath = self.getHostBinary("signapk")
assert signApkPath, "%s is not set" % signApkKey
assert os.path.exists(signApkPath), " | '%s' from %s does not exist" % (signApkPath, signApkKey)
assert os.path.exists(javaPath), "'%s' from %s does not exist" % (javaPath, javaKey)
s | ignApk = SignApk(javaPath, signApkPath)
targetPath = updatePkgDir + "/../" + InceptionConstants.OUT_NAME_UPDATE
signApk.sign(updateZipPath, targetPath, signingKeys[0], signingKeys[1])
updateZipPath = targetPath
return updateZipPath
|
aglie/meerkat | setup.py | Python | mit | 480 | 0.052083 | from setuptools import setup
setup(
name = 'meerkat',
packages = ['meerkat'],
version = '0.3.7',
description = 'A program for reciprocal space reconstruction',
author = 'Arkadiy Simonov, Dmitry Logvinovich',
author_email = 'aglietto@gmai | l.com',
url = 'https://git | hub.com/aglie/meerkat.git',
# download_url =
keywords = ['crystallography', 'single crystal', 'reciprocal space reconstruction'],
classifiers = [],
install_requires = ['fabio','h5py','numpy'],
)
|
fake-name/ReadableWebProxy | alembic/versions/2019-09-08_c225ea8fbf5e_add_hash_and_parent_hash_columns.py | Python | bsd-3-clause | 4,843 | 0.006607 | """Add hash and parent hash columns
Revision ID: c225ea8fbf5e
Revises: ea8987f915b8
Create Date: 2019-09-08 16:33:03.743328
"""
# revision identifiers, used by Alembic.
revision = 'c225ea8fbf5e'
down_revision = 'ea8987f915b8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("SET statement_timeout TO 144000000;")
# ### commands auto generated by Alembic - please adjust! ###
print("Adding to rss_parser_feed_name_lut 1")
op.add_column('rss_parser_feed_name_lut_version', sa.Column('data_hash', postgresql.UUID(), nullable=True, unique=True))
print("Adding to rss_parser_feed_name_lut 2")
op.add_column('rss_parser_feed_name_lut_version', sa.Column('parent_hash', postgresql.UUID(), nullable=True))
print("Adding to rss_parser_feed_name_lut (foreign key)")
op.create_foreign_key(None, 'rss_parser_feed_name_lut_version', 'rss_parser_feed_name_lut_version', ['parent_hash'], ['data_hash'])
print("Dropping is_delta column on rss_parser_feed_name_lut")
op.drop_column('rss_parser_feed_name_lut_version', 'is_delta')
print("Adding to rss_parser_funcs 1")
op.add_column('rss_parser_funcs_version', sa.Column('data_hash', postgresql.UUID(), nullable=True, unique=True))
print("Adding to rss_parser_funcs 2")
op.add_column('rss_parser_funcs_version', sa.Column('parent_hash', postgresql.UUID(), nullable=True))
print("Adding to rss_parser_funcs (foreign key)")
op.create_foreign_key(None, 'rss_parser_funcs_version', 'rss_parser_funcs_version', ['parent_h | ash'], ['data_hash'])
print("Dropping is_delta column on rss_parser_funcs")
op.drop_column('rss_parser_funcs_version', 'is_delta')
print("Adding to web_pages 1")
op.add_column( | 'web_pages_version', sa.Column('data_hash', postgresql.UUID(), nullable=True, unique=True))
print("Adding to web_pages 2")
op.add_column('web_pages_version', sa.Column('parent_hash', postgresql.UUID(), nullable=True))
print("Adding to web_pages (foreign key)")
op.create_foreign_key(None, 'web_pages_version', 'web_pages_version', ['parent_hash'], ['data_hash'])
print("Dropping is_delta column on web_pages")
op.drop_column('web_pages_version', 'is_delta')
print("Adding to raw_web_pages 1")
op.add_column('raw_web_pages_version', sa.Column('data_hash', postgresql.UUID(), nullable=True, unique=True))
print("Adding to raw_web_pages 2")
op.add_column('raw_web_pages_version', sa.Column('parent_hash', postgresql.UUID(), nullable=True))
print("Adding to raw_web_pages (foreign key)")
op.create_foreign_key(None, 'raw_web_pages_version', 'raw_web_pages_version', ['parent_hash'], ['data_hash'])
print("Dropping is_delta column on raw_web_pages")
op.drop_column('raw_web_pages_version', 'is_delta')
print("Done!")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('web_pages_version', sa.Column('is_delta', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'web_pages_version', type_='foreignkey')
op.drop_column('web_pages_version', 'parent_hash')
op.drop_column('web_pages_version', 'data_hash')
op.add_column('rss_parser_funcs_version', sa.Column('is_delta', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'rss_parser_funcs_version', type_='foreignkey')
op.drop_column('rss_parser_funcs_version', 'parent_hash')
op.drop_column('rss_parser_funcs_version', 'data_hash')
op.add_column('rss_parser_feed_name_lut_version', sa.Column('is_delta', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'rss_parser_feed_name_lut_version', type_='foreignkey')
op.drop_column('rss_parser_feed_name_lut_version', 'parent_hash')
op.drop_column('rss_parser_feed_name_lut_version', 'data_hash')
op.add_column('raw_web_pages_version', sa.Column('is_delta', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'raw_web_pages_version', type_='foreignkey')
op.drop_column('raw_web_pages_version', 'parent_hash')
op.drop_column('raw_web_pages_version', 'data_hash')
# ### end Alembic commands ###
|
blueskycoco/rt-thread | bsp/stm32/stm32f767-fire-challenger/rtconfig.py | Python | gpl-2.0 | 4,079 | 0.0076 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m7'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M7.fp.sp'
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M7'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv5_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M7'
AFLAGS += ' --fpu VFPv5_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' - | -config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT) | , 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
|
arsenovic/galgebra | test/test_lt.py | Python | bsd-3-clause | 402 | 0.012438 | import unittest
from sympy impo | rt symbols
from galgebra.ga import Ga
class TestLt(unittest.TestCase):
# reproduce gh-105
def test_lt_matrix(self):
base = Ga('a b', g=[1,1], coords=symbols('x,y',real=True))
a,b = base.mv()
A = base.lt([a+b,2*a-b])
assert str(A) == 'Lt(a) = a + b\nLt(b) = 2*a - b'
assert str(A | .matrix()) == 'Matrix([[1, 2], [1, -1]])'
|
DinoV/PTVS | Python/Product/PyKinect/PyKinect/winspeech/recognition.py | Python | apache-2.0 | 8,335 | 0.005399 | # PyKinect
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import os
import ctypes
from os import path
_audio_path = path.join(path.dirname(__file__), '..', 'pykinect', 'audio', 'PyKinectAudio.dll')
if not os.path.exists(_audio_path):
_audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug', 'PyKinectAudio.dll')
if not path.exists(_audio_path):
raise Exception('Cannot find PyKinectAudio.dll')
_PYAUDIODLL = ctypes.CDLL(_audio_path)
_CreateRecognizer = _PYAUDIODLL.CreateRecognizer
_CreateRecognizer.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_CreateRecognizer.restype = ctypes.HRESULT
_SetInputFile = _PYAUDIODLL.SetInputFile
_SetInputFile.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputFile.restype = ctypes.HRESULT
_SetInputStream = _PYAUDIODLL.SetInputStream
_SetInputStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputStream.restype = ctypes.HRESULT
_IUnknownRelease = _PYAUDIODLL.IUnknownRelease
_IUnknownRelease.argtypes = [ctypes.c_voidp]
_IUnknownRelease.restype = None
_LoadGrammar = _PYAUDIODLL.LoadGrammar
_LoadGrammar.argtypes = [ctypes.c_wchar_p, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_LoadGrammar.restype = ctypes.HRESULT
_EnumRecognizers = _PYAUDIODLL.EnumRecognizers
_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32))
_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p)
_RecognizeOne = _PYAUDIODLL.RecognizeOne
_RecognizeOne.argtypes = [ctypes.c_voidp, ctypes.c_uint32, _Recognize_Callback, _Recognize_Callback]
_RecognizeOne.restype = ctypes.HRESULT
_RecognizeAsync = _PYAUDIODLL.RecognizeAsync
_RecognizeAsync.argtypes = [ctypes.c_voidp, ctypes.c_uint, _Recognize_Callback, _Recognize_Callback, ctypes.POINTER(ctypes.c_voidp)]
_RecognizeAsync.restype = ctypes.HRESULT
_StopRecognizeAsync = _PYAUDIODLL.StopRecognizeAsync
_StopRecognizeAsync.argtypes = [ctypes.c_voidp]
_StopRecognizeAsync.restype = ctypes.HRESULT
_EnumRecognizersCallback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_voidp)
class Grammar(object):
"""Represents a speech grammar constructed from an XML file"""
def __init__(self, filename):
self.filename = filename
def __del__(self):
#_IUnknownRelease(self._reco_ctx)
_IUnknownRelease(self._grammar)
class RecognizerInfo(object):
def __init__(self, id, description, token):
self.id = id
self.description = description
self._token = token
def __del__(self):
_IUnknownRelease(self._token)
def __repr__(self):
return 'RecognizerInfo(%r, %r, ...)' % (self.id, self.description)
class RecognitionResult(object):
def __init__(self, text, alternates = None):
self.text = text
if alternates:
self.alternates = tuple(Recogniti | onResult(alt) for alt in alternates)
else:
self.alternates = ()
class _event(object):
"""class used for adding/removing/invoking a set of listener func | tions"""
__slots__ = ['handlers']
def __init__(self):
self.handlers = []
def __iadd__(self, other):
self.handlers.append(other)
return self
def __isub__(self, other):
self.handlers.remove(other)
return self
def fire(self, *args):
for handler in self.handlers:
handler(*args)
class RecognitionEventArgs(object):
"""Provides information about speech recognition events."""
def __init__(self, result):
self.result = result
class SpeechRecognitionEngine(object):
"""Provides the means to access and manage an in-process speech recognition engine."""
def __init__(self, recognizer = None):
self.speech_recognized = _event()
self._async_handle = None
if isinstance(recognizer, str):
# TODO: Lookup by ID
pass
elif isinstance(recognizer, RecognizerInfo):
rec = ctypes.c_voidp()
_CreateRecognizer(recognizer._token, ctypes.byref(rec))
self._rec = rec
elif recognizer is None:
rec = ctypes.c_voidp()
_CreateRecognizer(None, ctypes.byref(rec))
self._rec = rec
else:
raise TypeError('Bad type for recognizer: ' + repr(recognizer))
def __del__(self):
# TODO: Need to shut down any listening threads
self.recognize_async_stop()
_IUnknownRelease(self._rec)
def load_grammar(self, grammar):
if isinstance(grammar, str):
grammar_obj = Grammar(grammar)
else:
grammar_obj = grammar
comGrammar = ctypes.c_voidp()
_LoadGrammar(grammar_obj.filename, self._rec, ctypes.byref(comGrammar))
grammar_obj._grammar = comGrammar
return grammar_obj
def set_input_to_audio_file(self, stream):
"""sets the input to a Python file-like object which implements read"""
stream_obj = getattr(stream, '__ISpStreamFormat__', None)
if stream_obj is not None:
# optimization: we can avoid going through Python to do the reading by passing
# the original ISpStreamFormat object through
_SetInputStream(self._rec, stream_obj)
else:
def reader(byteCount, buffer, bytesRead):
bytes = stream.read(byteCount)
ctypes.memmove(buffer, bytes, len(bytes))
bytesRead.contents.value = len(bytes)
return 0
self._reader = _ReadCallback(reader)
_SetInputFile(self._rec, self._reader)
def recognize_sync(self, timeout = 30000):
"""attempts to recognize speech and returns the recognized text.
By default times out after 30 seconds"""
res = []
alts = []
def callback(text):
res.append(text)
def alt_callback(text):
if text is not None:
alts.append(text)
_RecognizeOne(self._rec, timeout, _Recognize_Callback(callback), _Recognize_Callback(alt_callback))
if res:
return RecognitionResult(res[0], alts)
return None
def recognize_async(self, multiple = False):
cur_result = []
def callback(text):
cur_result.append(text)
def alt_callback(text):
if text == None:
# send the event
result = RecognitionResult(cur_result[0], cur_result[1:])
event_args = RecognitionEventArgs(result)
self.speech_recognized.fire(event_args)
del cur_result[:]
else:
cur_result.append(text)
stop_listening_handle = ctypes.c_voidp()
# keep alive our function pointers on ourselves...
self._async_callback = async_callback =_Recognize_Callback(callback)
self._async_alt_callback = async_alt_callback = _Recognize_Callback(alt_callback)
_RecognizeAsync(self._rec, multiple, async_callback, async_alt_callback, ctypes.byref(stop_listening_handle))
self._async_handle = stop_listening_handle
def recognize_async_stop(self):
if self._async_handle is not None:
_StopRecognizeAsync(self._async_handle)
self._async_handle = None
@staticmethod
def installed_recognizers():
ids = []
def callback(id, description |
CrowdStrike/kafka-python | test/fixtures.py | Python | apache-2.0 | 8,620 | 0.001508 | import logging
import os
import os.path
import shutil
import subprocess
import tempfile
from six.moves import urllib
import uuid
from six.moves.urllib.parse import urlparse # pylint: disable-msg=E0611
from test.service import ExternalService, SpawnedService
from test.testutil import get_open_port
class Fixture(object):
kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
scala_version = os.environ.get("SCALA_VERSION", '2.8.0')
project_root = os.environ.get('PROJECT_ROOT', os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
kafka_root = os.environ.get("KAFKA_ROOT", os.path.join(project_root, 'servers', kafka_version, "kafka-bin"))
ivy_root = os.environ.get('IVY_ROOT', os.path.expanduser("~/.ivy2/cache"))
@classmethod
def download_official_distribution(cls,
kafka_version=None,
scala_version=None,
output_dir=None):
if not kafka_version:
kafka_version = cls.kafka_version
if not scala_version:
scala_version = cls.scala_version
if not output_dir:
output_dir = os.path.join(cls.project_root, 'servers', 'dist')
distfile = 'kafka_%s-%s' % (scala_version, kafka_version,)
url_base = 'https://archive.apache.org/dist/kafka/%s/' % (kafka_version,)
output_file = os.path.join(output_dir, distfile + '.tgz')
if os.path.isfile(output_file):
logging.info("Found file already on disk: %s", output_file)
return output_file
# New tarballs are .tgz, older ones are sometimes .tar.gz
try:
url = url_base + distfile + '.tgz'
logging.info("Attempting to download %s", url)
response = urllib.request.urlopen(url)
except urllib.error.HTTPError:
logging.exception("HTTP Error")
url = url_base + distfile + '.tar.gz'
logging.info("Attempting to download %s", url)
response = urllib.request.urlopen(url)
logging.info("Saving distribution file to %s", output_file)
with open(output_file, 'w') as output_file_fd:
output_file_fd.write(response.read())
return output_file
@classmethod
def test_resource(cls, filename):
return os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename)
@classmethod
def kafka_run_class_args(cls, *args):
result = [os.path.join(cls.kafka_root, 'bin', 'kafka-run-class.sh')]
result.extend(args)
return result
@classmethod
def kafka_run_class_env(cls):
env = os.environ.copy()
env['KAFKA_LOG4J_OPTS'] = "-Dlog4j.configuration=file:%s" % cls.test_resource("log4j.properties")
return env
@classmethod
def render_template(cls, source_file, target_file, binding):
with open(source_file, "r") as handle:
template = handle.read()
with open(target_file, "w") as handle:
handle.write(template.format(**binding))
class ZookeeperFixture(Fixture):
@classmethod
def instance(cls):
if "ZOOKEEPER_URI" in os.environ:
parse = urlparse(os.environ["ZOOKEEPER_URI"])
(host, port) = (parse.hostname, parse.port)
fixture = ExternalService(host, port)
else:
(host, port) = ("127.0.0.1", get_open_port())
fixture = cls(host, port)
fixture.open()
return fixture
def __init__(self, host, port):
self.host = host
self.port = port
self.tmp_dir = None
self.child = None
def out(self, message):
logging.info("*** Zookeeper [%s:%d]: %s", self.host, self.port, message)
def open(self):
self.tmp_dir = tempfile.mkdtemp()
self.out("Running local instance...")
logging.info(" host = %s", self.host)
logging.info(" port = %s", self.port)
logging.info(" tmp_dir = %s", self.tmp_dir)
# Generate configs
template = self.test_resource("zookeeper.properties")
properties = os.path.join(self.tmp_dir, "zookeeper.properties")
self.render_template(template, properties, vars(self))
# Configure Zookeeper child process
args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)
# Party!
self.out("Starting...")
self.child.start()
self.child.wait_for(r"Snapshotting")
self.out("Done!")
def close(self):
self.out("Stopping...")
self.child.stop()
self.child = None
self.out("Done!")
shutil.rmtree(self.tmp_dir)
class KafkaFixture(Fixture):
@classmethod
def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None, replicas=1, partitions=2):
if zk_chroot is None:
zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
if "KAFKA_URI" in os.environ:
parse = urlparse(os.environ["KAFKA_URI"])
(host, port) = (parse.hostname, parse.port)
fixture = ExternalService(host, port)
else:
(host, port) = ("127.0.0.1", get_open_port())
fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port, zk_chroot, replicas, partitions)
fixture.open()
return fixture
def __init__(self, host, port, broker_ | id, zk_host, zk_port, zk_chroot, replicas=1, partitions=2):
self.host = host
self.port = port
self.broker_id = broker_id
|
self.zk_host = zk_host
self.zk_port = zk_port
self.zk_chroot = zk_chroot
self.replicas = replicas
self.partitions = partitions
self.tmp_dir = None
self.child = None
self.running = False
def out(self, message):
    """Log *message* tagged with this Kafka fixture's host:port."""
    tag = "*** Kafka [%s:%d]: %s"
    logging.info(tag, self.host, self.port, message)
def open(self):
if self.running:
self.out("Instance already running")
return
self.tmp_dir = tempfile.mkdtemp()
self.out("Running local instance...")
logging.info(" host = %s", self.host)
logging.info(" port = %s", self.port)
logging.info(" broker_id = %s", self.broker_id)
logging.info(" zk_host = %s", self.zk_host)
logging.info(" zk_port = %s", self.zk_port)
logging.info(" zk_chroot = %s", self.zk_chroot)
logging.info(" replicas = %s", self.replicas)
logging.info(" partitions = %s", self.partitions)
logging.info(" tmp_dir = %s", self.tmp_dir)
# Create directories
os.mkdir(os.path.join(self.tmp_dir, "logs"))
os.mkdir(os.path.join(self.tmp_dir, "data"))
# Generate configs
template = self.test_resource("kafka.properties")
properties = os.path.join(self.tmp_dir, "kafka.properties")
self.render_template(template, properties, vars(self))
# Configure Kafka child process
args = self.kafka_run_class_args("kafka.Kafka", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)
# Party!
self.out("Creating Zookeeper chroot node...")
args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain",
"-server", "%s:%d" % (self.zk_host, self.zk_port),
"create",
"/%s" % self.zk_chroot,
"kafka-python")
env = self.kafka_run_class_env()
proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.wait() != 0:
self.out("Failed to create Zookeeper chroot node")
self.out(proc.stdout)
self.out(proc.stderr)
raise RuntimeError("Failed to create Zookeeper chroot node")
self.out("Done!")
self.out("Starting...")
self.c |
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from __future__ import absolute_import
from traits.api import List, Str, Dict, Button, Int, String, Event
from pychron.core.fuzzyfinder import fuzzyfinder
from pychron.envisage.browser.record_views import ProjectRecordView
from pychron.loggable import Loggable
class ProjectManager(Loggable):
    """Browse, fuzzy-filter and edit project records in the DVC database.

    NOTE: this is a Traits class — handler methods are wired up by naming
    convention (``_<trait>_changed``, ``_<button>_fired``), so trait and
    handler names are part of the behavior and are left unchanged.
    """

    # All project records as loaded from the database.
    oitems = List
    # Currently displayed subset of ``oitems`` (after filtering).
    items = List

    # Filter text; fires on <enter> rather than on every keystroke.
    filter_str = Str(enter_set=True, auto_set=False)
    # Record attribute name -> human-readable label for the filter chooser.
    filter_attrs = Dict(
        {
            "name": "Name",
            "unique_id": "UniqueID",
            "principal_investigator": "Principal Investigator",
            "lab_contact": "Lab Contact",
        }
    )
    # Attribute of the record that ``filter_str`` is matched against.
    filter_attr = Str

    scroll_to_row = Int
    project_name = String
    comment = String
    selected = List
    save_button = Button
    refresh = Event

    def activated(self):
        """Load every project from the database into the browser."""
        with self.dvc.session_ctx(use_parent_session=False):
            records = self.dvc.get_projects()
            self.items = self.oitems = [ProjectRecordView(r) for r in records]

    # private ---------------------------------------------------------------

    def _project_name_changed(self, new):
        """Rename the single selected project; mark it dirty when the new
        name differs from the one stored in the database."""
        if not self.selected:
            return
        if len(self.selected) != 1:
            self.warning_dialog("Can only edit the name of one project at a time")
            return
        record = self.selected[0]
        record.name = new
        record.dirty = new != record.db_name
        self.refresh = True

    def _comment_changed(self, new):
        """Apply the comment to all selected records; a record is dirty
        when its comment differs from the database value."""
        if not self.selected:
            return
        for record in self.selected:
            record.comment = new
            record.dirty = new != record.db_comment
        self.refresh = True

    def _save_button_fired(self):
        """Persist every dirty record's name/comment back to the database."""
        self.debug("Apply changes")
        dvc = self.dvc
        with dvc.session_ctx(use_parent_session=False):
            dirty_records = [item for item in self.oitems if item.dirty]
            for item in dirty_records:
                pr = dvc.get_project_by_id(item.unique_id)
                pr.name = item.name
                pr.comment = item.comment
                # Re-sync the view record with what is now persisted.
                item.db_comment = item.comment
                item.db_name = item.name
                item.dirty = False
            # Commit only when something actually changed.
            if dirty_records:
                dvc.commit()

    def _filter(self):
        """Recompute ``items`` from ``oitems`` using the active filter."""
        matched = (
            fuzzyfinder(self.filter_str, self.oitems, self.filter_attr)
            if self.filter_str
            else self.oitems
        )
        self.items = matched
        self.scroll_to_row = len(self.items) - 1

    def _filter_str_changed(self):
        self._filter()

    def _filter_attr_changed(self):
        self._filter()
# ============= EOF =============================================
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.