repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Sinar/popit_ng | popit/migrations/0016_auto_20151001_0131.py | Python | agpl-3.0 | 558 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migration | s
class Migration(migrations.Migration):
dependencies = [
('popit', '0015_auto_20151001_0130'),
]
operations = [
migrations.AlterField(
model_name='contacts',
name='valid_from',
field=models.DateField(null=True),
),
migrations.AlterField(
model_name='contacts',
name='valid_until',
field=models.DateField(null=True),
| ),
]
|
bsipocz/astropy | astropy/coordinates/builtin_frames/skyoffset.py | Python | bsd-3-clause | 8,538 | 0.002342 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.compat import namedtuple_asdict
from astropy.coordinates import representation as r
from astropy.coordinates.transformations import DynamicMatrixTransform, FunctionTransform
from astropy.coordinates.baseframe import (frame_transform_graph, RepresentationMapping,
BaseCoordinateFrame)
from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product, matrix_transpose)
_skyoffset_cache = {}
def m | ake_skyoffset_cls(framecls):
"""
Create a new class that is the sky offset frame for a specific class of
origin frame. If such a class has already been created for this frame, the
same class will be returned.
The new class will always have component names for spherical coordinates of
``lon``/``lat``.
Parameters
----------
framecls : coordinate | frame class (i.e., subclass of `~astropy.coordinates.BaseCoordinateFrame`)
The class to create the SkyOffsetFrame of.
Returns
-------
skyoffsetframecls : class
The class for the new skyoffset frame.
Notes
-----
This function is necessary because Astropy's frame transformations depend
on connection between specific frame *classes*. So each type of frame
needs its own distinct skyoffset frame class. This function generates
just that class, as well as ensuring that only one example of such a class
actually gets created in any given python session.
"""
if framecls in _skyoffset_cache:
return _skyoffset_cache[framecls]
# the class of a class object is the metaclass
framemeta = framecls.__class__
class SkyOffsetMeta(framemeta):
"""
This metaclass renames the class to be "SkyOffset<framecls>" and also
adjusts the frame specific representation info so that spherical names
are always "lon" and "lat" (instead of e.g. "ra" and "dec").
"""
def __new__(cls, name, bases, members):
# Only 'origin' is needed here, to set the origin frame properly.
members['origin'] = CoordinateAttribute(frame=framecls, default=None)
# This has to be done because FrameMeta will set these attributes
# to the defaults from BaseCoordinateFrame when it creates the base
# SkyOffsetFrame class initially.
members['_default_representation'] = framecls._default_representation
members['_default_differential'] = framecls._default_differential
newname = name[:-5] if name.endswith('Frame') else name
newname += framecls.__name__
return super().__new__(cls, newname, bases, members)
# We need this to handle the intermediate metaclass correctly, otherwise we could
# just subclass SkyOffsetFrame.
_SkyOffsetFramecls = SkyOffsetMeta('SkyOffsetFrame', (SkyOffsetFrame, framecls),
{'__doc__': SkyOffsetFrame.__doc__})
@frame_transform_graph.transform(FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls)
def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
"""Transform between two skyoffset frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
intermediate_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
intermediate_to = intermediate_from.transform_to(to_skyoffset_frame.origin)
return intermediate_to.transform_to(to_skyoffset_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, framecls, _SkyOffsetFramecls)
def reference_to_skyoffset(reference_frame, skyoffset_frame):
"""Convert a reference coordinate to an sky offset frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
origin = skyoffset_frame.origin.spherical
mat1 = rotation_matrix(-skyoffset_frame.rotation, 'x')
mat2 = rotation_matrix(-origin.lat, 'y')
mat3 = rotation_matrix(origin.lon, 'z')
return matrix_product(mat1, mat2, mat3)
@frame_transform_graph.transform(DynamicMatrixTransform, _SkyOffsetFramecls, framecls)
def skyoffset_to_reference(skyoffset_coord, reference_frame):
"""Convert an sky offset frame coordinate to the reference frame"""
# use the forward transform, but just invert it
R = reference_to_skyoffset(reference_frame, skyoffset_coord)
# transpose is the inverse because R is a rotation matrix
return matrix_transpose(R)
_skyoffset_cache[framecls] = _SkyOffsetFramecls
return _SkyOffsetFramecls
class SkyOffsetFrame(BaseCoordinateFrame):
"""
A frame which is relative to some specific position and oriented to match
its frame.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the
``origin`` coordinate, *and* they are oriented in the same manner as the
``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this
object's ``lat`` will be pointed in the direction of Dec, while ``lon``
will point in the direction of RA.
For more on skyoffset frames, see :ref:`astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
origin : `~astropy.coordinates.SkyCoord` or low-level coordinate object.
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location/rotation. It can have a
``distance`` but it will not be used by this ``SkyOffsetFrame``.
rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Notes
-----
``SkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
distinct classes are created on-the-fly for whatever the frame class is
of ``origin``.
"""
rotation = QuantityAttribute(default=0, unit=u.deg)
origin = CoordinateAttribute(default=None, frame=None)
def __new__(cls, *args, **kwargs):
# We don't want to call this method if we've already set up
# an skyoffset frame for this class.
if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs['origin']
except KeyError:
raise TypeError("Can't initialize an SkyOffsetFrame without origin= keyword.")
if hasattr(origin_frame, 'frame'):
origin_frame = origin_frame.frame
newcls = make_skyoffset_cls(origin_frame.__class__)
return newcls.__new__(newcls, *args, **kwargs)
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
def __ |
telefonicaid/fiware-keystone-spassword | keystone_spassword/contrib/spassword/mailer.py | Python | apache-2.0 | 2,991 | 0.002675 | #
# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from keystone import exception
try: from oslo_log import log
except ImportError: from keystone.openstack.common import log
try: from oslo_config import cfg
except ImportError: from oslo.config import cfg
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class SendMail(object):
def send_email(self, to, subject, text):
dest = [to] # must be a list
#
# Prepare actual message
#
mimemsg = MIMEMultipart()
mimemsg['From'] = CONF.spassword.smtp_from
mimemsg['To'] = to
mimemsg['Subject'] = subject
body = text
mimemsg.attach(MIMEText(body, 'plain'))
msg = mimemsg.as_string()
#
# Send the mail
#
try:
# TODO: server must be initialized by current object
server = smtplib.SMTP(CONF.spassword.smtp_server,
CONF.spassword.smtp_port)
except smtplib.socket.gaierror:
LOG.error('SMTP socket error %s %s' % (
CONF.spassword.smtp_server, CONF.spassword.smtp_port))
return False
# Use tls for smtp if CONF.spassword.smtp_tls is True
if CONF.spassword.smtp_tls:
server.ehlo() |
server.starttls()
# Use auth only if smtp_user and smtp_password not empty
if CONF.spassword.smtp_user and CONF.spassword.smtp_password:
try:
server.login(CONF.spassword.smtp_user,
| CONF.spassword.smtp_password)
except smtplib.SMTPAuthenticationError:
LOG.error('SMTP authentication error %s' % CONF.spassword.smtp_user)
return False
try:
server.sendmail(CONF.spassword.smtp_from, dest, msg)
except Exception, ex: # try to avoid catching Exception unless you have too
LOG.error('SMTP sendmail error %s' % ex)
return False
finally:
server.quit()
LOG.info('email was sent to %s' % dest)
return True
|
GordonsBeard/frinkiac.py | Frinkiac/frinkiac.py | Python | mit | 4,152 | 0.006021 | import base64
import json
import requests
import textwrap
FRINK_URL = 'https://frinkiac.com'
FRINK_API_URL = '{0}/api/search'.format(FRINK_URL)
FRINK_CAPTION_URL = '{0}/api/caption'.format(FRINK_URL)
FRINK_RANDOM_URL = '{0}/api/random'.format(FRINK_URL)
MORB_URL = 'https://morbotron.com'
MORB_API_URL = '{0}/api/search'.format(MORB_URL)
MORB_CAPTION_URL = '{0}/api/caption'.format(MORB_URL)
MORB_RANDOM_URL = '{0}/api/random'.format(MORB_URL)
class Screencap(object):
def __init__(self, values, frink):
self.episode = values['Episode']
self.timestamp = values['Timestamp']
self.id = values['Id']
self.frink = frink
if frink:
self.rich_url = '{0}/caption/{1}/{2}'.format(FRINK_URL, self.episode, self.timestamp)
else:
self.rich_url = '{0}/caption/{1}/{2}'.format(MORB_URL, self.episode, self.timestamp)
def __repr__(self):
return '{1}/{2}'.format(self.id, self.episode, self.timestamp)
def image_url(self, caption = ""):
"""Provides the image for a given episode/timestamp. Pass 'True' for caption"""
SITE_URL = FRINK_URL if self.frink else MORB_URL
try:
ep = self.episode
ts = self.timestamp
except AttributeError:
self._get_details()
ep = self.ep_number
ts = self.timestamp
finally:
if len(caption) > 0:
return self.meme_url(caption = caption)
else:
return '{0}/img/{1}/{2}.jpg'.format(SITE_URL, ep, ts)
def meme_url(self, caption = None):
SITE_URL = FRINK_URL if self.frink else MORB_URL
if caption is None or not caption.strip():
try:
caption = self.caption
except AttributeError:
self._get_details()
caption = self.caption |
else:
if len(caption) > 300:
caption = caption[:300]
caption = self._chop_captions(caption)
return '{0}/meme/{1}/{2}.jpg?b64lines={3}'.format(
SITE_URL,
self.episode,
self.timestamp,
base64.urlsafe_b64encode(bytes(caption, 'utf-8')).decode('ascii'))
def _get_details(self):
CAPTION_URL = FRINK_CAPTION_URL if self.fr | ink else MORB_CAPTION_URL
cap_search = requests.get('{0}?e={1}&t={2}'.format(CAPTION_URL, self.episode, self.timestamp))
data = cap_search.json()
# This controls how many captions you get.
caption = " ".join([subtitle['Content'] for subtitle in data['Subtitles']])
self.caption = self._chop_captions(caption[:300])
self.ep_title = data['Episode']['Title']
self.season = data['Episode']['Season']
self.ep_number = data['Episode']['EpisodeNumber']
self.director = data['Episode']['Director']
self.writer = data['Episode']['Writer']
self.org_air_date = data['Episode']['OriginalAirDate']
self.wiki_link = data['Episode']['WikiLink']
def _chop_captions(self, caption):
return textwrap.fill(caption, 25)
def search(query, frink = True):
"""Returns a list of Screencap objects based on the string provided."""
SITE_URL = FRINK_URL if frink else MORB_URL
if len(query) > 200:
query = query[:200]
try:
gen_search = requests.get('{0}/api/search?q={1}'.format(SITE_URL, query))
except requests.exceptions.ConnectionError:
return []
info = gen_search.json()
search_results = []
for result in info:
search_results.append(Screencap(result, frink))
return search_results
def random(frink = True):
"""Returns a random screencap object"""
RANDOM_URL = FRINK_RANDOM_URL if frink else MORB_RANDOM_URL
try:
random_search = requests.get(RANDOM_URL)
except requests.exceptions.ConnectionError:
return []
info = random_search.json()
random_screen = {'Episode': info['Frame']['Episode'], 'Timestamp' : info['Frame']['Timestamp'], 'Id': info['Frame']['Id']}
random_Screencap = Screencap(random_screen, frink)
return random_Screencap |
nwjs/chromium.src | third_party/protobuf/python/google/protobuf/internal/message_test.py | Python | bsd-3-clause | 109,614 | 0.003431 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import pydoc
import six
import sys
import warnings
try:
# Since python 3
import collections.abc as collections_abc
except ImportError:
# Won't work after python 3.8
import collections as collections_abc
try:
import unittest2 as unittest # PY26
except ImportError:
import unittest
try:
cmp # Python 2
except NameError:
cmp = lambda x, y: (x > y) - (x < y) # Python 3
from google.protobuf import map_proto2_unittest_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import encoder
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import packed_field_test_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import test_proto3_optional_pb2
from google.protobuf.internal import testing_refleaks
from google.protobuf import message
from google.protobuf.internal import _parameterized
UCS2_MAXUNICODE = 65535
if six.PY3:
long = int
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
warnings.simplefilter('error', DeprecationWarning)
@_parameterized.named_parameters(
('_proto2', unittest_pb2),
('_proto3', unittest_proto3_arena_pb2))
@testing_refleaks.TestCase
class MessageTest(unittest.TestCase):
def testBadUtf8String(self, message_module):
if api_implementation.Type() != 'python':
self.skipTest("Skipping testBadUtf8String, currently only the python "
"api implementation raises UnicodeDecodeError when a "
"string field contains bad utf-8.")
bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
with self.assertRaises(UnicodeDecodeError) as context:
message_module.TestAllTypes.FromString(bad_utf8_data)
self.assertIn('TestAllTypes.optional_string', str(context.exception))
def testGoldenMessage(self, message_module):
# Proto3 doesn't have the "default_foo" members or foreign enums,
# and doesn't preserve unknown fields, so for proto3 we use a golden
# message that doesn't have these fields set.
if message_module is unittest_pb2:
golden_data = test_util.GoldenFileData(
'golden_message_oneof_implemented')
else:
golden_data = test_util.GoldenFileData('golden_message_proto3')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self, message_module):
golden_data = test_util.GoldenFileData('golden_packed_fields_message')
golden_message = message_module.TestPackedTypes()
parsed_bytes = golden_message.ParseFromString(golden_data)
all_set = message_module.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEqual(parsed_bytes, len(golden_data))
self.assertEqual(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testParseErrors(self, message_module):
msg = message_module.TestAllTypes()
self.assertRaises(TypeError, msg.FromString, 0)
self.assertRaises(Exception, msg.FromString, '0')
# TODO(jieluo): Fix cpp extension to raise error instead of warning.
# b/27494216
end_tag = encoder.TagBytes(1, 4)
if api_implementation.Type() == 'python':
with self.assertRaises(message.DecodeError) as context:
msg.FromString(end_tag)
self.assertEqual('Unexpected end-group tag.', str(context.exception))
# Field number 0 is illegal.
self.assertRaises(message.DecodeError, msg.FromString, b'\3\4')
def testDeterminismParameters(self, message_module):
# This message is always deterministically serialized, even if determinism
# is disabled, so we can use it to verify that all the determinism
# parameters work correctly.
golden_data = (b'\xe2\x02\nOne string'
b'\xe2\x02\nTwo string'
b'\xe2\x02\nRed string'
b'\xe2\x02\x0bBlue string')
golden_message = message_module.TestAllTypes()
golden_message.repeated_string.extend([
'One string',
'Two string',
'Red string',
'Blue string',
])
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=None))
self.assertEqual(golden_data,
| golden_message.SerializeToString(deterministic=False))
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=True))
class BadArgError(Exception):
pass
class BadArg(object):
def __nonzero__(self):
raise BadArgError()
def __bool__(self):
raise BadArgError()
with self.assertRaises(BadArgError):
golden_message.SerializeToString(deterministic=BadArg())
def testPickleSupport(self, message_modu | le):
golden_data = test_util.GoldenFileData('golden_message')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
|
taotetek/cookbook | pyzmq/helloword/src/tests/server.py | Python | mit | 1,799 | 0.004447 | # -*- coding: utf-8 -*-
# Copyright (c) the Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copie | s or sub | stantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from zmq.eventloop import ioloop
from core.handlers import CommandHandler
loop = ioloop.IOLoop.instance()
class MyCommandHandler(CommandHandler):
def __init__(self, name, end_point):
CommandHandler.__init__(self, name, end_point)
def _on_recv(self, stream, msg):
print('receive peer message {0}'. format(msg[0]))
stream.send('command: ok for message: {0}'.format(msg[0]), copy=False)
def _on_send(self, stream, msg, status):
print('send peer message {0}'. format(msg[0]))
if __name__ == "__main__":
server = 'tcp://127.0.0.1:5555'
handler = MyCommandHandler('MyCommandHandler', server)
handler.start()
loop.start()
|
mattgreen/hython | test/operators/bitwise.py | Python | gpl-3.0 | 213 | 0.004695 | print(~5)
print(18 & 3)
pr | int(83 | 7)
print(22 ^ 5)
print(2 << 4)
print(16 >> 2)
# multip | le
print(~~19)
print(197 & 92 & 345)
print(197 | 92 | 345)
print(197 ^ 92 ^ 345)
print(1 << 2 << 3)
print(256 >> 4 >> 3)
|
citp/BlockSci | blockscipy/setup.py | Python | gpl-3.0 | 2,879 | 0.004863 | import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmak | e_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j4']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVER | SION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='blocksci',
version='0.7.0',
author='Harry Kalodner',
author_email='blocksci@cs.princeton.edu',
description='BlockSci: A high-performance tool for blockchain science and exploration',
long_description='',
ext_modules=[CMakeExtension('blocksci/blocksci')],
packages = find_packages(),
include_package_data = True,
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
install_requires=[
'multiprocess>=0.70.5',
'psutil>=5.4.2',
'pycrypto>=2.6.1',
'pandas>=0.22.0',
'dateparser>=0.6.0',
'requests>=2.19.1'
]
)
|
decebel/dataAtom_alpha | bin/plug/py/sources/weby/WikipediaDataCommand.py | Python | apache-2.0 | 2,608 | 0.036043 |
import os, sys
basePlugPath = os.path.join("..", "..")
sys.path.insert(0, os.path.join(basePlugPath, "api"))
sys.path.insert(0, os.path.join(basePlugPath, "external"))
# TODO - know difference between module import vs package import?
import drawingboard
from pattern.web import Wikipedia
import pprint
pp = pprint.PrettyPrinter(indent=4)
print sys.modules["pattern.web"]
#print sys.modules["DataCommand"]
#pp.pprint(sys.modules)
class WikipediaDataCommand(drawingboard.DataCommand):
def __init__(self, **kwargs):
drawingboard.DataCommand.__init__(self, **kwargs)
print "init called "
#self.drawingboard.DataCommand
self.args = {}
def load_resources(self, **kwargs):
"""sets initial status of loading icon. then loads the icon. then sets various | other things and
as it does this, it will keep calling status message.
"" | "
print "loading "
#self.set_icon("GUI/icons/blogger.png")
# trying to figure out the icon to use based on display name
self.set_display_name("blogger")
self.set_initialized(True)
#def start(self, **kwargs):
def start(self, **kwargs):
"""Configures the command.
- sets the display name of the command
- sets initial status string
- sets a default icon - NO. Default Icon setup should happen well before this stage. Maybe a load api.
- sets is_initialized to return true, once all is well. TODO: Should we check for a net connection?
Note: all these arguments can also be set by callin set_params with key=value pairs.
is_initialized will return true when all the required argum(ents are ready
"""
#self.set_display_name(self, kwargs.get("name", "wikipedia"))
self.args["engine"] = Wikipedia(language="en")
def set_params(self, **kwargs):
pass
def get_params(self, **kwargs):
pass
#def submit_command(self, port, **commandArgs):
def execute(self, **commandArgs):
searchString = commandArgs.get("search", "life of pi") #from:decebel (from:username is also supported)
print("searching for {0}: ").format(searchString)
timeout = commandArgs.get("timeout", 25)
cached = commandArgs.get("cached", False)
engine = self.args["engine"]
return "skip skip"
article = engine.search(searchString, cached=cached, timeout=timeout)
print article.title
for s in article.sections:
print s.title.upper()
print
print s.content
print
return article.title
def main():
wp = WikipediaDataCommand(dummy="dummy")
wp.start(en="english")
res = wp.execute(search="Like of Pi")
#for k in res:
# print "key={0}".format(k)
pp.pprint(res)
if __name__ == '__main__':
main()
|
317070/kaggle-heart | generate_metadata_pkl.py | Python | mit | 2,940 | 0.001701 | import argparse
import glob
import re
import cPickle as pickle
from dicom.sequence import Sequence
from log import print_to_file
from paths import LOGS_PATH, TRAIN_DATA_PATH, TEST_DATA_PATH
def read_slice(path):
return pickle.load(open(path))['data']
def convert_to_number(value):
value = str(value)
try:
if "." in value:
return float(value)
else:
return int(value)
except:
pass
return value
def clean_metadata(metadatadict):
# Do cleaning
keys = sorted(list(metadatadict.keys()))
for key in keys:
value = metadatadict[key]
if key == 'PatientAge':
metadatadict[key] = int(value[:-1])
if key == 'PatientSex':
metadatadict[key] = 1 if value == 'F' else -1
else:
if isinstance(value, Sequence):
#convert to list
value = [i for i in value]
if isinstance(value, (list,)):
metadatadict[key] = [convert_to_number(i) for i in value]
else:
metadatadict[key] = convert_to_number(value)
return metadatadict
def read_metadata(path):
d = pickle.load(open(path))['metadata'][0]
metadata = clean_metadata(d)
return metadata
def get_patient_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + r'/*.pkl'),
key=lambda x: int(re.search(r'/*_(\d+)\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.se | arch(r'/(((4ch)|(2ch)|(sax))_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
patient_data.append({'metadata': metadata,
| 'slice_id': slice_id})
print slice_id
return patient_data, pid
def get_metadata(data_path):
patient_paths = sorted(glob.glob(data_path + '*/study'))
metadata_dict = {}
for p in patient_paths:
patient_data, pid = get_patient_data(p)
print "patient", pid
metadata_dict[pid] = dict()
for pd in patient_data:
metadata_dict[pid][pd['slice_id']] = pd['metadata']
filename = data_path.split('/')[-2] + '_metadata.pkl'
with open(filename, 'w') as f:
pickle.dump(metadata_dict, f)
print 'saved to ', filename
return metadata_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
#required.add_argument('-c', '--config',
# help='configuration to run',
# required=True)
args = parser.parse_args()
data_paths = [TRAIN_DATA_PATH, TEST_DATA_PATH]
log_path = LOGS_PATH + "generate_metadata.log"
with print_to_file(log_path):
for d in data_paths:
get_metadata(d)
print "log saved to '%s'" % log_path
|
chshibo/CosData_Tools | test_SqlReader.py | Python | gpl-3.0 | 316 | 0.018987 | import SqlReader
def main():
reader=SqlReader.SqlReader("SELECT p.objid,p.ra,p.dec,p.r,s.z as redshift FROM galaxy as p join specobj as s on s.bestobjid=p.objid WHERE p.ra BET | WEEN 194.138787 AND 195.548787 AND p.dec BETWEEN 27.259389 AND 28.709389")
reader.dataCo | llect()
if __name__ =="__main__":
main() |
lukemn/python-lzw | setup.py | Python | mit | 2,823 | 0.008147 |
from distutils.core import setup, Command
import unittest
import doctest
from unittest import defaultTestLoader, TextTestRunner
import sys
# BUG. At least we should support 3.0, which means getting
# rid of a bunch of stupid byte-as-string stuff, which is kind
# of awesome.
# Hard requirement: refuse to run on anything but CPython 2.6+ (2.x line).
(major_version, minor_version) = sys.version_info[:2]
if (major_version != 2) or (minor_version < 6):
    raise Exception("LZW currently requires python 2.6")
else:
    import lzw
# Dotted names handed to unittest's loader, plus the epydoc output target.
TEST_MODULE_NAME = "tests.tests"
SLOW_TEST_MODULE_NAME = "tests.slow"
DOC_DIR_NAME = "doc"
MODULES = [ "lzw" ]
class RunTestsCommand(Command):
    """Runs package tests (``setup.py test``).

    ``--runslow`` additionally runs the slower functional suite.
    """

    user_options = [('runslow', None, 'also runs the (fairly slow) functional tests')]

    def initialize_options(self):
        self.runslow = False

    def finalize_options(self):
        pass  # how on earth is this supposed to work?

    def run(self):
        import lzw
        # Run the doctests embedded in the lzw module first.
        doctest.testmod(lzw)
        utests = defaultTestLoader.loadTestsFromName(TEST_MODULE_NAME)
        urunner = TextTestRunner(verbosity=2)
        urunner.run(utests)
        if self.runslow:
            stests = defaultTestLoader.loadTestsFromName(SLOW_TEST_MODULE_NAME)
            srunner = TextTestRunner(verbosity=2)
            srunner.run(stests)
class DocCommand(Command):
    """Generate package documentation using epydoc (``setup.py doc``)."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # epydoc's CLI reads sys.argv directly, so temporarily swap in the
        # arguments it expects and restore the real ones afterwards.
        # (Slightly stupid. Move to sphinx when you can, please.)
        import epydoc.cli
        saved_argv = sys.argv
        sys.argv = ["epydoc", "--output", DOC_DIR_NAME, "--no-private"] + MODULES
        epydoc.cli.cli()
        sys.argv = saved_argv
# Package metadata; version/author/etc. are sourced from the lzw module
# imported above so they live in exactly one place.
setup(name="lzw",
      description="Low Level, pure python lzw compression/decompression library",
      py_modules=MODULES,
      version=lzw.__version__,
      author=lzw.__author__,
      author_email=lzw.__email__,
      url=lzw.__url__,
      license=lzw.__license__,
      platforms='Python 2.6',
      download_url='http://pypi.python.org/packages/source/l/lzw/lzw-0.01.tar.gz',
      classifiers = [
        "Development Status :: 2 - Pre-Alpha",
        "Programming Language :: Python",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
        "Topic :: System :: Archiving",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        ],
      packages = ['lzw'],
      long_description = """
A pure python module for compressing and decompressing streams of
data, built around iterators. Requires python 2.6
""",
      # Hook the custom commands defined above into distutils.
      cmdclass = {
        'test' : RunTestsCommand,
        'doc' : DocCommand,
        },
      )
|
uclmr/inferbeddings | inferbeddings/evaluation/metrics.py | Python | mit | 5,197 | 0.003848 | # -*- coding: utf-8 -*-
import abc
import numpy as np
from sklearn import metrics
from inferbeddings.evaluation.util import apk
import logging
logger = logging.getLogger(__name__)
class BaseRanker(metaclass=abc.ABCMeta):
    # Interface for evaluation metrics: subclasses score a set of positive
    # (and optionally negative) triples and return the metric's value(s).
    def __call__(self, pos_triples, neg_triples=None):
        raise NotImplementedError
class MeanAveragePrecision(BaseRanker):
    """Macro-averaged average precision over predicates.

    For each predicate, the positive and negative triples sharing that
    predicate are scored together; AP is computed from the induced ranking
    and the final value is the mean AP across predicates.
    """

    def __init__(self, scoring_function):
        # scoring_function([Xr, Xe]) -> array of scores, one per triple.
        self.scoring_function = scoring_function

    def __call__(self, pos_triples, neg_triples=None):
        # Normalise once up front: the original only guarded the p_idxs
        # expression and then crashed iterating a None neg_triples below.
        neg_triples = neg_triples or []

        # All predicate indices appearing in either triple set.
        p_idxs = sorted({p for (_, p, _) in pos_triples + neg_triples})

        average_precisions = []
        # Iterate over each predicate p, scoring its positives then negatives.
        for p_idx in p_idxs:
            p_pos_triples = [(s, p, o) for (s, p, o) in pos_triples if p == p_idx]
            p_neg_triples = [(s, p, o) for (s, p, o) in neg_triples if p == p_idx]

            # Positives come first, so the "relevant" ranks are 1..len(pos).
            n = len(p_pos_triples + p_neg_triples)
            Xr = np.full(shape=(n, 1), fill_value=p_idx, dtype=np.int32)
            Xe = np.full(shape=(n, 2), fill_value=0, dtype=np.int32)
            for i, (s_idx, _p_idx, o_idx) in enumerate(p_pos_triples + p_neg_triples):
                assert _p_idx == p_idx
                Xe[i, 0], Xe[i, 1] = s_idx, o_idx

            scores = self.scoring_function([Xr, Xe])

            actual = range(1, len(p_pos_triples) + 1)
            # Rank triples by descending score; ranks are 1-based for apk().
            predicted = 1 + np.argsort(scores)[::-1]
            average_precision = apk(actual=actual, predicted=predicted, k=n)
            average_precisions += [average_precision]
        return np.mean(average_precisions)
class Ranker(BaseRanker):
    """Entity-ranking evaluation: for every positive (s, p, o), rank the true
    subject against all candidate subjects and the true object against all
    candidate objects, returning raw and filtered 1-based ranks.

    Entity/predicate indices are 1-based throughout (candidates are built
    with arange(1, nb_entities + 1) and looked up at index - 1).
    """
    def __init__(self, scoring_function, nb_entities, true_triples=None):
        # scoring_function([Xr, Xe]) -> score per (relation, entity-pair) row.
        self.scoring_function = scoring_function
        self.nb_entities = nb_entities
        # Known true triples; used to mask competing positives for the
        # "filtered" ranks.
        self.true_triples = true_triples
    def __call__(self, pos_triples, neg_triples=None):
        err_subj, err_obj = [], []
        filtered_err_subj, filtered_err_obj = [], []
        for subj_idx, pred_idx, obj_idx in pos_triples:
            Xr = np.full(shape=(self.nb_entities, 1), fill_value=pred_idx, dtype=np.int32)
            # Xe_o: object fixed, subject column swept over all entities.
            Xe_o = np.full(shape=(self.nb_entities, 2), fill_value=obj_idx, dtype=np.int32)
            Xe_o[:, 0] = np.arange(1, self.nb_entities + 1)
            # Xe_s: subject fixed, object column swept over all entities.
            Xe_s = np.full(shape=(self.nb_entities, 2), fill_value=subj_idx, dtype=np.int32)
            Xe_s[:, 1] = np.arange(1, self.nb_entities + 1)
            # scores of (1, p, o), (2, p, o), .., (N, p, o)
            scores_o = self.scoring_function([Xr, Xe_o])
            # scores of (s, p, 1), (s, p, 2), .., (s, p, N)
            scores_s = self.scoring_function([Xr, Xe_s])
            # Double argsort of the negated scores converts scores to 0-based
            # descending ranks; +1 makes them 1-based.
            #err_subj += [1 + np.sum(scores_o > scores_o[subj_idx - 1])]
            #err_obj += [1 + np.sum(scores_s > scores_s[obj_idx - 1])]
            err_subj += [1 + np.argsort(np.argsort(- scores_o))[subj_idx - 1]]
            err_obj += [1 + np.argsort(np.argsort(- scores_s))[obj_idx - 1]]
            if self.true_triples:
                # rm_idx_o: other true OBJECTS for (s, p, ·) -> masked out of
                # scores_s (the object candidate list); rm_idx_s likewise
                # masks other true SUBJECTS out of scores_o.
                rm_idx_o = [o - 1 for (s, p, o) in self.true_triples if s == subj_idx and p == pred_idx and o != obj_idx]
                rm_idx_s = [s - 1 for (s, p, o) in self.true_triples if o == obj_idx and p == pred_idx and s != subj_idx]
                if rm_idx_o:
                    scores_s[rm_idx_o] = - np.inf
                if rm_idx_s:
                    scores_o[rm_idx_s] = - np.inf
                #filtered_err_subj += [1 + np.sum(scores_o > scores_o[subj_idx - 1])]
                #filtered_err_obj += [1 + np.sum(scores_s > scores_s[obj_idx - 1])]
                filtered_err_subj += [1 + np.argsort(np.argsort(- scores_o))[subj_idx - 1]]
                filtered_err_obj += [1 + np.argsort(np.argsort(- scores_s))[obj_idx - 1]]
        return (err_subj, err_obj), (filtered_err_subj, filtered_err_obj)
class AUC(BaseRanker):
    """Computes ROC-AUC and precision/recall-AUC over positive/negative triples."""

    def __init__(self, scoring_function, nb_entities, nb_predicates, rescale_predictions=False):
        self.scoring_function = scoring_function
        self.nb_entities = nb_entities
        self.nb_predicates = nb_predicates
        # When True, spread near-identical scores apart so sklearn's
        # threshold sweep does not collapse distinct predictions.
        self.rescale_predictions = rescale_predictions

    def __call__(self, pos_triples, neg_triples=None):
        # Guard: the signature allows neg_triples=None but the body needs a list.
        neg_triples = neg_triples or []
        triples = pos_triples + neg_triples
        labels = [1] * len(pos_triples) + [0] * len(neg_triples)

        Xr, Xe = [], []
        for (s_idx, p_idx, o_idx) in triples:
            Xr += [[p_idx]]
            Xe += [[s_idx, o_idx]]

        ascores = self.scoring_function([Xr, Xe])
        ays = np.array(labels)

        if self.rescale_predictions:
            diffs = np.diff(np.sort(ascores))
            nonzero = diffs[np.nonzero(diffs)]
            # If every score is identical there is no gap to rescale
            # (min() of an empty array would raise).
            if nonzero.size > 0:
                min_diff = min(abs(nonzero))
                if min_diff < 1e-8:
                    ascores = (ascores * (1e-7 / min_diff)).astype(np.float64)

        aucroc_value = metrics.roc_auc_score(ays, ascores)
        precision, recall, thresholds = metrics.precision_recall_curve(ays, ascores, pos_label=1)
        aucpr_value = metrics.auc(recall, precision)
        return aucroc_value, aucpr_value
|
mattclay/ansible | test/lib/ansible_test/_internal/__init__.py | Python | gpl-3.0 | 2,387 | 0.001257 | """Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main(cli_args=None):  # type: (t.Optional[t.List[str]]) -> None
    """Main program function."""
    try:
        os.chdir(data_context().content.root)
        args = parse_args(cli_args)
        config = args.config(args)  # type: CommonConfig
        display.verbosity = config.verbosity
        display.truncate = config.truncate
        display.redact = config.redact
        display.color = config.color
        display.info_stderr = config.info_stderr
        configure_timeout(config)
        display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
        delegate_args = None
        target_names = None
        try:
            data_context().check_layout()
            args.func(config)
        except PrimeContainers:
            pass
        except ListTargets as ex:
            # save target_names for use once we exit the exception handler
            target_names = ex.target_names
        except Delegate as ex:
            # save delegation args for use once we exit the exception handler
            delegate_args = (ex.host_state, ex.exclude, ex.require)
        if delegate_args:
            delegate(config, *delegate_args)
        if target_names:
            for target_name in target_names:
                print(target_name)  # info goes to stderr, this should be on stdout
        display.review_warnings()
        config.success = True
    # Exit codes: 0 warning, 1 application error, 2 interrupt, 3 broken pipe.
    except ApplicationWarning as ex:
        display.warning(u'%s' % ex)
        sys.exit(0)
    except ApplicationError as ex:
        display.error(u'%s' % ex)
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(2)
    except BrokenPipeError:
        sys.exit(3)
from __future__ import absolute_import, unicode_literals

from django.contrib.auth.models import Permission
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _

from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.permissions import site_permission_policy

from .views import SiteViewSet
@hooks.register('register_admin_viewset')
def register_viewset():
    # Mount the sites CRUD views under /sites in the Wagtail admin.
    return SiteViewSet('wagtailsites', url_prefix='sites')
class SitesMenuItem(MenuItem):
    # Settings-menu entry shown only to users with any site CRUD permission.
    def is_shown(self, request):
        return site_permission_policy.user_has_any_permission(
            request.user, ['add', 'change', 'delete']
        )
@hooks.register('register_settings_menu_item')
def register_sites_menu_item():
    # order=602 positions the entry within the Settings menu.
    return SitesMenuItem(_('Sites'), urlresolvers.reverse('wagtailsites:index'),
                         classnames='icon icon-site', order=602)
@hooks.register('register_permissions')
def register_permissions():
    """Expose the site add/change/delete permissions to Wagtail's registry."""
    site_codenames = ['add_site', 'change_site', 'delete_site']
    return Permission.objects.filter(
        content_type__app_label='wagtailcore',
        codename__in=site_codenames,
    )
|
mani-mishra/automateboringstuff | practice excercies/comma-code.py | Python | unlicense | 721 | 0.019417 | # Say you have a list value like this:
#
#
# spam = ['apples', 'bananas', 'tofu', 'cats']
# Write a function that takes a list value as an argument and returns a string with all
# the items separated by a comma and a space, with and inserted before the last item.
# For example, passing the previous spam list to the function would return 'apples, bananas, tofu, and cats'.
# But your f | unction should be able to work with any list value passed to it.
def concat_list(input):
    """Print the items of *input* comma-separated, with 'and' before the last.

    Example: ['apples', 'bananas', 'tofu', 'cats'] ->
    "apples, bananas, tofu, and cats".  Unlike the naive slice-based join,
    empty and single-element lists are handled sensibly instead of
    producing a dangling ", and".
    """
    items = [str(x) for x in input]
    if not items:
        print('')
    elif len(items) == 1:
        print(items[0])
    else:
        print(', '.join(items[:-1]) + ', and ' + items[-1])
concat_list(['apples', 'bananas', 'tofu', 'c | ats'])
concat_list([22,33,112,92])
concat_list([2,3,'##',22])
concat_list([2.2,33,'fotu',42])
|
iadept/vim-gtranslate | plugin/gtranslate.py | Python | gpl-2.0 | 194 | 0.025773 | import vim
from translate i | mport Translator
def main():
    """Translate the word stored in the Vim variable ``s:word`` into Russian."""
    source_word = vim.eval("s:word")
    to_russian = Translator(to_lang="ru")
    print(to_russian.translate(source_word))


if __name__ == "__main__":
    main()
|
rshk/ckan3-poc-experiments | ckan/catalog/api.py | Python | agpl-3.0 | 6,354 | 0.001731 | import urllib
from math import ceil
from flask import request
from flask.ext import restful
from sqlalchemy.orm.exc import NoResultFound
from .app import app
from .models import db, Dataset, Distribution
# Single flask-restful API instance; all resources below mount under /api/1.
api = restful.Api(app, prefix='/api/1')
class ModelResource(restful.Resource):
    """
    Common methods for exposing SQLAlchemy models through flask-restful.

    Subclasses set ``model``; ``_query`` may be assigned per-request to
    serve a custom base query (see DatasetDistributionsResource).
    """

    model = None  # Must be overridden by subclasses
    _query = None

    # Pagination defaults/bounds for the collection GET endpoint.
    DEFAULT_PAGE_SIZE = 10
    MAX_PAGE_SIZE = 100

    @property
    def query(self):
        """Base query: the custom ``_query`` if set, else the model's query."""
        if self._query is not None:
            return self._query
        return self.model.query

    def _serialize(self, obj):
        """Flatten an object into a JSON-friendly dict (attributes + id)."""
        data = {}
        if obj.attributes is not None:
            data.update(obj.attributes)
        data['id'] = obj.id
        return data

    def _get(self, obj_id):
        """Return the object with ``obj_id`` or abort with 404."""
        try:
            return self.query.filter_by(id=obj_id).one()
        except NoResultFound:
            restful.abort(404, message='Requested object not found')

    def _page_size(self):
        """Parse and clamp the ``page_size`` query argument."""
        page_size = self.DEFAULT_PAGE_SIZE
        if 'page_size' in request.args:
            try:
                page_size = int(request.args['page_size'])
            except ValueError:
                restful.abort(400, message="Page size must be an integer")
            if page_size < 1:
                restful.abort(
                    400, message="Page size must be greater than zero")
            if page_size > self.MAX_PAGE_SIZE:
                page_size = self.MAX_PAGE_SIZE
        return page_size

    def _page_number(self, max_page):
        """Parse and validate the ``page`` query argument against ``max_page``."""
        page = 0
        if 'page' in request.args:
            try:
                page = int(request.args['page'])
            except ValueError:
                restful.abort(400, message="Page number must be an integer")
            if page < 0:
                restful.abort(400, message='Page number cannot be negative')
            if page > max_page:
                restful.abort(404, message='Page number out of range')
        return page

    def _pagination_links(self, page, max_page, page_size):
        """Build RFC 5988 ``Link`` header entries for the current page."""
        def get_url(**kw):
            args = dict(request.args)
            args.update(kw)
            # NOTE(review): urllib.urlencode is the py2 API (py3: urllib.parse);
            # kept because the module targets py2 (flask.ext imports above).
            query_string = urllib.urlencode(args)
            if query_string:
                return '{0}?{1}'.format(request.base_url, query_string)
            return request.base_url

        links = []
        if page > 0:
            links.append("<{0}>; rel=\"first\""
                         "".format(get_url(page=0, page_size=page_size)))
            links.append("<{0}>; rel=\"prev\""
                         "".format(get_url(page=page - 1, page_size=page_size)))
        if page < max_page:
            links.append("<{0}>; rel=\"next\""
                         "".format(get_url(page=page + 1, page_size=page_size)))
            links.append("<{0}>; rel=\"last\""
                         "".format(get_url(page=max_page, page_size=page_size)))
        return links

    def get(self, obj_id=None):
        """Item GET when ``obj_id`` is given, paginated collection GET otherwise."""
        if obj_id is not None:
            return self._serialize(self._get(obj_id))

        ## todo: load filters from arguments
        query = self.query

        page_size = self._page_size()
        pages_count = int(ceil(query.count() * 1.0 / page_size))
        # An empty collection still has a valid page 0 (the original 404'd
        # on ?page=0 when the table was empty because max_page went to -1).
        max_page = max(pages_count - 1, 0)
        page = self._page_number(max_page)

        headers = {'Link': ", ".join(
            self._pagination_links(page, max_page, page_size))}
        results = query.slice(page * page_size, (page + 1) * page_size)
        return [self._serialize(o) for o in results], 200, headers

    def post(self):
        """Create a new object from the JSON body."""
        new = self.model()
        new.attributes = request.json
        db.session.add(new)
        db.session.commit()
        return self._serialize(new)  # todo: return 201 Created instead?

    def put(self, obj_id):
        """Merge the JSON body into the object's attributes."""
        obj = self._get(obj_id)
        # .items() works on both py2 and py3 (the original used iteritems()).
        for key, value in request.json.items():
            obj.attributes[key] = value
        db.session.commit()

    def patch(self, obj_id):
        """Apply a patch document.

        Plain keys set single attributes; ``$set`` bulk-sets and ``$del``
        deletes attribute keys.  Unknown ``$``-actions abort with 400.
        """
        obj = self._get(obj_id)
        for key, value in request.json.items():
            if key.startswith('$'):
                ## Custom action -- handle separately
                if key == '$del':
                    for k in value:
                        if k in obj.attributes:
                            del obj.attributes[k]
                elif key == '$set':
                    for k, v in value.items():
                        obj.attributes[k] = v
                else:
                    restful.abort(
                        400, message="Invalid PATCH key: {0}".format(key))
            else:
                obj.attributes[key] = value
        db.session.commit()

    def delete(self, obj_id):
        """Delete the object (404 if missing)."""
        ## todo: on dataset deletion, remove distributions?
        ## or, safer, disallow deletion if still referenced
        ## -> should be that way by default, btw
        obj = self._get(obj_id)
        db.session.delete(obj)
        db.session.commit()
class DatasetResource(ModelResource):
    # Plain CRUD resource over Dataset; behaviour inherited from ModelResource.
    model = Dataset
class DatasetDistributionsResource(ModelResource):
    """Collection of distributions belonging to a single dataset."""

    def _serialize(self, obj):
        serialized = super(DatasetDistributionsResource, self)._serialize(obj)
        serialized['dataset_id'] = obj.dataset_id
        return serialized

    def get(self, obj_id):
        # Use a custom base query so pagination runs over this dataset's
        # resources only.  Abort 404 instead of leaking an unhandled
        # NoResultFound when the dataset id does not exist.
        try:
            dataset = Dataset.query.filter_by(id=obj_id).one()
        except NoResultFound:
            restful.abort(404, message='Requested object not found')
        self._query = dataset.resources
        return super(DatasetDistributionsResource, self).get()

    def post(self, obj_id):
        ## todo: create a resource
        pass
class DistributionResource(ModelResource):
    """REST resource for Distribution rows; adds the owning dataset's id."""

    model = Distribution

    def _serialize(self, obj):
        data = super(DistributionResource, self)._serialize(obj)
        data['dataset_id'] = obj.dataset_id
        return data
# URL wiring: collection endpoints handle list/create, item endpoints
# handle get/put/patch/delete via the ModelResource methods above.
api.add_resource(DatasetResource,
                 '/dataset/',
                 '/dataset/<int:obj_id>/')
api.add_resource(DatasetDistributionsResource,
                 '/dataset/<int:obj_id>/resources/')
api.add_resource(DistributionResource,
                 '/distribution/',
                 '/distribution/<int:obj_id>/')
|
vapkarian/soccer-analyzer | src/parsers/flashscore.py | Python | mit | 30,552 | 0.003666 | import logging
import os
import re
from datetime import datetime
from typing import Tuple, Optional, List, Dict, Union, Any
from urllib.error import URLError
from bs4 import BeautifulSoup
from bs4.element import Tag
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import (
visibility_of_element_located, text_to_be_present_in_element, invisibility_of_element_located)
from selenium.webdriver.support.wait import WebDriverWait
from src.data.flashcore_leagues import ALLOWED_LEAGUES
from src.models import FlashscoreMatch
from src.settings import rel_path
# Selenium explicit-wait defaults: max seconds to wait and polling interval.
WAITING_TIMEOUT = 3
WAITING_POLL_FREQUENCY = 0.1
logger = logging.getLogger(__name__)
class InvalidMatch(Exception):
    # Scraper-specific error type (raised outside this excerpt).
    pass
class ServiceRefused(Exception):
    # Raised when opening a page fails with a network-level URLError.
    pass
def get_user_agent():
    """Return the fixed User-Agent header value used for scraping requests."""
    # TODO: rotate different real values
    agent = (
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) '
        'Gecko/20100101 Firefox/47.0 Mozilla/5.0 (Macintosh; '
        'Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0'
    )
    return agent
class FlashscoreScrapper(object):
    # Pre-compiled patterns for flashscore's DOM classes/ids: match rows
    # ("g_1_<id>"), participant elements, goal-icon ("soccer-ball[-own]")
    # and goal-time ("time-box[-wide]") nodes.
    _MATCH_ID_RE = re.compile(r'g_1_\w+')
    _PARTICIPANT_RE = re.compile(r'glib-participant-\w+')
    _BALL_RE = re.compile(r'soccer-ball(|-own)')
    _GOAL_RE = re.compile(r'time-box(|-wide)')
    def __init__(self) -> None:
        """Start a headless, image-less Chrome driver for scraping."""
        os.environ['TZ'] = 'UTC'  # chrome reads this value to setup default timezone
        self.base_url = FlashscoreMatch.FLASHSCORE_BASE_URL
        # Bookmakers whose odds are of interest (order preserved).
        self.bookmakers = (
            'bet365', '10Bet', 'Unibet', 'Betfair', 'bwin', 'William Hill', 'bet-at-home', '1xBet')
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        # Skip image downloads for speed; spoof a desktop browser UA.
        chrome_options.add_argument('--blink-settings=imagesEnabled=false')
        chrome_options.add_argument('user-agent={}'.format(get_user_agent()))
        self.driver = webdriver.Chrome(
            executable_path='chromedriver',
            chrome_options=chrome_options,
            service_args=[
                '--verbose',
                '--log-path={}'.format(rel_path('logs', 'ghostdriver.log')),
            ])
    def _open_page(self, url: str) -> None:
        """Navigate the browser to *url*, mapping network failures to
        the scraper-level ServiceRefused error."""
        try:
            self.driver.get(url)
        except URLError:
            raise ServiceRefused
    @staticmethod
    def _parse_match_preview(tr: Tag) -> Optional[str]:
        """Return the match id for a fixture row, or None if its league is
        not whitelisted or is a play-off."""
        head = tr.find_parent('table', class_='soccer').find('thead')
        league = head.find('span', class_='country').text
        if 'play offs' in league.lower():
            return None
        # for/else: falls through to `return None` when no whitelisted
        # league prefix matched.
        for elem in ALLOWED_LEAGUES:
            if league.startswith(elem):
                break
        else:
            return None
        # Row ids look like "g_1_<match id>"; strip the 4-char prefix.
        match_id = tr.attrs['id'][4:]
        return match_id
    @staticmethod
    def _parse_goals_time(table: Tag) -> Tuple[List[str], List[str]]:
        """Extract (home_goal_times, away_goal_times) as strings, skipping
        goals that belong to a penalty shoot-out section."""
        value = [], []
        # NOTE(review): this lookup's result is discarded — looks like a
        # leftover; confirm before removing.
        table.find('div', class_='detailMS__headerText')
        penalty_section = table.find('div', class_='detailMS__headerText', string='Penalties')
        penalties = list(penalty_section.parent.next_siblings) if penalty_section is not None else []
        goals = table.find_all('div', class_=FlashscoreScrapper._BALL_RE)
        for goal in goals:
            line = goal.find_parent('div', class_='detailMS__incidentRow')
            if line in penalties:
                continue
            # Times render with a trailing apostrophe (e.g. "45'"), drop it.
            goal_time = line.find('div', class_=FlashscoreScrapper._GOAL_RE).text[:-1]
            if goal_time:
                if 'incidentRow--home' in line.attrs['class']:
                    value[0].append(goal_time)
                elif 'incidentRow--away' in line.attrs['class']:
                    value[1].append(goal_time)
        return value
    @staticmethod
    def _parse_odds(td: Tag) -> Tuple[Optional[str], Optional[str]]:
        """Return (previous_odds, current_odds) from an odds cell; either may
        be None when the odds are absent, unpublished, or shown as '-'."""
        if 'no-odds' in td.attrs['class']:
            return None, None
        td_span = next(td.children)
        if 'not-published' in td_span.attrs['class']:
            return None, None
        if 'alt' in td_span.attrs:
            # The alt text encodes "previous[u|d]current"; normalise the
            # down-marker to '[u]' so a single split handles both.
            value = td_span.attrs['alt'].replace('[d]', '[u]')
            if '[u]' in value:
                previous_odds, current_odds = value.split('[u]')
            else:
                previous_odds = current_odds = value
        else:
            previous_odds = current_odds = td_span.text
        if previous_odds == '-':
            previous_odds = None
        if current_odds == '-':
            current_odds = None
        return previous_odds, current_odds
    @staticmethod
    def _parse_h2h_matches(table: Tag, league: str,
                           count: int = 10) -> List[Dict[str, Union[str, int]]]:
        """Collect up to *count* head-to-head results from the given league.

        Apertura/Clausura suffixes are stripped before matching the league
        name against the row's title; rows with unparseable scores are
        skipped.
        """
        matches = []
        rows = table.find('tbody').find_all('tr', class_='highlight')
        for row in rows:
            cells = row.find_all('td')
            if (
                league.lower().replace(' - apertura', '').replace(' - clausura', '') in
                cells[1].attrs['title'].lower()
            ):
                home_team = cells[2].text
                away_team = cells[3].text
                try:
                    home_score, away_score = map(int, cells[4].text.split(':'))
                except ValueError:
                    # Score cell not "<int>:<int>" (e.g. upcoming match).
                    continue
                matches.append({'home_team': home_team, 'away_team': away_team,
                                'home_score': home_score, 'away_score': away_score})
                if len(matches) == count:
                    break
        return matches
    @staticmethod
    def _parse_team_rank(tr: Tag) -> int:
        # Rank cells render like "3." — strip the dot before parsing.
        return int(tr.find('td', class_='rank').text.replace('.', ''))
    @staticmethod
    def _parse_team_goals_difference(tr: Tag) -> str:
        # Returned verbatim as text (e.g. "12:7"), not parsed to ints.
        return tr.find('td', class_='goals').text
    @staticmethod
    def _parse_team_ou_standing(tr: Tag) -> Tuple[str, str, str, str]:
        """Return (over, under, goals_difference, goals_average) as raw text."""
        over = tr.find('td', class_='over').text
        under = tr.find('td', class_='under').text
        goals_difference = tr.find('td', class_='goals').text
        goals_average = tr.find('td', class_='avg_goals_match').text
        return over, under, goals_difference, goals_average
@staticmethod
def _normalize(value: str) -> str:
return value.replace('\n', '').replace('\'', '')
def _wait_until(self, expectation: type, id_: str = None, name: str = None, tag_name: str = None,
class_name: str = None, link_text: str = None, partial_link_text: str = None,
css_selector: str = None, xpath: str = None,
timeout: Union[int, float] = WAITING_TIMEOUT,
poll_frequency: Union[int, float] = WAITING_POLL_FREQUENCY,
| args: List[Any] = None):
locators = {
By.ID: id_,
By.NAME: name,
By.TAG_NAME: tag_name,
By.CLASS_NAME: class_name,
By.LINK_TEXT: link_text,
By.PARTIAL_LINK_TEXT: partial_link_text,
By.CSS_SELECTOR: css_selector,
By.XPATH: xpath}
lo | cators = {locator: value for locator, value in locators.items() if value is not None}
if len(locators) != 1:
raise Exception(
'You have to provide one and only one value among `id_`, `name`, `tag_name`, '
'`class_name`, `link_text`, `partial_link_text`, `css_selector`, `xpath`.')
locator, value = next(iter(locators.items()))
args = args or []
return WebDriverWait(self.driver, timeout, poll_frequency=poll_frequency).until(
expectation((locator, value), *args))
def _change_time_zone(self, value: str) -> None:
modal = self._wait_until(
visibility_of_element_located, class_name='header__button--settings')
modal.click()
self._wait_until(visibility_of_element_located, id_='tzactual')
# click on #tzactual displays #tzcontent as flex which does not work
# due to --load-images=no option of PhantomJS, but who cares?
self.driver.execute_script('$("#tzcontent").css({"display":"block"});')
tzcontent = self._wait_until(visibility_of_element_located, id_= |
# -*- coding: utf-8 -*-
import os
import shutil  # was misspelled "shutils", which made the script fail at import
import sys


def _chown_tree(root, owner, group='root'):
    """Recursively set owner:group on *root* and everything beneath it."""
    for ruta, dirs, archivos in os.walk(root):
        shutil.chown(ruta, owner, group)
        for archivo in archivos:
            # os.walk yields bare filenames; join with the directory
            # (the original chown'd a relative name, which only worked
            # if the CWD happened to be that directory).
            shutil.chown(os.path.join(ruta, archivo), owner, group)


# Two positional arguments are required (usuario and dni); the original
# check (`<= 1`) let a single-argument call crash on sys.argv[2].
if len(sys.argv) < 3:
    print('debe llamar al sistema usando : ')
    print('python3 ' + sys.argv[0] + ' usuario dni')
    sys.exit(1)

usuario = sys.argv[1]
dni = sys.argv[2]

# Rename the home and roaming-profile directories from username to DNI.
os.chdir('/home')
os.rename(usuario, dni)
os.chdir('/home/samba/profiles')
os.rename(usuario, dni)

# Re-own both renamed trees to the DNI user (group root).
_chown_tree('/home/' + dni, dni)
_chown_tree('/home/samba/profiles/' + dni, dni)
|
aspose-words/Aspose.Words-for-Java | Plugins/Aspose_Words_Java_for_Jython/asposewords/quickstart/HelloWorld.py | Python | mit | 481 | 0.016632 | from asposewords import Settings
from com.aspose.words import Document
from com.aspose.words import DocumentBuilder
class HelloWorld:
    """Create a one-line Word document via Aspose.Words when instantiated."""

    def __init__(self):
        output_dir = Settings.dataDir + 'quickstart/'
        document = Document()
        writer = DocumentBuilder(document)
        writer.writeln('Hello World!')
        document.save(output_dir + 'HelloWorld.docx')
        print("Document saved.")


if __name__ == '__main__':
    HelloWorld()
# Redirect stdout/stderr into the logger before anything else writes output.
from api.core.utility import redirectOutputToLogger
redirectOutputToLogger()

__author__ = 'Michael Pryor'
if __name__ == '__main__':
import ConfigParser
import argparse
import datetime
import os
import logging
import sys
from logging import config
import bottle
parser = argparse.ArgumentParser(description='Twitter Project', argumen | t_default=argparse.SUPPRESS)
parser.add_argument('--config',
metavar='file',
default='config.conf',
help='Configuration file to use')
parser.add_argument('--logging_config',
metavar='file',
default='logging.conf',
help='Logging configuration file to use')
parser.add_argument('--setup_instance_code',
default=False,
action='store_true',
help='The server does not run as normal, it will run in instance setup code mode.\n'
'Instance setup codes allow users to setup special instances with special'
'rules')
parser.add_argument('--clear_instance_codes',
default=False,
action='store_true',
help='All instance codes will be wiped')
parser.add_argument('--clear_geocode_data',
default=False,
action='store_true',
help='Clear all geocode data stored in the database')
parser.add_argument('--wipe_instance_data',
default=False,
action='store_true',
help='Instance data is recovered from the database unless this flag is specified, in which case all instance data is wiped on startup')
parser.add_argument('--show_database_storage_usage',
default=False,
action='store_true',
help='This will query all collections in the MongoDB database and show their size in megabytes.')
parser.add_argument('--rebuild_instance_indexes',
default=False,
action='store_true',
help='Use with caution as it will lock up database until completed. Rebuilds indexes on instance collections.')
parser.add_argument('--view_profiling_info',
default=False,
action='store_true',
help='If profiling is enabled in configuration the server logs MongoDB performance. Use this to retrieve the logged data.')
parser._parse_known_args(sys.argv[1:], argparse.Namespace())
args = parser.parse_args()
loggingFile = args.logging_config
logging.config.fileConfig(loggingFile,disable_existing_loggers=False)
# Put this in all logs so we can clearly see when the server was restarted.
logger = logging.getLogger(__name__)
logger.critical('SERVER STARTED')
logger.info('Using logging configuration file: "%s"' % loggingFile)
configurationFile = args.config
logger.info('Using configuration file: "%s"' % configurationFile)
configParser = ConfigParser.SafeConfigParser()
configParser.read(configurationFile)
from api.config import loadConfigFromFile
loadConfigFromFile(configParser)
from api.core.utility import parseInteger, Timer
from api.caching.instance_lifetime import getInstances
from api.core.threads_core import BaseThread
from api.web.twitter_instance import TwitterInstance
from api.caching.instance_codes import resetCodeConsumerCounts, getInstanceCodeCollection, getCode
from api.config import Configuration
from api.caching.caching_shared import getCollections, getCollection, getDatabase
from api.caching.temporal_analytics import isTemporalInfluenceCollection
from api.caching.tweet_user import isUserCollection, isTweetCollection, getUserCollection, getTweetCollection
from api.geocode.geocode_shared import GeocodeResultAbstract
from api.core import threads
from api.twitter.feed import UserAnalysisFollowersGeocoded, TwitterAuthentication
from api.twitter.flow.display_instance_setup import GateInstance, StartInstancePost, ManageInstancePost
from api.twitter.flow.display_oauth import OAuthSignIn, OAuthCallback
from api.web import web_core
from api.web.web_core import WebApplicationTwitter
from api.twitter.flow.data_core import DataCollection
from api.twitter.flow.display import LocationsMapPage, UserInformationPage, UserFollowerEnrichPage, TwitterCachePage, LocationsPage, BulkDownloadDataProvider, InfluenceCachePage, GeocodeCachePage, LandingPage
from api.twitter.flow.web_socket_group import LocationMapWsg, TweetsByLocationWsg, UserWsg, BulkDownloadDataWsg, RealtimePerformanceWsg
resetCodeConsumerCounts()
if args.clear_instance_codes:
logger.info('Clearing instance codes')
getInstanceCodeCollection().drop()
if Configuration.PROXIES_ENABLED:
httpProxy = Configuration.PROXIES.get('http',None)
httpsProxy = Configuration.PROXIES.get('https',None)
# Requests API will use these environment variables.
if httpProxy is not None:
os.environ['HTTP_PROXY'] = Configuration.PROXIES['http']
if httpsProxy is not None:
os.environ['HTTPS_PROXY'] = Configuration.PROXIES['https']
bottle.debug(Configuration.BOTTLE_DEBUG)
GeocodeResultAbstract.initializeCountryContinentDataFromCsv()
dataCollection = DataCollection()
webApplication = WebApplicationTwitter(None, Configuration.MAX_INSTANCE_INACTIVE_TIME_MS, dataCollection)
landingPage = LandingPage(webApplication)
oauthSignIn = OAuthSignIn(webApplication, Configuration.CONSUMER_TOKEN, Configuration.CONSUMER_SECRET)
oauthCallback = OAuthCallback(webApplication, Configuration.CONSUMER_TOKEN, Configuration.CONSUMER_SECRET, GateInstance.link_info.getPageLink())
mapWebSocketGroup = LocationMapWsg(application=webApplication,locations=dataCollection.tweets_by_location)
tweetsByLocationWebSocketGroup = TweetsByLocationWsg(application=webApplication,signaler=dataCollection.tweets_by_location)
userInformationWebSocketGroup = UserWsg(application=webApplication, dataCollection=dataCollection, signaler=dataCollection.all_users)
instanceGate = GateInstance(webApplication)
startInstance = StartInstancePost(webApplication, Configuration.CONSUMER_TOKEN, Configuration.CONSUMER_SECRET, None)
manageInstance = ManageInstancePost(webApplication)
bulkDownloadDataProvider = BulkDownloadDataProvider(webApplication)
bulkDownloadData = BulkDownloadDataWsg(webApplication, bulkDownloadDataProvider=bulkDownloadDataProvider)
realtimePerformance = RealtimePerformanceWsg(webApplication, dataCollection.realtime_performance.event_signaler)
locationDisplay = LocationsMapPage(webApplication, mapWebSocketGroup, bulkDownloadData, realtimePerformance)
locationTextPage = LocationsPage(webApplication, tweetsByLocationWebSocketGroup, bulkDownloadData, realtimePerformance)
userInformationPage = UserInformationPage(webApplication,userInformationWebSocketGroup)
tweetPage = TwitterCachePage(webApplication)
influencePage = InfluenceCachePage(webApplication)
geocodeSearchPage = GeocodeCachePage(webApplication, Configuration.GEOCODE_EXTERNAL_PROVIDER)
userAnalysers = [lambda user: UserAnalysisFollowersGeocoded()]
# Setup all threads apart from twitter thread.
resultDic = threads.startThreads(data=dataCollection,
display=[mapWebSocketGroup,
tweetsByLocationWebSocketGroup,
userInformationWebSocketGroup,
realtimePerformance],
userAnalysers=userAnalysers)
tweetQueue = resultDic['tweet_queue']
followerExtractorGateThread = resultDic['follower_extractor_gate_thread']
userEn |
facebookexperimental/eden | eden/hg-server/edenscm/mercurial/progress.py | Python | gpl-2.0 | 26,512 | 0.000339 | # Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# progress.py progress bars related code
#
# Copyright (C) 2010 Augie Fackler <durin42@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import errno
import os
import threading
import time
import bindings
from bindings import threading as rustthreading, tracing
from . import encoding, pycompat, util
from .i18n import _, _x
_tracer = util.tracer
def spacejoin(*parts):
    """Join the truthy arguments with single spaces, skipping empties."""
    nonempty = [piece for piece in parts if piece]
    return " ".join(nonempty)
def shouldprint(ui):
    """Return whether progress output should be rendered for this ui.

    Suppressed in quiet or plain('progress') mode; otherwise shown when
    stderr is a tty or progress.assume-tty is configured.
    """
    if ui.quiet or ui.plain("progress"):
        return False
    return ui._isatty(ui.ferr) or ui.configbool("progress", "assume-tty")
def fmtremaining(seconds):
    """Format a number of remaining seconds in a human readable way.

    This will properly display seconds, minutes, hours, days, weeks and
    years as needed.  When moving to a larger unit the smaller remainder is
    dropped and the count is bumped by one, so the estimate rounds upwards.
    Returns "" when *seconds* is None (unknown).
    """
    if seconds is None:
        return ""
    if seconds < 60:
        # i18n: format XX seconds as "XXs"
        return _("%02ds") % seconds
    minutes = seconds // 60
    if minutes < 60:
        seconds -= minutes * 60
        # i18n: format X minutes and YY seconds as "XmYYs"
        return _("%dm%02ds") % (minutes, seconds)
    # we're going to ignore seconds in this case
    minutes += 1
    hours = minutes // 60
    minutes -= hours * 60
    if hours < 30:
        # i18n: format X hours and YY minutes as "XhYYm"
        return _("%dh%02dm") % (hours, minutes)
    # we're going to ignore minutes in this case
    hours += 1
    days = hours // 24
    hours -= days * 24
    if days < 15:
        # i18n: format X days and YY hours as "XdYYh"
        return _("%dd%02dh") % (days, hours)
    # we're going to ignore hours in this case
    days += 1
    weeks = days // 7
    days -= weeks * 7
    if weeks < 55:
        # i18n: format X weeks and YY days as "XwYYd"
        return _("%dw%02dd") % (weeks, days)
    # we're going to ignore days and treat a year as 52 weeks
    weeks += 1
    years = weeks // 52
    weeks -= years * 52
    # i18n: format X years and YY weeks as "XyYYw"
    return _("%dy%02dw") % (years, weeks)
def estimateremaining(bar):
    """Estimate the number of seconds remaining for *bar*, or None.

    Takes two (position, time) samples from bar._getestimatebounds() and
    extrapolates linearly towards bar._total.  Returns None whenever there
    is not enough information (no total, no bounds, unknown positions, or
    no observed movement).
    """
    if not bar._total:
        return None
    bounds = bar._getestimatebounds()
    if bounds is None:
        return None
    startpos, starttime = bounds[0]
    endpos, endtime = bounds[1]
    if startpos is None or endpos is None:
        return None
    if startpos == endpos:
        return None
    target = bar._total - startpos
    delta = endpos - startpos
    # delta > 0.1 guards against division blow-ups for tiny movements;
    # the trailing +1 rounds the estimate up so it never reads "0s left".
    if target >= delta and delta > 0.1:
        elapsed = endtime - starttime
        seconds = (elapsed * (target - delta)) // delta + 1
        return seconds
    return None
def fmtspeed(speed, bar):
    """Format *speed* (units per second) for display; "" when unknown."""
    if speed is None:
        return ""
    elif bar._formatfunc:
        # The bar supplies its own value formatter (e.g. byte sizes).
        return _("%s/sec") % bar._formatfunc(speed)
    elif bar._unit:
        return _("%d %s/sec") % (speed, bar._unit)
    else:
        return _("%d per sec") % speed
def estimatespeed(bar):
    """Return the estimated progress speed (units per second) or None."""
    bounds = bar._getestimatebounds()
    if bounds is None:
        return None
    firstpos, firsttime = bounds[0]
    lastpos, lasttime = bounds[1]
    if firstpos is None or lastpos is None:
        return None
    timespan = lasttime - firsttime
    if timespan <= 0:
        return None
    return (lastpos - firstpos) // timespan
# file_write() and file_flush() of Python 2 do not restart on EINTR if
# the file is attached to a "slow" device (e.g. a terminal) and raise
# IOError. We cannot know how many bytes would be written by file_write(),
# but a progress text is known to be short enough to be written by a
# single write() syscall, so we can just retry file_write() with the whole
# text. (issue5532)
#
# This should be a short-term workaround. We'll need to fix every occurrence
# of write() to a terminal or pipe.
def _eintrretry(func, *args):
    """Call func(*args), retrying for as long as it fails with EINTR.

    Works around IOError raised when a write to a slow device (terminal,
    pipe) is interrupted by a signal (see issue5532 in the file comment).
    """
    while True:
        try:
            return func(*args)
        except IOError as ioerr:
            if ioerr.errno != errno.EINTR:
                raise
            # Interrupted by a signal: simply retry the whole call.
class baserenderer(object):
    """progress bar renderer for classic-style progress bars

    Subclasses implement show() to produce the actual progress text; this
    base class handles delivery of that text to the terminal/pager and the
    bookkeeping needed to clear it again.
    """
    def __init__(self, bar):
        # bar: the progress bar model whose state this renderer displays.
        self._bar = bar
        # Whether anything has been rendered yet (guards clear/complete).
        self.printed = False
        # Optional fixed width from the progress.width config knob.
        self.configwidth = bar._ui.config("progress", "width", default=None)
    def _writeprogress(self, msg, flush=False):
        msg = msg.strip("\r\n")
        # The Rust set_progress handles both the stderr (no pager),
        # and streampager cases.
        # If there is an external pager running, then the progress
        # is not expected to be rendered.
        util.mainio.set_progress(msg)
    def width(self):
        # Effective render width: configured width capped at the terminal
        # width, or the full terminal width when unconfigured.
        ui = self._bar._ui
        tw = ui.termwidth()
        if self.configwidth is not None:
            return min(int(self.configwidth), tw)
        else:
            return tw
    def show(self):
        # NOTE(review): subclasses implement show(now) with a timestamp
        # argument (see complete() below); this abstract stub takes none.
        raise NotImplementedError()
    def clear(self):
        # Erase the progress line, but only if we ever drew one.
        if not self.printed:
            return
        self._writeprogress("")
    def complete(self):
        # Render the final state once, then erase the progress line.
        if not self.printed:
            return
        self.show(time.time())
        self._writeprogress("")
class classicrenderer(baserenderer):
def __init__(self, bar):
super(classicrenderer, self).__init__(bar)
self.order = bar._ui.configlist("progress", "format")
def show(self, now):
pos, item = _progvalue(self._bar.value)
if pos is None:
pos = round(now - self._bar._enginestarttime, 1)
formatfunc = self._bar._formatfunc
if formatfunc is None:
formatfunc = str
topic = self._bar._topic
unit = self._bar._unit
total = self._bar._total
termwidth = self.width()
self.printed = True
head = ""
needprogress = False
tail = ""
for indicator in self.order:
add = ""
if indicator == "topic":
add = topic
elif indicator == "number":
fpos = formatfunc(pos)
if total:
ftotal = formatfunc(total)
maxlen = max(len(fpos), len(ftotal))
add = ("% " + str(maxlen) + "s/%s") % (fpos, ftotal)
else:
add = fpos
elif indicator.startswith("item") and item:
slice = "end"
if "-" in indicator:
wid = int(indicator.split("-")[1])
elif "+" in indicator:
slice = "beginning"
wid = int(indicator.split("+")[1])
else:
wid = 20
if slice == "end":
add = encoding.trim(item, wid, leftside=True)
else:
add = encoding.trim(item, wid)
add += (wid - encoding.colwidth(add)) * " "
elif indicator == "bar":
add = ""
needprogress = True
elif indicator == "unit" and unit:
add = unit
elif indicator == "estimate":
add = fmtremaining(estimateremaining(self._bar))
elif indicator == "speed":
add = fmtspeed(estimatespeed(self._bar), self._bar)
if not needprogress:
head = spacejoin(head, add)
else:
tail = spacejoin(tail, add)
if needprogress:
used = 0
if head:
used += encoding.colwidth(head) + 1
if tail:
used += encoding.colwidth(tail) + 1
progwidth = termwidth - used - 3
if pos is not None and total and pos <= total:
amt = pos * progwidth // total
bar = "=" * (amt - 1)
if amt > 0:
bar += ">"
bar += " " * (progwidth - amt)
else:
elapsed = now - self._bar._enginestarttime
indetpos = int(elapsed / self._bar._refr |
Huawei/OpenStackClient_Auto-Scaling | asclient/common/utils.py | Python | apache-2.0 | 1,745 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language gover | ning permissions and limitations
# under the License.
#
from datetime import datetime
def get_id(obj):
    """Get obj's uuid, or the object itself if it carries no uuid.

    Abstracts the common pattern of allowing an object with a ``uuid``
    attribute, a mapping with a ``'uuid'`` key, or a bare ID (UUID) as a
    parameter when dealing with relationships.
    """
    # Object with a truthy ``uuid`` attribute (e.g. a resource model).
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    try:
        # Mapping carrying the id under the 'uuid' key.  Previously this
        # branch was unreachable: ``obj.uuid`` raised AttributeError first
        # and the whole mapping was returned instead of its id.
        return obj['uuid']
    except (TypeError, KeyError):
        # Not subscriptable / no 'uuid' key: treat obj as the ID itself.
        return obj
def remove_empty_from_dict(original):
    """Return a new dict without the keys whose values are "empty".

    Empty means None, '', [] or {}; other falsy values such as 0 and
    False are deliberately kept.

    :param dict original: original dict, should not be None
    :return: a new dict which removes keys with empty values
    """
    # ``items()`` works on both Python 2 and 3; ``iteritems()`` was
    # Python-2-only and broke this helper on Python 3.
    return dict((k, v) for k, v in original.items()
                if v is not None and v != '' and v != [] and v != {})
def str_range(start, end):
    """Return the half-open range [start, end) as a list of decimal strings."""
    return list(map(str, range(start, end)))
def format_time(time_in_long, _format='%Y-%m-%d %H:%M:%S'):
if time_in_long:
# if time-long is with mill seconds
if time_in_long > pow(10, 12):
time_in_long /= 1000
timestamp = datetime.fromtimestamp(time_in_long)
return timestamp.strftime(_format)
else:
return ''
|
exekias/droplet | droplet/web/wizards.py | Python | agpl-3.0 | 3,158 | 0 | # -*- coding: utf-8 -*-
#
# droplet
# Copyright (C) 2014 Carlos Pérez-Aradros Herce <exekias@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
import droplet
from droplet import actions
from droplet.web import blocks, forms, redirect
aregister = actions.Library('wizard')
class Wizard(blocks.Block):
    """
    Wizard block: walks the user through different forms (see
    class:`droplet.web.forms.FormBlock`)
    """
    template_name = 'web/wizard.html'
    #: Sorted list of class:`droplet.web.forms.Form` objects (class-level
    #: default shared by subclasses; override it, do not mutate it)
    forms = []
    #: current step (0-based index into :meth:`get_forms`)
    step = 0
    def get_forms(self):
        """
        Return the sorted list of forms of the wizard
        """
        return self.forms
    def get_context_data(self, *args, **kwargs):
        # Expose the wizard/form identifiers and the step so the template
        # can wire the client-side "next" action back to this block.
        context = super(Wizard, self).get_context_data(*args, **kwargs)
        context.update({
            'wizard_name': self.register_name,
            'form_name': self.next(self.step).register_name,
            'step': self.step,
        })
        return context
    def next(self, step):
        # Return the form block shown at position ``step``.
        # NOTE(review): an out-of-range step raises IndexError, yet the
        # module-level next() action compares this result against None --
        # confirm whether get_forms() is expected to end with a sentinel.
        forms = self.get_forms()
        return forms[step]
class InstallWizard(Wizard):
    """
    Wizard bound to a module; it will install the module when done and
    redirect the user to the modules page
    """
    #: Class of the module this wizard will install
    module = None
    def __init__(self, *args, **kwargs):
        # Fail fast when a subclass forgot to set ``module`` to a
        # droplet Module subclass.
        if not self.module or \
                not issubclass(self.module, droplet.module.Module):
            raise ValueError('You should define module class to be installed')
        super(InstallWizard, self).__init__(*args, **kwargs)
    def finish(self, transport):
        # Instantiate, install and enable the module, then send the
        # client to the modules overview page.
        mod = self.module()
        mod.install()
        mod.enable()
        redirect.redirect(transport, reverse('core:modules'))
@aregister.action
def next(transport, wizard, step, data):
    """
    Validate step and go to the next one (or finish the wizard)
    :param transport: Transport object
    :param wizard: Wizard block name
    :param step: Current step number
    :param data: form data for the step
    """
    step = int(step)
    wizard = blocks.get(wizard)
    # Retrieve form block
    form = wizard.next(step)
    # Run the step's own form validation/handling; only advance on success.
    valid = forms.send(transport, form.register_name, data=data)
    if valid:
        # NOTE(review): Wizard.next() indexes into get_forms() and raises
        # IndexError past the last form; this None check only fires if the
        # forms list ends with a None sentinel -- confirm the convention.
        if wizard.next(step+1) is None:
            # It was last step
            wizard.finish(transport)
            return
        # Next step
        wizard.step = step+1
        wizard.update(transport)
|
kthyng/tracpy | tracpy/tools.py | Python | mit | 18,989 | 0.000369 | """
Tools for dealing with drifter stuff.
Functions include:
* interpolate2d
* interpolate3d
* find_final
* convert_indices
* check_points
* seed
"""
import numpy as np
from scipy import ndimage
import time
import matplotlib.tri as mtri
# from matplotlib.mlab import Path, find
from matplotlib.path import Path
def interpolate2d(x, y, grid, itype, xin=None, yin=None, order=1,
                  mode='nearest', cval=0.):
    """
    Horizontal interpolation to map between coordinate transformations.
    Args:
        x, y: x, y
        grid: grid as read in by inout.readgrid()
        itype:
            * 'd_xy2ij' delaunay, from projected x, y to grid i, j
            * 'd_ij2xy' delaunay, from grid i, j to projected x, y
            * 'd_ll2ij' delaunay, from lon, lat to grid i, j
            * 'd_ij2ll' delaunay, from grid i, j to lon, lat
            * 'm_ij2xy' map_coordinates, from grid i, j to projected x, y or if
              z, xin, and yin are also input, from grid i, j, k to projected x,
              y, z. Can use the 3d version of this for transforming to lon/lat
              also if the xin/yin input are lon/lat arrays.
            * 'm_ij2ll' map_coordinates, from grid i, j to lon, lat
        xin: 3D array of x values that are mapped to the input x,y,z
            coordinates. This is only needed in the 3D mapping case. Normally,
            can just do this in 2D instead of 3D and get the same results.
        yin: 3D array of y values that are mapped to the input x,y,z
            coordinates. This is only needed in the 3D mapping case. Normally,
            can just do this in 2D instead of 3D and get the same results.
        order: order of interpolation for map_coordinates. 1 for linear and 3
            for cubic. Default=1
        mode: behavior for edge points. Default is 'nearest'. Notes on the
            map_coordinates function: The "mode" kwarg here just controls how
            the boundaries are treated mode='nearest' is _not_ nearest neighbor
            interpolation, it just uses the value of the nearest cell if the
            point lies outside the grid. The default is to treat the values
            outside the grid as zero, which can cause some edge effects if
            you're interpolating points near the edge. 'constant', 'nearest',
            'reflect' or 'wrap'. The "order" kwarg controls the order of the
            splines used. The default is cubic splines, order=3
        cval: Constant value used in map_coordinates if mode='constant'
    Returns:
        * xi,yi - Interpolated values
        * dt - Time required for interpolation
    """
    # NOTE(review): xin/yin are documented but unused here; the ``d_*``
    # branches use delaunay triangulations from the grid object while the
    # ``m_*`` branches use scipy's map_coordinates.  An unrecognized itype
    # leaves xi/yi unbound and raises UnboundLocalError -- confirm callers
    # only pass the documented values.
    tic = time.time()
    if itype == 'd_xy2ij':
        # Set up functions for interpolating
        # changing format to use more robust triangulation in grid set up
        fx = mtri.LinearTriInterpolator(grid.trir, grid.X.flatten())
        fy = mtri.LinearTriInterpolator(grid.trir, grid.Y.flatten())
        # Need to shift indices to move from rho grid of interpolator to
        # arakawa c grid
        xi = fx(x, y) + .5
        yi = fy(x, y) + .5
    elif itype == 'd_ij2xy':
        # Set up functions for interpolating
        fx = mtri.LinearTriInterpolator(grid.tri, grid.x_rho.flatten())
        fy = mtri.LinearTriInterpolator(grid.tri, grid.y_rho.flatten())
        # Need to shift indices to move to rho grid of interpolator from
        # arakawa c grid
        xi = fx(x-0.5, y-0.5)
        yi = fy(x-0.5, y-0.5)
    elif itype == 'd_ll2ij':
        # Set up functions for interpolating
        fx = mtri.LinearTriInterpolator(grid.trirllrho, grid.X.flatten())
        fy = mtri.LinearTriInterpolator(grid.trirllrho, grid.Y.flatten())
        # Need to shift indices to move from rho grid of interpolator to
        # arakawa c grid
        xi = fx(x, y) + .5
        yi = fy(x, y) + .5
    elif itype == 'd_ij2ll':
        # Set up functions for interpolating
        fx = mtri.LinearTriInterpolator(grid.tri, grid.lon_rho.flatten())
        fy = mtri.LinearTriInterpolator(grid.tri, grid.lat_rho.flatten())
        # Need to shift indices to move to rho grid of interpolator from
        # arakawa c grid
        xi = fx(x-0.5, y-0.5)
        yi = fy(x-0.5, y-0.5)
    elif itype == 'm_ij2xy':
        # .5's are to shift from u/v grid to rho grid for interpolator
        xi = ndimage.map_coordinates(grid.x_rho.T, np.array([x.flatten()-.5,
                                                             y.flatten()-.5]),
                                     order=order, mode=mode,
                                     cval=cval).reshape(x.shape)
        yi = ndimage.map_coordinates(grid.y_rho.T, np.array([x.flatten()-.5,
                                                             y.flatten()-.5]),
                                     order=order, mode=mode,
                                     cval=cval).reshape(y.shape)
    elif itype == 'm_ij2ll':
        xi = ndimage.map_coordinates(grid.lon_rho.T, np.array([x.flatten()-.5,
                                                               y.flatten()-.5]),
                                     order=order, mode=mode,
                                     cval=cval).reshape(x.shape)
        yi = ndimage.map_coordinates(grid.lat_rho.T, np.array([x.flatten()-.5,
                                                               y.flatten()-.5]),
                                     order=order, mode=mode,
                                     cval=cval).reshape(y.shape)
    # Need to retain nan's since are changed them to zeros here
    if xi.size > 1:
        ind = np.isnan(x)
        xi[ind] = np.nan
        yi[ind] = np.nan
    dt = time.time() - tic
    return xi, yi, dt
def interpolate3d(x, y, z, zin, order=1, mode='nearest', cval=0.):
    """
    3D interpolation for transforming from grid/index space to whatever space
    is input with zin.
    Args:
        x,y,z: x, y, z coordinates
        zin: 3D array of z values that are mapped to the input x,y,z
            coordinates. Assumed to be a numpy masked array (its ``mask``
            attribute is used below) -- TODO confirm against callers.
        order: order of interpolation for map_coordinates. 1 for linear and 3
            for cubic. Default=1
        mode: behavior for edge points. Default is 'nearest'. Notes on the
            map_coordinates function: The "mode" kwarg here just controls how
            the boundaries are treated mode='nearest' is _not_ nearest neighbor
            interpolation, it just uses the value of the nearest cell if the
            point lies outside the grid. The default is to treat the values
            outside the grid as zero, which can cause some edge effects if
            you're interpolating points near the edge.
    Returns:
        * zi - Interpolated values
        * dt - Time required for interpolation
    """
    tic = time.time()
    # Shift of .5 is assuming that input x/y are on a staggered grid frame
    # (counting from the cell edge and across the cell) but that the z values
    # are at the cell center, or rho locations.
    # instead of filling var array mask with nan's, extrapolate out nearest
    # neighbor value. Distance is by number of cells not euclidean distance.
    # https://stackoverflow.com/questions/3662361/fill-in-missing-values-with-nearest-neighbour-in-python-numpy-masked-arrays
    ind = ndimage.distance_transform_edt(zin.mask, return_distances=False, return_indices=True)
    zi = ndimage.map_coordinates(zin[tuple(ind)].T, np.array([x.flatten()-0.5,
                                                              y.flatten()-0.5,
                                                              z.flatten()]),
                                 order=order, mode=mode,
                                 cval=cval).reshape(z.shape)
    # Need to retain nan's since are changed them to zeros here
    ind = np.isnan(z)
    zi[ind] = np.nan
    dt = time.time() - tic
    return zi, dt
def find_final(xp, yp, ind=-1):
"""
Loop through drifters and find final location of drifters within the
tracks arrays. This can be necessary because when drifters exit the
numerical domain, they are nan'ed out. default is to search for the final
non-nan location (-1), but can search for others instead, for example,
the first non-nan position, which is helpful if we are looking at the
flipped out |
compgeog/cgl | cgl/tests/test_kdtrees.py | Python | gpl-3.0 | 1,003 | 0.031904 | import sys
import sys
# Make the in-tree ``cgl`` package importable when run from cgl/tests/.
sys.path.append('../..')
from cgl.kdtree import *
from cgl.point import Point
# Build a kd-tree with the 'unbalanced' strategy and one with the default
# construction, then check that every input point can be found in both and
# compare tree depths.
data1 = [ (2,2), (0,5), (8,0), (9,8), (7,14), (13,12), (14,13) ]
points = [Point(d[0], d[1]) for d in data1]
t1 = kdtree(points, 'unbalanced')
t2 = kdtree(points)
print([t1.query_kdtree(p)[0] for p in points])
print([t2.query_kdtree(p)[0] for p in points])
print('Depth of t1:', t1.depth())
print('Depth of t2:', t2.depth())
# Orthogonal (rectangular) range query: x in [1, 9], y in [2, 9].
data1 = [ (2,2), (0,5), (8,0), (9,8), (7,14), (13,12), (14,13) ]
points = [Point(d[0], d[1]) for d in data1]
t1 = kdtree(points)
rect = [ [1, 9], [2, 9] ]
found = t1.range_query_orthogonal(rect)
print('Orthogonal:', found)
# Circular range query: all points within radius 5 of (5, 5).
data1 = [ (2,2), (0,5), (8,0), (9,8), (7,14), (13,12), (14,13) ]
points = [Point(d[0], d[1]) for d in data1]
p = Point(5,5)
t1 = kdtree(points)
found = t1.range_query_circular(p, 5)
print('Circular:', found)
# 3-nearest-neighbour queries from assorted anchor points, then draw t1.
print(t1.nearest_neighbor_query(Point(100, 100), 3))
print(t1.nearest_neighbor_query(p, 3))
print(t1.nearest_neighbor_query(Point(50, 50), 3))
t1.draw()
|
DictGet/ecce-homo | setup.py | Python | mit | 1,454 | 0.002751 | # -*- coding: utf-8 -*-
#! /usr/bin/env python
import os
import subprocess
from setuptools import setup
import six
here = os.path.dirname(os.path.abspath(__file__))
README = open(os.path.join(here, 'README.md')).read()
REQUIREMENTS = open(os.path.join(here, 'requirements/base.txt')).readlines()
def get_version_from_git_tag():
    """Return the most recent git tag, used as the package version string."""
    command = "git describe --abbrev=0 --tags"
    if six.PY2:
        # Python 2: check_output returns a str; capture stderr too so a
        # failing git call does not leak noise to the console.
        return subprocess.check_output(
            command, shell=True, stderr=subprocess.STDOUT).strip()
    # Python 3: getoutput returns the command's combined stdout/stderr as str.
    return subprocess.getoutput(command).strip()
# Console entry point: installs an ``eccehomo`` command that resolves to
# eccehomo.app:app.  NOTE(review): console_scripts targets are normally
# callables; confirm ``app`` is callable as an entry point.
scripts = {
    "console_scripts": [
        "eccehomo=eccehomo.app:app",
    ]
}
setup(
    name='eccehomo',
    # Version and download URL both mirror the latest git tag.
    version=get_version_from_git_tag(),
    description='Micro application to serve images with customized cropping',
    long_description=README,
    author='DictGet Team',
    author_email='dictget@gmail.com',
    url="https://github.com/dictget/ecce-homo",
    download_url="https://github.com/dictget/ecce-homo/archive/{}.tar.gz".format(
        get_version_from_git_tag()
    ),
    packages=['eccehomo'],
    test_suite="eccehomo.tests",
    entry_points=scripts,
    install_requires=REQUIREMENTS,
    classifiers=[
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
|
arthurljones/bikechurch-signin-python | src/controls/autowrapped_static_text.py | Python | bsd-3-clause | 2,223 | 0.042285 | # -*- coding: utf-8 -*-
import wx
from copy import copy
sWhitespace = ' \t\n'
def SplitAndKeep(string, splitchars = " \t\n"):
	"""Split *string* on any character in *splitchars*, keeping the
	delimiters as their own entries.

	Note: consecutive delimiters (or a leading delimiter) produce empty
	string entries, matching the historical behavior of this helper.
	"""
	pieces = []
	current = ""
	for ch in string:
		if ch in splitchars:
			pieces.append(current)
			pieces.append(ch)
			current = ""
		else:
			current += ch
	if current:
		pieces.append(current)
	return pieces
class AutowrappedStaticText(wx.StaticText):
	"""A StaticText-like widget which implements word wrapping.

	The unwrapped label is kept in ``self.label``; the wx-level label is
	overwritten with a newline-broken version on every resize.
	"""
	def __init__(self, *args, **kwargs):
		wx.StaticText.__init__(self, *args, **kwargs)
		# Remember the raw label and its word/whitespace pieces so the
		# text can be re-wrapped from scratch on each resize.
		self.label = super(AutowrappedStaticText, self).GetLabel()
		self.pieces = SplitAndKeep(self.label, sWhitespace)
		self.Bind(wx.EVT_SIZE, self.OnSize)
		# Width used for the last wrap (see the disabled check in Wrap()).
		self.lastWrap = None
		self.Wrap()
	def SetLabel(self, newLabel):
		"""Store the new label and recalculate the wrapped version."""
		self.label = newLabel
		self.pieces = SplitAndKeep(self.label, sWhitespace)
		self.Wrap()
	def GetLabel(self):
		"""Returns the label (unwrapped)."""
		return self.label
	def Wrap(self):
		"""Wraps the words in label."""
		# Wrap to the parent's virtual width, minus a small margin.
		maxWidth = self.GetParent().GetVirtualSizeTuple()[0] - 10
		#TODO: Fix this so that we're not wasting cycles, but so that it actually works
		#if self.lastWrap and self.lastWrap == maxWidth:
		#	return
		self.lastWrap = maxWidth
		pieces = copy(self.pieces)
		lines = []
		currentLine = []
		currentString = ""
		# Greedy wrap: accumulate pieces until the measured width of the
		# line would exceed maxWidth, or an explicit newline is seen.
		while len(pieces) > 0:
			nextPiece = pieces.pop(0)
			newString = currentString + nextPiece
			newWidth = self.GetTextExtent(newString)[0]
			currentPieceCount = len(currentLine)
			if (currentPieceCount > 0 and newWidth > maxWidth) or nextPiece == '\n':
				# Drop trailing whitespace before breaking the line.
				if currentPieceCount > 0 and currentLine[-1] in sWhitespace:
					currentLine = currentLine[:-1]
				# If the overflowing piece is itself whitespace, also skip
				# the piece that follows it.
				if nextPiece in sWhitespace:
					pieces = pieces[1:]
				currentLine.append('\n')
				lines.extend(currentLine)
				currentLine = [nextPiece]
				currentString = nextPiece
			else:
				currentString += nextPiece
				currentLine.append(nextPiece)
		lines.extend(currentLine)
		line = "".join(lines)
		super(AutowrappedStaticText, self).SetLabel(line)
		self.Refresh()
	def OnSize(self, event):
		# Re-wrap whenever the widget is resized.
		self.Wrap()
|
zielmicha/freeciv-android | lib/freeciv/client/actions.py | Python | gpl-2.0 | 10,344 | 0.004254 | # Copyright (C) 2011 Michal Zielinski (michal@zielinscy.org.pl)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from freeciv.client import _freeciv as freeciv
from freeciv import client
import city
import graphics
# Activity list: see freeciv-src/common/fc_types.h
# We're still using old values as ACTIVITY_ROAD and ACTIVITY_RAILROAD instead of ACTIVITY_GEN_ROAD
# but this is not a problem because these values are not sent to the server.
ACTIVITY_IDLE = 0
ACTIVITY_POLLUTION = 1
ACTIVITY_ROAD = 2
ACTIVITY_MINE = 3
ACTIVITY_IRRIGATE = 4
ACTIVITY_FORTIFIED = 5
ACTIVITY_FORTRESS = 6
ACTIVITY_SENTRY = 7
ACTIVITY_RAILROAD = 8
ACTIVITY_PILLAGE = 9
ACTIVITY_GOTO = 10
ACTIVITY_EXPLORE = 11
ACTIVITY_TRANSFORM = 12
# ACTIVITY_UNKNOWN = 13
ACTIVITY_AIRBASE = 14
ACTIVITY_FORTIFYING = 15
ACTIVITY_FALLOUT = 16
# ACTIVITY_PATROL_UNUSED = 17
ACTIVITY_MAGLEV = 17
ACTIVITY_BASE = 18
ACTIVITY_GEN_ROAD = 19
ACTIVITY_CONVERT = 20 # TODO: to implement
ACTIVITY_DISBAND = 1001
ACTIVITY_WAIT = 1002
ACTIVITY_DONE = 1003
ACTIVITY_ADD_TO_CITY = 1004
ACTIVITY_BUILD_CITY = 1005
ACTIVITY_PARADROP = 1007
ACTIVITY_CHANGE_HOMECITY = 1008
ACTIVITY_LOAD = 1009
ACTIVITY_UNLOAD = 1010
ACTIVITY_AIRLIFT = 1011
ACTIVITY_CENTER_ON_UNIT = 2003
ACTIVITY_UPGRADE = 2004
# freeciv-src/common/actions.h (+ 3000 to avoid collision with activities)
ACTION_MIN_VALUE=3000
ACTION_ESTABLISH_EMBASSY=3000
ACTION_SPY_INVESTIGATE_CITY=3001
ACTION_SPY_POISON=3002
ACTION_SPY_STEAL_GOLD=3003
ACTION_SPY_SABOTAGE_CITY=3004
ACTION_SPY_TARGETED_SABOTAGE_CITY=3005
ACTION_SPY_STEAL_TECH=3006
ACTION_SPY_TARGETED_STEAL_TECH=3007
ACTION_SPY_INCITE_CITY=3008
ACTIVITY_ESTABLISH_TRADE_ROUTE=3009
ACTION_MARKETPLACE=3010
ACTIVITY_HELP_BUILD_WONDER=3011
ACTION_SPY_BRIBE_UNIT=3012
ACTION_SPY_SABOTAGE_UNIT=3013
ACTION_MAX_VALUE=3013
def py_action_to_freeciv_action(py_action):
    """Translate an offset py-action id into the raw freeciv action id.

    Returns -1 for values outside the known action range.
    """
    if ACTION_MIN_VALUE <= py_action <= ACTION_MAX_VALUE:
        return py_action - ACTION_MIN_VALUE
    return -1
def freeciv_action_target_city_to_py_action(act_id):
    """Map a raw freeciv action id into the offset py-action space.

    Inverse of py_action_to_freeciv_action for valid ids.
    """
    return act_id + ACTION_MIN_VALUE
BASE_GUI_FORTRESS = 0
BASE_GUI_AIRBASE = 1
ROCO_ROAD = 0
ROCO_RAILROAD = 1
activities = dict( (k,v) for k,v in globals().items() if k.startswith('ACTIVITY_' ) )
activity_names = dict( (v, k) for k,v in activities.items() )
action_names = {
'done': 'no orders',
'fortyfing': 'fortify',
'explore': 'auto explore',
}
class Unit(object):
    def __init__(self, handle):
        # handle: opaque unit identifier understood by the freeciv C bindings.
        self.handle = handle
    def get_properties(self):
        """Return (unit_id, tile_id, city, terrain_name) from the C bindings."""
        return freeciv.func.get_unit_properties(self.handle)
    def iter_actions(self):
        """Yield the activity ids currently available to this unit."""
        # NOTE: ``id`` shadows the builtin within this method.
        id, tileid, city, terrain_name = self.get_properties()
        # Client-side actions that are always offered.
        yield ACTIVITY_CENTER_ON_UNIT
        yield ACTIVITY_GOTO
        yield ACTIVITY_DISBAND
        yield ACTIVITY_WAIT
        yield ACTIVITY_DONE
        if freeciv.func.unit_can_add_or_build_city(id):
            # Same capability check covers both joining and founding a city;
            # which one applies depends on whether a city is on this tile.
            if city:
                yield ACTIVITY_ADD_TO_CITY
            else:
                yield ACTIVITY_BUILD_CITY
        if freeciv.func.unit_can_help_build_wonder_here(id):
            yield ACTIVITY_HELP_BUILD_WONDER
        if freeciv.func.can_unit_paradrop(id):
            yield ACTIVITY_PARADROP
        if freeciv.func.can_unit_change_homecity(id):
            yield ACTIVITY_CHANGE_HOMECITY
        if freeciv.func.can_unit_upgrade(self.handle):
            yield ACTIVITY_UPGRADE
        if freeciv.func.get_possible_unit_base_name(id, BASE_GUI_AIRBASE):
            yield ACTIVITY_AIRBASE
        if freeciv.func.get_possible_unit_base_name(id, BASE_GUI_FORTRESS):
            yield ACTIVITY_FORTRESS
        # Road / railroad / maglev are mutually exclusive here: only the
        # first buildable kind in the chain is offered.
        if freeciv.func.can_unit_do_activity_road(id, ROCO_ROAD):
            yield ACTIVITY_ROAD
        elif freeciv.func.can_unit_do_activity_road(id, ROCO_RAILROAD):
            yield ACTIVITY_RAILROAD
        elif freeciv.func.can_unit_do_activity_any_road(id):
            yield ACTIVITY_MAGLEV
        if len(freeciv.func.get_airlift_dest_cities(self.handle)) > 0:
            yield ACTIVITY_AIRLIFT
        # Remaining activities share a single generic capability check.
        standard_activities = [
            ACTIVITY_IRRIGATE,
            ACTIVITY_MINE,
            ACTIVITY_TRANSFORM,
            ACTIVITY_FORTIFYING,
            ACTIVITY_POLLUTION,
            ACTIVITY_FALLOUT,
            ACTIVITY_SENTRY,
            ACTIVITY_PILLAGE,
            ACTIVITY_EXPLORE
        ]
        for a_ident in standard_activities:
            if freeciv.func.can_unit_do_activity(id, a_ident):
                yield a_ident
    def get_actions(self):
        """Return a list of (activity_id, display_name, time_cost) tuples."""
        return [ (ident, self.get_action_name(ident), self.get_action_time(ident)) for ident in self.iter_actions() ]
    def get_action_time(self, type):
        """Return the tile work time of activity ``type`` (0 for the
        client-side pseudo-activities).

        Note: the parameter name ``type`` shadows the builtin.
        """
        if type > 1000:
            # Ids above 1000 are pseudo-activities defined in this module,
            # not terrain work, so they have no tile activity time.
            return 0
        else:
            return freeciv.func.py_tile_activity_time(type, self.get_tile())
    def get_action_name(self, type):
        """Return a human-readable name for activity ``type``."""
        if type in [ACTIVITY_FORTRESS, ACTIVITY_AIRBASE]:
            # Base-building activities are named after the base itself.
            base_gui_type = BASE_GUI_FORTRESS
            if type == ACTIVITY_AIRBASE:
                base_gui_type = BASE_GUI_AIRBASE
            return freeciv.func.get_possible_unit_base_name(self.handle, base_gui_type)
        # Derive "ACTIVITY_FOO_BAR" -> "foo bar", with a few display
        # overrides from the module-level action_names table.
        def_name = activity_names[type][len('ACTIVITY_'):].lower().replace('_', ' ')
        if def_name in action_names:
            return action_names[def_name]
        return def_name
    def get_tile(self):
        """Return the id of the tile this unit stands on."""
        id, tileid, city, terrain_name = self.get_properties()
        return tileid
    def get_terrain_name(self):
        """Return the name of the terrain under this unit."""
        id, tileid, city, terrain_name = self.get_properties()
        return terrain_name
    def get_name(self):
        """Return the unit's display name from the C bindings."""
        return freeciv.func.get_unit_name(self.handle)
    def get_image_simple(self):
        """Return the unit's image as provided directly by the C bindings."""
        return freeciv.func.get_unit_image(self.handle)
    def get_image(self):
        """Render the unit into a fresh 91x61 surface and return it."""
        w = 91
        h = 61
        surf = graphics.create_surface(w, h)
        freeciv.func.py_put_unit(self.handle, surf)
        return surf
    def focus(self):
        """Reset the unit to idle and give it input focus."""
        freeciv.func.request_new_unit_activity(self.handle, ACTIVITY_IDLE)
        freeciv.func.unit_focus_set(self.handle)
def perform_activity(self, ident, target=0):
# Warning! Safe to use only when `self` is in focus.
id, tileid, city, terrain_name = self.get_properties()
if ident == ACTIVITY_GOTO:
freeciv.func.key_unit_goto()
elif ident == ACTIVITY_ROAD:
freeciv.func.key_unit_road()
elif ident == ACTIVITY_RAILROAD:
freeciv.func.key_unit_road()
elif ident == ACTIVITY_MAGLEV:
freeciv.func.key_unit_road()
elif ident == ACTIVITY_BUILD_CITY or ident == ACTIVITY_ADD_TO_CITY:
freeciv.func.key_unit_build_city()
#elif ident == ACTIVITY_:
# freeciv.func.key_unit_trade_route()
elif ident == ACTIVITY_IRRIGATE:
freeciv.func.key_unit_irrigate()
elif ident == ACTIVITY_MINE:
freeciv.func.key_unit_mine()
elif ident == ACTIVITY_TRANSFORM:
| freeciv.func.key_unit_transform()
elif ident == ACTIVITY_FORTIFYING:
freeciv.func.key_unit_fortify()
elif ident == ACTIVITY_POLLUTION:
freeciv.func.key_unit_pollution()
elif ident == ACTIVITY_PARADROP:
freeciv.func.key_unit | _paradrop()
elif ident == ACTIVITY_FALLOUT:
freeciv.func.key_unit_fallout()
elif ident == ACTIVITY_SENTRY:
freeciv.func.key_unit_sentry()
elif ident == ACTIVITY_PILLAGE:
freeciv.func.key_unit_pillage()
#elif ident == ACTIVITY_:
# freeciv.func.key_unit_homecity()
#elif ident == ACTIVITY_:
# freeciv.func.key_unit_unload_all()
elif ident == ACTIVITY_WAIT:
freeciv.func.key_unit_wait()
elif ident == ACTIVITY_DONE:
fr |
carlgao/lenga | images/lenny64-peon/usr/share/python-support/python-pygments/pygments/formatters/html.py | Python | mit | 22,285 | 0.00193 | # -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: 2006-2007 by Georg Brandl, Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys, os
import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['HtmlFormatter']
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    # '&' must be replaced first so entities added below are not re-escaped.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
def get_random_id():
    """Return a random id for javascript fields."""
    from random import random
    from time import time
    try:
        from hashlib import sha1 as sha
    except ImportError:
        # Very old Pythons predate hashlib; fall back to the legacy sha module.
        import sha
        sha = sha.new
    # NOTE(review): sha1() requires bytes on Python 3; this str argument is
    # Python-2-only -- confirm before porting this module.
    return sha('%s|%s' % (random(), time())).hexdigest()
def _get_ttype_class(tty | pe):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
*New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
wrapping table will have a CSS class of this string plus ``'table'``,
the default is accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file. *New in Pygments 0.6.*
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (defaul |
espenak/enkel | testsuite/wansgli/apprunner.py | Python | gpl-2.0 | 5,360 | 0.021642 | # This file is part of the Enkel web programming library.
#
# Copyright (C) 2007 Espen Angell Kristiansen (espen@wsgi.net)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from unittest import TestCase
from cStringIO import StringIO
from sys import exc_info
from enkel.wansgli.apprunner import run_app, AppError, Response
from enkel.wansgli.testhelpers import unit_case_suite, run_suite
HEAD = "HTTP/1.1 200 OK\r\ncontent-type: text/plain\r\n"
ERRHEAD = "HTTP/1.1 500 ERROR\r\ncontent-type: text/plain\r\n"
def only_header_app(env, start_response):
start_response("200 OK", [("Content-type", "text/plain")])
return list() # return empty list
def simple_app(env, start_response):
start_response("200 OK", [("Content-type", "text/plain")])
return ["Simple app"]
def using_write_app(env, start_response):
""" WSGI app for testing of the write function. """
write = start_response("200 OK", [("Content-type", "text/plain")])
write("Using write")
return []
def mixing_write_app(env, start_response):
""" WSGI app for tesing of mixing using the write function and iterator. """
write = start_response("200 OK", [("Content-type", "text/plain")])
write("Mixing write... ")
return [" ...and iterator."]
def double_response_error_app(env, start_response):
""" WSGI app for testing the situation when an error occurs | BEFORE
HTTP headers are sent to the browser and a traceback IS NOT supplied.
This should produce an error, and the same will happen if start_response
is called after HTTP headers are sent. """
start_response("200 OK", [("Content-type", "text/plain")])
start_response("500 ERROR", [("Content-type", "text/plain")])
return list() # return empty list
def double_response_ok_app(env, start_response):
""" WSGI app for testing the situation when an error occurs BEFORE
HTTP headers are sent to the brow | ser and a traceback is supplied.
Should work.
"""
start_response("200 OK", [("Content-type", "text/plain")])
try:
int("jeje")
except ValueError:
start_response("500 ERROR", [("Content-type", "text/plain")],
exc_info())
return list() # return empty list
class DoubleResponseErrInResponse(object):
""" WSGI app for testing the situation when an error occurs AFTER
HTTP headers are sent to the browser and a traceback is supplied.
Should re-raise the ValueError raised when "four" is sent to the
int function.
"""
def __init__(self, env, start_response):
start_response("200 OK", [("Content-type", "text/plain")])
self.it = [1, "2", 3, "four", 5, "6"].__iter__()
self.start_response = start_response
def __iter__(self):
for d in self.it:
try:
yield str(int(d)) # will fail on "four"
except ValueError:
self.start_response("500 ERROR",
[("Content-type", "text/plain")],
exc_info())
def noiter_app(env, start_response):
""" An app that does not return an iterator. This is an error,
and should raise AppError. """
start_response("200 OK", [("Content-type", "text/plain")])
return 10
def override_defaultheader(env, start_response):
""" An app that overrides the default HTTP header "server".
This should result in only one "server" header with the new value.
"""
start_response("200 OK", [
("Content-type", "text/plain"),
("Server", "xxx")
])
return []
class TestApprunner(TestCase):
""" Tests the entire apprunner module. """
def setUp(self):
self.buf = StringIO()
self.env = dict(SERVER_PROTOCOL="HTTP/1.1")
self.sr = Response(self.buf, self.env)
def test_only_header(self):
run_app(only_header_app, self.sr)
b = self.buf.getvalue()
self.assert_(b.startswith(HEAD))
def test_simple(self):
run_app(simple_app, self.sr)
b = self.buf.getvalue()
self.assert_(b.startswith(HEAD))
self.assert_(b.endswith("Simple app"))
def test_using_write(self):
run_app(using_write_app, self.sr)
b = self.buf.getvalue()
self.assert_(b.startswith(HEAD))
self.assert_(b.endswith("Using write"))
def test_mixing_write(self):
run_app(mixing_write_app, self.sr)
b = self.buf.getvalue()
self.assert_(b.startswith(HEAD))
self.assert_(b.endswith("Mixing write... ...and iterator."))
def test_double_response_error(self):
self.assertRaises(AppError, run_app,
double_response_error_app, self.sr)
def test_double_response_ok(self):
run_app(double_response_ok_app, self.sr)
b = self.buf.getvalue()
self.assert_(b.startswith(ERRHEAD))
def testDoubleResponseErrInResponse(self):
self.assertRaises(ValueError, run_app,
DoubleResponseErrInResponse, self.sr)
def test_noiter(self):
self.assertRaises(AppError, run_app,
noiter_app, self.sr)
def suite():
return unit_case_suite(TestApprunner)
if __name__ == '__main__':
run_suite(suite())
|
teonlamont/mne-python | mne/io/array/tests/test_array.py | Python | bsd-3-clause | 4,770 | 0 | # Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_equal)
import pytest
from mne import find_events, Epochs, pick_types, channels
from mne.io import read_raw_fif
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_i | nfo, _kind_dict
from mne.utils import requires_ | version, run_tests_if_main
matplotlib.use('Agg') # for testing don't use X server
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
def test_long_names():
"""Test long name support."""
info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error')
data = np.empty((2, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1']
info = create_info(['a' * 16] * 11, 1000., verbose='error')
data = np.empty((11, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)]
@pytest.mark.slowtest
@requires_version('scipy', '0.12')
def test_array_raw():
"""Test creating raw from array."""
import matplotlib.pyplot as plt
# creating
raw = read_raw_fif(fif_fname).crop(2, 5)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
# del raw
types = list()
for ci in range(101):
types.extend(('grad', 'grad', 'mag'))
types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
# wrong length
pytest.raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
pytest.raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert ('RawArray' in repr(raw2))
pytest.raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
kwargs = dict(fir_design='firwin', picks=picks)
raw_lp.filter(None, 4.0, h_trans_bandwidth=4., n_jobs=2, **kwargs)
raw_hp = raw2.copy()
raw_hp.filter(16.0, None, l_trans_bandwidth=4., n_jobs=2, **kwargs)
raw_bp = raw2.copy()
raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
raw_bs = raw2.copy()
raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
n_jobs=2, **kwargs)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 15
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd(tmax=np.inf, average=True, n_fft=1024, spatial_colors=False)
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert (len(events) > 2)
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
epochs.plot_drop_log()
epochs.plot()
evoked = epochs.average()
evoked.plot(time_unit='s')
assert_equal(evoked.nave, len(events) - 1)
plt.close('all')
# complex data
rng = np.random.RandomState(0)
data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
raw = RawArray(data, create_info(1, 1000., 'eeg'))
assert_allclose(raw._data, data)
# Using digital montage to give MNI electrode coordinates
n_elec = 10
ts_size = 10000
Fs = 512.
elec_labels = [str(i) for i in range(n_elec)]
elec_coords = np.random.randint(60, size=(n_elec, 3)).tolist()
electrode = np.random.rand(n_elec, ts_size)
dig_ch_pos = dict(zip(elec_labels, elec_coords))
mon = channels.DigMontage(dig_ch_pos=dig_ch_pos)
info = create_info(elec_labels, Fs, 'ecog', montage=mon)
raw = RawArray(electrode, info)
raw.plot_psd(average=False) # looking for inexistent layout
raw.plot_psd_topo()
run_tests_if_main()
|
c3nav/c3nav | src/c3nav/editor/urls.py | Python | apache-2.0 | 4,247 | 0.004238 | from django.apps import apps
from django.conf.urls import url
from c3nav.editor.views.account import change_password_view, login_view, logout_view, register_view
from c3nav.editor.views.changes import changeset_detail, changeset_edit, changeset_redirect
from c3nav.editor.views.edit import edit, graph_edit, level_detail, list_objects, main_index, sourceimage, space_detail
from c3nav.editor.views.users import user_detail, user_redirect
def add_editor_urls(model_name, parent_model_name=None, with_list=True, explicit_edit=False):
model = apps.get_model('mapdata', model_name)
model_name_plural = model._meta.default_related_name
if parent_model_name:
parent_model = apps.get_model('mapdata', parent_model_name)
parent_model_name_plural = parent_model._meta.default_related_name
prefix = (parent_model_name_plural+r'/(?P<'+parent_model_name.lower()+'>c?[0-9]+)/')+model_name_plural
else:
prefix = model_name_plural
name_prefix = 'editor.'+model_name_plural+'.'
kwargs = {'model': model_name, 'explicit_edit': explicit_edit}
explicit_edit = r'edit' if explicit_edit else ''
result = []
if with_list:
result.append(url(r'^'+prefix+r'/$', list_objects, name=name_prefix+'list', kwargs=kwargs))
result.extend([
url(r'^'+prefix+r'/(?P<pk>c?\d+)/'+explicit_edit+'$', edit, name=name_prefix+'edit', kwargs=kwargs),
url(r'^'+prefix+r'/create$', edit, name=name_prefix+'create', kwargs=kwargs),
])
return result
urlpatterns = [
url(r'^$', main_index, name='editor.index'),
url(r'^levels/(?P<pk>c?[0-9]+)/$', level_detail, name='editor.levels.detail'),
url(r'^levels/(?P<level>c?[0-9]+)/spaces/(?P<pk>c?[0-9]+)/$', space_detail, name='editor.spaces.detail'),
url(r'^levels/(?P<on_top_of>c?[0-9]+)/levels_on_top/create$', edit, name='editor.levels_on_top.create',
kwargs={'model': 'Level'}),
url(r'^levels/(?P<level>c?[0-9]+)/graph/$', graph_edit, name='editor.levels.graph'),
url(r'^spaces/(?P<space>c?[0-9]+)/graph/$', graph_edit, name='editor.spaces.graph'),
url(r'^changeset/$', changeset_redirect, name='editor.changesets.current'),
url(r'^changesets/(?P<pk>[0-9]+)/$', changeset_detail, name='editor.changesets.detail'),
url(r'^changesets/(?P<pk>[0-9]+)/edit$', changeset_edit, name='editor.changesets.edit'),
url(r'^sourceimage/(?P<filename>[^/]+)$', sourceimage, name='editor.sourceimage'),
url(r'^user/$', user_redirect, name='editor.users.redirect'),
url(r'^users/(?P<pk>[0-9]+)/$', user | _detail, name='editor.users.detail'),
url(r'^login$', login_view, name='editor.login'),
url(r'^logout$', logout_view, name='editor.logout'),
url(r'^register$', register_view, name='editor.register'),
url(r'^change_password$', change_password_view, name='editor.change_password'),
]
urlpatterns.extend(add_editor_urls('Level', with_list=False, explicit_edit=True))
urlpatterns.extend(add_editor_urls('LocationGroupCategory'))
urlp | atterns.extend(add_editor_urls('LocationGroup'))
urlpatterns.extend(add_editor_urls('DynamicLocation'))
urlpatterns.extend(add_editor_urls('WayType'))
urlpatterns.extend(add_editor_urls('AccessRestriction'))
urlpatterns.extend(add_editor_urls('AccessRestrictionGroup'))
urlpatterns.extend(add_editor_urls('Source'))
urlpatterns.extend(add_editor_urls('LabelSettings'))
urlpatterns.extend(add_editor_urls('Building', 'Level'))
urlpatterns.extend(add_editor_urls('Space', 'Level', explicit_edit=True))
urlpatterns.extend(add_editor_urls('Door', 'Level'))
urlpatterns.extend(add_editor_urls('Hole', 'Space'))
urlpatterns.extend(add_editor_urls('Area', 'Space'))
urlpatterns.extend(add_editor_urls('Stair', 'Space'))
urlpatterns.extend(add_editor_urls('Ramp', 'Space'))
urlpatterns.extend(add_editor_urls('Obstacle', 'Space'))
urlpatterns.extend(add_editor_urls('LineObstacle', 'Space'))
urlpatterns.extend(add_editor_urls('Column', 'Space'))
urlpatterns.extend(add_editor_urls('POI', 'Space'))
urlpatterns.extend(add_editor_urls('AltitudeMarker', 'Space'))
urlpatterns.extend(add_editor_urls('LeaveDescription', 'Space'))
urlpatterns.extend(add_editor_urls('CrossDescription', 'Space'))
urlpatterns.extend(add_editor_urls('WifiMeasurement', 'Space'))
|
Webcampak/v1.0 | src/bin/wpakRtsp.py | Python | gpl-2.0 | 2,214 | 0.019422 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Infracom & Eurotechnia (sup | port@webcampak.com)
# This file is part of the Webcampak project.
# Webcampak is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version. |
# Webcampak is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with Webcampak.
# If not, see http://www.gnu.org/licenses/
import os, sys, smtplib, datetime, tempfile, subprocess, datetime, shutil, time, ftplib
import getopt
import time
import smtplib
import zipfile
import socket
import urllib
import pwd
import locale
import gettext
#This class is used to capture pictures from RTSP stream
class Rtsp:
def __init__(self, c, cfgcurrentsource, g, debug, cfgnow, cmdType, FileManager):
#gettext.install('webcampak')
self.C = c
self.Cfgcurrentsource = cfgcurrentsource
self.G = g
self.Debug = debug
self.Cfgnow = cfgnow
self.Cfgdispdate = self.Cfgnow.strftime("%Y%m%d%H%M%S")
self.Cfgfilename = self.Cfgdispdate + ".jpg"
self.Cfgtmpdir = self.G.getConfig('cfgbasedir') + self.G.getConfig('cfgsourcesdir') + "source" + self.Cfgcurrentsource + "/" + self.G.getConfig('cfgtmpdir')
self.FileManager = FileManager
# Function: Capture
# Description; This function is used to capture a picture from a RTSP stream
# Return: True or False
def Capture(self):
self.Debug.Display(_("Catpure: RTSP: Starting capture process, URL: %(URL)s ") % {'URL': c.getConfig('cfgsourcewebfileurl')} )
Command = "cvlc -I dummy " + c.getConfig('cfgsourcewebfileurl') + " --run-time=2 --video-filter=scene --scene-path=" + self.Cfgtmpdir + " --scene-format=jpg --scene-ratio=1 --scene-prefix=capture vlc://quit"
os.system(Command)
return self.FileManager.CheckCapturedFile(self.Cfgtmpdir + "capture00001.jpg", self.Cfgtmpdir + self.Cfgfilename)
|
KSanthanam/rethinkdb | test/rdb_workloads/stress_workloads/x_stress_util.py | Python | agpl-3.0 | 3,021 | 0.004634 | import math, random, datetime
def perform_ignore_interrupt(f):
while True:
try:
return f()
except IOError as ex:
if ex.errno != errno.EINTR:
raise
# end_date should be of the format YYYY-MM-DD - default is today
# interval should be of the format NUMBER (day, month, year) - default is 1 month
# prob is the probability (0...1] of returning a more recent month (by power law)
class TimeDistribution:
def __init__(self, end_date, interval, prob=0.8):
self.prob = prob
if end_date is None:
self.end_date = datetime.date.today()
else:
(year, month, day) = end_date.split("-")
self.end_date = datetime.date(int(year), int(month), int(day))
if interval is None:
self.interval_type = "month"
self.interval_length = 1
else:
(length, self.interval_type) = interval.split(" ")
self.interval_length = int(length)
if self.interval_type not in ["day", "month", "year"]:
raise RuntimeError("unrecognized time interval: %s" % self.interval_type)
def shitty_power_law(self):
res = 1
r = random.random()
d_prob = self.prob
while r > d_prob:
res += 1
d_prob += (1 - d_prob) * self.prob
return res
def get(self):
delta = self.shitty_power_law() * self.interval_length
| if self.interval_type == "day":
start_date = self.end_date - datetime.timedelta(days=delta)
end_date = start_date + datetime.timedelta(days=self.interval_length)
elif self.interval_type == "month":
# | Subtract the delta from the start date
start_date = datetime.date(self.end_date.year, self.end_date.month, 1)
while start_date.month < delta:
delta -= 12
start_date = datetime.date(start_date.year - 1, start_date.month, start_date.day)
start_date = datetime.date(start_date.year, start_date.month - delta, start_date.day)
# Add the interval to the end date
end_date = start_date
interval = self.interval_length
while end_date.month + interval > 12:
interval -= 12
end_date = datetime.date(end_date.year + 1, end_date.month, end_date.day)
end_date = datetime.date(end_date.year, end_date.month + interval, end_date.day)
elif self.interval_type == "year":
start_date = datetime.date(self.end_date.year - delta, 1, 1)
end_date = datetime.date(start_date.year + self.interval_length, 1, 1)
return (start_date, end_date)
class Pareto:
def __init__(self, num_values, alpha=1.161):
self.num_values = num_values
self.exponent = alpha / (alpha - 1)
def get(self):
r = random.random()
f = 1 - math.pow(1 - r, self.exponent)
return min(self.num_values - 1, int(self.num_values * f))
|
fhs/pyhdf | pyhdf/HC.py | Python | mit | 1,928 | 0.010892 | # $Id: HC.py,v 1.2 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.1 2004/08/02 15:36:04 gosselin
# Initial revision
#
from . import hdfext as _C
class HC(object):
"""The HC class holds const | ants defining opening modes and data types.
File opening modes (flags ORed together)
CREATE 4 create file if it does not exist
READ 1 read-only mode
TRUNC 256 truncate if it exists
WRITE 2 read-write mode
Data types
CHAR 4 8-bit char
CHAR8 4 8-bit char
UCHAR 3 unsigned 8 | -bit integer (0 to 255)
UCHAR8 3 unsigned 8-bit integer (0 to 255)
INT8 20 signed 8-bit integer (-128 to 127)
UINT8 21 unsigned 8-bit integer (0 to 255)
INT16 23 signed 16-bit integer
UINT16 23 unsigned 16-bit integer
INT32 24 signed 32-bit integer
UINT32 25 unsigned 32-bit integer
FLOAT32 5 32-bit floating point
FLOAT64 6 64-bit floating point
Tags
DFTAG_NDG 720 dataset
DFTAG_VH 1962 vdata
DFTAG_VG 1965 vgroup
"""
CREATE = _C.DFACC_CREATE
READ = _C.DFACC_READ
TRUNC = 0x100 # specific to pyhdf
WRITE = _C.DFACC_WRITE
CHAR = _C.DFNT_CHAR8
CHAR8 = _C.DFNT_CHAR8
UCHAR = _C.DFNT_UCHAR8
UCHAR8 = _C.DFNT_UCHAR8
INT8 = _C.DFNT_INT8
UINT8 = _C.DFNT_UINT8
INT16 = _C.DFNT_INT16
UINT16 = _C.DFNT_UINT16
INT32 = _C.DFNT_INT32
UINT32 = _C.DFNT_UINT32
FLOAT32 = _C.DFNT_FLOAT32
FLOAT64 = _C.DFNT_FLOAT64
FULL_INTERLACE = 0
NO_INTERLACE =1
# NOTE:
# INT64 and UINT64 are not yet supported py pyhdf
DFTAG_NDG = _C.DFTAG_NDG
DFTAG_VH = _C.DFTAG_VH
DFTAG_VG = _C.DFTAG_VG
|
g8os/core0 | docs/_archive/examples/mongodb.py | Python | apache-2.0 | 3,162 | 0.004111 | #!/bin/python
from zeroos.core0.client import Client
import sys
import time
"""
This script expect you know the IP of the core0 and you can access it from the machine running this script.
an easy way to do it is to build the initramfs with a customr zerotier network id (https://github.com/g8os/initramfs/tree/0.10.0#customize-build)
At boot core0 will connect to the zerotier network and you can assing an IP to it.
"""
CORE0IP = "INSERT CORE0 IP HERE"
ZEROTIER = "INSERT ZEROTIER NETWORK ID HERE"
def main(init=False):
print("[+] connect to core0")
cl = Client(CORE0IP)
try:
cl.ping()
except Exception as e:
print("cannot connect to the core0: %s" % e)
return 1
print("[+] prepare data disks")
cl.system('mkdir -p /dev/mongodb_storage').get()
if init:
cl.btrfs.create('mongodb_storage', ['/dev/sda'])
disks = cl.disk.list().get('blockdevices', [])
if len(disks) < 1:
| print("[-] need at least one data disk available")
return
disks_by_name = {d['name']: d for d in disks}
if disks_by_name['sda']['mountpoint'] is None:
pri | nt("[+] mount disk")
cl.disk.mount('/dev/sda', '/dev/mongodb_storage', [''])
try:
print("[+] create container")
container_id = cl.container.create('https://stor.jumpscale.org/stor2/flist/ubuntu-g8os-flist/mongodb-g8os.flist',
mount={"/dev/mongodb_storage": "/mnt/data"},
zerotier=ZEROTIER).get()
print("[+] container created, ID: %s" % container_id)
except Exception as e:
print("[-] error during container creation: %s" % e)
return 1
container = cl.container.client(container_id)
print("[+] get zerotier ip")
container_ip = get_zerotier_ip(container)
print("[+] configure mongodb")
container.system("bash -c 'echo DAEMONUSER=\"root\" > /etc/default/mongodb'").get()
container.system("sed -i 's/dbpath.*/dbpath=\/mnt\/data/' /etc/mongodb.conf").get()
container.system("sed -i '/bind.*/d' /etc/mongodb.conf").get()
container.system("bash -c 'echo nounixsocket=true >> /etc/mongodb.conf'").get()
print("[+] starts mongod")
res = container.system('/etc/init.d/mongodb start').get()
print("[+] you can connect to mongodb at %s:27017" % container_ip)
def get_zerotier_ip(container):
i = 0
while i < 10:
addrs = container.info.nic()
ifaces = {a['name']: a for a in addrs}
for iface, info in ifaces.items():
if iface.startswith('zt'):
cidr = info['addrs'][0]['addr']
return cidr.split('/')[0]
time.sleep(2)
i += 1
raise TimeoutError("[-] couldn't get an ip on zerotier network")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='attach disks to core0')
parser.add_argument('--init', type=bool, default=False, const=True, required=False,
help='creation filesystem and subvolume', nargs='?')
args = parser.parse_args()
# print(args.init)
main(init=args.init)
|
jyapayne/PePy | __init__.py | Python | mit | 36 | 0 | __all__ | = ['pe']
from .pe | import *
|
PeterRochford/SkillMetrics | skill_metrics/plot_pattern_diagram_markers.py | Python | gpl-3.0 | 4,916 | 0.015663 | import matplotlib.pyplot as plt
import matplotlib.colors as clr
import matplotlib
import warnings
from skill_metrics import add_legend
def plot_pattern_diagram_markers(X,Y,option):
'''
Plots color markers on a pattern diagram.
Plots color markers on a target diagram according their (X,Y)
locations. The symbols and colors are chosen automatically with a
limit of 70 symbol & color combinations.
The color bar is titled using the content of option['titleColorBar']
(if non-empty string).
INPUTS:
x : x-coordinates of markers
y : y-coordinates of markers
z : z-coordinates of markers (used for color shading)
option : dictionary containing option values. (Refer to
GET_TARGET_DIAGRAM_OPTIONS function for more information.)
option['axismax'] : maximum for the X & Y values. Used to limit
maximum distance from origin to display markers
option['markerlabel'] : labels for markers
OUTPUTS:
None
Created on Nov 30, 2016
Revised on Jan 6, 2019
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
prochford@thesymplectic.com
'''
# Set face color transparency
alpha = option['alpha']
# Set font and marker size
fontSize = matplotlib.rcParams.get('font.size') - 2
markerSize = option['markersize']
if option['markerlegend'] == 'on':
# Check that marker labels have been provided
if option['markerlabel'] == '':
raise ValueError('No marker labels provided.')
# Plot markers of different color and shapes with labels
# displayed in a legend
# Define markers
kind = ['+','o','x','s','d','^','v','p','h','*']
colorm = ['b','r','g','c','m','y','k']
if len(X) > 70:
_disp('You must introduce new markers to plot more than 70 cases.')
_disp('The ''marker'' character array need to be extended inside the code.')
if len(X) <= len(kind):
# Define markers with specified color
marker = []
markercolor = []
for color in colorm:
for symbol in kind:
marker.append(symbol + option['markercolor'])
rgba = clr.to_rgb(option['markercolor']) + (alpha,)
markercolor.append(rgba)
else:
# Define markers and colors using predefined list
marker = []
markercolor = [] #Bug Fix: missing array initialization
for color in colorm:
for symbol in kind:
marker.append(symbol + color)
rgba = clr.to_rgb(color) + (alpha,)
markercolor.append(rgba)
# Plot markers at data points
limit = option['axismax']
hp = ()
markerlabel = []
for i, xval in enumerate(X):
if abs(X[i]) <= limit and abs(Y[i]) <= limit:
h = plt.plot(X[i],Y[i],marker[i], markersize = markerSize,
markerfacecolor = markercolor[i], |
markeredgecolor = marker[i][1],
markeredgewidth = 2)
hp += tuple(h)
markerlabel.append(option['markerlabel'][i])
# Add legend
if len(markerlabel) == 0:
warnings.warn('No markers with | in axis limit ranges.')
else:
add_legend(markerlabel, option, rgba, markerSize, fontSize, hp)
else:
# Plot markers as dots of a single color with accompanying labels
# and no legend
# Plot markers at data points
limit = option['axismax']
rgba = clr.to_rgb(option['markercolor']) + (alpha,)
for i,xval in enumerate(X):
if abs(X[i]) <= limit and abs(Y[i]) <= limit:
# Plot marker
marker = option['markersymbol']
plt.plot(X[i],Y[i],marker, markersize = markerSize,
markerfacecolor = rgba,
markeredgecolor = option['markercolor'])
# Check if marker labels provided
if type(option['markerlabel']) is list:
# Label marker
xtextpos = X[i]
ytextpos = Y[i]
plt.text(xtextpos,ytextpos,option['markerlabel'][i],
color = option['markerlabelcolor'],
verticalalignment = 'bottom',
horizontalalignment = 'right',
fontsize = fontSize)
# Add legend if labels provided as dictionary
markerlabel = option['markerlabel']
if type(markerlabel) is dict:
add_legend(markerlabel, option, rgba, markerSize, fontSize)
def _disp(text):
print(text)
|
jordanemedlock/psychtruths | temboo/core/Library/eBay/Trading/GetSellerTransactions.py | Python | apache-2.0 | 7,981 | 0.007267 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetSellerTransactions
# Retrieves order line item (transaction) information for the authenticated user only.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetSellerTransactions(Choreography):
    # Choreo wrapper for the eBay Trading "GetSellerTransactions" call; the
    # endpoint path is fixed in __init__ and inputs/results are handled by the
    # companion InputSet/ResultSet classes below.

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetSellerTransactions Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetSellerTransactions, self).__init__(temboo_session, '/Library/eBay/Trading/GetSellerTransactions')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return GetSellerTransactionsInputSet()

    def _make_result_set(self, result, path):
        # Internal factory: wraps a raw Choreo response in a typed ResultSet.
        return GetSellerTransactionsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal factory: handle for an in-flight execution of this Choreo.
        return GetSellerTransactionsChoreographyExecution(session, exec_id, path)
class GetSellerTransactionsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetSellerTransactions
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Every setter below forwards its value to InputSet._set_input() under the
    # matching eBay API parameter name; this class appears to be generated
    # SDK boilerplate, so keep edits mechanical.
    def set_DetailLevel(self, value):
        """
        Set the value of the DetailLevel input for this Choreo. ((optional, string) The detail level of the response. Valid values are: ItemReturnDescription and ReturnAll.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('DetailLevel', value)
    def set_EntriesPerPage(self, value):
        """
        Set the value of the EntriesPerPage input for this Choreo. ((optional, integer) The maximum number of records to return in the result.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('EntriesPerPage', value)
    def set_IncludeCodiceFiscale(self, value):
        """
        Set the value of the IncludeCodiceFiscale input for this Choreo. ((optional, string) When set to 'true', the buyer's Codice Fiscale number is returned in the response.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('IncludeCodiceFiscale', value)
    def set_IncludeContainingOrder(self, value):
        """
        Set the value of the IncludeContainingOrder input for this Choreo. ((optional, boolean) When set to true, the ContainingOrder container is returned in the response for each transaction node.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('IncludeContainingOrder', value)
    def set_IncludeFinalValueFee(self, value):
        """
        Set the value of the IncludeFinalValueFee input for this Choreo. ((optional, boolean) When set to true, the Final Value Fee (FVF) for all order line items is returned in the response.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('IncludeFinalValueFee', value)
    def set_InventoryTrackingMethod(self, value):
        """
        Set the value of the InventoryTrackingMethod input for this Choreo. ((optional, boolean) Filters the response to only include order line items for listings that match this InventoryTrackingMethod setting. Valid values are: ItemID and SKU.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('InventoryTrackingMethod', value)
    def set_ModTimeFrom(self, value):
        """
        Set the value of the ModTimeFrom input for this Choreo. ((optional, date) Used to filter by date range (e.g., 2013-02-08T00:00:00.000Z).)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('ModTimeFrom', value)
    def set_ModTimeTo(self, value):
        """
        Set the value of the ModTimeTo input for this Choreo. ((optional, date) Used to filter by date range (e.g., 2013-02-08T00:00:00.000Z).)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('ModTimeTo', value)
    def set_NumberOfDays(self, value):
        """
        Set the value of the NumberOfDays input for this Choreo. ((optional, integer) The number of days in the past to search for order line items.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('NumberOfDays', value)
    def set_PageNumber(self, value):
        """
        Set the value of the PageNumber input for this Choreo. ((optional, integer) Specifies the page number of the results to return.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('PageNumber', value)
    def set_Platform(self, value):
        """
        Set the value of the Platform input for this Choreo. ((optional, string) The name of the eBay co-branded site upon which the order line item was created. Valid values are: eBay, Express, Half, Shopping, or WorldOfGood.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('Platform', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('ResponseFormat', value)
    def set_SKU(self, value):
        """
        Set the value of the SKU input for this Choreo. ((optional, string) One or more seller SKUs to filter the result. Multiple SKUs can be provided in a comma-separated list.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('SKU', value)
    def set_SandboxMode(self, value):
        """
        Set the value of the SandboxMode input for this Choreo. ((conditional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('SandboxMode', value)
    def set_SiteID(self, value):
        """
        Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('SiteID', value)
    def set_UserToken(self, value):
        """
        Set the value of the UserToken input for this Choreo. ((required, string) A valid eBay Auth Token.)
        """
        super(GetSellerTransactionsInputSet, self)._set_input('UserToken', value)
class GetSellerTransactionsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetSellerTransactions Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a raw JSON string into Python objects.
        # NOTE(review): the parameter name shadows the builtin ``str``; kept
        # unchanged here to preserve the generated public signature.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
        """
        return self._output.get('Response', None)
class GetSellerTransactionsChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronous run of this Choreo; only
    # specializes which ResultSet class wraps the response.
    def _make_result_set(self, response, path):
        return GetSellerTransactionsResultSet(response, path)
|
vj-ug/gcloud-python | gcloud/bigquery/test_job.py | Python | apache-2.0 | 51,631 | 0.000019 | # pylint: disable=C0302
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_ConfigurationProperty(unittest2.TestCase):
    # Exercises the _ConfigurationProperty descriptor: get/set/delete must be
    # proxied onto the owner's ``_configuration`` object as ``_<name>``.

    def _getTargetClass(self):
        # Imported lazily so collection does not require gcloud at import time.
        from gcloud.bigquery.job import _ConfigurationProperty
        return _ConfigurationProperty

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_it(self):

        class Configuration(object):
            _attr = None

        class Wrapper(object):
            attr = self._makeOne('attr')

            def __init__(self):
                self._configuration = Configuration()

        self.assertEqual(Wrapper.attr.name, 'attr')

        wrapper = Wrapper()
        self.assertEqual(wrapper.attr, None)

        # Setting through the descriptor must store the identical object on
        # the configuration, and deleting must reset it to None.
        value = object()
        wrapper.attr = value
        self.assertTrue(wrapper.attr is value)
        self.assertTrue(wrapper._configuration._attr is value)

        del wrapper.attr
        self.assertEqual(wrapper.attr, None)
        self.assertEqual(wrapper._configuration._attr, None)
class Test_TypedProperty(unittest2.TestCase):
    # Exercises _TypedProperty: like _ConfigurationProperty, but assignment
    # of a value of the wrong type must raise ValueError.

    def _getTargetClass(self):
        from gcloud.bigquery.job import _TypedProperty
        return _TypedProperty

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_it(self):

        class Configuration(object):
            _attr = None

        class Wrapper(object):
            attr = self._makeOne('attr', int)

            def __init__(self):
                self._configuration = Configuration()

        wrapper = Wrapper()
        # Non-int assignment is rejected before touching the configuration.
        with self.assertRaises(ValueError):
            wrapper.attr = 'BOGUS'

        wrapper.attr = 42
        self.assertEqual(wrapper.attr, 42)
        self.assertEqual(wrapper._configuration._attr, 42)

        del wrapper.attr
        self.assertEqual(wrapper.attr, None)
        self.assertEqual(wrapper._configuration._attr, None)
class Test_EnumProperty(unittest2.TestCase):
    # Exercises _EnumProperty via a subclass: only values listed in the
    # subclass's ALLOWED tuple may be assigned.

    def _getTargetClass(self):
        from gcloud.bigquery.job import _EnumProperty
        return _EnumProperty

    def test_it(self):

        class Sub(self._getTargetClass()):
            ALLOWED = ('FOO', 'BAR', 'BAZ')

        class Configuration(object):
            _attr = None

        class Wrapper(object):
            attr = Sub('attr')

            def __init__(self):
                self._configuration = Configuration()

        wrapper = Wrapper()
        # Values outside ALLOWED are rejected with ValueError.
        with self.assertRaises(ValueError):
            wrapper.attr = 'BOGUS'

        wrapper.attr = 'FOO'
        self.assertEqual(wrapper.attr, 'FOO')
        self.assertEqual(wrapper._configuration._attr, 'FOO')

        del wrapper.attr
        self.assertEqual(wrapper.attr, None)
        self.assertEqual(wrapper._configuration._attr, None)
class _Base(object):
    # Shared fixture mixin for the job test cases below: builds a fake
    # BigQuery job resource dict and provides common property assertions.
    # Subclasses must define JOB_TYPE and _getTargetClass().
    PROJECT = 'project'
    SOURCE1 = 'http://example.com/source1.csv'
    # NOTE(review): 'datset_name' looks like a typo for 'dataset_name', but
    # the literal value is arbitrary for these tests, so it is kept as-is.
    DS_NAME = 'datset_name'
    TABLE_NAME = 'table_name'
    JOB_NAME = 'job_name'

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _setUpConstants(self):
        import datetime
        from gcloud._helpers import UTC

        # Fixed timestamp shared by creation/start times in the fake resource.
        self.WHEN_TS = 1437767599.006
        self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
            tzinfo=UTC)
        self.ETAG = 'ETAG'
        self.JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME)
        self.RESOURCE_URL = 'http://example.com/path/to/resource'
        self.USER_EMAIL = 'phred@example.com'

    def _makeResource(self, started=False, ended=False):
        # Build a minimal API-shaped job resource; timestamps are in
        # milliseconds, matching the BigQuery wire format.
        self._setUpConstants()
        resource = {
            'configuration': {
                self.JOB_TYPE: {
                },
            },
            'statistics': {
                'creationTime': self.WHEN_TS * 1000,
                self.JOB_TYPE: {
                }
            },
            'etag': self.ETAG,
            'id': self.JOB_ID,
            'jobReference': {
                'projectId': self.PROJECT,
                'jobId': self.JOB_NAME,
            },
            'selfLink': self.RESOURCE_URL,
            'user_email': self.USER_EMAIL,
        }

        if started or ended:
            resource['statistics']['startTime'] = self.WHEN_TS * 1000

        if ended:
            resource['statistics']['endTime'] = (self.WHEN_TS + 1000) * 1000

        return resource

    def _verifyInitialReadonlyProperties(self, job):
        # A freshly constructed job (no server resource yet) exposes None for
        # every server-derived read-only property.
        # root elements of resource
        self.assertEqual(job.etag, None)
        self.assertEqual(job.job_id, None)
        self.assertEqual(job.self_link, None)
        self.assertEqual(job.user_email, None)

        # derived from resource['statistics']
        self.assertEqual(job.created, None)
        self.assertEqual(job.started, None)
        self.assertEqual(job.ended, None)

        # derived from resource['status']
        self.assertEqual(job.error_result, None)
        self.assertEqual(job.errors, None)
        self.assertEqual(job.state, None)

    def _verifyReadonlyResourceProperties(self, job, resource):
        # After loading ``resource``, each read-only property must mirror the
        # corresponding field (or be None when the field is absent).
        from datetime import timedelta

        self.assertEqual(job.job_id, self.JOB_ID)

        statistics = resource.get('statistics', {})

        if 'creationTime' in statistics:
            self.assertEqual(job.created, self.WHEN)
        else:
            self.assertEqual(job.created, None)

        if 'startTime' in statistics:
            self.assertEqual(job.started, self.WHEN)
        else:
            self.assertEqual(job.started, None)

        if 'endTime' in statistics:
            self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
        else:
            self.assertEqual(job.ended, None)

        if 'etag' in resource:
            self.assertEqual(job.etag, self.ETAG)
        else:
            self.assertEqual(job.etag, None)

        if 'selfLink' in resource:
            self.assertEqual(job.self_link, self.RESOURCE_URL)
        else:
            self.assertEqual(job.self_link, None)

        if 'user_email' in resource:
            self.assertEqual(job.user_email, self.USER_EMAIL)
        else:
            self.assertEqual(job.user_email, None)
class TestLoadTableFromStorageJob(unittest2.TestCase, _Base):
JOB_TYPE = 'load'
def _getTargetClass(self):
from gcloud.bigquery.job import LoadTableFromStorageJob
return LoadTableFromStorageJob
def _setUpConstants(self):
super(TestLoadTableFromStorageJob, self)._setUpConstants()
self.INPUT_FILES = 2
self.INPUT_BYTES = 12345
self.OUTPUT_BYTES = 23456
self.OUTPUT_ROWS = 345
def _makeResource(self, started=False, ended=False):
resource = super(TestLoadTableFromStorageJob, self)._makeResource(
started, ended)
if ended:
resource['statistics']['load']['inputFiles'] = self.INPUT_FILES
resource['statistics']['load']['inputFileBytes'] = self.INPUT_BYTES
resource['statistics']['load']['outputBytes'] = self.OUTPUT_BYTES
resource['statistics']['load']['outputRows'] = self.OUTPUT_ROWS
return resource
def _verifyBooleanConfigProperties(self, job, config):
if 'allowJaggedRows' in config:
self.assertEqual(job.allow_jagged_rows,
config['allowJaggedRows'])
else:
self.assertTrue(job.allow_jagged_rows is None)
if 'allowQuotedNewlines' in config:
self.assertEqual(job.allow_quoted_newlines,
config['allowQuotedNewlines'])
else:
self.assertTrue(job.allow_quoted_newlines is None)
if 'ignoreUnkno |
janezhango/BigDataMachineLearning | py/testdir_release/c5/test_c5_KMeans_sphere_26GB.py | Python | apache-2.0 | 6,833 | 0.010098 | import unittest, time, sys, random, math, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_hosts, h2o_import as h2i, h2o_common
import socket
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"

# Toggle for the KMeans portion of the benchmark in releaseTest below.
DO_KMEANS = True

# assumes the cloud was built with CDH3? maybe doesn't matter as long as the file is there
# Truthy value selects the HDFS import path in the test; a falsy value would
# fall back to the local filesystem path.
FROM_HDFS = 'CDH3'
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
    """Benchmark/regression test: parse a ~26 GB synthetic sphere dataset and
    run KMeans (k=15) six times, cycling the initialization mode per trial,
    then compare the resulting centers against hard-coded expected values."""

    def test_c5_KMeans_sphere_26GB(self):
        h2o.beta_features = False
        # a kludge
        h2o.setup_benchmark_log()

        csvFilename = 'syn_sphere_gen.csv'
        # Known size of the input file, used only to report parse throughput.
        totalBytes = 183538602156
        if FROM_HDFS:
            importFolderPath = "datasets/kmeans_big"
            csvPathname = importFolderPath + '/' + csvFilename
        else:
            importFolderPath = "/home3/0xdiag/datasets/kmeans_big"
            csvPathname = importFolderPath + '/' + csvFilename

        # FIX! put right values in
        # will there be different expected for random vs the other inits?
        # Each tuple: (center coordinates, cluster size, within-cluster error);
        # see h2o_kmeans.compareResultsToExpected for how they are matched.
        expected = [
            ([0.0, -113.00566692375459, -89.99595447985321, -455.9970643424373, 4732.0, 49791778.0, 36800.0], 248846122, 1308149283316.2988) ,
            ([0.0, 1.0, 1.0, -525.0093818313685, 2015.001629398412, 25654042.00592703, 28304.0], 276924291, 1800760152555.98) ,
            ([0.0, 5.0, 2.0, 340.0, 1817.995920197288, 33970406.992053084, 31319.99486705394], 235089554, 375419158808.3253) ,
            ([0.0, 10.0, -72.00113070337981, -171.0198611715457, 4430.00952228909, 37007399.0, 29894.0], 166180630, 525423632323.6474) ,
            ([0.0, 11.0, 3.0, 578.0043558141306, 1483.0163188052604, 22865824.99639042, 5335.0], 167234179, 1845362026223.1094) ,
            ([0.0, 12.0, 3.0, 168.0, -4066.995950679284, 41077063.00269915, -47537.998050740985], 195420925, 197941282992.43475) ,
            ([0.0, 19.00092954923767, -10.999565572612255, 90.00028669073289, 1928.0, 39967190.0, 27202.0], 214401768, 11868360232.658035) ,
            ([0.0, 20.0, 0.0, 141.0, -3263.0030236302937, 6163210.990273981, 30712.99115201907], 258853406, 598863991074.3276) ,
            ([0.0, 21.0, 114.01584574295777, 242.99690338815898, 1674.0029079209912, 33089556.0, 36415.0], 190979054, 1505088759456.314) ,
            ([0.0, 25.0, 1.0, 614.0032787274755, -2275.9931284021022, -48473733.04122273, 47343.0], 87794427, 1124697008162.3955) ,
            ([0.0, 39.0, 3.0, 470.0, -3337.9880599007597, 28768057.98852736, 16716.003410920028], 78226988, 1151439441529.0215) ,
            ([0.0, 40.0, 1.0, 145.0, 950.9990795199593, 14602680.991458317, -14930.007919032574], 167273589, 693036940951.0249) ,
            ([0.0, 42.0, 4.0, 479.0, -3678.0033024834297, 8209673.001421165, 11767.998552236539], 148426180, 35942838893.32379) ,
            ([0.0, 48.0, 4.0, 71.0, -951.0035145455234, 49882273.00063991, -23336.998167498707], 157533313, 88431531357.62982) ,
            ([0.0, 147.00394564757505, 122.98729664236723, 311.0047920137008, 2320.0, 46602185.0, 11212.0], 118361306, 1111537045743.7646) ,
        ]

        # Successive rebinds: only the last assignment is effective; earlier
        # lines are kept as a record of logging combinations that were tried.
        benchmarkLogging = ['cpu','disk', 'network', 'iostats', 'jstack']
        benchmarkLogging = ['cpu','disk', 'network', 'iostats']
        # IOStatus can hang?
        benchmarkLogging = ['cpu', 'disk', 'network']
        benchmarkLogging = []

        for trial in range(6):
            # IMPORT**********************************************
            # since H2O deletes the source key, re-import every iteration.
            # PARSE ****************************************
            print "Parse starting: " + csvFilename
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            start = time.time()
            timeoutSecs = 2 * 3600
            kwargs = {}
            if FROM_HDFS:
                parseResult = h2i.import_parse(path=csvPathname, schema='hdfs', hex_key=hex_key,
                    timeoutSecs=timeoutSecs, pollTimeoutSecs=60, retryDelaySecs=2,
                    benchmarkLogging=benchmarkLogging, **kwargs)
            else:
                parseResult = h2i.import_parse(path=csvPathname, schema='local', hex_key=hex_key,
                    timeoutSecs=timeoutSecs, pollTimeoutSecs=60, retryDelaySecs=2,
                    benchmarkLogging=benchmarkLogging, **kwargs)

            elapsed = time.time() - start
            fileMBS = (totalBytes/1e6)/elapsed
            l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
                len(h2o.nodes), h2o.nodes[0].java_heap_GB, 'Parse', csvPathname, fileMBS, elapsed)
            print "\n"+l
            h2o.cloudPerfH2O.message(l)

            # KMeans ****************************************
            if not DO_KMEANS:
                continue
            print "col 0 is enum in " + csvFilename + " but KMeans should skip that automatically?? or no?"
            kwargs = {
                'k': 15,
                'max_iter': 3,
                'normalize': 1,
                'initialization': 'Furthest',
                'destination_key': 'junk.hex',
                # we get NaNs if whole col is NA
                'cols': 'C1, C2, C3, C4, C5, C6, C7',
                # reuse the same seed, to get deterministic results
                'seed': 265211114317615310,
            }

            # Cycle the initialization mode: PlusPlus / Furthest / default.
            if (trial%3)==0:
                kwargs['initialization'] = 'PlusPlus'
            elif (trial%3)==1:
                kwargs['initialization'] = 'Furthest'
            else:
                kwargs['initialization'] = None

            timeoutSecs = 4 * 3600
            params = kwargs
            paramsString = json.dumps(params)

            start = time.time()
            kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=timeoutSecs,
                    benchmarkLogging=benchmarkLogging, **kwargs)
            elapsed = time.time() - start
            print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
            print "kmeans result:", h2o.dump_json(kmeans)

            l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:s} for {:.2f} secs {:s}' .format(
                len(h2o.nodes), h2o.nodes[0].java_heap_GB, "KMeans", "trial "+str(trial), csvFilename, elapsed, paramsString)
            print l
            h2o.cloudPerfH2O.message(l)

            (centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
            # all are multipliers of expected tuple value
            allowedDelta = (0.01, 0.01, 0.01)
            h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, allowError=True, trial=trial)

            # Free keys on all nodes so repeated trials do not exhaust memory.
            h2i.delete_keys_at_all_nodes()
if __name__ == '__main__':
    # Delegate to h2o's standard test entry point when run as a script.
    h2o.unit_main()
|
Razi91/BiblioTeKa | books/models.py | Python | gpl-2.0 | 3,611 | 0.003877 | from django.db import models
import uuid as uuid
__author__ = 'jkonieczny'
class Genre(models.Model):
    # Lookup table of book genres; referenced many-to-many from BookTitle.
    name = models.CharField(max_length=32)

    def __str__(self):
        return self.name
class Publisher(models.Model):
    # Publishing house; referenced by BookEdition.
    name = models.CharField(max_length=128)

    def __str__(self):
        return self.name
class Pricing(models.Model):
    """Rental price plan: an up-front fee plus a per-week fee.

    A plan is active from ``added`` until ``closed`` is set; ``closed`` left
    as NULL means the plan is still in force.
    """
    name = models.CharField(max_length=32)
    initial = models.DecimalField(max_digits=5, decimal_places=2)
    per_week = models.DecimalField(max_digits=5, decimal_places=2)
    added = models.DateTimeField(auto_now_add=True)
    closed = models.DateTimeField(null=True, blank=True)

    @property
    def enabled(self):
        # Bug fix: a plan is enabled while it has NOT been closed.  The
        # original returned ``self.closed is not None``, i.e. it reported
        # True exactly for closed plans (inverted).
        return self.closed is None

    def __str__(self):
        return self.name
class Author(models.Model):
    # Author's full display name.
    name = models.CharField(max_length=256)
    # Date of birth.
    born = models.DateField()

    def __str__(self):
        return self.name
class BookTitleManager(models.Manager):
    """Manager annotating each BookTitle row with two availability counts.

    ``available_entities``: copies in acceptable condition (quality > 0);
    ``free_entities``: those that are additionally not on loan.  Computed as
    correlated subqueries via ``extra`` because they join the entity and loan
    tables.
    """

    def get_queryset(self):
        # Bug fix: the original implicit string concatenation produced
        # "...quality > 0and (SELECT..." (no space before "and"), which is
        # not valid SQL on all backends.
        return super(BookTitleManager, self).get_queryset()\
            .extra(select={
            'free_entities': "SELECT count(*) FROM books_bookentity "
                             "WHERE books_bookentity.title_id = books_booktitle.id and books_bookentity.quality > 0"
                             " and (SELECT count(*) FROM user_loan where book_id = books_bookentity.id) = 0",
            'available_entities': "SELECT count(*) FROM books_bookentity "
                                  "WHERE books_bookentity.title_id = books_booktitle.id and books_bookentity.quality > 0"
        })
class BookTitle(models.Model):
    # ``objects`` is the plain default manager; ``data`` adds the raw-SQL
    # availability counts from BookTitleManager.
    objects = models.Manager()
    data = BookTitleManager()

    release = models.DateTimeField()
    title = models.CharField(max_length=256)
    genre = models.ManyToManyField('Genre')
    author = models.ManyToManyField('Author')

    def __str__(self):
        return self.title
class BookEditionManager(models.Manager):
    # NOTE(review): this definition is immediately shadowed by the second
    # ``BookEditionManager`` below, so it is dead code; consider deleting it
    # or giving it a distinct name.
    def get_queryset(self):
        # Bug fix: ``select_related`` must be called on a queryset; the
        # original called it directly on the ``super()`` proxy without first
        # calling ``get_queryset()``.
        return super(BookEditionManager, self).get_queryset().select_related('title')
class BookEditionManager(models.Manager):
    """Manager annotating each BookEdition row with copy-availability counts
    (same scheme as BookTitleManager, but correlated on the edition)."""

    def get_queryset(self):
        # Bug fix: the original implicit string concatenation produced
        # "...quality > 0and (SELECT..." (no space before "and"), which is
        # not valid SQL on all backends.
        return super(BookEditionManager, self).get_queryset()\
            .extra(select={
            'free_entities': "SELECT count(*) FROM books_bookentity "
                             "WHERE books_bookentity.book_id = books_bookedition.id and books_bookentity.quality > 0"
                             " and (SELECT count(*) FROM user_loan where book_id = books_bookentity.id) = 0",
            'available_entities': "SELECT count(*) FROM books_bookentity "
                                  "WHERE books_bookentity.book_id = books_bookedition.id and books_bookentity.quality > 0"
        })
class BookEdition(models.Model):
    objects = BookEditionManager()

    # A concrete edition of a title released by a publisher; the physical
    # copies are reachable via the ``entities`` reverse relation.
    title = models.ForeignKey('BookTitle', null=False, related_name='publications')
    publisher = models.ForeignKey('Publisher')
    release = models.DateTimeField()
    isbn = models.CharField(max_length=18)
    # Optional rental plan for this edition.
    pricing = models.ForeignKey('Pricing', null=True, blank=True)

    def __str__(self):
        return "[{0}] {1}".format(self.publisher.name, self.title.title)
class BookEntityManager(models.Manager):
    """Manager that eagerly joins each copy's edition and title rows."""

    def get_queryset(self):
        # Bug fix: ``select_related`` must be called on a queryset; the
        # original called it directly on the ``super()`` proxy without first
        # calling ``get_queryset()``, which fails when the manager is used.
        # (Note: BookEntity currently has this manager commented out.)
        return super(BookEntityManager, self).get_queryset().select_related('book', 'title')
class BookEntity(models.Model):
    #objects = BookEntityManager()
    # A physical copy of an edition, identified by ``uuid``.
    book = models.ForeignKey('BookEdition', related_name='entities')
    uuid = models.CharField(max_length=40)
    # Direct link to the title (also reachable via ``book.title``).
    title = models.ForeignKey('BookTitle')
    # Condition score; > 0 is treated as usable by the managers above.
    quality = models.IntegerField()
|
rmp91/jitd | java/stats.py | Python | apache-2.0 | 893 | 0.00224 | import numpy as np
file | = "/home/rishabh/IdeaProjects/jitd/java/benchmark_20151213_224704_freqWrites/ben | chmark_scatter_1024m.txt"
# data = np.genfromtxt(file, dtype=(int, float, None), delimiter=',', names=['x', 'y', 'z'],)
data = np.genfromtxt(file, dtype=None, delimiter=',', names=['x', 'y', 'z'], )
write = 0
read = 0
max_read = 0
max_write = 0
read_count = 0
write_count = 0
total_time = 0
for each in data:
if each[2] == 'WRITE':
write += float(each[1])
max_write = max(max_write, each[1])
write_count += 1
else:
read += float(each[1])
max_read = max(max_read, each[1])
read_count += 1
total_time += each[1]
print "average_write: " + str(write / write_count)
print "average_read: " + str(read / read_count)
print "max_write: " + str(max_write)
print "max_read: " + str(max_read)
print "total_time: " + str(total_time)
|
addition-it-solutions/project-all | addons/point_of_sale/wizard/pos_session_opening.py | Python | agpl-3.0 | 5,032 | 0.006955 |
from open | erp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.point_of_sale.point_of_sale import p | os_session
class pos_session_opening(osv.osv_memory):
    """Transient wizard for opening (or resuming) a Point of Sale session
    for the current user and a selected POS configuration."""
    _name = 'pos.session.opening'

    _columns = {
        'pos_config_id' : fields.many2one('pos.config', string='Point of Sale', required=True),
        'pos_session_id' : fields.many2one('pos.session', string='PoS Session'),
        # Fields below mirror the selected session via related fields so the
        # wizard form can display its state/name/user read-only.
        'pos_state' : fields.related('pos_session_id', 'state',
                                     type='selection',
                                     selection=pos_session.POS_SESSION_STATE,
                                     string='Session Status', readonly=True),
        'pos_state_str' : fields.char('Status', readonly=True),
        'show_config' : fields.boolean('Show Config', readonly=True),
        'pos_session_name' : fields.related('pos_session_id', 'name', string="Session Name",
                                            type='char', size=64, readonly=True),
        'pos_session_username' : fields.related('pos_session_id', 'user_id', 'name',
                                                type='char', size=64, readonly=True)
    }

    def open_ui(self, cr, uid, ids, context=None):
        # Redirect the browser to the POS web client for the wizard's session.
        data = self.browse(cr, uid, ids[0], context=context)
        context = dict(context or {})
        context['active_id'] = data.pos_session_id.id
        return {
            'type' : 'ir.actions.act_url',
            'url':   '/pos/web/',
            'target': 'self',
        }

    def open_existing_session_cb_close(self, cr, uid, ids, context=None):
        # Push the selected session through the cashbox-control workflow
        # signal, then continue with the normal open flow.
        wizard = self.browse(cr, uid, ids[0], context=context)
        wizard.pos_session_id.signal_workflow('cashbox_control')
        return self.open_session_cb(cr, uid, ids, context)

    def open_session_cb(self, cr, uid, ids, context=None):
        # Open the wizard's session, creating a new one first if none is set.
        assert len(ids) == 1, "you can open only one session at a time"

        proxy = self.pool.get('pos.session')
        wizard = self.browse(cr, uid, ids[0], context=context)
        if not wizard.pos_session_id:
            values = {
                'user_id' : uid,
                'config_id' : wizard.pos_config_id.id,
            }
            session_id = proxy.create(cr, uid, values, context=context)
            s = proxy.browse(cr, uid, session_id, context=context)
            if s.state=='opened':
                # Already usable: jump straight into the POS UI.
                return self.open_ui(cr, uid, ids, context=context)
            return self._open_session(session_id)
        return self._open_session(wizard.pos_session_id.id)

    def open_existing_session_cb(self, cr, uid, ids, context=None):
        # Show the form view of the already-selected session.
        assert len(ids) == 1
        wizard = self.browse(cr, uid, ids[0], context=context)
        return self._open_session(wizard.pos_session_id.id)

    def _open_session(self, session_id):
        # Window action opening the pos.session form for ``session_id``.
        return {
            'name': _('Session'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'pos.session',
            'res_id': session_id,
            'view_id': False,
            'type': 'ir.actions.act_window',
        }

    def on_change_config(self, cr, uid, ids, config_id, context=None):
        # Onchange handler: reflect the current (non-closed) session of the
        # selected config for this user into the wizard's related fields.
        result = {
            'pos_session_id': False,
            'pos_state': False,
            'pos_state_str' : '',
            'pos_session_username' : False,
            'pos_session_name' : False,
        }
        if not config_id:
            return {'value' : result}

        proxy = self.pool.get('pos.session')
        session_ids = proxy.search(cr, uid, [
            ('state', '!=', 'closed'),
            ('config_id', '=', config_id),
            ('user_id', '=', uid),
        ], context=context)
        if session_ids:
            session = proxy.browse(cr, uid, session_ids[0], context=context)
            result['pos_state'] = str(session.state)
            result['pos_state_str'] = dict(pos_session.POS_SESSION_STATE).get(session.state, '')
            result['pos_session_id'] = session.id
            result['pos_session_name'] = session.name
            result['pos_session_username'] = session.user_id.name

        return {'value' : result}

    def default_get(self, cr, uid, fieldnames, context=None):
        # Default POS config: the config of the user's open session, else the
        # user's preferred config, else the first config found.  show_config
        # is set when more than one active config exists.
        so = self.pool.get('pos.session')
        session_ids = so.search(cr, uid, [('state','<>','closed'), ('user_id','=',uid)], context=context)
        if session_ids:
            result = so.browse(cr, uid, session_ids[0], context=context).config_id.id
        else:
            current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
            result = current_user.pos_config and current_user.pos_config.id or False
        if not result:
            r = self.pool.get('pos.config').search(cr, uid, [], context=context)
            result = r and r[0] or False

        count = self.pool.get('pos.config').search_count(cr, uid, [('state', '=', 'active')], context=context)
        show_config = bool(count > 1)
        return {
            'pos_config_id' : result,
            'show_config' : show_config,
        }
|
eldarion/django-chunked-uploads | chunked_uploads/utils.py | Python | bsd-3-clause | 531 | 0 | from chunked_uploads.models import Upload, Chunk
def handle_upload(uploaded_file, who):
"""
Expects to handle an individual file from request.FILES[name]
Returns an Upload object
"""
u = Upload.objects.create(
user=who,
filename= | uploaded_file.name,
filesize=uploaded_file.size
)
Chunk.objects.create(
upload=u,
chunk=uploaded_file,
chunk_size=uploaded_file.size
)
u.state = Upload.STATE_COMPLETE
u.save()
u.sti | tch_chunks()
return u
|
cysuncn/python | study/test/TestRe.py | Python | gpl-3.0 | 655 | 0 | # -*- coding: utf-8 -*-
import re
test = '用户输入的字符串'
if re.match(r'用户', test):
print('ok')
else:
print('failed')
print('a b c'.split(' '))
print(re.split(r'\s*' | , 'a b c'))
print(re.split(r'[\s\,\;]+', 'a,b;; c d'))
m = re.match(r'^(\d{3})-(\d{3,8})$', '010-12345')
print(m.group(1))
m = re.match(r'^(\S+)@(\S+.com)$', 'cysuncn@126.com')
print(m.group(2))
print(m.groups())
# <Tom Paris> tom@voyager .org
re_mail = re.compile(r'<(\S+)\s+(\S+)>\s+(\S+)@(\S+.org)')
print(re_mail.match('<Tom Paris> tom@voyager.org').groups())
str = 'abcbacba'
# non-greed match
re = re.compile(r'a.*?a', re.S)
print(re | .match(str).group())
|
google/cloud-berg | examples/mpi_helloworld.py | Python | apache-2.0 | 855 | 0.00117 | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you | may not use this file except in complia | nce with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parallel Hello World
"""
from mpi4py import MPI
import sys
# Size and rank of this process in the global communicator, plus host name.
size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
name = MPI.Get_processor_name()

# One greeting line per MPI process, written directly to stdout.
sys.stdout.write(
    "Hello, World! I am process %d of %d on %s.\n"
    % (rank, size, name))
ging/fiware-chef_validator | doc/source/conf.py | Python | apache-2.0 | 9,737 | 0.000103 | # -*- coding: utf-8 -*-
#
# fi-ware-chef_validator documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 15 12:28:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that no | t all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentati | on root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fi-ware-chef_validator'
copyright = u'2015, Pedro Verdugo'
author = u'Pedro Verdugo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from chef_validator.tests.unit.version import version_info
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# Set the default Pygments syntax
highlight_language = 'python'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fi-ware-chef_validatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fi-ware-chef_validator.tex',
u'fi-ware-chef\\_validator Documentation',
u'Pedro Verdugo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings |
jotes/pontoon | pontoon/sync/formats/__init__.py | Python | bsd-3-clause | 2,225 | 0.000899 | """
Parsing resource files.
See base.py for the ParsedResource base class.
"""
import os.path
from pontoon.sync.formats import (
compare_locales,
ftl,
json_extensions,
lang,
po,
silme,
xliff,
)
# To add support for a new resource format, add an entry to this dict
# where the key is the extension you're parsing and the value is a
# callable returning an instance of a ParsedResource subclass.
SUPPORTED_FORMAT_PARSERS = {
".dtd": silme.parse_dtd,
".ftl": ftl.parse,
".inc": silme.parse_inc,
".ini": silme.parse_ini,
".json": json_extensions.parse,
".lang": lang.parse,
".po": po.parse,
".pot": po.parse,
".properties": silme.parse_properties,
".xlf": xliff.parse,
".xliff": xliff.parse,
".xml": compare_locales.parse,
}
def are_compatible_formats(extension_a, extension_b):
"""
Return True if given file extensions belong to the same file format.
We test that by comparing parsers used by each file extenion.
Note that some formats (e.g. Gettext, XLIFF) use multiple file extensions.
"""
try:
return (
SUPPORTED_FORMAT_PARSERS[extension_a]
== SUPPORTED_FORMAT_PARSERS[extension_b]
)
# File extension not supported
except KeyError:
return False
def parse(path, source_path=None, locale=None):
"""
Parse the resource file at the given path and return a
ParsedResource with its translations.
:param path:
Path to the resource file to parse.
:param source_path:
Path to the corresponding resource file in the source directory
for the resource we're parsing. Asymmetric formats need this
for saving. Defaults to None.
:param locale:
Object | which describes information about currently processed locale.
Some of the formats require information about things like e.g. plural form.
"""
root, extension = os.path.splitext(path)
if extension in SUPPORTED_FORMAT_PARSERS:
return SUPPORTED_FORMAT_PARSERS[extension](
path, source_path=source_path, locale=locale
)
else:
raise ValueError("T | ranslation format {0} is not supported.".format(extension))
|
seatme/nucleon.amqp | tests/test_limits.py | Python | lgpl-3.0 | 726 | 0 | import random
from gevent.pool import Group
from base import TestCase, declares_queues
from nucleon.amqp import Connection
from nucleon.amqp.spec import FrameQueueDeclareOk
qname = 'test%s' % (random.random(),)
queues = [qname + '.%s' % (i,) for i in xrange(100)]
class TestLimits(TestCase):
@declares_queues(*queues)
def test_parallel_queue_declare(self):
conn = Connection(self.amqp_url)
conn.connect()
channel = conn.allocate_channel()
def declare(name):
return channel.queue_declare | (queue=name)
g = Group()
res = g.map(declare, queues)
assert len(res) == len(queues)
assert all(isinstance(r, FrameQueueDeclareOk) for | r in res)
|
rmgorman/django-guardian | guardian/testapp/tests/test_tags.py | Python | bsd-2-clause | 6,449 | 0.002636 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import NotUserNorGroup
from guardian.models import UserObjectPermission, GroupObjectPermission
User = get_user_model()
def render(template, context):
"""
Returns rendered ``template`` with ``context``, which are given as string
and dict respectively.
"""
t = Template(template)
return t.render(Context(context))
class GetObjPermsTagTest(TestCase):
def setUp(self):
self.ctype = ContentType.objects.create(
model='bar', app_label='fake-for-guardian-tests')
self.group = Group.objects.create(name='jackGroup')
self.user = User.objects.create(username='jack')
self.user.groups.add(self.group)
def test_wrong_formats(self):
| wrong_formats = (
'{% get_obj_perms user | for contenttype as obj_perms %}', # no quotes
'{% get_obj_perms user for contenttype as \'obj_perms" %}', # wrong quotes
'{% get_obj_perms user for contenttype as \'obj_perms" %}', # wrong quotes
'{% get_obj_perms user for contenttype as obj_perms" %}', # wrong quotes
'{% get_obj_perms user for contenttype as obj_perms\' %}', # wrong quotes
'{% get_obj_perms user for contenttype as %}', # no context_var
'{% get_obj_perms for contenttype as "obj_perms" %}', # no user/group
'{% get_obj_perms user contenttype as "obj_perms" %}', # no "for" bit
'{% get_obj_perms user for contenttype "obj_perms" %}', # no "as" bit
'{% get_obj_perms user for as "obj_perms" %}', # no object
)
context = {'user': User.get_anonymous(), 'contenttype': self.ctype}
for wrong in wrong_formats:
fullwrong = '{% load guardian_tags %}' + wrong
try:
render(fullwrong, context)
self.fail("Used wrong get_obj_perms tag format: \n\n\t%s\n\n "
"but TemplateSyntaxError have not been raised" % wrong)
except TemplateSyntaxError:
pass
def test_obj_none(self):
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms user for object as "obj_perms" %}{{ perms }}',
))
context = {'user': User.get_anonymous(), 'object': None}
output = render(template, context)
self.assertEqual(output, '')
def test_anonymous_user(self):
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms user for contenttype as "obj_perms" %}{{ perms }}',
))
context = {'user': AnonymousUser(), 'contenttype': self.ctype}
anon_output = render(template, context)
context = {'user': User.get_anonymous(), 'contenttype': self.ctype}
real_anon_user_output = render(template, context)
self.assertEqual(anon_output, real_anon_user_output)
def test_wrong_user_or_group(self):
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms some_obj for contenttype as "obj_perms" %}',
))
context = {'some_obj': ContentType(), 'contenttype': self.ctype}
# This test would raise TemplateSyntaxError instead of NotUserNorGroup
# if the template option 'debug' is set to True during tests.
template_options = settings.TEMPLATES[0]['OPTIONS']
tmp = template_options.get('debug', False)
template_options['debug'] = False
self.assertRaises(NotUserNorGroup, render, template, context)
template_options['debug'] = tmp
def test_superuser(self):
user = User.objects.create(username='superuser', is_superuser=True)
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms user for contenttype as "obj_perms" %}',
'{{ obj_perms|join:" " }}',
))
context = {'user': user, 'contenttype': self.ctype}
output = render(template, context)
for perm in ('add_contenttype', 'change_contenttype', 'delete_contenttype'):
self.assertTrue(perm in output)
def test_user(self):
UserObjectPermission.objects.assign_perm("change_contenttype", self.user,
self.ctype)
GroupObjectPermission.objects.assign_perm("delete_contenttype", self.group,
self.ctype)
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms user for contenttype as "obj_perms" %}',
'{{ obj_perms|join:" " }}',
))
context = {'user': self.user, 'contenttype': self.ctype}
output = render(template, context)
self.assertEqual(
set(output.split(' ')),
set('change_contenttype delete_contenttype'.split(' ')))
def test_group(self):
GroupObjectPermission.objects.assign_perm("delete_contenttype", self.group,
self.ctype)
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms group for contenttype as "obj_perms" %}',
'{{ obj_perms|join:" " }}',
))
context = {'group': self.group, 'contenttype': self.ctype}
output = render(template, context)
self.assertEqual(output, 'delete_contenttype')
def test_checker(self):
GroupObjectPermission.objects.assign_perm("delete_contenttype", self.group,
self.ctype)
checker = ObjectPermissionChecker(self.user)
checker.prefetch_perms(Group.objects.all())
template = ''.join((
'{% load guardian_tags %}',
'{% get_obj_perms group for contenttype as "obj_perms" checker %}',
'{{ obj_perms|join:" " }}',
))
context = {'group': self.group, 'contenttype': self.ctype, 'checker': checker}
output = render(template, context)
self.assertEqual(output, 'delete_contenttype')
|
openbmc/openbmc-test-automation | lib/boot_data.py | Python | apache-2.0 | 15,236 | 0.003544 | #!/usr/bin/env python3
r"""
This module has functions to support various data structures such as the boot_table, valid_boot_list and
boot_results_table.
"""
import os
import tempfile
import json
import glob
from tally_sheet import *
from robot.libraries.BuiltIn import BuiltIn
try:
from robot.utils import DotDict
except ImportError:
import collections
import gen_print as gp
import gen_valid as gv
import gen_misc as gm
import gen_cmd as gc
import var_funcs as vf
# The code base directory will be one level up from the directory containing this module.
code_base_dir_path = os.path.dirname(os.path.dirname(__file__)) + os.sep
redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
def create_boot_table(file_path=None,
os_host=""):
r"""
Read the boot table JSON file, convert it to an object and return it.
Note that if the user is running without a global OS_HOST robot variable specified, this function will
remove all of the "os_" start and end state requirements from the JSON data.
Description of argument(s):
file_path The path to the boot_table file. If this value is not specified, it will
be obtained from the "BOOT_TABLE_PATH" environment variable, if set.
Otherwise, it will default to "data/boot_table.json". If this value is a
relative path, this function will use the code_base_dir_path as the base
directory (see definition above).
os_host The host name or IP address of the host associated with the machine being
tested. If the user is running without an OS_HOST (i.e. if this argument
is blank), we remove os starting and ending state requirements from the
boot entries.
"""
if file_path is None:
if redfish_support_trans_state and platform_arch_type != "x86":
file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_redfish.json')
elif platform_arch_type == "x86":
file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_x86.json')
else:
file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table.json')
if not file_path.startswith("/"):
file_path = code_base_dir_path + file_path
# Pre-process the file by removing blank lines and comment lines.
temp = tempfile.NamedTemporaryFile()
temp_file_path = temp.name
cmd_buf = "egrep -v '^[ ]*$|^[ ]*#' " + file_path + " > " + temp_file_path
gc.cmd_fnc_u(cmd_buf, quiet=1)
boot_file = open(temp_file_path)
boot_table = json.load(boot_file, object_hook=DotDict)
# If the user is running without an OS_HOST, we remove os starting and ending state requirements from
# the boot entries.
if os_host == "":
for boot in boot_table:
state_keys = ['start', 'end']
for state_key in state_keys:
for sub_state in list(boot_table[boot][state_key]):
if sub_state.startswith("os_"):
boot_table[boot][state_key].pop(sub_state, None)
# For every boot_type we should have a corresponding mfg mode boot type.
enhanced_boot_table = DotDict()
for key, value in boot_table.items():
enhanced_boot_table[key] = value
enhanced_boot_table[key + " (mfg)"] = value
return enhanced_boot_table
def create_valid_boot_list(boot_table):
r"""
Return a list of all of the valid boot types (e.g. ['REST Power On', 'REST Power Off', ...]).
Description of argument(s):
boot_table A boot table such as is returned by the create_boot_table function.
"""
return list(boot_table.keys())
def read_boot_lists(dir_path="data/boot_lists/"):
r"""
Read the contents of all the boot lists files found in the given boot lists directory and return
dictionary of the lists.
Boot lists are simply files containing a boot test name on each line. These files are useful for
categorizing and organizing boot tests. For example, there may be a "Power_on" list, a "Power_off" list,
etc.
The names of the boot list files will be the keys to the top level dictionary. Each dictionary entry is
a list of all the boot tests found in the corresponding file.
Here is an abbreviated look at the resulting boot_lists dictionary.
boot_lists:
boot_lists[All]:
boot_lists[All][0]: REST Power On
boot_lists[All][1]: REST Power Off
...
boot_lists[Code_update]:
boot_lists[Code_update][0]: BMC oob hpm
boot_lists[Code_update][1]: BMC ib hpm
...
Description of argument(s):
dir_path The path to the directory containing the boot list files. If this value
is a relative path, this function will use the code_base_dir_path as the
base directory (see definition above).
"""
if not dir_path.startswith("/"):
# Dir path is relative.
dir_path = code_base_dir_path + dir_path
# Get a list of all file names in the directory.
boot_file_names = os.listdir(dir_path)
boot_lists = DotDict()
for boot_category in boot_file_names:
file_path = gm.which(dir_path + boot_category)
boot_list = gm.file_to_list(file_path, newlines=0, comments=0, trim=1)
boot_lists[boot_category] = boot_list
return boot_lists
def valid_boot_list(boot_list,
valid_boot_types):
r"""
Verify that each entry in boot_list is a supported boot test.
Description of argument(s):
boot_list An array (i.e. list) of boot test types (e.g. "REST Po | wer On").
valid_boot_types A list of valid boot types such as that returned by
create_valid_boot_list.
"""
for boot_name in boot_list:
boot_name = boot_name.strip(" ")
error_message = gv.valid_value(boot_name,
valid_values=valid_boot_types,
| var_name="boot_name")
if error_message != "":
BuiltIn().fail(gp.sprint_error(error_message))
class boot_results:
r"""
This class defines a boot_results table.
"""
def __init__(self,
boot_table,
boot_pass=0,
boot_fail=0,
obj_name='boot_results'):
r"""
Initialize the boot results object.
Description of argument(s):
boot_table Boot table object (see definition above). The boot table contains all of
the valid boot test types. It can be created with the create_boot_table
function.
boot_pass An initial boot_pass value. This program may be called as part of a
larger test suite. As such there may already have been some successful
boot tests that we need to keep track of.
boot_fail An initial boot_fail value. This program may be called as part of a
larger test suite. As such there may already have been some unsuccessful
boot tests that we need to keep track of.
obj_name The name of this object.
"""
# Store the method parms as class data.
self.__obj_name = obj_name
self.__initial_boot_pass = boot_pass
self.__i |
j12y/predixpy | predix/data/eventhub/client.py | Python | bsd-3-clause | 7,048 | 0.002838 | import json
import logging
import os
import threading
import time
import grpc
import predix.config
import predix.service
from predix.data.eventhub import Health_pb2_grpc
from predix.data.eventhub import Health_pb2
from predix.data.eventhub.publisher import PublisherConfig, Publisher
from predix.data.eventhub.subscriber import Subscriber
class EventHubException(Exception):
def __init__(self, str):
Exception.__init__(self, str)
class Eventhub(object):
"""
Client library for working with the Eventhub Service
Provide it with the id, and the config you need for each thing
if that feature is not required then leave config as None
"""
def __init__(self,
publish_config=None,
subscribe_config=None,
):
# initialize the publisher and subscriber
# only build shared grpc channel if required
self._ws = None
self._channel = None
self._run_health_checker = True
if publish_config is not None:
# make the channel
if publish_config.protocol == PublisherConfig.Protocol.GRPC:
self._init_channel()
self.publisher = Publisher(eventhub_client=self, channel=self._channel, config=publish_config)
if subscribe_config is not None:
if self._channel is None:
self._init_channel()
self.subscriber = Subscriber(self, channel=self._channel, config=subscribe_config)
def shutdown(self):
"""
Shutdown the client, shutdown the sub clients and stop the health checker
:return: None
"""
self._run_health_checker = False
if self.publisher is not None:
self.publisher.shutdown()
if self.subscriber is not None:
self.subscriber.shutdown()
def _get_host(self):
if 'VCAP_SERVICES' in os.environ:
services = json.loads(os.getenv('VCAP_SERVICES'))
host = services['predix-event-hub'][0]['credentials']['publish']['protocol_details']['uri']
return host[:host.index(':')]
return self.get_service_env_value('host')
def _get_grpc_port(self):
if 'VCAP_SERVICES' in os.environ:
services = json.loads(os.getenv('VCAP_SERVICES'))
host = services['predix-event-hub'][0]['credentials']['publish']['protocol_details']['uri']
return host[host.index(':')+1:]
return self.get_service_env_value('port')
def get_service_env_value(self, key):
"""
Get a env variable as defined by the service admin
:param key: the base of the key to use
:return: the env if it exists
"""
service_key = predix.config.get_env_key(self, key)
value = os.environ[service_key]
if not value:
raise ValueError("%s env unset" % key)
return value
def _init_channel(self):
"""
build the grpc channel used for both publisher and subscriber
:return: None
"""
host = self._get_host()
port = self._get_grpc_port()
if 'TLS_PEM_FILE' in os.environ:
with open(os.environ['TLS_PEM_FILE'], mode='rb') as f: # b is important -> binary
file_content = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=file_content)
else:
credentials = grpc.ssl_channel_credentials()
self._channel = grpc.secure_channel(host + ":" + port, credentials=credentials)
self._init_health_checker()
def _init_health_checker(self):
"""
start the health checker stub and start a thread to ping it every 30 seconds
:return: None
"""
stub = Health_pb2_grpc.HealthStub(channel=self._channel)
self._health_check = stub.Check
health_check_thread = threading.Thread(target=self._health_check_thread)
health_check_thread.daemon = True
health_check_thread.start()
def _health_check_thread(self):
"""
Health checker thread that pings the service every 30 seconds
:return: None
"""
while self._run_health_checker:
response = self._health_check(Health_pb2.HealthCheckRequest(service='predix-event-hub.grpc.health'))
logging.debug('received health check: ' + str(response))
time.sleep(30)
return
class GrpcManager:
"""
Class for managing GRPC calls by turing the generators grpc uses into function calls
This allows the sdk to man in the middle the messages
"""
def __init__(self, stub_call, on_msg_callback, metadata, tx_stream=True, initial_message=None):
"""
:param stub_call: the call on the grpc stub to build the generator on
:param on_msg_callback: the callback to pass any received functions on
:param metadata: metadata to attach to the stub call
"""
self._tx_stream = tx_stream
self._stub_call = stub_call
self._on_msg_callback = on_msg_callback
self._metadata = metadata
self._initial_message = initial_message
self._grpc_rx_thread = threading.Thread(target=self._grpc_rx_receiver)
self._grpc_rx_thread.daemon = True
self._grpc_rx_thread.start()
self._grpc_tx_queue = []
self._run_generator = True
time.sleep(1)
def send_message(self, tx_message):
"""
Add a message onto the tx queue to be sent on the stub
:param tx_message:
:return: None
"""
self._grpc_tx_queue.append(tx_message)
def _grpc_rx_receiver(self):
"""
Blocking Function that opens the stubs generator and pass any messages onto the callback
:return: None
"""
logging.debug("grpc rx stream metadata: " + str(self._metadata))
if self._tx_stream:
if self._initial_message is not None:
self.send_message(self._initial_message)
msgs = self._stub_call(request_iterator=self._grpc_tx_generator(), metadata=self._metadata)
else:
msgs = self._stub_call(self._initial_message, metadata=self._metadata)
for m in msgs:
self._on_msg_callback(m)
def stop_generator(self):
"""
Call this to close the generator
:return:
"""
logging.debug('stopping generator')
self._run_generator = Fal | se
def _grpc_tx_generator(self):
"""
the generator taking and messages added to the grpc_tx_queue
and yield them to grpc
:return: grpc messages
"""
while self._run_generator:
while | len(self._grpc_tx_queue) != 0:
yield self._grpc_tx_queue.pop(0)
return
|
nextgis/dhi | general/download_modis_data.py | Python | mit | 6,762 | 0.006655 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
#
# download_modis_data.py
# ---------------------------------------------------------
# Download MODIS data
# More: http://github.com/nextgis/dhi
#
# Usage:
# download_modis_data.py [-h] [-c CREATE_HDF_FOLDER] year numslices url output_folder
# where:
# -h show this help message and exit
# year year
# numslices number of time periods per year
# url base url where to download from
# output_folder where to store downloaded file
# create_hdf_folder if "yes", hdf subfolder will be created for each date, else hdf folder will be created for the whole session
# Example:
# python download_modis_data.py 2003 92 http://e4ftl01.cr.usgs.gov/MOTA/MCD15A3.005/ x:\MCD15A3\2003\ no
#
# Copyright (C) 2015 Maxim Dubinin (sim@gis-lab.info)
#
# This source is f | ree software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
#******************************************************************************
import sys
import os
import calendar
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('year', help='Year', type=int)
parser.add_argument('numslices', help='Number of time periods per year', type=int)
parser.add_argument('url', help='Base url where to download from')
parser.add_argument('output_folder', help='Output folder with HDFs')
parser.add_argument('-c','--create_hdf_folder', help='If "yes", hdf subfolder will be created for each date, else hdf folder will be created for the whole session')
args = parser.parse_args()
import getpass
u = raw_input("Username: ")
p = getpass.getpass(prompt='Password: ')
#Download data
def download(date):
print("Downloading started: " + date)
cmd = 'wget --user ' + u + ' --password ' + p + ' ' + args.url + '/' + date + '/ --quiet --recursive --level=1 --accept=hdf --no-directories'
print cmd
os.system(cmd)
print("Downloading completed: " + date)
def sanitize():
if not args.output_folder.endswith('\\'): args.output_folder = args.output_folder + '\\'
return args.output_folder
if __name__ == '__main__':
#od = sanitize()
od = args.output_folder
numslices = args.numslices
if numslices == 46:
# dates = '07.04'
# dates_leap = '07.04'
dates = '01.01,01.09,01.17,01.25,02.02,02.10,02.18,02.26,03.06,03.14,03.22,03.30,04.07,04.15,04.23,05.01,05.09,05.17,05.25,06.02,06.10,06.17,06.26,07.04,07.12,07.20,07.28,08.05,08.13,08.21,08.29,09.06,09.14,09.22,09.30,10.08,10.16,10.24,11.01,11.09,11.17,11.25,12.03,12.11,12.19,12.27'
dates_leap = '01.01,01.09,01.17,01.25,02.02,02.10,02.18,02.26,03.05,03.13,03.21,03.29,04.06,04.14,04.22,04.30,05.08,05.16,05.24,06.01,06.09,06.16,06.25,07.03,07.11,07.19,07.29,08.04,08.12,08.20,08.28,09.05,09.13,09.21,09.29,10.07,10.15,10.23,10.31,11.08,11.16,11.24,12.02,12.10,12.18,12.26'
# dates = '01.01,01.09,01.17,01.25,02.02,02.10,02.18,02.26,03.06,03.14,03.22,03.30,04.07,04.15,04.23,05.01,05.09,05.17,05.25,06.02,06.10,06.18,06.26,07.04,07.12,07.20,07.28,08.05,08.13,08.21,08.29,09.06,09.14,09.22,09.30,10.08,10.16,10.24,11.01,11.09,11.17,11.25,12.03,12.11,12.19,12.27'
# dates_leap = '01.01,01.09,01.17,01.25,02.02,02.10,02.18,02.26,03.05,03.13,03.21,03.29,04.06,04.14,04.22,04.30,05.08,05.16,05.24,06.01,06.09,06.17,06.25,07.03,07.11,07.19,07.27,08.04,08.12,08.20,08.28,09.05,09.13,09.21,09.29,10.07,10.15,10.23,10.31,11.08,11.16,11.24,12.02,12.10,12.18,12.26'
elif numslices == 23:
dates = '01.01,01.17,02.02,02.18,03.06,03.22,04.07,04.23,05.09,05.25,06.10,06.26,07.12,07.28,08.13,08.29,09.14,09.30,10.16,11.01,11.17,12.03,12.19'
dates_leap = '01.01,01.17,02.02,02.18,03.05,03.21,04.06,04.22,05.08,05.24,06.09,06.25,07.11,07.27,08.12,08.28,09.13,09.29,10.15,10.31,11.16,12.02,12.18'
elif numslices == 12:
dates = '01.01,02.01,03.01,04.01,05.01,06.01,07.01,08.01,09.01,10.01,11.01,12.01'
dates_leap = dates
elif numslices == 92:
dates = '01.01,01.05,01.09,01.13,01.17,01.21,01.25,01.29,02.02,02.06,02.10,02.14,02.18,02.22,02.26,03.02,03.06,03.10,03.14,03.18,03.22,03.26,03.30,04.03,04.07,04.11,04.15,04.19,04.23,04.27,05.01,05.05,05.09,05.13,05.17,05.21,05.25,05.29,06.02,06.06,06.10,06.14,06.18,06.22,06.26,06.30,07.04,07.08,07.12,07.16,07.20,07.24,07.28,08.01,08.05,08.09,08.13,08.17,08.21,08.25,08.29,09.02,09.06,09.10,09.14,09.18,09.22,09.26,09.30,10.04,10.08,10.12,10.16,10.20,10.24,10.28,11.01,11.05,11.09,11.13,11.17,11.21,11.25,11.29,12.03,12.07,12.11,12.15,12.19,12.23,12.27,12.31'
dates_leap = '01.01,01.05,01.09,01.13,01.17,01.21,01.25,01.29,02.02,02.06,02.10,02.14,02.18,02.22,02.26,03.01,03.05,03.09,03.13,03.17,03.21,03.25,03.29,04.02,04.06,04.10,04.14,04.18,04.22,04.26,04.30,05.04,05.08,05.12,05.16,05.20,05.24,05.28,06.01,06.05,06.09,06.13,06.17,06.21,06.25,06.29,07.03,07.07,07.11,07.15,07.19,07.23,07.27,07.31,08.04,08.08,08.12,08.16,08.20,08.24,08.28,09.01,09.05,09.09,09.13,09.17,09.21,09.25,09.29,10.03,10.07,10.11,10.15,10.19,10.23,10.27,10.31,11.04,11.08,11.12,11.16,11.20,11.24,11.28,12.02,12.06,12.10,12.14,12.18,12.22,12.26,12.30 '
else:
print('Can\'t assign dates intervals')
sys.exit(1)
    # Leap years use the shifted composite dates computed above.
    if calendar.isleap(args.year): dates = dates_leap
    # Make sure the session output folder exists and work from inside it.
    if not os.path.exists(od): os.mkdir(od)
    os.chdir(od)
    if not args.create_hdf_folder:
        # One shared 'hdf' folder for the whole session.
        if not os.path.exists('hdf'): os.mkdir("hdf")
        os.chdir("hdf")
    for date in dates.split(','):
        # Prefix with the year to match the server's folder naming, e.g. '2003.01.09'.
        date = str(args.year) + '.' + date
        #        if date in '2015.12.27':
        if not os.path.exists(date): os.mkdir(date)
        os.chdir(date)
        if args.create_hdf_folder:
            # Per-date 'hdf' subfolder requested on the command line.
            if not os.path.exists('hdf'): os.mkdir("hdf")
            os.chdir("hdf")
        download(date)
        # Step back up to the session root before handling the next date.
        if args.create_hdf_folder:
            os.chdir('../..')
        else:
            os.chdir('../')
|
rogerthat-platform/rogerthat-gig-g8 | src/gig/__init__.py | Python | bsd-2-clause | 1,334 | 0.00075 | # Copyright (c) 2016, Rogerthat
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
JuniorJPDJ/pyChomikBox | ChomikBox/PartFile.py | Python | lgpl-3.0 | 1,429 | 0.0014 | import io
import os
def total_len(o):
    """Best-effort length (items or bytes) of *o*.

    Tries, in order: ``len()``, a ``len`` attribute, the size of the
    backing file descriptor, an in-memory buffer's ``getvalue()``, and
    finally seeking to the end of the stream.  Returns None when nothing
    works.  Adapted from requests_toolbelt.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            # e.g. BytesIO: has fileno() but no real descriptor.
            pass
        else:
            return os.fstat(fileno).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())

    # Last resort: measure by seeking to the end and restoring the position.
    try:
        current_pos = o.tell()
        length = o.seek(0, 2)
        o.seek(current_pos, 0)
        return length
    except IOError:
        pass
class PartFile(io.IOBase):
    """File-like view exposing only the tail of *file*, starting at *start*.

    Offsets are translated so that position 0 of the PartFile maps to byte
    *start* of the underlying file.  Unknown attributes (read, close, ...)
    are delegated to the wrapped file object.
    """

    def __init__(self, file, start):
        assert hasattr(file, 'read') and hasattr(file, 'tell') and hasattr(file, 'seek')
        assert isinstance(start, int)
        self.file, self.start = file, start
        self.total_len = total_len(file)   # length of the whole underlying file
        self.len = self.total_len - start  # length of the exposed tail
        io.IOBase.__init__(self)
        try:
            # Position the underlying file at the start of the view.
            self.seek(0)
        except (IOError, OSError, ValueError):
            # Non-seekable streams remain usable for sequential reads.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass

    def seek(self, offset, whence=0):
        """Seek relative to the part; returns the new part-relative offset."""
        if whence == 0:
            # Absolute seeks are shifted by the view's start offset.
            tell = self.file.seek(offset + self.start, 0)
        else:
            tell = self.file.seek(offset, whence)
        return tell - self.start

    def tell(self):
        """Current position relative to the start of the part."""
        return self.file.tell() - self.start

    def __getattr__(self, item):
        # Delegate everything else to the wrapped file.
        return getattr(self.file, item)
t-peters/meetup-doc | Running Scripts from Command Line/sys_arg.py | Python | mpl-2.0 | 26 | 0.038462 | import | sys
print sys.arg | v |
globalwordnet/OMW | scripts/load-ili-kinds.py | Python | mit | 1,029 | 0.006803 | #!/usr/bin/python3
import sqlite3
import sys

# It takes one argument: the name of the new database.
# sys.argv always contains at least the script name, so the database file
# lives at index 1 -- require a length of at least 2 (the original tested
# `< 1`, which can never be true, and then crashed with IndexError).
if len(sys.argv) < 2:
    sys.stderr.write('You need to give the name of the ILI DB\n')
    sys.exit(1)
dbfile = sys.argv[1]

################################################################
# CONNECT TO DB
################################################################
con = sqlite3.connect(dbfile)
c = con.cursor()

################################################################
# USER
################################################################
u = "ili_load-kinds.py"

################################################################
# INSERT KIND DATA (CODES AND NAMES)
################################################################
# Parameterized statements: values are bound by sqlite3, not interpolated.
c.execute("""INSERT INTO kind (id, kind, u)
             VALUES (?,?,?)""", [1, 'concept', u])

c.execute("""INSERT INTO kind (id, kind, u)
             VALUES (?,?,?)""", [2, 'instance', u])

con.commit()
con.close()
sys.stderr.write('Loaded KIND data in (%s)\n' % (dbfile))
|
freedesktop-unofficial-mirror/libqmi | utils/qmidb/qmidb.py | Python | gpl-2.0 | 1,448 | 0.000691 | #!/usr/bin/env python
# -*- Mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2011 - 2012 Red Hat, Inc.
#
import sys
import Entities
import Enums
import Fields
import Structs
if len(sys.argv) > 2:
print "Usage: qmi | db.py <path to Entity.txt>"
sys.exit(1)
path = ""
if len(sys.argv) == 2:
path = sys.argv[1] + "/"
enums = Enums.Enums(path)
entities = Entities.Entities(path)
fields = Fields.Fields(path)
structs = Structs.Structs(path)
structs.validate(fields)
entities.validate(structs)
print '/* GENERATED CODE. DO NOT EDIT. */'
print '\ntypedef uint8 bool;\n'
enums.emit()
print '\n\n'
structs_used = entities.emit(fields, structs, enums)
# emit structs that weren't associated with an entity
structs.emit_unused(structs_used, fields, enums)
|
EdibleEd/vacbooru | VAB_massdownload.py | Python | bsd-2-clause | 1,305 | 0.005364 | import urllib2
import urllib
import os, sys
from bs4 import *
argv = sys.argv[1:]
begin = int(argv[0])
count = int(argv[1] | )
for i in range(begin, begin+count):
try:
url = 'http://danbooru.donmai.us/posts/' + str(i)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
html = response.read()
soup = BeautifulSoup(html)
relURL = soup.select('#image')[0]['src'].split('/data/')[1]
if 'sample' in relURL:
# Image was too big and thus was resized. |
relURL = relURL.split('sample-')[1]
newPath = 'http://danbooru.donmai.us/data/' + relURL
newFile = 'C:\\programming\\vacbooru-master\\dbu\\' + relURL
if not os.path.exists(newFile):
r = urllib.urlopen(newPath).read()
if len(r) > 400:
f = open(newFile,'wb')
f.write(r)
f.close()
print str(i) + " downloaded"
else:
print str(i) + " is a 0 size image"
else:
print str(i) + " already exists"
except Exception as e:
print str(i) + " download failed: " + str(e)
if 'list index out of range' in str(e):
print "\t This is likley a image that needs dbu gold" |
apmichaud/vitess-apm | test/keyspace_test.py | Python | bsd-3-clause | 10,189 | 0.008931 | #!/usr/bin/python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import base64
import logging
import threading
import struct
import time
import unittest
from vtdb import keyrange_constants
from vtdb import keyspace
import environment
import utils
import tablet
from zk import zkocc
SHARDED_KEYSPACE = "TEST_KEYSPACE_SHARDED"
UNSHARDED_KEYSPACE = "TEST_KEYSPACE_UNSHARDED"
# shards for SHARDED_KEYSPACE
# range "" - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
# range 80 - ""
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
# shard for UNSHARDED_KEYSPACE
unsharded_master = tablet.Tablet()
unsharded_replica = tablet.Tablet()
unsharded_rdonly = tablet.Tablet()
vtgate_server = None
vtgate_port = None
shard_names = ['-80', '80-']
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
keyspace_id bigint(20) unsigned NOT NULL,
primary key (id)
) Engine=InnoDB'''
def setUpModule():
    """Bring up the topology server and one MySQL instance per tablet.

    Runs once before any test in this module; on failure everything is
    torn down again so no processes leak.
    """
    try:
        environment.topo_server_setup()

        # Start all MySQL daemons in parallel and wait for them together.
        setup_procs = [
            shard_0_master.init_mysql(),
            shard_0_replica.init_mysql(),
            shard_0_rdonly.init_mysql(),
            shard_1_master.init_mysql(),
            shard_1_replica.init_mysql(),
            shard_1_rdonly.init_mysql(),
            unsharded_master.init_mysql(),
            unsharded_replica.init_mysql(),
            unsharded_rdonly.init_mysql(),
        ]
        utils.wait_procs(setup_procs)

        setup_tablets()
    except:
        # Clean up partial state before propagating the original error.
        tearDownModule()
        raise
def tearDownModule():
    """Stop vtgate, all tablets and MySQL instances, and remove their state."""
    if utils.options.skip_teardown:
        return

    global vtgate_server
    utils.vtgate_kill(vtgate_server)
    tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
                         shard_1_master, shard_1_replica, shard_1_rdonly])
    teardown_procs = [
        shard_0_master.teardown_mysql(),
        shard_0_replica.teardown_mysql(),
        shard_0_rdonly.teardown_mysql(),
        shard_1_master.teardown_mysql(),
        shard_1_replica.teardown_mysql(),
        shard_1_rdonly.teardown_mysql(),
        unsharded_master.teardown_mysql(),
        unsharded_replica.teardown_mysql(),
        unsharded_rdonly.teardown_mysql(),
    ]
    utils.wait_procs(teardown_procs, raise_on_error=False)

    environment.topo_server_teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()

    # Remove each tablet's directory exactly once (the original repeated
    # shard_1_rdonly.remove_tree() three times).
    shard_0_master.remove_tree()
    shard_0_replica.remove_tree()
    shard_0_rdonly.remove_tree()
    shard_1_master.remove_tree()
    shard_1_replica.remove_tree()
    shard_1_rdonly.remove_tree()
    unsharded_master.remove_tree()
    unsharded_replica.remove_tree()
    unsharded_rdonly.remove_tree()
def setup_tablets():
    """Create both keyspaces and start a vtgate in front of them."""
    global vtgate_server
    global vtgate_port

    setup_sharded_keyspace()
    setup_unsharded_keyspace()
    # vtgate serves both keyspaces; the port is needed by the tests.
    vtgate_server, vtgate_port = utils.vtgate_start()
def setup_sharded_keyspace():
    """Build SHARDED_KEYSPACE with two shards (-80 and 80-).

    Creates the keyspace, initializes master/replica/rdonly tablets per
    shard, reparents each shard and verifies the serving graph.
    """
    utils.run_vtctl(['CreateKeyspace', SHARDED_KEYSPACE])
    utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', SHARDED_KEYSPACE,
                     'keyspace_id', 'uint64'])
    shard_0_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='-80')
    shard_0_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='-80')
    shard_0_rdonly.init_tablet('rdonly', keyspace=SHARDED_KEYSPACE, shard='-80')
    shard_1_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='80-')
    shard_1_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='80-')
    shard_1_rdonly.init_tablet('rdonly', keyspace=SHARDED_KEYSPACE, shard='80-')

    utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE,], auto_log=True)

    # Create the test table on every tablet and bring the vttablets up.
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly, shard_1_master, shard_1_replica, shard_1_rdonly]:
        t.create_db('vt_test_keyspace_sharded')
        t.mquery(shard_0_master.dbname, create_vt_insert_test)
        t.start_vttablet(wait_for_state=None)

    # NOTE(review): the rdonly tablets are not waited on here -- presumably
    # only masters/replicas must reach SERVING; confirm.
    for t in [shard_0_master, shard_0_replica, shard_1_master, shard_1_replica]:
        t.wait_for_vttablet_state('SERVING')

    utils.run_vtctl(['ReparentShard', '-force', '%s/-80' % SHARDED_KEYSPACE,
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['ReparentShard', '-force', '%s/80-' % SHARDED_KEYSPACE,
                     shard_1_master.tablet_alias], auto_log=True)

    utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE],
                    auto_log=True)

    # Both shards must appear in the serving graph for every tablet type.
    utils.check_srv_keyspace('test_nj', SHARDED_KEYSPACE,
                             'Partitions(master): -80 80-\n' +
                             'Partitions(rdonly): -80 80-\n' +
                             'Partitions(replica): -80 80-\n' +
                             'TabletTypes: master,rdonly,replica')
def setup_unsharded_keyspace():
    """Build UNSHARDED_KEYSPACE with a single shard '0' and verify it."""
    utils.run_vtctl(['CreateKeyspace', UNSHARDED_KEYSPACE])
    utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', UNSHARDED_KEYSPACE,
                     'keyspace_id', 'uint64'])
    unsharded_master.init_tablet('master', keyspace=UNSHARDED_KEYSPACE, shard='0')
    unsharded_replica.init_tablet('replica', keyspace=UNSHARDED_KEYSPACE, shard='0')
    unsharded_rdonly.init_tablet('rdonly', keyspace=UNSHARDED_KEYSPACE, shard='0')

    utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE,], auto_log=True)

    # Create the test table everywhere and wait for the tablets to serve.
    for t in [unsharded_master, unsharded_replica, unsharded_rdonly]:
        t.create_db('vt_test_keyspace_unsharded')
        t.mquery(unsharded_master.dbname, create_vt_insert_test)
        t.start_vttablet(wait_for_state=None)
    for t in [unsharded_master, unsharded_replica, unsharded_rdonly]:
        t.wait_for_vttablet_state('SERVING')

    utils.run_vtctl(['ReparentShard', '-force', '%s/0' % UNSHARDED_KEYSPACE,
                     unsharded_master.tablet_alias], auto_log=True)

    utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE],
                    auto_log=True)

    # A single shard covers the whole keyrange '-'.
    utils.check_srv_keyspace('test_nj', UNSHARDED_KEYSPACE,
                             'Partitions(master): -\n' +
                             'Partitions(rdonly): -\n' +
                             'Partitions(replica): -\n' +
                             'TabletTypes: master,rdonly,replica')
ALL_DB_TYPES = ['master', 'replica', 'rdonly']
class TestKeyspace(unittest.TestCase):
    """Tests for keyspace metadata as served through vtgate/topology."""

    def _read_keyspace(self, keyspace_name):
        """Fetch a Keyspace object via a fresh vtgate topology connection."""
        global vtgate_port
        vtgate_client = zkocc.ZkOccConnection("localhost:%u" % vtgate_port,
                                              "test_nj", 30.0)
        return keyspace.read_keyspace(vtgate_client, keyspace_name)

    def test_get_keyspace(self):
        # GetKeyspace must report the sharding column configured in setup.
        ki = utils.run_vtctl_json(['GetKeyspace', UNSHARDED_KEYSPACE])
        self.assertEqual('keyspace_id', ki['ShardingColumnName'])
        self.assertEqual('uint64', ki['ShardingColumnType'])

    def test_shard_count(self):
        # The sharded keyspace has two shards for every tablet type...
        sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
        self.assertEqual(sharded_ks.shard_count, 2)
        for db_type in ALL_DB_TYPES:
            self.assertEqual(sharded_ks.get_shard_count(db_type), 2)
        # ...and the unsharded keyspace exactly one.
        unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
        self.assertEqual(unsharded_ks.shard_count, 1)
        for db_type in ALL_DB_TYPES:
            self.assertEqual(unsharded_ks.get_shard_count(db_type), 1)
def test_shard_names(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
self.assertEqual(sharded_ks.shard_names, ['-80', '80-'])
for db_type in ALL_DB_TYPES:
self.assertEqual(sharded_ks.get_shard_names(db_type), ['-80', '80- |
iandees/all-the-places | locations/spiders/pigglywiggly.py | Python | mit | 4,159 | 0.004328 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
import logging
from locations.items import GeojsonPointItem
class PigglyWigglySpider(scrapy.Spider):
    ''' This spider scrapes from two different places, an api which has their stores in Wisconsin
        and Illinois, and a page which has all of their other stores. Cookies are used for the
        api request.
    '''
    name = "pigglywiggly"
    allowed_domains = ["pigglywiggly.com"]

    def start_requests(self):
        """Kick off one API request (WI/IL stores) and one HTML crawl (all others)."""
        url = 'https://www.shopthepig.com/api/m_store_location'

        # Browser-like headers plus the CSRF token the API expects.
        headers = {
            'x-newrelic-id': 'XQYBWFVVGwAEVFNRBQcP',
            'accept-encoding': 'gzip, deflate, br',
            'x-csrf-token': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
            'accept': 'application/json, text/plain, */*',
            'referer': 'https://www.shopthepig.com/stores',
        }
        # Session cookies matching the CSRF token above.
        cookies = {
            '__cfduid': 'db0a53231376d78a40dd7fd728fa896f51512948321',
            'SESSb159e7a0d4a6fad9ba3abc7fadef99ec': 'h3o7xcjnfcERSRrqJVh0soQdUI5IFIBDIQlytOZkhIU',
            'XSRF-TOKEN': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'has_js': 1,
        }
        yield scrapy.http.FormRequest(
            url=url, headers=headers, callback=self.parse_wi, cookies=cookies
        )
        yield scrapy.Request(
            'https://www.pigglywiggly.com/store-locations',
            callback=self.parse_nonwi,
        )

    def parse_wi(self, response):
        """Parse the shopthepig.com JSON API (Wisconsin/Illinois stores)."""
        data = json.loads(response.body_as_unicode())
        stores = data['stores']
        for store in stores:
            unp = {
                'ref': store['storeID'],
                'name': store['storeName'],
                'addr_full': store['normalized_address'],
                'city': store['normalized_city'],
                'state': store['normalized_state'],
                'postcode': store['normalized_zip'],
                'lat': store['latitude'],
                'lon': store['longitude'],
                'phone': store['phone']
            }
            # Keep only truthy fields in the emitted item.
            properties = {}
            for key in unp:
                if unp[key]: properties[key] = unp[key]
            yield GeojsonPointItem(**properties)

    def parse_nonwi(self, response):
        """Follow each state's listing page from the store-locations index."""
        for state_url in response.xpath('//div[@class="views-field-province-1"]/span[@class="field-content"]/a/@href').extract():
            yield scrapy.Request(
                response.urljoin(state_url),
                callback=self.parse_state,
            )

    def parse_state(self, response):
        """Scrape individual store rows from a state listing page."""
        for location in response.xpath('//li[contains(@class, "views-row")]'):
            unp = {
                'addr_full': location.xpath('.//div[@class="street-address"]/text()').extract_first(),
                'city': location.xpath('.//span[@class="locality"]/text()').extract_first(),
                'state': location.xpath('.//span[@class="region"]/text()').extract_first(),
                'postcode': location.xpath('.//span[@class="postal-code"]/text()').extract_first(),
                'phone': location.xpath('.//label[@class="views-label-field-phone-value"]/following-sibling::span[1]/text()').extract_first(),
                'website': location.xpath('.//label[@class="views-label-field-website-value"]/following-sibling::span[1]/a/@href').extract_first(),
            }
            if unp['website']:
                # Google Maps links are not the store's own site.
                if 'google' in unp['website']:
                    unp['website'] = None
            if unp['phone']:
                unp['phone'] = unp['phone'].replace('.', '-')
            properties = {}
            for key in unp:
                if unp[key]:
                    properties[key] = unp[key].strip()
            # No stable store id on the page: derive a ref from address+phone.
            ref = ''
            if 'addr_full' in properties: ref += properties['addr_full']
            if 'phone' in properties: ref += properties['phone']
            properties['ref'] = ref
            yield GeojsonPointItem(**properties)
|
RyanSkraba/beam | sdks/python/apache_beam/transforms/external_test_py37.py | Python | apache-2.0 | 2,471 | 0.009308 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the transform.external classes."""
from __future__ import absolute_import
import dataclasses
import typing
import unittest
import apache_beam as beam
from apache_beam import typehints
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.transforms.external_test import PayloadBase
def get_payload(cls):
    """Decode the serialized payload attached to *cls* into a protobuf message."""
    message = ExternalConfigurationPayload()
    message.ParseFromString(cls._payload)
    return message
class ExternalDataclassesPayloadTest(PayloadBase, unittest.TestCase):
  """Checks that dataclass-based ExternalTransforms serialize their
  configuration identically whether annotated with `typing` hints or
  Beam typehints."""

  def get_payload_from_typing_hints(self, values):
    """Build a payload from a transform annotated with `typing` hints."""
    @dataclasses.dataclass
    class DataclassTransform(beam.ExternalTransform):
      URN = 'beam:external:fakeurn:v1'

      integer_example: int
      boolean: bool
      string_example: str
      list_of_strings: typing.List[str]
      optional_kv: typing.Optional[typing.Tuple[str, float]] = None
      optional_integer: typing.Optional[int] = None
      # InitVar: consumed at construction time, not part of the payload.
      expansion_service: dataclasses.InitVar[typing.Optional[str]] = None

    return get_payload(DataclassTransform(**values))

  def get_payload_from_beam_typehints(self, values):
    """Build a payload from a transform annotated with Beam typehints."""
    @dataclasses.dataclass
    class DataclassTransform(beam.ExternalTransform):
      URN = 'beam:external:fakeurn:v1'

      integer_example: int
      boolean: bool
      string_example: str
      list_of_strings: typehints.List[str]
      optional_kv: typehints.Optional[typehints.KV[str, float]] = None
      optional_integer: typehints.Optional[int] = None
      expansion_service: dataclasses.InitVar[typehints.Optional[str]] = None

    return get_payload(DataclassTransform(**values))
if __name__ == '__main__':
unittest.main()
|
SkyTruth/vectortile | utils/tileinfo.py | Python | mit | 2,468 | 0.000405 | #!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols):
    """
    Compute min/max for all registered columns.

    Parameters
    ----------
    data : list
        List of points (dicts) from tile.
    cols : list
        List of column descriptors from tile header (each with a 'name').

    Returns
    -------
    dict
        {column: {'min': value, 'max': value}}
        Columns that never appear in ``data`` are omitted rather than
        crashing on min()/max() of an empty sequence (e.g. empty tiles).
    """
    stats = {c['name']: [] for c in cols}
    for point in data:
        for column, value in point.items():
            stats[column].append(value)
    return {name: {'min': min(vals), 'max': max(vals)}
            for name, vals in stats.items() if vals}
def main():
    """
    Get an info report for a tile. Format is same as input tile but with
    min/max values for values under 'data'.
    """
    arguments = docopt(__doc__, version='tileinfo 0.1')
    src_name = arguments['SOURCE']
    src_format = arguments['--srcformat']
    indent = arguments['--indent']

    # Normalize the indent option: 'None' disables indentation, any other
    # string is a level, and the default is 4.
    if isinstance(indent, str) and indent.lower() == 'none':
        indent = None
    elif isinstance(indent, str):
        indent = int(indent)
    else:
        indent = 4

    with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:

        # Guess input format from the extension if not given
        if src_format is None:
            if '.json' == f.name[-5:]:
                src_format = 'json'
            else:
                src_format = 'tile'

        if src_format == 'tile':
            header, data = Tile(f.read()).unpack()
        else:
            header = json.loads(f.read())
            data = header.pop('data')

        # Generate the info report
        report = info(data, header['cols'])

        # Merge report with other tile attributes
        out = {k: v for k, v in header.items() if k != 'data'}
        out['data'] = {}
        for field, vals in report.items():
            out['data'][field + '_min'] = vals['min']
            out['data'][field + '_max'] = vals['max']

        print(json.dumps(out, indent=indent, sort_keys=True))
|
Tatsh-ansible/ansible | lib/ansible/modules/windows/win_share.py | Python | gpl-3.0 | 3,686 | 0.001628 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_share
version_added: "2.1"
short_description: Manage Windows shares
description:
  - Add, modify or remove Windows share and set share permissions.
requirements:
- As this module used newer cmdlets like New-SmbShare this can only run on
Windows 8 / Windows 2012 or newer.
- This is due to the reliance on the WMI provider MSFT_SmbShare
U(https://msdn.microsoft.com/en-us/library/hh830471) which was only added
with these Windows releases.
options:
name:
description:
- Share name.
required: True
path:
description:
- Share directory.
required: True
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified share.
choices:
- present
- absent
default: present
description:
description:
- Share description
list:
description:
- Specify whether to allow or deny file listing, in case user got no permission on share.
type: bool
default: 'no'
read:
description:
- Specify user list that should get read access on share, separated by comma.
change:
description:
- Specify user list that should get read and write access on share, separated by comma.
full:
description:
- Specify user list that should get full access on share, separated by comma.
deny:
description:
- Specify user list that should get no access, regardless of implied access on share, separated by comma.
caching_mode:
description:
- Set the CachingMode for this share.
choices:
- BranchCache
- Documents
- Manual
- None
- Programs
- Unknown
default: "Manual"
version_added: "2.3"
encrypt:
description: Sets whether to encrypt the traffic to the share or not.
type: bool
default: 'no'
version_added: "2.4"
author:
- Hans-Joachim Kliemeck (@h0nIg)
- David Baumann (@daBONDi)
'''
EXAMPLES = r'''
# Playbook example
# Add share and set permissions
---
- name: Add secret share
win_share:
name: internal
description: top secret share
path: C:\shares\internal
list: no
full: Administrators,CEO
read: HR-Global
deny: HR-External
- name: Add public company share
win_share:
name: company
description: top secret share
path: C:\shares\company
list: yes
full: Administrators,CEO
read: Global
- name: Remove previously added share
win_share:
name: internal
state: absent
'''
RETURN = r'''
actions:
description: A list of action cmdlets that were run by the module.
returned: success
type: list
sample: ['New-SmbShare -Name share -Path C:\temp']
'''
|
dasseclab/dasseclab | clones/routersploit/tests/exploits/routers/zyxel/test_d1000_rce.py | Python | gpl-2.0 | 735 | 0 | from unittest import mock
from flask import Response
from routersploit.modules.exploits.routers.zyxel.d1000_rce import Exploit
def apply_response(*args, **kwargs):
    """Fake the router's reply: the exploit's check() looks for 'home_wan.htm'."""
    resp = Response("TEST home_wan.htm TEST", status=404)
    return resp
@mock.patch("routersploit.modules.exploits.routers.zyxel.d1000_rce.shell")
def test_check_success(mocked_shell, target):
    """ Test scenario - successful check """
    # Any GET to /globe returns the canned vulnerable-looking response.
    route_mock = target.get_route_mock("/globe", methods=["GET"])
    route_mock.side_effect = apply_response

    exploit = Exploit()

    # Module defaults before pointing the exploit at the mock target.
    assert exploit.target == ""
    assert exploit.port == 7547

    exploit.target = target.host
    exploit.port = target.port

    # check() must flag the target as vulnerable; run() returns nothing.
    assert exploit.check()
    assert exploit.run() is None
chubbymaggie/amoco | tests/test_system_pe.py | Python | gpl-2.0 | 148 | 0.02027 | import pytest
from amoco.system.pe import PE
def test_parser_PE(samples):
    """Every sample file with a .exe extension must parse as a PE binary."""
    for f in samples:
        if f[-4:] == '.exe':
            p = PE(f)
|
def scale(w, h, x, y, maximum=True):
    """Scale a w-by-h rectangle to fit within (maximum=True) or cover
    (maximum=False) an x-by-y bounding box, keeping the aspect ratio.

    Returns the new (width, height); a zero dimension is clamped to 1 so
    the result never collapses.
    """
    nw = y * w / h  # width if we match the target height
    nh = x * h / w  # height if we match the target width
    # XOR picks the dimension to pin: fit keeps the smaller scaling,
    # cover keeps the larger one.
    if maximum ^ (nw >= x):
        return nw or 1, y
    return x, nh or 1
|
germandiagogomez/meson | test cases/common/98 gen extra/srcgen.py | Python | apache-2.0 | 650 | 0.001538 | #!/usr/bin/env python3
import sys
import os
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input',
                    help='the input file')
parser.add_argument('--output', dest='output',
                    help='the output file')
parser.add_argument('--upper', dest='upper', action='store_true', default=False,
                    help='Convert to upper case.')

# Template for the generated C source: a stub function returning 0.
c_templ = '''int %s() {
    return 0;
}
'''

options = parser.parse_args(sys.argv[1:])

# First line of the input file names the function to generate.
with open(options.input) as infile:
    funcname = infile.readline().strip()
if options.upper:
    funcname = funcname.upper()

with open(options.output, 'w') as outfile:
    outfile.write(c_templ % funcname)
|
UNINETT/nav | tests/unittests/netmap/topology_layer2_testcase.py | Python | gpl-2.0 | 3,029 | 0.001651 | from mock import patch
import networkx as nx
from nav.models.manage import SwPortVlan, Vlan
from nav.netmap import topology
from nav.topology import vlan
from .topology_testcase import TopologyTestCase
class TopologyLayer2TestCase(TopologyTestCase):
    """Shared fixture building a small layer-2 topology.

    Netboxes a, b, c, d are wired a1-b1, a2-b2, a3-c3 and d4-c4; VLAN 201
    spans the a1-b1 link.  Both the NAV layer-2 graph and the derived
    netmap graph are prepared for subclasses.
    """

    def setUp(self):
        super(TopologyLayer2TestCase, self).setUp()
        self.model_id = 1
        self.nav_graph = nx.MultiDiGraph()

        self.a = a = self._netbox_factory('a')
        self.b = b = self._netbox_factory('b')
        self.c = c = self._netbox_factory('c')
        self.d = d = self._netbox_factory('d')

        self.a1 = a1 = self._interface_factory('a1', a)
        self.a2 = a2 = self._interface_factory('a2', a)
        self.a3 = a3 = self._interface_factory('a3', a)
        self.b1 = b1 = self._interface_factory('b1', b)
        self.b2 = b2 = self._interface_factory('b2', b)
        self.c3 = c3 = self._interface_factory('c3', c)
        self.c4 = c4 = self._interface_factory('c4', c)
        self.d4 = d4 = self._interface_factory('d4', d)

        # The a-b links get edges in both directions; a-c and d-c only one.
        self._add_edge(self.nav_graph, a1.netbox, a1, b1.netbox, b1)
        self._add_edge(self.nav_graph, b1.netbox, b1, a1.netbox, a1)
        self._add_edge(self.nav_graph, a2.netbox, a2, b2.netbox, b2)
        self._add_edge(self.nav_graph, b2.netbox, b2, a2.netbox, a2)
        self._add_edge(self.nav_graph, a3.netbox, a3, c3.netbox, c3)
        self._add_edge(self.nav_graph, d4.netbox, d4, c4.netbox, c4)

        self.vlan__a1_b1 = a_vlan_between_a1_and_b1 = SwPortVlan(
            id=self._next_id(), interface=self.a1, vlan=Vlan(id=201, vlan=2))

        # Patch the VLAN lookup to a fixed (by-interface, by-netbox) pair.
        self.vlans = patch.object(topology, '_get_vlans_map_layer2',
                                  return_value=(
                                      {
                                          self.a1: [a_vlan_between_a1_and_b1],
                                          self.b1: [a_vlan_between_a1_and_b1],
                                          self.a2: [],
                                          self.b2: [],
                                          self.a3: [],
                                          self.c3: []
                                      },
                                      {
                                          self.a: {201: a_vlan_between_a1_and_b1},
                                          self.b: {201: a_vlan_between_a1_and_b1},
                                          self.c: {}
                                      }))
        self.vlans.start()

        # Patch the graph builder so it returns our hand-made graph.
        self.build_l2 = patch.object(vlan, 'build_layer2_graph', return_value=self.nav_graph)
        self.build_l2.start()

        bar = vlan.build_layer2_graph()
        #foo = topology._get_vlans_map_layer2(bar)
        vlan_by_interfaces, vlan_by_netbox = topology._get_vlans_map_layer2(self.nav_graph)

        self.netmap_graph = topology.build_netmap_layer2_graph(
            vlan.build_layer2_graph(),
            vlan_by_interfaces,
            vlan_by_netbox,
            None)

    def tearDown(self):
        # Stop the patches so other tests see the real implementations.
        self.vlans.stop()
        self.build_l2.stop()

    def test_noop_layer2_testcase_setup(self):
        # Smoke test: setUp itself must not raise.
        self.assertTrue(True)

    def _add_edge(self, g, node_a, interface_a, node_b, interface_b):
        """Register a directed edge keyed by the source interface."""
        interface_a.to_interface = interface_b
        g.add_edge(node_a, node_b, key=interface_a)
|
fako/datascope | src/sources/models/websites/kleding.py | Python | gpl-3.0 | 623 | 0.00321 | from core.models.resources.http import HttpResource
class KledingListing(HttpResource):
URI_TEMPLATE = "https://www.kleding.nl{}"
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel | Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
}
def next_parameters(self):
mime_type, soup = self.content
next_tag = soup.find(attrs={"rel":"next"})
if not next_tag:
return {}
next_link = next_tag["href"]
next_page = next_link[next_link.rfind("=") + 1:]
return {
"p": next_ | page
}
|
atty303/pyfilesystem | fs/tests/test_remote.py | Python | bsd-3-clause | 11,134 | 0.005569 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
fs.tests.test_remote: testcases for FS remote support utilities
"""
from fs.tests import FSTestCases, ThreadingTestCases
import unittest
import threading
import random
import time
import sys
from fs.remote import *
from fs import SEEK_END
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.tempfs import TempFS
from fs.path import *
from fs.local_functools import wraps
class RemoteTempFS(TempFS):
"""
Simple filesystem implementing setfilecontents
for RemoteFileBuffer tests
"""
def open(self, path, mode='rb', write_on_flush=True):
if 'a' in mode or 'r' in mode or '+' in mode:
f = super(RemoteTempFS, self).open(path, 'rb')
f = TellAfterCloseFile(f)
else:
f = None
return RemoteFileBuffer(self, path, mode, f,
write_on_flush=write_on_flush)
def setcontents(self, path, data, chunk_size=64*1024):
f = super(RemoteTempFS, self).open(path, 'wb')
if getattr(data, 'read', False):
f.write(data.read())
else:
f.write(data)
f.close()
class TellAfterCloseFile(object):
"""File-like object that allows calling tell() after it's been closed."""
def __init__(self,file):
self._finalpos = None
self.file = file
def close(self):
if self._finalpos is None:
self._finalpos = self.file.tell()
self.file.close()
def tell(self):
if self._finalpos is not None:
return self._finalpos
return self.file.tell()
def __getattr__(self,attr):
return getattr(self.file,attr)
class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
class FakeException(Exception): pass
def setUp(self):
self.fs = RemoteTempFS()
self.original_setcontents = self.fs.setcontents
def tearDown(self):
self.fs.close()
def fake_setcontents(self, path, content='', chunk_size=16*1024):
''' Fake replacement for RemoteTempFS setcontents() '''
raise self.FakeException("setcontents should not be called here!")
def fakeOn(self):
'''
Turn on fake_setcontents(). When setcontents on RemoteTempFS
is called, FakeException is raised and nothing is stored.
'''
self.original_setcontents = self.fs.setcontents
self.fs.setcontents = self.fake_setcontents
def fakeOff(self):
''' Switch off fake_setcontents(). '''
self.fs.setcontents = self.original_setcontents
def test_ondemand(self):
'''
Tests on-demand loading of remote content in RemoteFileBuffer
'''
contents = "Tristatricettri stribrnych strikacek strikalo" + \
"pres tristatricettri stribrnych strech."
f = self.fs.open('test.txt', 'wb')
f.write(contents)
f.close()
# During following tests, no setcontents() should be called.
self.fakeOn()
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(10), contents[:10])
f.wrapped_file.seek(0, SEEK_END)
self.assertEquals(f._rfile.tell(), 10)
f.seek(20)
self.assertEquals(f.tell(), 20)
self.assertEquals(f._rfile.tell(), 20)
f.seek(0, SEEK_END)
self.assertEquals(f._rfile.tell(), len(contents))
f.close()
f = self.fs.open('test.txt', 'ab')
self.assertEquals(f.tell(), len(contents))
f.close()
self.fakeOff()
# Writing over the rfile edge
f = self.fs.open('test.txt', 'wb+')
self.assertEquals(f.tell(), 0)
f.seek(len(contents) - 5)
# Last 5 characters not loaded from remote file
self.assertEquals(f._rfile.tell(), len(contents) - 5)
# Confirm that last 5 characters are still in rfile buffer
self.assertEquals(f._rfile.read(), contents[-5:])
# Rollback position 5 characters before eof
f._rfile.seek(len(contents[:-5]))
# Write 10 new characters (will make contents longer for 5 chars)
f.write(u'1234567890')
f.flush()
# We are on the end of file (and buffer not serve anything anymore)
self.assertEquals(f.read(), '')
self.fakeOn()
# Check if we wrote everything OK from
# previous writing over the remote buffer edge
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:-5] + u'1234567890')
f.close()
def test_writeonflush(self):
'''
Test 'write_on_flush' switch | of RemoteFileBuffer.
When True, flush() should call setcontents and store
to remote destination.
When False, setcontents should be called only on close().
'''
self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=True)
f.write('Sample text')
self.assertRaises(self.FakeException, f.flush)
f.write( | 'Second sample text')
self.assertRaises(self.FakeException, f.close)
f = self.fs.open('test.txt', 'wb', write_on_flush=False)
f.write('Sample text')
# FakeException is not raised, because setcontents is not called
f.flush()
f.write('Second sample text')
self.assertRaises(self.FakeException, f.close)
def test_flush_and_continue(self):
'''
This tests if partially loaded remote buffer can be flushed
back to remote destination and opened file is still
in good condition.
'''
contents = "Zlutoucky kun upel dabelske ody."
contents2 = 'Ententyky dva spaliky cert vyletel z elektriky'
f = self.fs.open('test.txt', 'wb')
f.write(contents)
f.close()
f = self.fs.open('test.txt', 'rb+')
# Check if we read just 10 characters
self.assertEquals(f.read(10), contents[:10])
self.assertEquals(f._rfile.tell(), 10)
# Write garbage to file to mark it as _changed
f.write('x')
# This should read the rest of file and store file back to again.
f.flush()
f.seek(0)
# Try if we have unocrrupted file locally...
self.assertEquals(f.read(), contents[:10] + 'x' + contents[11:])
f.close()
# And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + 'x' + contents[11:])
f.close()
# Now try it again, but write garbage behind edge of remote file
f = self.fs.open('test.txt', 'rb+')
self.assertEquals(f.read(10), contents[:10])
# Write garbage to file to mark it as _changed
f.write(contents2)
f.flush()
f.seek(0)
# Try if we have unocrrupted file locally...
self.assertEquals(f.read(), contents[:10] + contents2)
f.close()
# And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + contents2)
f.close()
class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
"""Test simple operation of CacheFS"""
def setUp(self):
self._check_interval = sys.getcheckinterval()
sys.setcheckinterval(10)
self.fs = CacheFS(TempFS())
def tearDown(self):
self.fs.close()
sys.setcheckinterval(self._check_interval)
class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases):
"""Test simple operation of ConnectionManagerFS"""
def setUp(self):
self._check_interval = sys.getcheckinterval()
sys.setcheckinterval(10)
self.fs = ConnectionManagerFS(TempFS())
def tearDown(self):
self.fs.close()
sys.setcheckinterval(self._check_interval)
class DisconnectingFS(WrapFS):
"""FS subcla |
tebriel/dd-agent | checks.d/haproxy.py | Python | bsd-3-clause | 19,440 | 0.001235 | # (C) Datadog, Inc. 2012-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import re
import time
# 3rd party
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
STATS_URL = "/;csv;norefresh"
EVENT_TYPE = SOURCE_TYPE_NAME = 'haproxy'
class Services(object):
BACKEND = 'BACKEND'
FRONTEND = 'FRONTEND'
ALL = (BACKEND, FRONTEND)
ALL_STATUSES = (
'up', 'open', 'down', 'maint', 'nolb'
)
STATUSES_TO_SERVICE_CHECK = {
'UP': AgentCheck.OK,
'DOWN': AgentCheck.CRITICAL,
'no check': AgentCheck.UNKNOWN,
'MAINT': AgentCheck.OK,
}
class HAProxy(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.host_status = defaultdict(lambda: defaultdict(lambda: None))
METRICS = {
"qcur": ("gauge", "queue.current"),
"scur": ("gauge", "session.current"),
"slim": ("gauge", "session.limit"),
"spct": ("gauge", "session.pct"), # Calculated as: (scur/slim)*100
"stot": ("rate", "session.rate"),
"bin": ("rate", "bytes.in_rate"),
"bout": ("rate", "bytes.out_rate"),
"dreq": ("rate", "denied.req_rate"),
"dresp": ("rate", "denied.resp_rate"),
"ereq": ("rate", "errors.req_rate"),
"econ": ("rate", "errors.con_rate"),
"eresp": ("rate", "errors.resp_rate"),
"wretr": ("rate", "warnings.retr_rate"),
"wredis": ("rate", "warnings.redis_rate"),
"req_rate": ("gauge", "requests.rate"), # HA Proxy 1.4 and higher
"hrsp_1xx": ("rate", "response.1xx"), # HA Proxy 1.4 and higher
"hrsp_2xx": ("rate", "response.2xx"), # HA Proxy 1.4 and higher
"hrsp_3xx": ("rate", "response.3xx"), # HA Proxy 1.4 and higher
"hrsp_4xx": ("rate", "response.4xx"), # HA Proxy 1.4 and higher
"hrsp_5xx": ("rate", "response.5xx"), # HA Proxy 1.4 and higher
"hrsp_other": ("rate", "response.other"), # HA Proxy 1.4 and higher
"qtime": ("gauge", "queue.time"), # HA Proxy 1.5 and higher
"ctime": ("gauge", "connect.time"), # HA Proxy 1.5 and higher
"rtime": ("gauge", "response.time"), # HA Proxy 1.5 and higher
"ttime": ("gauge", "session.time"), # HA Proxy 1.5 and higher
}
SERVICE_CHECK_NAME = 'haproxy.backend_up'
def check(self, instance):
url = instance.get('url')
username = instance.get('username')
password = instance.get('password')
collect_aggregates_only = _is_affirmative(
instance.get('collect_aggregates_only', True)
)
collect_status_metrics = _is_affirmative(
instance.get('collect_status_metrics', False)
)
collect_status_metrics_by_host = _is_affirmative(
instance.get('collect_status_metrics_by_host', False)
)
count_status_by_service = _is_affirmative(
instance.get('count_status_by_service', True)
)
tag_service_check_by_host = _is_affirmative(
instance.get('tag_service_check_by_host', False)
)
services_incl_filter = instance.get('services_include', [])
services_excl_filter = instance.get('services_exclude', [])
verify = not _is_affirmative(instance.get('disable_ssl_validation', False))
self.log.debug('Processing HAProxy data for %s' % url)
data = self._fetch_data(url, username, password, verify)
process_events = instance.get('status_check', self.init_config.get('status_check', False))
self._process_data(
data, collect_aggregates_only, process_events,
url=url, collect_status_metrics=collect_status_metrics,
collect_status_metrics_by_host=collect_status_metrics_by_host,
tag_service_check_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service,
)
def _fetch_data(self, url, username, password, verify):
''' Hit a given URL and return the parsed json '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
self.log.debug("HAProxy Fetching haproxy search data from: %s" % url)
r = requests.get(url, auth=auth, headers=headers(self.agentConfig), verify=verify)
r.raise_for_status()
return r.content.splitlines()
def _process_data(self, data, collect_aggregates_only, process_events, url=None,
collect_status_metrics=False, collect_status_metrics_by_host=False,
tag_service_check_by_host=False, services_incl_filter=None,
services_excl_filter=None, count_status_by_service=True):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''
# Split the first line into an index of fields
# The line looks like:
# "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,"
fields = [f.strip() for f in data[0][2:].split(',') if f]
self.hosts_statuses = defaultdict(int)
back_or_front = None
# Skip the first line, go backwards to set back_or_front
for line in data[:0:-1]:
if not line.strip():
continue
# Store each line's values in a dictionary
| data_dict = self._line_to_dict(fields, line)
if self._is_aggregate(data_dict):
back_or_front = data_dict['svname']
self._update_data_dict(data_dict, back_or_front)
self._update_hosts_statuses_if_needed(
collect_status_metrics, collect_status_metrics_by_host,
data_dict, self.hosts_statuses
)
if self._should_process(data_dict, collect_aggregates_only):
| # update status
# Send the list of data to the metric and event callbacks
self._process_metrics(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if process_events:
self._process_event(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
self._process_service_check(
data_dict, url,
tag_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if collect_status_metrics:
self._process_status_metric(
self.hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service
)
self._process_backend_hosts_metric(
self.hosts_statuses,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
return data
def _line_to_dict(self, fields, line):
data_dict = {}
for i, val in enumerate(line.split(',')[:]):
if val:
try:
# Try converting to a long, if failure, just leave it
val = float(val)
except Exception:
pass
data_dict[fields[i]] = val
|
Dawny33/Code | HackerEarth/SuperProf Hiring/footy.py | Python | gpl-3.0 | 579 | 0.02418 | T = input()
while(T):
T -= 1
buff = []
a,b = [int(i) for i in raw_input().split()]
buff.append(b)
pt = 0
while(a):
a -= 1
c = [i for i in raw_input().split()]
if len(c)==2:
c[1] = int(c[1])
if c[0] == "P":
buff.append(c[1])
#pt = len(buff)-1
k = len(buff)-1
l = len(buff)-2
#print buff[k]
if c[0] == "B":
k,l = l,k
|
#print k
print buff, | buff[k]
print "Player " + str(buff[k])
|
openvenues/lieu | lib/lieu/address.py | Python | mit | 8,181 | 0.000122 | import six
from collections import defaultdict, OrderedDict
from lieu.coordinates import latlon_to_decimal
from lieu.encoding import safe_decode
class AddressComponents:
NAME = 'house'
HOUSE_NUMBER = 'house_number'
HOUSE_NUMBER_BASE = 'house_number_base'
STREET = 'road'
BUILDING = 'building'
FLOOR = 'floor'
UNIT = 'unit'
SUBURB = 'suburb'
CITY_DISTRICT = 'city_district'
CITY = 'city'
STATE_DISTRICT = 'state_district'
ISLAND = 'island'
STATE = 'state'
COUNTRY_REGION = 'country_region'
COUNTRY = 'country'
WORLD_REGION = 'world_region'
POSTAL_CODE = 'postcode'
class Coordinates:
LATITUDE = 'lat'
LONGITUDE = 'lon'
class EntityDetails:
PHONE = 'phone'
WEBSITE = 'website'
EMAIL = 'email'
FACEBOOK = 'facebook'
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
class Aliases(object):
def __init__(self, aliases):
self.aliases = aliases
self.priorities = {k: i for i, k in enumerate(aliases)}
def key_priority(self, key):
return self.priorities.get(key, len(self.priorities))
def get(self, key, default=None):
return self.aliases.get(key, default)
def replace(self, components):
replacements = defaultdict(list)
for k in list(components):
new_key = self.aliases.get(k)
if new_key and new_key not in components:
replacements[new_key].append(k)
values = {}
for key, source_keys in six.iteritems(replacements):
source_keys.sort(key=self.key_priority)
values[key] = components[source_keys[0]]
return values
class Address(object):
field_map = Aliases(
OrderedDict([
('name', AddressComponents.NAME),
('wof:name', AddressComponents.NAME),
('house', AddressComponents.NAME),
('addr:housename', AddressComponents.NAME),
('addr:housenumber', AddressComponents.HOUSE_NUMBER),
('addr:house_number', AddressComponents.HOUSE_NUMBER),
('addr:housenumber:base', AddressComponents.HOUSE_NUMBER_BASE),
('house_number', AddressComponents.HOUSE_NUMBER),
('housenumber', AddressComponents.HOUSE_NUMBER),
('addr:street', AddressComponents.STREET),
('street', AddressComponents.STREET),
('addr:floor', AddressComponents.FLOOR),
('addr:level', AddressComponents.FLOOR),
('level', AddressComponents.FLOOR),
('floor', AddressComponents.FLOOR),
('addr:unit', AddressComponents.UNIT),
('unit', AddressComponents.UNIT),
('neighborhood', AddressComponents.SUBURB),
('addr:neighborhood', AddressComponents.SUBURB),
('is_in:neighborhood', AddressComponents.SUBURB),
('neighbourhood', AddressComponents.SUBURB),
('addr:neighbourhood', AddressComponents.SUBURB),
('is_in:neighbourhood', AddressComponents.SUBURB),
('barangay', AddressComponents.SUBURB),
('addr:barangay', AddressComponents.SUBURB),
('is_in:barangay', AddressComponents.SUBURB),
('suburb', AddressComponents.SUBURB),
('addr:suburb', AddressComponents.SUBURB),
('is_in:suburb', AddressComponents.SUBURB),
('city', AddressComponents.CITY),
('addr:city', AddressComponents.CITY),
('is_in:city', AddressComponents.CITY),
('municipality', AddressComponents.CITY),
('addr:municipality', AddressComponents.CITY),
('is_in:municipality', AddressComponents.CITY),
('locality', AddressComponents.CITY),
('addr:locality', AddressComponents.CITY),
('is_in:locality', AddressComponents.CITY),
('city_district', AddressComponents.CITY_DISTRICT),
('addr:city_district', AddressComponents.CITY_DI | STRICT),
('is_in:city_district', AddressComponents.CITY_DISTRICT),
('quarter', AddressComponents.CITY_DISTRICT),
('addr:quarter', AddressCompo | nents.CITY_DISTRICT),
('is_in:quarter', AddressComponents.CITY_DISTRICT),
('county', AddressComponents.STATE_DISTRICT),
('addr:county', AddressComponents.STATE_DISTRICT),
('is_in:county', AddressComponents.STATE_DISTRICT),
('state_district', AddressComponents.STATE_DISTRICT),
('addr:state_district', AddressComponents.STATE_DISTRICT),
('is_in:state_district', AddressComponents.STATE_DISTRICT),
('island', AddressComponents.ISLAND),
('addr:island', AddressComponents.ISLAND),
('is_in:island', AddressComponents.ISLAND),
('state', AddressComponents.STATE),
('addr:state', AddressComponents.STATE),
('is_in:state', AddressComponents.STATE),
('governorate', AddressComponents.STATE),
('addr:governorate', AddressComponents.STATE),
('is_in:governorate', AddressComponents.STATE),
('province', AddressComponents.STATE),
('addr:province', AddressComponents.STATE),
('is_in:province', AddressComponents.STATE),
('region', AddressComponents.STATE),
('addr:region', AddressComponents.STATE),
('is_in:region', AddressComponents.STATE),
('postcode', AddressComponents.POSTAL_CODE),
('addr:postcode', AddressComponents.POSTAL_CODE),
('postal_code', AddressComponents.POSTAL_CODE),
('addr:postal_code', AddressComponents.POSTAL_CODE),
('zipcode', AddressComponents.POSTAL_CODE),
('addr:zipcode', AddressComponents.POSTAL_CODE),
('zip_code', AddressComponents.POSTAL_CODE),
('addr:zip_code', AddressComponents.POSTAL_CODE),
('zip', AddressComponents.POSTAL_CODE),
('addr:zip', AddressComponents.POSTAL_CODE),
('postalcode', AddressComponents.POSTAL_CODE),
('addr:postalcode', AddressComponents.POSTAL_CODE),
('iso:country', AddressComponents.COUNTRY),
('country_code', AddressComponents.COUNTRY),
('addr:country_code', AddressComponents.COUNTRY),
('is_in:country_code', AddressComponents.COUNTRY),
('country', AddressComponents.COUNTRY),
('addr:country', AddressComponents.COUNTRY),
('is_in:country', AddressComponents.COUNTRY),
('country_region', AddressComponents.COUNTRY_REGION),
('addr:country_region', AddressComponents.COUNTRY_REGION),
('is_in:country_region', AddressComponents.COUNTRY_REGION),
('world_region', AddressComponents.WORLD_REGION),
('addr:world_region', AddressComponents.WORLD_REGION),
('is_in:world_region', AddressComponents.WORLD_REGION),
('phone', EntityDetails.PHONE),
('telephone', EntityDetails.PHONE),
('sg:phone', EntityDetails.PHONE),
('contact:phone', EntityDetails.PHONE),
('sg:website', EntityDetails.WEBSITE),
('website', EntityDetails.WEBSITE),
('contact:website', EntityDetails.WEBSITE),
('email', EntityDetails.EMAIL),
('contact:email', EntityDetails.EMAIL),
])
)
@classmethod
def from_geojson(cls, data):
properties = data.get('properties')
properties = {k: safe_decode(v) if k in cls.field_map.aliases else v for k, v in six.iteritems(properties)}
fields = cls.field_map.replace(properties)
lon, lat = data.get('geometry', {}).get('coordinates', (None, None))
try:
lat, lon = latlon_to_decimal(lat, lon)
except ValueError:
lat = lon = None
if lat is not None:
fields[Coordinates.LATITUDE] = lat
if lon is not None:
fields[Coordinates.LONGITUDE] = lon
return fields
@classmethod
def have_latlon(cls, props):
return Coordinates.LATITUDE in props and Coordinates.LONGITUDE in props |
dotancohen/burton | configure_websites.py | Python | gpl-3.0 | 4,169 | 0.023987 | import os
import re
import sys
"""
* Perform initial configuration to ensure that the server is set up to work with Burton's format
sudo chown -R ubuntu:ubuntu /var/www
mkdir -p /var/www/default/public_html
mv /var/www/html/index.html /var/www/default/public_html # Ubuntu >=14.04
mv /var/www/index.html /var/www/default/public_html # Ubuntu <14.04
rm -rf /var/www/html
sudo vim /etc/apache2/sites-available/000-default.conf # Ubuntu >=14.04
sudo vim /etc/apache2/sites-available/default # Ubuntu <14.04
sudo a2enmod ssl
sudo service apache2 restart
* Enable / disable .htaccess for a site
* PHP configuration
"""
environment = ''
def main(env):
global environment
environment = env
while True:
print("\nConfigure Websites\n")
print("Please select an operation:")
print(" 1. Restart Apache")
print(" 2. Add a new website")
print(" 3. Add SSL to website")
print(" 0. Go Back")
print(" -. Exit")
operation = input(environment.prompt)
if operation == '0':
return True
elif operation == '-':
sys.exit()
elif operation == '1':
restart_apache()
elif operation == '2':
add_website()
elif operation == '3':
add_ssl()
else:
print("Invalid input.")
def restart_apache():
print("\nAttempting to restart Apache:")
# TODO: Print an error when the user does not have permissions to perform the action.
result = os.system("sudo service apache2 restart")
print(result)
return True
def add_website():
global environment
print('\nAdd website.\n')
input_file = open('./example-files/apache-site', 'r')
input_file_text = input_file.read()
input_file.close()
site_name = input('Website name (without www or http)' + environment.prompt)
new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
tmp_filename = '/tmp/%s.conf' % (site_name,)
# TODO: Check that site_name is legal for both a domain name and a filename.
while os.path.isfile(new_filename):
print('Site exists! Please choose another.')
site_name = input('Website name (without www or http)' + environment.prompt)
new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
tmp_filename = '/tmp/%s.conf' % (site_name,)
new_config = re.sub('SITE', site_name, input_file_text)
try:
output_file = open(tmp_filename, 'w')
output_file.write(new_config)
output_file.close()
tmp_move = os.system("sudo mv %s %s" % (tmp_filename, new_filename))
except PermissionError as e:
print('\n\nError!')
print('The current user does not have permission to perform this action.')
#print('Please run Burton with elevated permissions to resolve this error.\n\n')
if tmp_move != 0:
print('\n\nError!')
print('The current user does not have permission to perform this action.')
#print('Please run Burton with elevated permissions to resolve this error.\n\n')
current_user = str(os.getuid())
result = os.system('sudo mkdir -p /var/www/%s/public_html/' % (site_name,))
result = os.system('sudo mkdi | r -p /var/www/%s/logs/' % (site_name | ,))
result = os.system('sudo chown -R %s:%s /var/www/%s/' % (current_user, current_user,))
result = os.system('sudo a2ensite %s.conf' % (site_name,))
restart_apache()
return True
def add_ssl():
global environment
print("\nAdd SSL to website.\n")
print("Please enter the URL of the website.\n")
site_name = input(environment.prompt)
print("Is this a wildcard certificate? (y/N)\n")
wildcard = input(environment.prompt)
if wildcard.lower()=='y':
print("Generating wildcard cert for *.%s" % (site_name,))
wildcard = '*.'
else:
print("Generating cert for %s" % (site_name,))
wildcard = ''
# http://serverfault.com/questions/649990/non-interactive-creation-of-ssl-certificate-requests
#command_template = 'openssl req -new -newkey rsa:2048 -nodes -sha256 -keyout foobar.com.key -out foobar.com.csr -subj "/C=US/ST=New foobar/L=foobar/O=foobar foobar, Inc./CN=foobar.com/emailAddress=foobar@foobar.com"'
command_template = "openssl req -new -newkey rsa:2048 -nodes -sha256 -keyout %s.key -out %s.csr -subj \"/CN=%s%s\""
print(command_template % (site_name, site_name, wildcard, site_name))
return True
|
haje01/swak | tests/test_cli.py | Python | mit | 6,911 | 0 | """This module implements core module test."""
from __future__ import absolute_import
import os
from subprocess import call
import shutil
import logging
import pytest
from swak.plugin import iter_plugins, get_plugins_dir,\
get_plugins_initpy_path, PREFIX
from swak.util import which_exe
from swak.const import PLUGINDIR_PREFIX
from swak.cli import ptrn_classnm, set_log_verbosity, _verbosity_from_log_level
SWAK_CLI = 'swak.bat' if os.name == 'nt' else 'swak'
@pytest.fixture
def rm_dummies():
"""Remove dummy plugin dirs from previous test."""
base_dir = get_plugins_dir(False)
for name in ['testfoo', 'testbad']:
plugin_dir = os.path.join(base_dir, '{}_{}'.format(PLUGINDIR_PREFIX,
name))
if os.path.isdir(plugin_dir):
shutil.rmtree(plugin_dir)
def plugin_filter_ext(_dir):
"""Plugin filter for external plugin test."""
return _dir in ['{}_testfoo'.format(PLUGINDIR_PREFIX)]
def test_cli_basic(capfd):
"""Test CLI list & desc commands."""
cmd = [SWAK_CLI, '-vv', 'list']
try:
call(cmd)
except FileNotFoundError:
return
out, err = capfd.readouterr()
print(err)
assert 'standard plugins' in out
# after first command, plugins/__init__.py shall exist.
assert os.path.isfile(get_plugins_initpy_path(True))
cmd = [SWAK_CLI, 'desc', 'i.counter']
call(cmd)
assert "Generate incremental numbers" in out
cmd = [SWAK_CLI, 'desc', 'i.notexist']
call(cmd)
out, err = capfd.readouterr()
assert "Can not find" in err
# desc for sub-command
cmd = [SWAK_CLI, 'desc', 'o.stdout', '-s', 'f.stdout']
call(cmd)
out, err = capfd.readouterr()
assert '--timezone' in out
@pytest.mark.skipif(which_exe('git') is None, reason="requires git")
def test_cli_clone(capfd):
"""Test clone external plugin."""
test_plugin = 'swak_plugin_boo'
base_dir = get_plugins_dir(False)
clone_dir = os.path.join(base_dir, test_plugin)
def del_plugin():
# delete existing test plugin
if os.path.isdir(clone_dir):
shutil.rmtree(clone_dir)
del_plugin()
git_clone_cmd = ['git', 'clone',
'https://github.com/haje01/{}'.format(test_plugin),
clone_dir]
call(git_clone_cmd)
out, err = capfd.readouterr()
assert 'Cloning into' in err
assert os.path.isdir(clone_dir)
cmd = [SWAK_CLI, 'list']
call(cmd)
out, err = capfd.readouterr()
assert 'o.boo' in out
del_plugin()
def test_cli_init(capfd, rm_dummies):
"""Test CLI init input(text), parser, modifier and output."""
# remove previous test pacakge.
base_dir = get_plugins_dir(False)
plugin_dir = os.path.join(base_dir, '{}_testfoo'.format(PLUGINDIR_PREFIX))
# test init plugin with parser & output modules.
cmd = [SWAK_CLI, 'init', '--type', 'it', '--type', 'p', '--type',
'm', '--type', 'o', 'testfoo', 'TestFoo']
try:
call(cmd)
except FileNotFoundError:
return
out, err = capfd.readouterr()
assert err == ''
for pr in PREFIX:
| pfile = os.path.join(plugin_dir, '{}_testfoo.py'.format(pr))
assert os.path.isfile(pfile)
with open(pfile, 'rt') as f:
code = f.read()
assert "class TestFoo" in code
readme_file = os.path.join(plugin_dir, 'README.md')
assert os.path.isfile(readme_file)
with open(readme_file, 'rt') as f:
text = f.read()
assert '# {}_testfoo'.format(PLUGINDIR_PREFIX) in text
assert "plugin package for Swak" in text
| test_file = os.path.join(plugin_dir, 'test_testfoo.py')
assert os.path.isfile(test_file)
with open(test_file, 'rt') as f:
code = f.read()
assert 'test_testfoo' in code
# enumerate external plugins
plugin_infos = list(iter_plugins(False, _filter=plugin_filter_ext))
assert plugin_infos[0].dname == '{}_testfoo'.format(PLUGINDIR_PREFIX)
# desc command should find new external plugins
cmd = [SWAK_CLI, 'list']
call(cmd)
out, err = capfd.readouterr()
assert 'i.testfoo' in out
assert 'p.testfoo' in out
# check duplicated plugin error
cmd = [SWAK_CLI, 'init', '--type', 'o', 'stdout', 'Stdout']
call(cmd)
out, err = capfd.readouterr()
assert 'already exists' in err
# check duplicate init type error
cmd = [SWAK_CLI, 'init', '--type', 'it', '--type', 'ir', 'boo',
'Boo']
call(cmd)
out, err = capfd.readouterr()
assert 'are mutually exclusive' in err
shutil.rmtree(plugin_dir)
# check after removing
cmd = [SWAK_CLI, 'list']
call(cmd)
out, err = capfd.readouterr()
assert err == ''
assert '0 external plugin' in out
def test_cli_init2(capfd, rm_dummies):
"""Test CLI init input(record) and unsuitable plugin dictory."""
# remove previous test pacakge.
base_dir = get_plugins_dir(False)
plugin_dir = os.path.join(base_dir, '{}_testfoo'.format(PLUGINDIR_PREFIX))
# test init plugin with parser & output modules.
cmd = [SWAK_CLI, 'init', '--type', 'ir', 'testfoo', 'TestFoo']
try:
call(cmd)
except FileNotFoundError:
return
out, err = capfd.readouterr()
assert err == ''
# test illegal names
cmd = [SWAK_CLI, 'init', '--type', 'ir', 'o_testboo', 'TestBoo']
call(cmd)
out, err = capfd.readouterr()
# desc command should find new external plugins
cmd = [SWAK_CLI, 'list']
call(cmd)
out, err = capfd.readouterr()
assert 'i.testfoo' in out
shutil.rmtree(plugin_dir)
# test warning unsuitable directory
plugin_dir = os.path.join(base_dir, '{}_testbad'.format(PLUGINDIR_PREFIX))
if os.path.isdir(plugin_dir):
shutil.rmtree(plugin_dir)
os.mkdir(plugin_dir)
cmd = [SWAK_CLI, 'list']
call(cmd)
out, err = capfd.readouterr()
# should report unsuitable directories
assert 'is not valid plugin directory' in err
shutil.rmtree(plugin_dir)
def test_cli_init_names():
"""Test CLI init command names."""
assert ptrn_classnm.match("Foo") is not None
assert ptrn_classnm.match("FooCls") is not None
assert ptrn_classnm.match("FooCls9") is not None
assert ptrn_classnm.match("Foo_Cls") is not None
assert ptrn_classnm.match("Foo_Cls9") is not None
assert ptrn_classnm.match("9Cls") is None
assert ptrn_classnm.match("_Cls") is None
assert ptrn_classnm.match("fooCls") is None
def test_cli_etc():
"""Test cli etc."""
logger = logging.getLogger()
org_level = logger.getEffectiveLevel()
org_verbosity = _verbosity_from_log_level(org_level)
if org_verbosity is None:
org_verbosity = 0
set_log_verbosity(0)
new_level = logger.getEffectiveLevel()
assert new_level == 40
set_log_verbosity(org_verbosity)
|
Administrate/surveymonkey | surveymonkey/tests/mocks/utils.py | Python | mit | 3,642 | 0.001647 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import math
from datetime import datetime
from random import randint
from furl import furl
from delorean import Delorean
def create_quota_headers(qpd=None, qps=None, reset=None):
    """Build fake SurveyMonkey rate-limit response headers for mocks.

    :param qpd: optional ``(allotted, current)`` pair for the per-day quota;
        random current value when omitted.
    :param qps: optional ``(allotted, current)`` pair for the per-second
        quota; random current value when omitted.
    :param reset: datetime of the next quota reset; defaults to "now".
    :return: dict of ``X-Plan-*`` header values.
    """
    # BUG FIX: the previous signature used ``reset=datetime.now()``, which
    # froze the default at import time; ``None`` keeps it per-call.
    if reset is None:
        reset = datetime.now()
    qpd_allotted, qpd_current = qpd if qpd else (10000, randint(1, 9000))
    qps_allotted, qps_current = qps if qps else (8, randint(1, 5))
    # Normalize to an aware UTC datetime before formatting.
    reset = Delorean(datetime=reset, timezone='UTC').datetime
    return {
        'X-Plan-QPS-Allotted': qps_allotted,
        'X-Plan-QPS-Current': qps_current,
        'X-Plan-Quota-Allotted': qpd_allotted,
        'X-Plan-Quota-Current': qpd_current,
        'X-Plan-Quota-Reset': reset.strftime("%A, %B %d, %Y %I:%M:%S %p %Z")
    }
class BaseListMock(object):
    """Base helper for mocking paginated SurveyMonkey list endpoints.

    Subclasses implement :meth:`create_item` to produce one fake item;
    this class supplies the pagination arithmetic and paging links.
    (Fixes dataset-corruption artifacts ``ret | urn`` / ``folder | _id``.)
    """

    def __init__(self, total=125, base_url=None):
        self.total = total        # total number of items the fake API "has"
        self.base_url = base_url  # endpoint URL used to build paging links

    def get_links(self, per_page, current_page, pages, folder_id=None):
        """Build the self/first/prev/next/last paging links for a response."""
        last_page = pages

        def _clean_base_url(url):  # Prevent duplicate qs params
            return furl(url).remove(['per_page', 'current_page', 'pages', 'folder_id']).copy()

        links = dict()
        default_link_values = {
            "per_page": per_page,
            "page": current_page
        }
        if folder_id is not None:
            default_link_values['folder_id'] = folder_id

        links["self"] = _clean_base_url(self.base_url).add(default_link_values).url

        if last_page > 1:
            if current_page != last_page:
                links["last"] = _clean_base_url(self.base_url).add(default_link_values).url
            if current_page < last_page:
                # Copy so the shared defaults are not mutated in place
                # (the original aliased the dict across all three links).
                next_link_values = dict(default_link_values)
                next_link_values['page'] = current_page + 1
                links["next"] = _clean_base_url(self.base_url).add(next_link_values).url
            if current_page != 1:
                first_link_values = dict(default_link_values)
                first_link_values['page'] = 1
                links["first"] = _clean_base_url(self.base_url).add(first_link_values).url
                prev_link_values = dict(default_link_values)
                prev_link_values['page'] = current_page - 1
                links["prev"] = _clean_base_url(self.base_url).add(prev_link_values).url
        return links

    def calculate_number_remaining(self, per_page, current_page):
        """Return how many items remain from ``current_page`` onwards."""
        if current_page == 1:
            return self.total
        total_done = (current_page - 1) * per_page
        total_remaining = self.total - total_done
        return 0 if total_remaining <= 0 else total_remaining

    def parse_url(self, url):
        """Extract ``(per_page, page, total_pages)`` from a request URL."""
        url = furl(url.geturl())
        per_page = int(url.args.get("per_page", 50))
        current_page = int(url.args.get("page", 1))
        pages = math.ceil(self.total / per_page)
        return per_page, current_page, pages

    def parse_url_with_folder(self, url):
        """Like :meth:`parse_url`, additionally returning ``folder_id``."""
        per_page, current_page, pages = self.parse_url(url)
        url = furl(url.geturl())
        folder_id = url.args.get("folder_id", None)
        return per_page, current_page, pages, folder_id

    def create_item(self):
        """Produce one fake item; must be overridden by subclasses."""
        raise NotImplementedError("Implemented in subclass")

    def create_items(self, per_page, current_page, pages):
        """Create up to ``per_page`` items for the requested page."""
        items = []
        remaining = self.calculate_number_remaining(per_page, current_page)
        if remaining > 0:
            remaining = remaining if remaining < per_page else per_page
            for x in range(0, remaining):
                items.append(self.create_item())
        return items
|
odarbelaeze/dummy-project | firtFile.py | Python | mit | 26 | 0.038462 | # The scripts beg | in here | |
jlane9/selenium_data_attributes | sda/site.py | Python | mit | 1,806 | 0.000554 | # -*- coding: utf-8 -*-
"""sda.site
.. codeauthor:: John Lane <jlane@fanthreesixty.com>
"""
from sda.element import SeleniumObject
try:
from urlparse import urljoin, urlparse
except (ImportError, ModuleNotFoundError):
from urllib.parse import urljoin, urlparse
__all__ = ['Site']
class Site(SeleniumObject):
    """The Site Implementation

    The intention for the Site object is to contain all website pages.
    An example usage of this might be:

    Let's say we have the following file structure

    my_project
    - __init__.py
    - main.py
    - page_1
        - __init__.py
        - fixtures.py
        - locators.py
        - page.py
    - page_2
        - __init__.py
        - fixtures.py
        - locators.py
        - page.py
    - site
        - __init__.py
        - site.py
    - settings.py

    site/site.py

    .. code-block:: python

        from sda.site import Site
        from page_1.page import Page1
        from page_2.page import Page2

        class ExampleSite(Site):

            def __init__(self, web_driver):
                super(ExampleSite, self).__init__(web_driver)
                self.page_1 = Page1(web_driver)
                self.page_2 = Page2(web_driver)
    """
    @property
    def domain(self):
        """Returns the domain for a website

        e.g. ``example.com`` for ``http://example.com/some/path``

        :return: domain
        :rtype: str
        """
        return urlparse(self.url).netloc
    @property
    def path(self):
        """Returns the website path

        e.g. ``/some/path`` for ``http://example.com/some/path``

        :return: path
        :rtype: str
        """
        return urlparse(self.url).path
    @property
    def url(self):
        """Current page URL

        Read live from the underlying WebDriver on each access.

        :return: Page URL
        :rtype: str
        """
        return self.driver.current_url
|
leedm777/asterisk | contrib/ast-db-manage/config/versions/45e3f47c6c44_add_pjsip_endpoint_identifier_order.py | Python | gpl-2.0 | 462 | 0.008658 | """add pjsip endpoint_identifier_order
Revision ID: 45e3f47c6c44
Revises: 945b1098bdd
Create Date: 2015-03-02 09:32:20.632015
"""
# revision identifiers, used by Alembic.
revision = '45e3f47c6c44'
down_revision = '945b1098bdd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add the comma-separated 'endpoint_identifier_order' option to the
    # PJSIP global configuration table.
    op.add_column('ps_globals', sa.Column('endpoint_identifier_order', sa.String(40)))
def downgrade():
    # Revert the upgrade: drop the column added above.
    op.drop_column('ps_globals', 'endpoint_identifier_order')
|
Mirantis/tempest | tempest/api/compute/admin/test_quotas_negative.py | Python | apache-2.0 | 7,143 | 0 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class QuotasAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """Negative admin quota tests (JSON API).

    Each test drives one quota (cores, ram, instances, security groups or
    security group rules) to zero and verifies that creating the matching
    resource is then rejected.
    """
    force_tenant_isolation = True

    @classmethod
    def setUpClass(cls):
        super(QuotasAdminNegativeTestJSON, cls).setUpClass()
        cls.client = cls.os.quotas_client
        cls.adm_client = cls.os_adm.quotas_client
        cls.sg_client = cls.security_groups_client
        # NOTE(afazekas): these test cases should always create and use a new
        # tenant most of them should be skipped if we can't do that
        cls.demo_tenant_id = cls.client.tenant_id

    @test.attr(type=['negative', 'gate'])
    def test_update_quota_normal_user(self):
        """A non-admin user must not be allowed to update quotas."""
        self.assertRaises(exceptions.Unauthorized,
                          self.client.update_quota_set,
                          self.demo_tenant_id,
                          ram=0)

    # TODO(afazekas): Add dedicated tenant to the skiped quota tests
    # it can be moved into the setUpClass as well
    @test.skip_because(bug="1298131")
    @test.attr(type=['negative', 'gate'])
    def test_create_server_when_cpu_quota_is_full(self):
        # Disallow server creation when tenant's vcpu quota is full
        resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
        default_vcpu_quota = quota_set['cores']
        vcpu_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set = self.adm_client.update_quota_set(self.demo_tenant_id,
                                                           force=True,
                                                           cores=vcpu_quota)
        # Restore the original quota even if the assertion fails.
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        cores=default_vcpu_quota)
        self.assertRaises(exceptions.Unauthorized, self.create_test_server)

    @test.skip_because(bug="1298131")
    @test.attr(type=['negative', 'gate'])
    def test_create_server_when_memory_quota_is_full(self):
        # Disallow server creation when tenant's memory quota is full
        resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
        default_mem_quota = quota_set['ram']
        mem_quota = 0  # Set the quota to zero to conserve resources
        self.adm_client.update_quota_set(self.demo_tenant_id,
                                         force=True,
                                         ram=mem_quota)
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        ram=default_mem_quota)
        self.assertRaises(exceptions.Unauthorized, self.create_test_server)

    @test.skip_because(bug="1298131")
    @test.attr(type=['negative', 'gate'])
    def test_create_server_when_instances_quota_is_full(self):
        # Once instances quota limit is reached, disallow server creation
        resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
        default_instances_quota = quota_set['instances']
        instances_quota = 0  # Set quota to zero to disallow server creation
        self.adm_client.update_quota_set(self.demo_tenant_id,
                                         force=True,
                                         instances=instances_quota)
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        instances=default_instances_quota)
        self.assertRaises(exceptions.Unauthorized, self.create_test_server)

    @test.skip_because(bug="1186354",
                       condition=CONF.service_available.neutron)
    @test.attr(type='gate')
    def test_security_groups_exceed_limit(self):
        # Negative test: Creation Security Groups over limit should FAIL
        resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
        default_sg_quota = quota_set['security_groups']
        sg_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set =\
            self.adm_client.update_quota_set(self.demo_tenant_id,
                                             force=True,
                                             security_groups=sg_quota)
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id,
                        security_groups=default_sg_quota)
        # Check we cannot create anymore
        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
        # will be raised when out of quota
        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                          self.sg_client.create_security_group,
                          "sg-overlimit", "sg-desc")

    @test.skip_because(bug="1186354",
                       condition=CONF.service_available.neutron)
    @test.attr(type=['negative', 'gate'])
    def test_security_groups_rules_exceed_limit(self):
        # Negative test: Creation of Security Group Rules should FAIL
        # when we reach limit maxSecurityGroupRules
        resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
        default_sg_rules_quota = quota_set['security_group_rules']
        sg_rules_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set =\
            self.adm_client.update_quota_set(
                self.demo_tenant_id,
                force=True,
                security_group_rules=sg_rules_quota)
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id,
                        security_group_rules=default_sg_rules_quota)
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup =\
            self.sg_client.create_security_group(s_name, s_description)
        self.addCleanup(self.sg_client.delete_security_group,
                        securitygroup['id'])
        secgroup_id = securitygroup['id']
        ip_protocol = 'tcp'
        # Check we cannot create SG rule anymore
        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
        # will be raised when out of quota
        self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
                          self.sg_client.create_security_group_rule,
                          secgroup_id, ip_protocol, 1025, 1025)
class QuotasAdminNegativeTestXML(QuotasAdminNegativeTestJSON):
    # Re-run the whole JSON suite through the XML API interface.
    _interface = 'xml'
|
h4ck3rm1k3/github3.py | github3/gists/comment.py | Python | bsd-3-clause | 961 | 0 | # -*- coding: utf-8 -*-
"""
github3.gists.comment
---------------------
Module containing the logic for a GistComment
"""
from __future__ import unicode_literals

from ..models import BaseComment
from ..users import User
class GistComment(BaseComment):
    """This object represents a comment on a gist.

    Two comment instances can be checked like so::

        c1 == c2
        c1 != c2

    And is equivalent to::

        c1.id == c2.id
        c1.id != c2.id

    See also: http://developer.github.com/v3/gists/comments/
    """
    def _update_attributes(self, comment):
        # API URL of this comment; BaseComment helpers use it for updates.
        self._api = comment.get('url')
        #: :class:`User <github3.users.User>` who made the comment
        #: Unless it is not associated with an account
        self.user = None
        if comment.get('user'):
            self.user = User(comment.get('user'), self) # (No coverage)
    def _repr(self):
        # NOTE(review): assumes self.user is set; anonymous comments would
        # raise AttributeError here — confirm against BaseComment usage.
        return '<Gist Comment [{0}]>'.format(self.user.login)
|
hailinzeng/zhuaxia | zhuaxia/i18n/msg_cn.py | Python | mit | 7,738 | 0.01405 | # -*- coding:utf-8 -*-
# User-facing message catalog (Simplified Chinese). All values are runtime
# strings and must not be translated; xm = xiami, 163 = NetEase.
head_xm = u'[虾]'
head_163 = u'[易]'
#summary
fmt_summary_skip_title = u'增量下载忽略列表:'
fmt_summary_skip_header = u'来源\t 最后下载时间 \t歌曲名\t保存路径'
fmt_summary_success_title = u'成功下载列表:'
fmt_summary_success_header = u'歌曲名\t保存路径'
fmt_summary_failed_title = u'失败下载列表:'
fmt_summary_failed_header = u'歌曲名\t保存路径'
summary_prompt = u'(q)退出/(v)查看下载报告/(s)保存下载报告. 请输入 [q/v/s]:'
summary_prompt_err = u" 无效输入\n"
summary_saved = u" 下载报告保存于: %s"
history_clear_confirm = u" 找到 %d 条下载记录, 确认要清空所有下载历史记录? [y/n]"
history_clearing = u" 忽略其它选项,清空zhuaxia下载历史记录..."
history_cleared = u" zhuaxia所有下载记录已清空"
history_exporting = u" 忽略其它选项, 正在导出下载历史记录..."
history_exported = u" 下载历史记录导出到: %s"
fmt_insert_hist = u' 为成功下载建立历史记录...'
fmt_all_finished = u' 所有任务都已完成'
fmt_dl_lyric_start = u' 开始下载歌词...'
fmt_dl_header = u' 保存目录:[%s] | 线程池:[%d]\n'
fmt_dl_progress = u'总进度[%d/%d]:'
fmt_dl_last_finished = u' 最近%d个完成任务:\n'
fmt_dl_failed_jobs = u' 失败的任务:\n'
fmt_quality_fallback = u'歌曲(%s) 无法获取128kbps资源,尝试获取低质量资源'
fmt_init_song = u'开始初始化歌曲[%s]'
fmt_err_song_parse = u'无法解析/下载歌曲链接: [%s]'
fmt_init_song_ok = u'初始化歌曲成功[%s]'
fmt_init_album = u'开始初始化专辑[%s]'
fmt_create_album_dir = u'创建专辑目录[%s]'
fmt_dl_album_cover = u'下载专辑[%s]封面'
fmt_save_album_desc = u'保存专辑[%s]介绍'
# The two assignments below were corrupted in the source dump
# ("fmt_init_fav |" / "fmt_init_col | lect"); tokens rejoined here.
fmt_init_fav = u'开始初始化用户收藏[%s]'
fmt_parse_song_url = u'解析歌曲链接[%s]'
fmt_init_fav_ok = u'初始化用户收藏完毕[%s]'
fmt_init_collect = u'开始初始化精选集[%s]'
fmt_init_collect_ok = u'初始化精选集完毕[%s]'
fmt_init_artist = u'初始化艺人TopSong[%s]'
fmt_init_artist_ok = u'初始化艺人TopSong完毕[%s]'
# Login / quality messages (xm = xiami, 163 = NetEase).
dl_128kbps_xm = u' 不登录虾米进行下载, 虾米资源质量为128kbps.'
fmt_login_ok_xm = u'[Login] 用户: %s (id:%s) 登录成功.'
login_xm = u' 登录虾米...'
login_err_xm = u' 登录失败, 略过登录, 虾米资源质量为 128kbps.'
short_xm = head_xm + ' '
short_163 = head_163 + ' '
# Parsing / download progress messages.
fmt_parsing = u'解析: "%s" ..... [%s] %s'
fmt_has_song_nm = u'包含%d首歌曲.'
fmt_single_song = u'[曲目] %s'
init_proxypool = u'初始化proxy pool'
fmt_init_proxypool_done = u'proxy pool:[%d] 初始完毕'
fmt_skip_dl_nm = u' 启用增量下载, 忽略%d首曾下载过的歌曲.'
fmt_total_dl_nm = u' 下载任务总数: %s\n 3秒后开始下载'
no_dl_task = u' 没有可下载任务,自动退出.'
fmt_skip_unknown_url = u' 略过不能识别的url [%s].'
fmt_xm_unknown_url = u'%s [虾]不能识别的url [%s].'
fmt_163_unknow_url = u'%s [易]不能识别的url [%s].'
# Resource-type display labels.
song = u'曲目'
album = u'专辑'
playlist = u'歌单'
artistTop = u'艺人热门歌曲'
collection = u'精选集'
favorite = u'用户收藏'
warning_many_collections = u'[虾]如用户收藏较多,解析歌曲需要较长时间,请耐心等待'
fmt_links_in_file = u' 文件包含链接总数: %d'
experimental = u'-p 选项为实验性选项. 自动获取代理服务器池解析/下载。因代理服务器稳定性未知,下载可能会慢或不稳定。'
ver_text = u'zhuaxia (抓虾) '
# Full CLI help text printed by `zx -h` (user-facing, kept in Chinese).
help_info = u"""
zhuaxia (抓虾) -- 抓取[虾米音乐]和[网易云音乐]的 mp3 音乐
[CONFIG FILE:] $HOME/.zhuaxia/zhuaxia.conf
缺省配置文件会在第一次运行zhuaxia时自动生成
[OPTIONS]
-H : 首选HQ质量(320kbps),
> 虾米音乐 <
- 配置文件中需给出正确登录信箱和密码, 登录用户需拥有VIP身份
- 用户需在xiami vip设置页面设置默认高音质
- 此选项对不满足上两项情况无效,仍下载128kbps资源
> 网易音乐 <
-无需特殊要求,直接下载高音质资源
-h : 显示帮助
-l : 下载歌曲的lrc格式歌词
-f : 从文件下载
-i : 增量下载. zhuaxia依靠历史记录来判定一首歌是否曾被下载
曾经被下载过的歌曲将被略过. 判断一首歌曲是否被下载过靠3个属性:
song_id(虾米或网易的歌曲id), source (虾米/网易), quality (H/L)
-e : 导出当前下载历史记录到文件
如果这个选项被使用, 其它选项将被忽略
-d : 清空当前下载历史记录
如果这个选项被使用, 其它选项将被忽略.
-e 和-d 选项不能同时使用
-v : 显示版本信息
-p : (实验性选项)使用代理池下载
在下载/解析量大的情况下,目标服务器会对禁止频繁的请求,所以zhuaxia可以自动获取 代理来解析和下载资源。因为获取的代理速度/可靠性不一,下载可能会缓慢或不稳定。
[USAGE]
zx [OPTION] <URL>
: 下载指定URL资源, 抓虾自动识别链接, 支持
- [虾] 歌曲,专辑,精选集,用户收藏,艺人TopN
- [易] 歌曲,专辑,歌单,艺人TopN
例子:
zx "http://www.xiami.com/space/lib-song/u/25531126"
zx "http://music.163.com/song?id=27552647"
zx [OPTION] -f <file>
: 多个URL在一个文件中,每个URL一行。 URLs可以是混合[虾]和[易]的不同类型音乐资源。例子:
>$ cat /tmp/foo.txt
http://music.163.com/artist?id=5345
http://www.xiami.com/song/1772130322
http://music.163.com/album?id=2635059
http://www.xiami.com/album/32449
>$ zx -f /tmp/foo.txt
Other Examples:
下载歌曲和歌词:
zx -l "http://music.163.com/song?id=27552647"
增量下载, 并下载歌词
zx -li "http://music.163.com/song?id=27552647"
导出下载历史记录. 文件会被保存在配置文件设置的"download.dir"目录中
zx -e
清空所有下载历史记录
zx -d
[AUTHOR]
Kai Yuan <kent.yuan(at)gmail.com>
please report bugs or feature requests at https://github.com/sk1418/zhuaxia/issues
"""
|
david-ragazzi/nupic | nupic/research/monitor_mixin/plot.py | Python | gpl-3.0 | 4,761 | 0.004621 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import traceback
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
print "Cannot import matplotlib. Plot class will not work."
print traceback.format_exc() + "\n"
class Plot(object):
  """Interactive matplotlib figure that monitor mixins render into."""
  def __init__(self, monitor, title):
    """
    @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                      this plot
    @param title (string) Plot title
    """
    self._monitor = monitor
    self._title = title
    self._fig = self._initFigure()
    # Interactive mode so later draw() calls refresh the open window.
    plt.ion()
    plt.show()
  def _initFigure(self):
    # Create the figure titled "[mmName] title" (or just the title).
    fig = plt.figure()
    fig.suptitle(self._prettyPrintTitle())
    return fig
  def _prettyPrintTitle(self):
    # Prefix the monitor's name when one was assigned.
    if self._monitor.mmName is not None:
      return "[{0}] {1}".format(self._monitor.mmName, self._title)
    return self._title
  def addGraph(self, data, position=111, xlabel=None, ylabel=None):
    """ Adds a graph to the plot's figure.
    @param data See matplotlib.Axes.plot documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    """
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.plot(data)
    plt.draw()
  def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
                   bins=None):
    """ Adds a histogram to the plot's figure.
    @param data See matplotlib.Axes.hist documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    """
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.hist(data, bins=bins, color="green", alpha=0.8)
    plt.draw()
  def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
                 aspect="auto", interpolation="nearest"):
    """ Adds an image to the plot's figure.
    @param data a 2D array. See matplotlib.Axes.imshow documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    @param cmap color map used in the rendering
    @param aspect how aspect ratio is handled during resize
    @param interpolation interpolation method
    """
    if cmap is None:
      # The default colormodel is an ugly blue-red model.
      cmap = cm.Greys
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
    plt.draw()
  def _addBase(self, position, xlabel=None, ylabel=None):
    """ Adds a subplot to the plot's figure at specified position.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    @returns (matplotlib.Axes) Axes instance
    """
    ax = self._fig.add_subplot(position)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
|
dcondrey/scrapy-spiders | dist/spiders/newenglandfilm.py | Python | mit | 1,625 | 0.009231 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from my_settings import name_file, test_mode, difference_days
from datetime import datetime, timedelta
print "Run spider NewenglandFilm"
# Output file is opened in append mode; addresses already harvested in
# earlier runs are loaded so duplicates are skipped across runs.
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()
# In test mode, pretend "today" is several days in the past so fixture
# pages with older postings still match.
if test_mode:
    current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
    current_date = datetime.today().strftime('%m/%d/%Y')
class NewenglandFilm(Spider):
name = 'newenglandfilm'
allowed_domains = ["newenglandfilm.com"]
start_urls = ["http://newenglandfilm.com/jobs.htm"]
def parse(self, response):
sel = Selector(response)
for num_div in xrange(1, 31):
date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.form | at(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+ | ?\.[a-zA-Z]{2,6})')
if current_date == date:
for address in email:
if address + "\n" not in email_in_file and address not in email_current_session:
file_output.write(address + "\n")
email_current_session.append(address)
print "Spider: NewenglandFilm. Email {0} added to file".format(address)
else:
print "Spider: NewenglandFilm. Email {0} already in the file".format(address) |
LinkItONEDevGroup/LASS | LASS-Simulator/codes/ut.py | Python | mit | 1,669 | 0.014979 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# @file ut.py
# @brief The main unit test program of whole project
# README: organize the unit tests in the number range
# refer UTGeneral functions
# print the suggested procedure in the console
# print the suggested check procedure in the console
# support current supported important features
# this unit test include in the release procedure
# MODULE_ARCH:
# CLASS_ARCH: UTGeneral
# GLOBAL USAGE:
#standard
import unittest
#homemake
import lib.globalclasses as gc
from lib.const import *
##### Unit test section ####
#the test ID provide the order of testes.
class UTGeneral(unittest.TestCase):
    #local
    #ID:0-99
    def test_01_setting_signature(self):
        """Sanity-check the loaded settings carry the expected signature."""
        print("\nThe expected unit test environment is")
        print("1. TBD")
        self.assertEqual(gc.SETTING["SIGNATURE"],'LASS-SIM')
    def test_02_check_library(self):
        """Verify all required third-party libraries are importable."""
        #check external library that need to be installed
        import simpy
        from configobj import ConfigObj
        import urllib
        import simplejson
        import requests
        from vincenty import vincenty
        import matplotlib
        import numpy
        import pygrib
    def test_03_check_dir_exist(self):
        # Placeholder: directory-layout checks not implemented yet.
        pass
    def test_04_check_grib(self):
        """Open the bundled GRIB2 sample file to confirm pygrib works."""
        import pygrib # import pygrib interface to grib_api
        grbs = pygrib.open('include/M-A0060-000.grb2')
        print("grbs[:4] count=%i" %(len(grbs[:4])))
    def test_11_loadjson(self):
        """Load the LASS site list and check it is non-empty."""
        gc.LASSDATA.load_site_list()
        print("LASS sites count = %i" % (len(gc.LASSDATA.sites)))
        self.assertTrue(len(gc.LASSDATA.sites)>0)
frascoweb/frasco-menu | frasco_menu.py | Python | mit | 2,929 | 0.002048 | from frasco import Feature, action, hook, current_app, g, request, url_for
class MenuMissingError(Exception):
    """Raised when a menu name is looked up but was never declared."""
    pass
class Menu(object):
    """A single entry in a navigation menu (possibly with child entries).

    An entry may point to a fixed URL, a Flask view endpoint, or be a pure
    separator. Extra keyword options are kept verbatim in ``options``.
    """
    def __init__(self, name=None, label=None, view=None, login_required=None, childs=None, separator=False, url=None, **options):
        self.name = name
        # BUG FIX: separators are created as Menu(separator=True) with no
        # name; the original ``label or name.capitalize()`` crashed on None.
        self.label = label or (name.capitalize() if name else None)
        self.view = view
        # None = always visible; True/False = only when (not) logged in.
        self.login_required = login_required
        self.childs = childs or []
        self.separator = separator
        self._url = url
        self.options = options

    def url(self, **kwargs):
        """Return the target URL: explicit url > view endpoint > '#'."""
        if self._url:
            return self._url
        if self.view:
            return url_for(self.view, **kwargs)
        return "#"

    def add_child(self, *args, **kwargs):
        """Append a new child Menu built from the given arguments."""
        self.childs.append(Menu(*args, **kwargs))

    def is_current(self):
        """True when this entry matches the active menu (or endpoint)."""
        current = getattr(g, "current_menu", None)
        if current is None:
            return request.endpoint == self.view
        return self.name == current

    def is_visible(self):
        """Apply the login_required visibility rule for the current user."""
        if current_app.features.exists("users") and self.login_required is not None:
            if (self.login_required and not current_app.features.users.logged_in()) or \
               (not self.login_required and current_app.features.users.logged_in()):
                return False
        return True

    def __iter__(self):
        return iter(self.childs)
class MenuFeature(Feature):
    """Frasco feature that builds named menus from app configuration and
    exposes them to templates via the ``get_menu`` and ``menu`` globals."""
    name = "menu"
    defaults = {"default": None}

    def init_app(self, app):
        """Parse feature options into Menu trees.

        Each item spec may be: a dict mapping name -> view string / child
        list / option dict, the literal "--" for a separator, or a plain
        string used as both the item name and its view endpoint.
        """
        app.add_template_global(lambda n: current_app.features.menu[n], "get_menu")
        app.add_template_global(Menu, "menu")
        self.menus = {}
        for name, items in self.options.iteritems():
            if name in self.defaults.keys():
                continue
            self.menus[name] = Menu(name)
            for itemspec in items:
                if isinstance(itemspec, dict):
                    iname, options = itemspec.popitem()
                    if isinstance(options, str):
                        options = {"view": options}
                    elif isinstance(options, list):
                        options = {"childs": options}
                    item = Menu(iname, **options)
                elif itemspec == "--":
                    item = Menu(separator=True)
                else:
                    # BUG FIX: the original passed view=iname, but iname is
                    # only bound in the dict branch above (NameError or a
                    # stale value here); a bare string means the name is
                    # also the view endpoint.
                    item = Menu(itemspec, view=itemspec)
                self.menus[name].childs.append(item)

    def __getitem__(self, name):
        """Return the menu registered under ``name``; raise if unknown."""
        if name not in self.menus:
            raise MenuMissingError("Menu '%s' not found" % name)
        return self.menus[name]

    def ensure(self, name):
        """Return the menu ``name``, creating an empty one if missing."""
        if name not in self.menus:
            self.menus[name] = Menu(name)
        return self.menus[name]

    @hook()
    def before_request(self):
        # Reset the per-request current menu to the configured default.
        g.current_menu = self.options["default"]

    @action(default_option="name")
    def set_current_menu(self, name):
        """Mark ``name`` as the active menu for the current request."""
        g.current_menu = name
kajgan/e2 | lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py | Python | gpl-2.0 | 4,208 | 0.027329 | # -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eEnv
import os
SKINXML = "skin.xml"
DEFAULTSKIN = "<Default Skin>"
class SkinSelector(Screen):
# for i18n:
# _("Choose your Skin")
skinlist = []
root = os.path.join(eEnv.resolve("${datadir}"),"enigma2")
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.skinlist = []
self.previewPath = ""
if os.path.exists(os.path.join(self.root, SKINXML)):
self.skinlist.append(DEFAULTSKIN)
for root, dirs, files in os.walk(self.root, followlinks=True):
for subdir in dirs:
dir = os.path.join(root,subdir)
if os.path.exists(os.path.join(dir,SKINXML)):
self.skinlist.append(subdir)
dirs = []
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
tmp = config.skin.primary_skin.value.find("/"+SKINXML)
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("STB-GUI Skinselector\n\nIf you experience any problems please contact\nstephan@reichholf.net\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def ok(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
self.skinfile = "."
else:
self.skinfile = self["SkinList"].getCurrent()
self.skinfile = os.path.join(self.skinfile, SKINXML)
print "Skinselector: Selected Skin: "+self.root+self.skinfile
restartbox = s | elf.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
pngpath = "."
else:
pngpath = self["SkinList"].getCurrent()
pngpath = os.path.join(os.path.join(s | elf.root, pngpath), "prev.png")
if not os.path.exists(pngpath):
pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
config.skin.primary_skin.value = self.skinfile
config.skin.primary_skin.save()
self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
	# Plugin entry point: open the skin selection screen.
	session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
	"""Menu hook: contribute the 'Skin' entry to the GUI settings menu."""
	if menuid != "ui_menu":
		return []
	return [(_("Skin"), SkinSelMain, "skin_selector", None)]
def Plugins(**kwargs):
	# Register the skin selector as a (non-restart-requiring) menu plugin.
	return PluginDescriptor(name="Skinselector", description="Select Your Skin", where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SkinSelSetup)
|
GbalsaC/bitnamiP | venv/src/pystache-custom/pystache_custom/tests/examples/delimiters.py | Python | agpl-3.0 | 281 | 0.003559 |
"""
TODO: add a docstring.
"""
class Delimiters(object):
    """Context object for the delimiters example template.

    Each method supplies the text rendered by one section of the template.
    (Fixes a stray dataset separator that had corrupted third()'s string.)
    """
    def first(self):
        return "It worked the first time."
    def second(self):
        return "And it worked the second time."
    def third(self):
        return "Then, surprisingly, it worked the third time."
| |
bbchung/clighter | misc/clang/cindex.py | Python | gpl-3.0 | 107,742 | 0.001327 | #===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Indexing Library Bindings
===============================
This module provides an interface to the Clang indexing library. It is a
low-level interface to the indexing library which attempts to match the Clang
API directly while also being "pythonic". Notable differences from the C API
are:
* string results are returned as Python strings, not CXString objects.
* null cursors are translated to None.
* access to child cursors is done via iteration, not visitation.
The major indexing objects are:
Index
The top-level object which manages some global library state.
TranslationUnit
High-level object encapsulating the AST for a single translation unit. These
can be loaded from .ast files or parsed on the fly.
Cursor
Generic object for representing a node in the AST.
SourceRange, SourceLocation, and File
Objects representing information about the input source.
Most object information is exposed using properties, when the underlying API
call is efficient.
"""
# TODO
# ====
#
# o API support for invalid translation units. Currently we can't even get the
# diagnostics on failure because they refer to locations in an object that
# will have been invalidated.
#
# o fix memory management issues (currently client must hold on to index and
# translation unit, or risk crashes).
#
# o expose code completion APIs.
#
# o cleanup ctypes wrapping, would be nice to separate the ctypes details more
# clearly, and hide from the external interface (i.e., help(cindex)).
#
# o implement additional SourceLocation, SourceRange, and File methods.
from ctypes import *
import collections
import clang.enumerations
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)  # opaque object pointer marshalled as void**
# Registry of ctypes callback wrappers, populated as API functions are bound.
callbacks = {}
### Exception Classes ###
class TranslationUnitLoadError(Exception):
    """Raised when a TranslationUnit could not be created.

    libclang reported a failure while parsing or loading, so no
    TranslationUnit instance could be instantiated.

    FIXME: Make libclang expose additional error information in this scenario.
    """
class TranslationUnitSaveError(Exception):
    """Represents an error that occurred when saving a TranslationUnit.

    Each error has associated with it an enumerated value, accessible under
    e.save_error. Consumers can compare the value with one of the ERROR_
    constants in this class.
    """

    # Indicates that an unknown error occurred. This typically indicates that
    # I/O failed during save.
    ERROR_UNKNOWN = 1

    # Indicates that errors during translation prevented saving. The errors
    # should be available via the TranslationUnit's diagnostics.
    ERROR_TRANSLATION_ERRORS = 2

    # Indicates that the translation unit was somehow invalid.
    ERROR_INVALID_TU = 3

    def __init__(self, enumeration, message):
        """Build the error from libclang's enumerated code and a message.

        enumeration -- one of the ERROR_ constants (1..3)
        message -- human-readable description of the failure

        Raises ValueError for codes outside the known range.  (This was a
        generic ``Exception`` before; ValueError is a subclass, so existing
        ``except Exception`` handlers still catch it.)
        """
        assert isinstance(enumeration, int)
        if enumeration < 1 or enumeration > 3:
            # Specific exception type for invalid input; message unchanged.
            raise ValueError("Encountered undefined TranslationUnit save error "
                            "constant: %d. Please file a bug to have this "
                            "value supported." % enumeration)
        self.save_error = enumeration
        Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
### Structures and Utility Classes ###
class CachedProperty(object):
    """Decorator that lazily computes and caches a property value.

    The first time the property is accessed through an instance, the wrapped
    function is executed and its return value is stored on the instance under
    the same attribute name, shadowing this descriptor.  Subsequent accesses
    are therefore plain attribute reads with no function call.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        # Mirror the wrapped function's docstring.  Use getattr instead of
        # the previous bare ``except:`` so only a missing attribute is
        # tolerated, not arbitrary errors.
        self.__doc__ = getattr(wrapped, '__doc__', None)

    def __get__(self, instance, instance_type=None):
        # Accessed on the class itself: expose the descriptor object.
        if instance is None:
            return self

        # First instance access: compute, then cache on the instance so the
        # descriptor is bypassed from now on.
        value = self.wrapped(instance)
        setattr(instance, self.wrapped.__name__, value)

        return value
class _CXString(Structure):
    """Helper for transforming CXString results.

    Mirrors libclang's CXString struct; the layout declared in ``_fields_``
    must match the C definition exactly.
    """
    _fields_ = [("spelling", c_char_p), ("free", c_int)]

    def __del__(self):
        # Hand the string back to libclang so it can release any memory it
        # allocated for the spelling.
        conf.lib.clang_disposeString(self)

    @staticmethod
    def from_result(res, fn, args):
        # errcheck-style hook: unwrap the CXString into a plain Python string
        # as soon as it crosses the API boundary.
        assert isinstance(res, _CXString)
        return conf.lib.clang_getCString(res)
class SourceLocation(Structure):
    """
    A SourceLocation represents a particular location within a source file.

    Mirrors libclang's CXSourceLocation struct; the layout declared in
    ``_fields_`` must match the C definition exactly.
    """
    _fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
    # Lazily-populated per-instance cache of (File, line, column, offset);
    # see _get_instantiation().
    _data = None

    def _get_instantiation(self):
        # Resolve the location via libclang once and memoise the result on
        # the instance, so repeated property accesses are cheap.
        if self._data is None:
            f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
            conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
                                                    byref(c), byref(o))
            # A null file pointer means the location has no associated file;
            # map that to None instead of wrapping it.  (File is defined
            # elsewhere in this module.)
            if f:
                f = File(f)
            else:
                f = None
            self._data = (f, int(l.value), int(c.value), int(o.value))
        return self._data

    @staticmethod
    def from_position(tu, file, line, column):
        """
        Retrieve the source location associated with a given file/line/column in
        a particular translation unit.
        """
        return conf.lib.clang_getLocation(tu, file, line, column)

    @staticmethod
    def from_offset(tu, file, offset):
        """Retrieve a SourceLocation from a given character offset.

        tu -- TranslationUnit file belongs to
        file -- File instance to obtain offset from
        offset -- Integer character offset within file
        """
        return conf.lib.clang_getLocationForOffset(tu, file, offset)

    @property
    def file(self):
        """Get the file represented by this source location."""
        return self._get_instantiation()[0]

    @property
    def line(self):
        """Get the line represented by this source location."""
        return self._get_instantiation()[1]

    @property
    def column(self):
        """Get the column represented by this source location."""
        return self._get_instantiation()[2]

    @property
    def offset(self):
        """Get the file offset represented by this source location."""
        return self._get_instantiation()[3]

    def __eq__(self, other):
        # Equality is delegated entirely to libclang.
        return conf.lib.clang_equalLocations(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        if self.file:
            filename = self.file.name
        else:
            filename = None
        return "<SourceLocation file %r, line %r, column %r>" % (
            filename, self.line, self.column)
class SourceRange(Structure):
"""
A SourceRange describes a range of source locations within the source
code.
"""
_fields_ = [
("ptr_data", c_void_p * 2),
("begin_int_data", c_uint),
("end_int_data", | c_uint)]
# FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
# object.
@staticmethod
def from_locations(start, end):
return conf.lib.clang_getRange(start, end)
@property
def start(self):
"""
Return a SourceLocation representing the first character within a
source range.
"""
return conf.lib.clang_getRangeStart(self)
@property
def end(self):
"""
Return a SourceLocation representing the last character within a
source range.
"""
return conf.lib.clang_getRangeEnd(self)
def __eq__(self, other):
return conf.lib.clang_equalRanges(self, other)
def __ne__(self, other) |
SekoiaLab/Fastir_Collector | health/windows10StateMachine.py | Python | gpl-3.0 | 4,814 | 0.004155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from statemachine import _Statemachine
class Windows10StateMachine(_Statemachine):
    """Windows 10 flavour of the artefact-collection state machine.

    Every method delegates straight to the generic ``_Statemachine`` base
    class; this subclass exists so Windows-10-specific overrides can be added
    without touching whatever dispatcher picks a state machine per OS
    version (not visible in this file).
    """
    def __init__(self, params):
        _Statemachine.__init__(self, params)
    # -- raw collectors: fetch artefact lists from the base class ----------
    def _list_share(self):
        return super(Windows10StateMachine, self)._list_share()
    def _list_running(self):
        return super(Windows10StateMachine, self)._list_running()
    def _list_drives(self):
        return super(Windows10StateMachine, self)._list_drives()
    def _list_network_drives(self):
        return super(Windows10StateMachine, self)._list_network_drives()
    def _list_sessions(self):
        return super(Windows10StateMachine, self)._list_sessions()
    def _list_scheduled_jobs(self):
        return super(Windows10StateMachine, self)._list_scheduled_jobs()
    def _list_network_adapters(self):
        return super(Windows10StateMachine, self)._list_network_adapters()
    def _list_arp_table(self):
        return super(Windows10StateMachine, self)._list_arp_table()
    def _list_route_table(self):
        return super(Windows10StateMachine, self)._list_route_table()
    def _list_sockets_network(self):
        return super(Windows10StateMachine, self)._list_sockets_network()
    def _list_sockets_services(self):
        # NOTE(review): the name says "sockets_services" but this delegates
        # to _list_services() on the base class -- presumably intentional,
        # but worth confirming against the other OS state machines.
        return super(Windows10StateMachine, self)._list_services()
    def _list_kb(self):
        return super(Windows10StateMachine, self)._list_kb()
    # -- CSV output: collect an artefact list, then serialise it via the
    #    base-class _csv_* helpers ----------------------------------------
    def csv_list_drives(self):
        super(Windows10StateMachine, self)._csv_list_drives(self._list_drives())
    def csv_list_network_drives(self):
        super(Windows10StateMachine, self)._csv_list_network_drives(self._list_network_drives())
    def csv_list_share(self):
        super(Windows10StateMachine, self)._csv_list_share(self._list_share())
    def csv_list_running_proccess(self):
        super(Windows10StateMachine, self)._csv_list_running_process(self._list_running())
    def csv_hash_running_proccess(self):
        super(Windows10StateMachine, self)._csv_hash_running_process(self._list_running())
    def csv_list_sessions(self):
        super(Windows10StateMachine, self)._csv_list_sessions(self._list_sessions())
    def csv_list_scheduled_jobs(self):
        # NOTE(review): unlike its siblings this passes no artefact list --
        # the base-class helper apparently gathers its own data; confirm.
        super(Windows10StateMachine, self)._csv_list_scheduled_jobs()
    def csv_list_network_adapters(self):
        super(Windows10StateMachine, self)._csv_list_network_adapters(self._list_network_adapters())
    def csv_list_arp_table(self):
        super(Windows10StateMachine, self)._csv_list_arp_table(self._list_arp_table())
    def csv_list_route_table(self):
        super(Windows10StateMachine, self)._csv_list_route_table(self._list_route_table())
    def csv_list_sockets_networks(self):
        super(Windows10StateMachine, self)._csv_list_sockets_network(self._list_sockets_network())
    def csv_list_services(self):
        # NOTE(review): calls self._list_services(), which is not overridden
        # in this class and therefore resolves to the base class directly.
        super(Windows10StateMachine, self)._csv_list_services(self._list_services())
    def csv_list_kb(self):
        super(Windows10StateMachine, self)._csv_list_kb(self._list_kb())
    # -- JSON output: same pattern as the CSV methods above ----------------
    def json_list_drives(self):
        super(Windows10StateMachine, self)._json_list_drives(self._list_drives())
    def json_list_network_drives(self):
        super(Windows10StateMachine, self)._json_list_network_drives(self._list_network_drives())
    def json_list_share(self):
        super(Windows10StateMachine, self)._json_list_share(self._list_share())
    def json_list_running_proccess(self):
        super(Windows10StateMachine, self)._json_list_running_process(self._list_running())
    def json_hash_running_proccess(self):
        super(Windows10StateMachine, self)._json_hash_running_process(self._list_running())
    def json_list_sessions(self):
        super(Windows10StateMachine, self)._json_list_sessions(self._list_sessions())
    def json_list_scheduled_jobs(self):
        # NOTE(review): no artefact argument, same as csv_list_scheduled_jobs.
        super(Windows10StateMachine, self)._json_list_scheduled_jobs()
    def json_list_network_adapters(self):
        super(Windows10StateMachine, self)._json_list_network_adapters(self._list_network_adapters())
    def json_list_arp_table(self):
        super(Windows10StateMachine, self)._json_list_arp_table(self._list_arp_table())
    def json_list_route_table(self):
        super(Windows10StateMachine, self)._json_list_route_table(self._list_route_table())
    def json_list_sockets_networks(self):
        super(Windows10StateMachine, self)._json_list_sockets_network(self._list_sockets_network())
    def json_list_services(self):
        super(Windows10StateMachine, self)._json_list_services(self._list_services())
    def json_list_kb(self):
        super(Windows10StateMachine, self)._json_list_kb(self._list_kb())
|
ptitjes/quodlibet | quodlibet/formats/_id3.py | Python | gpl-2.0 | 17,996 | 0.000111 | # Copyright 2004-2013 Joe Wreschnig, Michael Urman, Niklas Janlert,
# Steven Robertson, Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import mutagen.id3
from quodlibet import config, const, print_d
from quodlibet import util
from quodlibet.util.iso639 import ISO_639_2
from quodlibet.util.path import get_temp_cover_file
from quodlibet.util.string import isascii
from ._audio import AudioFile, translate_errors, AudioFileError
from ._image import EmbeddedImage, APICType
def encoding_for(s):
    """Return the ID3 text-encoding ID best suited for string `s`.

    Pure-ASCII text gets encoding 3 (UTF-8 in ID3v2.4); anything else
    gets encoding 1 (UTF-16).
    """
    if isascii(s):
        return 3
    return 1
# ReplayGain tag keys.  Values found in TXXX frames with these descriptions
# replace any gain previously read from RVA2 frames (see ID3File.__init__).
RG_KEYS = [
    "replaygain_track_peak", "replaygain_track_gain",
    "replaygain_album_peak", "replaygain_album_gain",
]
# ID3 is absolutely the worst thing ever.
class ID3File(AudioFile):
# http://www.unixgods.org/~tilo/ID3/docs/ID3_comparison.html
# http://www.id3.org/id3v2.4.0-frames.txt
IDS = {"TIT1": "grouping",
"TIT2": "title",
"TIT3": "version",
"TPE1": "artist",
"TPE2": "performer",
"TPE3": "conductor",
"TPE4": "arranger",
"TEXT": "lyricist",
"TCOM": "composer",
"TENC": "encodedby",
"TALB": "album",
"TRCK": "tracknumber",
"TPOS": "discnumber",
"TSRC": "isrc",
"TCOP": "copyright",
"TPUB": "organization",
"TSST": "discsubtitle",
"TOLY": "author",
"TMOO": "mood",
"TBPM": "bpm",
"TDRC": "date",
"TDOR": "originaldate",
"TOAL": "originalalbum",
"TOPE": "originalartist",
"WOAR": "website",
"TSOP": "artistsort",
"TSOA": "albumsort",
"TSOT": "titlesort",
"TSO2": "albumartistsort",
"TSOC": "composersort",
"TMED": "media",
"TCMP": "compilation",
# TLAN requires an ISO 639-2 language code, check manually
#"TLAN": "language"
}
SDI = dict([(v, k) for k, v in IDS.items()])
# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
TXXX_MAP = {
u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
u"MusicBrainz Artist Id": "musicbrainz_artistid",
u"MusicBrainz Album Id": "musicbrainz_albumid",
u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
u"MusicBrainz TRM Id": "musicbrainz_trmid",
u"MusicIP PUID": "musicip_puid",
u"MusicMagic Fingerprint": "musicip_fingerprint",
u"MusicBrainz Album Status": "musicbrainz_albumstatus",
u"MusicBrainz Album Type": "musicbrainz_albumtype",
u"MusicBrainz Album Release Country": "releasecountry",
u"MusicBrainz Disc Id": "musicbrainz_discid",
u"ASIN": "asin",
u"ALBUMARTISTSORT": "albumartistsort",
u"BARCODE": "barcode",
}
PAM_XXXT = dict([(v, k) for k, v in TXXX_MAP.items()])
Kind = None
def __init__(self, filename):
with translate_errors():
audio = self.Kind(filename)
if audio.tags is None:
audio.add_tags()
tag = audio.tags
self._parse_info(audio.info)
for frame in tag.values():
if frame.FrameID == "APIC" and len(frame.data):
self.has_images = True
continue
elif frame.FrameID == "TCON":
self["genre"] = "\n".join(frame.genres)
continue
elif (frame.FrameID == "UFID" and
frame.owner == "http://musicbrainz.org"):
self["musicbrainz | _trackid"] = frame.data.decode("utf-8",
| "replace")
continue
elif frame.FrameID == "POPM":
rating = frame.rating / 255.0
if frame.email == const.EMAIL:
try:
self.setdefault("~#playcount", frame.count)
except AttributeError:
pass
self.setdefault("~#rating", rating)
elif frame.email == config.get("editing", "save_email"):
try:
self["~#playcount"] = frame.count
except AttributeError:
pass
self["~#rating"] = rating
continue
elif frame.FrameID == "COMM" and frame.desc == "":
name = "comment"
elif frame.FrameID in ["COMM", "TXXX"]:
if frame.desc.startswith("QuodLibet::"):
name = frame.desc[11:]
elif frame.desc in self.TXXX_MAP:
name = self.TXXX_MAP[frame.desc]
else:
continue
elif frame.FrameID == "RVA2":
self.__process_rg(frame)
continue
elif frame.FrameID == "TMCL":
for role, name in frame.people:
key = self.__validate_name("performer:" + role)
if key:
self.add(key, name)
continue
elif frame.FrameID == "TLAN":
self["language"] = "\n".join(frame.text)
continue
elif frame.FrameID == "USLT":
name = "lyrics"
else:
name = self.IDS.get(frame.FrameID, "").lower()
name = self.__validate_name(name)
if not name:
continue
name = name.lower()
id3id = frame.FrameID
if id3id.startswith("T"):
text = "\n".join(map(str, frame.text))
elif id3id == "COMM":
text = "\n".join(frame.text)
elif id3id == "USLT":
# lyrics are single string, not list
text = frame.text
elif id3id.startswith("W"):
text = frame.url
frame.encoding = 0
else:
continue
if not text:
continue
text = self.__distrust_latin1(text, frame.encoding)
if text is None:
continue
if name in self:
self[name] += "\n" + text
else:
self[name] = text
self[name] = self[name].strip()
# to catch a missing continue above
del name
# foobar2000 writes long dates in a TXXX DATE tag, leaving the TDRC
# tag out. Read the TXXX DATE, but only if the TDRC tag doesn't exist
# to avoid reverting or duplicating tags in existing libraries.
if audio.tags and "date" not in self:
for frame in tag.getall('TXXX:DATE'):
self["date"] = "\n".join(map(str, frame.text))
# Read TXXX replaygain and replace previously read values from RVA2
for frame in tag.getall("TXXX"):
k = frame.desc.lower()
if k in RG_KEYS:
self[str(k)] = u"\n".join(map(str, frame.text))
self.sanitize(filename)
def _parse_info(self, info):
"""Optionally implement in subclasses"""
pass
def __validate_name(self, k):
"""Returns a ascii string or None if the key isn't supported"""
if not k or "=" in k or "~" in k:
return
if not (k and "=" not in k and "~" not in k
and k.encode("ascii", "replace").decode("ascii") == k):
return
return k
def __process_rg(self, frame):
if frame.channel == 1:
if frame.desc == "album":
k = "album"
elif fr |
nesl/SCPI-Scripts | kei/ag-lib/power_measurement_suite_new.py | Python | bsd-3-clause | 1,078 | 0.014842 | import SCPI
import time
import numpy
# Number of DMM readings to collect, and the generator frequency used only by
# the commented-out external-trigger variant below.
totalSamples = 10
sampleFreq = 100
#freq= SCPI.SCPI("172.17.5.121")
# Digital multimeter reached over SCPI; the function generator above is
# disabled in this variant of the script.
dmm = SCPI.SCPI("172.17.5.131")
#setup freq gen
#freq.setSquare()
#freq.setVoltage(0,3)
#freq.setFrequency(sampleFreq)
#setup voltage meter
#dmm.setVoltageDC("10V", "MAX")
# set external trigger
#dmm.setTriggerSource("INT")
#dmm.setTriggerCount(str(totalSamples))
# wait for trigger
# NOTE(review): setInitiate() is issued twice, before and after the
# configuration calls; the first call looks like a leftover -- confirm
# against the instrument's trigger model before removing it.
dmm.setInitiate()
# Configure DC current measurement (500 mA range, max resolution) with an
# internal trigger firing totalSamples times, then arm the meter.
dmm.setCurrentDC("500mA", "MAX")
dmm.setTriggerSource("INT")
dmm.setTriggerCount(str(totalSamples))
dmm.setInitiate()
time.sleep(1)
#freq.setOutput(1)
currentMeasurements = []
#voltageMeasurements = []
# Poll the meter until at least totalSamples readings have accumulated.
while 1:
    if len(currentMeasurements) < totalSamples:
        currentMeasurements += dmm.getMeasurements()
    if (len(currentMeasurements) >= totalSamples):
        break
    time.sleep(0.1)
#freq.setOutput(0)
# NOTE(review): `s` is never updated below -- dead leftover from the
# commented-out average-power report at the end of the file.
s = 0
# Dump each raw current reading (Python 2 print statement).
for i in range(0, totalSamples):
    print float(currentMeasurements[i])
#print "Average Power Consumption: ", s/float(totalSamples), "W avg volt: ", numpy.mean(voltageMeasurements), "V avg current: ", numpy.mean(currentMeasurements), "A"
|
ingadhoc/odoo-argentina | l10n_ar_ux/wizards/__init__.py | Python | agpl-3.0 | 400 | 0 | ############################ | ##################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
######################################## | ######################################
from . import res_config_settings
from . import account_move_change_rate
from . import account_payment_register
from . import account_payment_add_checks
|
piskvorky/flann | test/test_nn_autotune.py | Python | bsd-3-clause | 2,411 | 0.014517 | #!/usr/bin/env python
import sys
from os.path import *
import os
from pyflann import *
from copy import copy
from numpy import *
from numpy.random import *
import unittest
class Test_PyFLANN_nn(unittest.TestCase):
    """Checks FLANN's autotuned index against brute-force ground truth."""

    def setUp(self):
        self.nn = FLANN(log_level="warning")

    ###########################################################################
    # Typical cases at several dimensionalities and dataset sizes.

    def test_nn_2d_10pt(self):
        self.__nd_random_test_autotune(2, 2)

    def test_nn_autotune_2d_1000pt(self):
        self.__nd_random_test_autotune(2, 1000)

    def test_nn_autotune_100d_1000pt(self):
        self.__nd_random_test_autotune(100, 1000)

    def test_nn_autotune_500d_100pt(self):
        self.__nd_random_test_autotune(500, 100)

    ###########################################################################
    # Degenerate input it should still handle.

    def test_nn_stress_1d_1pt_kmeans_autotune(self):
        self.__nd_random_test_autotune(1, 1)

    def __ensure_list(self, arg):
        # Normalise a scalar result to a single-element list.
        return arg if type(arg) == list else [arg]

    def __nd_random_test_autotune(self, dim, N, num_neighbors=1, **kwargs):
        """
        Make a set of random points, then pass the same ones to the
        query points. Each point should be closest to itself.
        """
        seed(0)
        dataset = rand(N, dim)
        queries = rand(N, dim)
        perm = permutation(N)
        # Exhaustive (linear) search gives the exact neighbours to compare
        # the autotuned index against.
        gt_idx, gt_dist = self.nn.nn(dataset, queries,
                                     algorithm='linear',
                                     num_neighbors=num_neighbors)
        for target in [0.70, 0.80, 0.90]:
            nidx, ndist = self.nn.nn(dataset, queries,
                                     algorithm='autotuned',
                                     sample_fraction=1.0,
                                     num_neighbors=num_neighbors,
                                     target_precision=target, checks=-2,
                                     **kwargs)
            # Fraction of true neighbours recovered, averaged over queries.
            hits = 0.0
            for found, expected in zip(nidx, gt_idx):
                l1 = set(self.__ensure_list(found))
                l2 = set(self.__ensure_list(expected))
                hits += float(len(l1.intersection(l2))) / num_neighbors
            correctness = hits / N
            self.assert_(correctness >= target * 0.9,
                         'failed #1: targ_prec=%f, N=%d,correctness=%f' % (target, N, correctness))
# Allow running this module directly: hand off to unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
|
leesdolphin/rentme | rentme/raw/importer/models.py | Python | agpl-3.0 | 372 | 0 | from django.db | import models
class CachedResponse(models.Model):
    # Persisted copy of an HTTP response, keyed by the request that produced
    # it, so importers can replay a request without refetching it.
    class Meta:
        # NOTE(review): without a trailing comma the parenthesised value is a
        # plain 3-tuple of field names, which Django treats as one composite
        # uniqueness constraint over (method, url, kwargs) -- same effect as
        # a tuple containing one tuple, but easy to misread.
        unique_together = (
            ('method', 'url', 'kwargs')
        )
    # HTTP verb, e.g. "GET".
    method = models.CharField(max_length=10)
    # Request URL.
    url = models.TextField()
    # Serialised request arguments -- presumably a stable text encoding of a
    # kwargs dict; confirm against the code that writes these rows.
    kwargs = models.TextField()
    # Raw response body.
    content = models.BinaryField()
    # Textual form of the response payload -- exact format not visible here.
    data = models.TextField()
    # Timestamp after which this entry is stale (enforcement happens
    # elsewhere -- not visible in this file).
    expiry = models.DateTimeField()
bruecksen/notifhain | notifhain/event/migrations/0010_auto_20180210_1256.py | Python | mit | 821 | 0.002436 | # Generated by Django 2.0.1 on 2018-02-10 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: sets default ordering / display names
    # on the two event models and adds a nullable ``timetable_updated``
    # timestamp to DancefloorEvent.
    dependencies = [
        ('event', '0009_auto_20180208_1452'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='dancefloorevent',
            options={'ordering': ['event_date'], 'verbose_name': 'Event', 'verbose_name_plural': 'Events'},
        ),
        migrations.AlterModelOptions(
            name='danceflooreventdetails',
            options={'ordering': ['event_date'], 'verbose_name': 'Event Detail', 'verbose_name_plural': 'Event Details'},
        ),
        migrations.AddField(
            model_name='dancefloorevent',
            name='timetable_updated',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.