repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
akrherz/pyWWA | parsers/taf_parser.py | Python | mit | 120 | 0 | """ NESDIS TAF Ingestor """
# Local
from | pywwa.workflows.taf_parser import main
if __name__ == | "__main__":
main()
|
nawawi/wkhtmltopdf | webkit/Source/ThirdParty/gyp/test/make/gyptest-dependencies.py | Python | lgpl-3.0 | 812 | 0 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of t | his source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that .d files and all.deps are properly generated.
"""
import os
import TestGyp
# .d files are only used by the make build.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('dependencies.gyp')
| test.build('dependencies.gyp', test.ALL)
deps_file = test.built_file_path(".deps/out/Default/obj.target/main/main.o.d")
test.must_contain(deps_file, "main.h")
# Build a second time to make sure we generate all.deps.
test.build('dependencies.gyp', test.ALL)
all_deps_file = test.built_file_path(".deps/all.deps")
test.must_contain(all_deps_file, "main.h")
test.must_contain(all_deps_file, "cmd_")
test.pass_test()
|
ymind/docker-mongo-es | conf/appconfig.py | Python | mit | 1,677 | 0 | import os
import yaml
MONGO_USERNAME = os.getenv('MONGO_USERNAME', None)
MONGO_PASSWORD = os.getenv('MONGO_PASSWORD', None)
MONGODB_HOST = os.getenv('MONGODB_HOST', '127.0.0.1')
MONGODB_PORT = int(os.getenv('MONGODB_PORT', '27017'))
MONGODB_SERVERS = os.getenv('MONGODB_SERVERS') \
or '{}:{}'.format(MONGODB_HOST, MONGODB_PORT)
MONGODB_DEFAULT_URL = 'mongodb://{}'.format(MONGODB_SERVERS)
MONGO_URL = | os.getenv('MONGO_URL') or MONGODB_DE | FAULT_URL
MONGO_INCLUDES = os.getenv('MONGO_INCLUDES', '')
ES_URL = os.getenv('ES_URL', 'http://localhost:9200')
ES_INDEXES = yaml.load(os.getenv('ES_INDEXES') or '{}')
ES_TIMEOUT_SECONDS = int(os.getenv('ES_TIMEOUT_SECONDS', '100'))
LOG_VERBOSITY = int(os.getenv('LOG_VERBOSITY', 2))
MONGO_CONNECTOR_CONFIG = 'mongo-connector.json'
DEFAULTS = {
'es': {
'url': ES_URL,
'indexes': ES_INDEXES
},
'mongo-connector': {
'mainAddress': MONGO_URL,
'authentication': {
'adminUsername': MONGO_USERNAME,
'password': MONGO_PASSWORD
},
'namespaces': {
'include': MONGO_INCLUDES.split(','),
},
'timezoneAware': True,
'docManagers': [
{
'docManager': 'elastic_doc_manager',
'targetURL': ES_URL,
"args": {
"clientOptions": {
"timeout": ES_TIMEOUT_SECONDS
}
}
}
],
'logging': {
'type': 'stream'
},
'verbosity': LOG_VERBOSITY,
'continueOnError': True
},
}
CONFIG_LOCATION = os.getenv('CONFIG_LOCATION')
|
david-hoffman/scripts | imreg_dph.py | Python | apache-2.0 | 14,711 | 0.000748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# imreg_dph.py
"""
Functions for 2D image registration
Copyright (c) 2018, David Hoffman
"""
import itertools
import numpy as np
from dphutils import slice_maker
# three different registration packages
# not dft based
import cv2
# dft based
from skimage.feature import register_translation as register_translation_base
from skimage.transform import warp
from skimage.transform import AffineTransform as AffineTransformBase
try:
import pyfftw
pyfftw.interfaces.cache.enable()
from pyfftw.interfaces.numpy_fft import fft2, ifft2, fftshift
except ImportError:
from numpy.fft import fft2, ifft2, fftshift
class AffineTransform(AffineTransformBase):
"""Only adding matrix multiply to previous class"""
def __matmul__(self, other):
newmat = self.params @ other.params
return AffineTransform(matrix=newmat)
def __eq__(self, other):
return np.array_equal(self.params, other.params)
@property
def inverse(self):
return AffineTransform(matrix=np.linalg.inv(self.params))
def __repr__(self):
return self.params.__repr__()
def __str__(self):
string = (
"<AffineTransform: translation = {}, rotation ={:.2f}," " scale = {}, shear = {:.2f}>"
)
return string.format(
np.round(self.translation, 2),
np.rad2deg(self.rotation),
np.round(np.array(self.scale), 2),
np.rad2deg(self.shear),
)
AffineTransform | .__init__.__doc__ = AffineTransformBase.__init__.__doc__
AffineTransform.__doc__ = AffineTransformBase.__doc__
def _calc_pad(oldnum, newnum):
""" Calculate the proper padding for fft_pad
We have three cases:
old number even new number even
>>> _calc_pad(10, 16)
(3, 3)
old number odd new number even
>>> _calc_pad(11, 16)
(2, 3)
o | ld number odd new number odd
>>> _calc_pad(11, 17)
(3, 3)
old number even new number odd
>>> _calc_pad(10, 17)
(4, 3)
same numbers
>>> _calc_pad(17, 17)
(0, 0)
from larger to smaller.
>>> _calc_pad(17, 10)
(-4, -3)
"""
# how much do we need to add?
width = newnum - oldnum
# calculate one side, smaller
pad_s = width // 2
# calculate the other, bigger
pad_b = width - pad_s
# if oldnum is odd and newnum is even
# we want to pull things backward
if oldnum % 2:
pad1, pad2 = pad_s, pad_b
else:
pad1, pad2 = pad_b, pad_s
return pad1, pad2
def highpass(shape):
"""Return highpass filter to be multiplied with fourier transform."""
# inverse cosine filter.
x = np.outer(
np.cos(np.linspace(-np.pi / 2.0, np.pi / 2.0, shape[0])),
np.cos(np.linspace(-np.pi / 2.0, np.pi / 2.0, shape[1])),
)
return (1.0 - x) * (2.0 - x)
def localize_peak(data):
"""
Small utility function to localize a peak center. Assumes passed data has
peak at center and that data.shape is odd and symmetric. Then fits a
parabola through each line passing through the center. This is optimized
for FFT data which has a non-circularly symmetric shaped peaks.
"""
# make sure passed data is symmetric along all dimensions
if not len(set(data.shape)) == 1:
print("data.shape = {}".format(data.shape))
return 0, 0
# pull center location
center = data.shape[0] // 2
# generate the fitting lines
my_pat_fft_suby = data[:, center]
my_pat_fft_subx = data[center, :]
# fit along lines, consider the center to be 0
x = np.arange(data.shape[0]) - center
xfit = np.polyfit(x, my_pat_fft_subx, 2)
yfit = np.polyfit(x, my_pat_fft_suby, 2)
# calculate center of each parabola
x0 = -xfit[1] / (2 * xfit[0])
y0 = -yfit[1] / (2 * yfit[0])
# NOTE: comments below may be useful later.
# save fits as poly functions
# ypoly = np.poly1d(yfit)
# xpoly = np.poly1d(xfit)
# peak_value = ypoly(y0) / ypoly(0) * xpoly(x0)
# #
# assert np.isclose(peak_value,
# xpoly(x0) / xpoly(0) * ypoly(y0))
# return center
return y0, x0
def logpolar(image, angles=None, radii=None):
"""Return log-polar transformed image and log base."""
shape = image.shape
center = shape[0] / 2, shape[1] / 2
if angles is None:
angles = shape[0]
if radii is None:
radii = shape[1]
theta = np.empty((angles, radii), dtype=np.float64)
theta.T[:] = -np.linspace(0, np.pi, angles, endpoint=False)
# d = radii
d = np.hypot(shape[0] - center[0], shape[1] - center[1])
log_base = 10.0 ** (np.log10(d) / (radii))
radius = np.empty_like(theta)
radius[:] = np.power(log_base, np.arange(radii, dtype=np.float64)) - 1.0
x = (radius / shape[1] * shape[0]) * np.sin(theta) + center[0]
y = radius * np.cos(theta) + center[1]
output = np.empty_like(x)
ndii.map_coordinates(image, [x, y], output=output)
return output, log_base
def translation(im0, im1):
"""Return translation vector to register images."""
shape = im0.shape
f0 = fft2(im0)
f1 = fft2(im1)
ir = fftshift(abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1)))))
t0, t1 = np.unravel_index(np.argmax(ir), shape)
dt0, dt1 = localize_peak(ir[slice_maker((t0, t1), 3)])
# t0, t1 = t0 + dt0, t1 + dt1
t0, t1 = np.array((t0, t1)) + np.array((dt0, dt1)) - np.array(shape) // 2
# if t0 > shape[0] // 2:
# t0 -= shape[0]
# if t1 > shape[1] // 2:
# t1 -= shape[1]
return AffineTransform(translation=(-t1, -t0))
def similarity(im0, im1):
"""Return similarity transformed image im1 and transformation parameters.
Transformation parameters are: isotropic scale factor, rotation angle (in
degrees), and translation vector.
A similarity transformation is an affine transformation with isotropic
scale and without shear.
Limitations:
Image shapes must be equal and square.
- can fix with padding, non-square images can be handled either with padding or
better yet compensating for uneven image size
All image areas must have same scale, rotation, and shift.
- tiling if necessary...
Scale change must be less than 1.8.
- why?
No subpixel precision.
- fit peak position or upsample as in (https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/register_translation.py)
"""
if im0.shape != im1.shape:
raise ValueError("Images must have same shapes.")
elif len(im0.shape) != 2:
raise ValueError("Images must be 2 dimensional.")
shape_ratio = im0.shape[0] / im0.shape[1]
# calculate fourier images of inputs
f0 = fftshift(abs(fft2(im0)))
f1 = fftshift(abs(fft2(im1)))
# high pass filter fourier images
h = highpass(f0.shape)
f0 *= h
f1 *= h
# del h
# convert images to logpolar coordinates.
f0, log_base = logpolar(f0)
f1, log_base = logpolar(f1)
# fourier transform again ?
f0 = fft2(f0)
f1 = fft2(f1)
# calculate impulse response
r0 = abs(f0) * abs(f1)
ir_cmplx = ifft2((f0 * f1.conjugate()) / r0)
ir = abs(ir_cmplx)
# find max, this fails to often and screws up when cluster processing.
i0, i1 = np.unravel_index(np.argmax(ir), ir.shape)
di0, di1 = localize_peak(ir[slice_maker((i0, i1), 3)])
i0, i1 = i0 + di0, i1 + di1
# calculate the angle
angle = i0 / ir.shape[0]
# and scale
scale = log_base ** i1
# if scale is too big, try complex conjugate of ir
if scale > 1.8:
ir = abs(ir_cmplx.conjugate())
i0, i1 = np.array(np.unravel_index(np.argmax(ir), ir.shape))
di0, di1 = localize_peak(ir[slice_maker((i0, i1), 5)])
i0, i1 = i0 + di0, i1 + di1
angle = -i0 / ir.shape[0]
scale = 1.0 / (log_base ** i1)
if scale > 1.8:
raise ValueError("Images are not compatible. Scale change > 1.8")
# center the angle
angle *= np.pi
if angle < -np.pi / 2:
angle += np.pi
elif angle > np.pi / 2:
angle -= np.pi
# apply scale and rotation
# first move center to 0, |
OpenPathView/batchPanoMaker | opv_import/helpers/bit_utils.py | Python | gpl-3.0 | 1,156 | 0 | # coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Y | ou should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <benjamin.bernard@openpathview.fr>
# Email: team@openpathview.fr
# De | scription: Utils for makelot.
def bit_len(int_type: int) -> int:
"""
Returns int_type length, position of last non 0 bit.
:param int_type: An int value.
:type int_type: int
:return: position of last non 0 bit.
:rtype: int
"""
length = 0
while (int_type):
int_type >>= 1
length += 1
return(length)
|
glen3b/CyberPatriotScoreboardParser | scoreovertime.py | Python | gpl-3.0 | 756 | 0.003968 | #!/usr/bin/python3
import sys
from lxml import html
import urllib3
import re
http = urllib3.PoolManager()
baseUrl = 'http://scoreboard.uscyberpatriot.org/'
scoresPage = html.fromstring(http.request('GET', baseUrl + 'team.php?team=' + sys.argv[1]).data)
# XPath for chart script: /html/body/div[2]/div/script[1]
chart = scoresPage.xpath('/html/body/div[2]/div/script[1]')[0]
scoreTimes = re.compile(r'\[\'([0-9]{2}/[0123456789 :]+)\'((, (-?[0-9]{1,3}|null))+)\],?', re.MULTILINE)
reSearch = scoreTimes.find | all(chart.text)
for res in reSearch:
# Tuple result
# Capture 0 is time
# Capture 1 is screwyformat scores
print(res[0], end='')
for score in filter(None, res[1].split(',')):
print('\t' + | score, end='')
print() |
TaliesinSkye/evennia | wintersoasis-master/web/character/backend.py | Python | bsd-3-clause | 2,518 | 0.006354 | """
Backend functions for the character application.
"""
import string
from src.utils.create import create_player
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.core.mail import send_mail
from Crypto.Random import random
def switch_to(request, target):
"""
Switch to another user.
"""
target.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, target)
def activate_player(uid, activation_key, request):
"""
Activate a player upon receiving a proper activaton key.
"""
try:
user = User.objects.get(id = uid)
if user.is_active:
# User is already activated.
return False
if user.db.activation_key == activation_key:
user.is_active = True
user.save()
switch_to(request, user)
return True
else:
print "Else'd!"
return False
except User.DoesNotExist:
print "NonExistant."
return False
def send_activation_email(character, context):
"""
Generate an activation key, set it on a player, then have the user
emailed with the relevant info.
"""
lst = [random.choice(string.ascii_letters + string.digits) for n in xrange(30)]
key = "".join(lst)
character.player.db.activation_key = key
send_mail(
"Character Activation",
"""
Hello there!
You recently registered an account with Winter's Oasis. In order to use this account, you will need to ac | tivate it. You can activate the account by visiting the following URL:
https://%s%s/%s/%s/
If you didn't register with Winter's Oasis, you can ignore | this email. The account will be deleted within a few days if it is not activated.
""" % (context.META['HTTP_HOST'], reverse('roster.views.activate', args=(character.player.user.id, key))), settings.SERVER_EMAIL,
[character.player.user.email]
)
def new_player(name, email, password, context):
"""
Easier front-end for creating a new player. Also sends reg email.
"""
character = create_player(name=name, email=email, password=password,
permissions=settings.PERMISSION_PLAYER_DEFAULT,
typeclass=settings.BASE_PLAYER_TYPECLASS,
character_home=settings.CHARACTER_DEFAULT_HOME)
character.player.user.is_active = False
character.player.user.save()
send_activation_email(character, context)
|
tensorflow/model-analysis | tensorflow_model_analysis/evaluators/legacy_aggregate_test.py | Python | apache-2.0 | 10,171 | 0.004523 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for using the Aggregate API."""
import os
import apache_beam as beam
from apache_beam.testing import util
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier
from tensorflow_model_analysis.evaluators import legacy_aggregate as aggregate
from tensorflow_model_analysis.evaluators import legacy_poisson_bootstrap as poisson_bootstrap
def create_test_input(predict_list, slice_list):
results = []
for entry in predict_list:
for slice_key in slice_list:
results.append((slice_key, {constants.INPUT_KEY: entry}))
return results
class AggregateTest(testutil.TensorflowModelAnalysisTest):
def _getEvalExportDir(self):
return os.path.join(self._getTempDir(), 'eval_export_dir')
def testAggregateOverallSlice(self):
temp_eval_export_dir = self._getEvalExportDir()
_, eval_export_dir = linear_classifier.simple_linear_classifier(
None, temp_eval_export_dir)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_export_dir)
with beam.Pipeline() as pipeline:
example1 = self._makeExample(age=3.0, language='english', label=1.0)
example2 = self._makeExample(age=3.0, language='chinese', label=0.0)
example3 = self._makeExample(age=4.0, language='english', label=1.0)
example4 = self._makeExample(age=5.0, language='chinese', label=0.0)
predict_result = ([
example1.SerializeToString(),
example2.SerializeToString(),
example3.SerializeToString(),
example4.SerializeToString()
])
metrics = (
pipeline
| 'CreateTestInput' >> beam.Create(
create_test_input(predict_result, [()]))
| 'ComputePerSliceMetrics' >> aggregate.ComputePerSliceMetrics(
eval_shared_model=eval_shared_model, desired_batch_size=3))
def check_result(got):
self.assertEqual(1, len(got), 'got: %s' % got)
slice_key, metrics = got[0]
self.assertEqual(slice_key, ())
self.assertDictElementsAlmostEqual(
metrics, {
'accuracy': 1.0,
'label/mean': 0.5,
'my_mean_age': 3.75,
'my_mean_age_times_label': 1.75,
})
util.assert_that(metrics, check_result)
def testAggregateMultipleSlices(self):
temp_eval_export_dir = self._getEvalExportDir()
_, eval_export_dir = linear_classifier.simple_linear_classifier(
None, temp_eval_export_dir)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_export_dir)
with beam.Pipeline() as pipeline:
example1 = self._makeExample(age=3.0, language='english', label=1.0)
example2 = self._makeExample(age=3.0, language='chinese', label=0.0)
example3 = self._makeExample(age=4.0, language='english', label=1.0)
example4 = self._makeExample(age=5.0, language='chinese', label=0.0)
predict_result_english_slice = ([
example1.SerializeToString(),
example3.SerializeToString()
])
predict_result_chinese_slice = ([
example2.SerializeToString(),
example4.SerializeToString()
])
test_input = (
create_test_input(predict_result_english_slice, [(
('language', 'english'))]) +
create_test_input(predict_result_chinese_slice, [(
('language', 'chinese'))]) +
# Overall slice
create_test_input(
predict_result_english_slice + predict_result_chinese_slice,
[()]))
metrics = (
pipeline
| 'CreateTestInput' >> | beam.Create(test_input)
| 'ComputePerSliceMetrics' >> aggregate.ComputePerSliceMetrics(
eval_shared_model=eval_shared_model, desired_batch_size=3))
def check_result(got):
self.assertEqual(3, len(got), 'got: %s' % got)
slices = {}
for slice_key, metrics in got:
| slices[slice_key] = metrics
overall_slice = ()
english_slice = (('language', 'english'))
chinese_slice = (('language', 'chinese'))
self.assertCountEqual(
list(slices.keys()), [overall_slice, english_slice, chinese_slice])
self.assertDictElementsAlmostEqual(
slices[overall_slice], {
'accuracy': 1.0,
'label/mean': 0.5,
'my_mean_age': 3.75,
'my_mean_age_times_label': 1.75,
})
self.assertDictElementsAlmostEqual(
slices[english_slice], {
'accuracy': 1.0,
'label/mean': 1.0,
'my_mean_age': 3.5,
'my_mean_age_times_label': 3.5,
})
self.assertDictElementsAlmostEqual(
slices[chinese_slice], {
'accuracy': 1.0,
'label/mean': 0.0,
'my_mean_age': 4.0,
'my_mean_age_times_label': 0.0,
})
util.assert_that(metrics, check_result)
def testAggregateMultipleSlicesWithSampling(self):
temp_eval_export_dir = self._getEvalExportDir()
_, eval_export_dir = linear_classifier.simple_linear_classifier(
None, temp_eval_export_dir)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_export_dir)
with beam.Pipeline() as pipeline:
example1 = self._makeExample(age=3.0, language='english', label=1.0)
example2 = self._makeExample(age=3.0, language='chinese', label=0.0)
example3 = self._makeExample(age=4.0, language='english', label=1.0)
example4 = self._makeExample(age=5.0, language='chinese', label=0.0)
predict_result_english_slice = ([
example1.SerializeToString(),
example3.SerializeToString()
])
predict_result_chinese_slice = ([
example2.SerializeToString(),
example4.SerializeToString()
])
test_input = (
create_test_input(predict_result_english_slice, [(
('language', 'english'))]) +
create_test_input(predict_result_chinese_slice, [(
('language', 'chinese'))]) +
# Overall slice
create_test_input(
predict_result_english_slice + predict_result_chinese_slice,
[()]))
metrics = (
pipeline
| 'CreateTestInput' >> beam.Create(test_input)
| 'ComputePerSliceMetrics' >>
poisson_bootstrap.ComputeWithConfidenceIntervals(
aggregate.ComputePerSliceMetrics,
num_bootstrap_samples=10,
eval_shared_model=eval_shared_model,
desired_batch_size=3))
def assert_almost_equal_to_value_with_t_distribution(
target,
unsampled_value,
sample_mean,
sample_standard_deviation,
sample_degrees_of_freedom,
delta=2):
self.assertEqual(target.unsampled_value, unsampled_value)
self.assertAlmostEqual(target.sample_mean, sample_mean, delta=delta)
self.assertAlmostEqual(
target.sample_standard_deviation,
sample_standard_deviation,
delta=delta)
# The possion resampling could return [0, 0, ... ], which will reduce
# the number of samples.
self.assertLessEqual(target.sample_degrees_of_freedom,
sample_degrees_of_freedom)
def check_ov |
beni55/django | tests/auth_tests/test_views.py | Python | bsd-3-clause | 43,969 | 0.001638 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[
('en', 'English'),
],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the follow test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='unmanageable_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='unknown_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual | (response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optio | nal multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mai |
irmen/Pyro5 | examples/stockquotes/phase2/stockmarket.py | Python | mit | 1,133 | 0 | import random
import time
from Pyro5.api import expose, Daemon, locate_ns
@expose
class StockMarket(object):
def __init__(self, marketname, symbols):
self._name = marketname
self._symbols = symbols
def quotes(self):
w | hile True:
symbol = random.choice(self.symbols)
yield symbol, round(random.uniform(5, 150) | , 2)
time.sleep(random.random()/2.0)
@property
def name(self):
return self._name
@property
def symbols(self):
return self._symbols
if __name__ == "__main__":
nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
newyork = StockMarket("NYSE", ["IBM", "HPQ", "BP"])
# for example purposes we will access the daemon and name server ourselves
with Daemon() as daemon:
nasdaq_uri = daemon.register(nasdaq)
newyork_uri = daemon.register(newyork)
with locate_ns() as ns:
ns.register("example.stockmarket.nasdaq", nasdaq_uri)
ns.register("example.stockmarket.newyork", newyork_uri)
print("Stockmarkets available.")
daemon.requestLoop()
|
Onetaway/YouCompleteMe | python/ycm/completers/all/tests/identifier_completer_test.py | Python | gpl-3.0 | 3,220 | 0.014907 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import eq_
from ycm.completers.all import identifier_completer
def GetCursorIdentifier_StartOfLine_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'foo'
} ) )
eq_( 'fooBar',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'fooBar'
} ) )
def GetCursorIdentifier_EndOfLine_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 2,
'line_value': 'foo'
} ) )
def GetCursorIdentifier_PastEndOfLine_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': 10,
'line_value': 'foo'
} ) )
def GetCursorIdentifier_NegativeColumn_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': -10,
'line_value': 'foo'
} ) ) |
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'foo(goo)'
} ) )
def GetCursorIdentifier_AtNonIdentifier_test():
eq_( 'goo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 3,
'line_value': 'foo(goo)'
} ) )
def GetCursorIdentifi | er_WalksForwardForIdentifier_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': ' foo'
} ) )
def GetCursorIdentifier_FindsNothingForward_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': 4,
'line_value': 'foo ()***()'
} ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
eq_( 'f',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': ' f '
} ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
eq_( 'foobar',
identifier_completer._GetCursorIdentifier(
{
'column_num': 3,
'line_value': 'foobar'
} ) )
def GetCursorIdentifier_LineEmpty_test():
  """An empty line yields the empty string regardless of the column."""
  request = { 'column_num': 11, 'line_value': '' }
  eq_( '', identifier_completer._GetCursorIdentifier( request ) )
|
dracos/QGIS | python/plugins/GdalTools/tools/inOutSelector.py | Python | gpl-2.0 | 7,841 | 0.0264 | # -*- coding: utf-8 -*-
"""
***************************************************************************
inOutSelector.py
---------------------
Date : April 2011
Copyright : (C) 2011 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'April 2011'
__copyright__ = '(C) 2011, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL, Qt, pyqtProperty
from PyQt4.QtGui import QWidget, QComboBox
from qgis.core import QgsMapLayerRegistry, QgsMapLayer
from ui_inOutSelector import Ui_GdalToolsInOutSelector
class GdalToolsInOutSelector(QWidget, Ui_GdalToolsInOutSelector):
    """Composite input/output selector: a browse button, a free-text field
    and/or a map-layer combo box. Which sub-widgets are visible depends on
    the selector type flags below."""

    FILE = 0x1
    LAYER = 0x2
    MULTIFILE = 0x4  # NOT IMPLEMENTED YET
    FILE_LAYER = 0x1|0x2
    FILES = 0x1|0x4  # NOT IMPLEMENTED YET
    FILES_LAYER = 0x3|0x4  # NOT IMPLEMENTED YET

    # BUG FIX: the original tuple fused the last two signal declarations into
    # a single string ("filenameChanged(), layerChanged()"); every signal
    # must be declared as its own tuple entry.
    __pyqtSignals__ = ("selectClicked()", "filenameChanged()", "layerChanged()")

    def __init__(self, parent=None, type=None):
        QWidget.__init__(self, parent)
        self.setupUi(self)
        self.setFocusPolicy(Qt.StrongFocus)
        self.combo.setInsertPolicy(QComboBox.NoInsert)
        self.clear()
        self.typ = None
        if type is None:
            self.resetType()
        else:
            self.setType(type)
        self.connect(self.selectBtn, SIGNAL("clicked()"), self.selectButtonClicked)
        self.connect(self.fileEdit, SIGNAL("textChanged(const QString &)"), self.textChanged)
        self.connect(self.combo, SIGNAL("editTextChanged(const QString &)"), self.textChanged)
        self.connect(self.combo, SIGNAL("currentIndexChanged(int)"), self.indexChanged)

    def clear(self):
        # Reset the cached filename list and all visible widgets.
        self.filenames = []
        self.fileEdit.clear()
        self.clearComboState()
        self.combo.clear()

    def textChanged(self):
        if self.getType() & self.MULTIFILE:
            self.filenames = self.fileEdit.text().split(",")
        if self.getType() & self.LAYER:
            index = self.combo.currentIndex()
            if index >= 0:
                text = self.combo.currentText()
                if text != self.combo.itemText( index ):
                    # The user edited the text of a selected layer entry:
                    # treat the new text as a plain filename instead.
                    return self.setFilename( text )
        self.filenameChanged()

    def indexChanged(self):
        self.layerChanged()
        self.filenameChanged()

    def selectButtonClicked(self):
        self.emit(SIGNAL("selectClicked()"))

    def filenameChanged(self):
        self.emit(SIGNAL("filenameChanged()"))

    def layerChanged(self):
        self.emit(SIGNAL("layerChanged()"))

    def setType(self, type):
        """Switch the selector type, showing/hiding sub-widgets accordingly."""
        if type == self.typ:
            return
        if type & self.MULTIFILE: # MULTITYPE IS NOT IMPLEMENTED YET
            type = type & ~self.MULTIFILE
        self.typ = type
        self.selectBtn.setVisible( self.getType() & self.FILE )
        self.combo.setVisible( self.getType() & self.LAYER )
        self.fileEdit.setVisible( not (self.getType() & self.LAYER) )
        self.combo.setEditable( self.getType() & self.FILE )
        if self.getType() & self.FILE:
            self.setFocusProxy(self.selectBtn)
        else:
            self.setFocusProxy(self.combo)
        # send signals to refresh connected widgets
        self.filenameChanged()
        self.layerChanged()

    def getType(self):
        return self.typ

    def resetType(self):
        self.setType( self.FILE_LAYER )

    selectorType = pyqtProperty("int", getType, setType, resetType)

    def setFilename(self, fn=None):
        """Set the current selection from a filename, layer or list of names."""
        self.blockSignals( True )
        prevFn, prevLayer = self.filename(), self.layer()
        if isinstance(fn, QgsMapLayer):
            fn = fn.source()
        elif isinstance(fn, str) or isinstance(fn, unicode):
            fn = unicode( fn )
        # TODO test
        elif isinstance(fn, list):
            if len( fn ) > 0:
                if self.getType() & self.MULTIFILE:
                    self.filenames = fn
                fn = ",".join( fn )
            else:
                fn = ''
        else:
            fn = ''
        if not (self.getType() & self.LAYER):
            self.fileEdit.setText( fn )
        else:
            self.combo.setCurrentIndex(-1)
            self.combo.setEditText( fn )
        self.blockSignals( False )
        # Emit change notifications only if something actually changed.
        if self.filename() != prevFn:
            self.filenameChanged()
        if self.layer() != prevLayer:
            self.layerChanged()

    def setLayer(self, layer=None):
        """Select a layer by object or combo index; fall back to filename mode."""
        if not (self.getType() & self.LAYER):
            return self.setFilename( layer )
        self.blockSignals( True )
        prevFn, prevLayer = self.filename(), self.layer()
        if isinstance(layer, QgsMapLayer):
            if self.combo.findData(layer.id()) >= 0:
                index = self.combo.findData( layer.id() )
                self.combo.setCurrentIndex( index )
            else:
                self.combo.setCurrentIndex( -1 )
                self.combo.setEditText( layer.source() )
        elif isinstance(layer, int) and layer >= 0 and layer < self.combo.count():
            self.combo.setCurrentIndex( layer )
        else:
            self.combo.clearEditText()
            self.combo.setCurrentIndex(-1)
        self.blockSignals( False )
        if self.filename() != prevFn:
            self.filenameChanged()
        if self.layer() != prevLayer:
            self.layerChanged()

    def setLayers(self, layers=None):
        """Repopulate the combo with *layers*, preserving the selection."""
        if layers is None or not hasattr(layers, '__iter__') or len(layers) <= 0:
            self.combo.clear()
            return
        self.blockSignals( True )
        prevFn, prevLayer = self.filename(), self.layer()
        self.saveComboState()
        self.combo.clear()
        for l in layers:
            self.combo.addItem( l.name(), l.id() )
        self.restoreComboState()
        self.blockSignals( False )
        if self.filename() != prevFn:
            self.filenameChanged()
        if self.layer() != prevLayer:
            self.layerChanged()

    def clearComboState(self):
        self.prevState = None

    def saveComboState(self):
        # Remember (index, text, layer id) so it survives a repopulation.
        index = self.combo.currentIndex()
        text = self.combo.currentText()
        layerID = self.combo.itemData(index) if index >= 0 else ""
        self.prevState = ( index, text, layerID )

    def restoreComboState(self):
        if self.prevState is None:
            return
        index, text, layerID = self.prevState
        if index < 0:
            if text == '' and self.combo.count() > 0:
                index = 0
        elif self.combo.findData( layerID ) < 0:
            index = -1
            text = ""
        else:
            index = self.combo.findData( layerID )
        self.combo.setCurrentIndex( index )
        if index >= 0:
            text = self.combo.itemText( index )
        self.combo.setEditText( text )

    def layer(self):
        """Return the currently selected map layer, or None."""
        if self.getType() != self.FILE and self.combo.currentIndex() >= 0:
            layerID = self.combo.itemData(self.combo.currentIndex())
            return QgsMapLayerRegistry.instance().mapLayer( layerID )
        return None

    def filename(self):
        """Return the current filename(s) or the selected layer's source."""
        if not (self.getType() & self.LAYER):
            if self.getType() & self.MULTIFILE:
                return self.filenames
            return self.fileEdit.text()
        if self.combo.currentIndex() < 0:
            if self.getType() & self.MULTIFILE:
                return self.filenames
            return self.combo.currentText()
        layer = self.layer()
        if layer is not None:
            return layer.source()
        return ''
return ''
|
chris-ch/us-equities | create-stats-perfs-db.py | Python | mit | 1,963 | 0.00866 | from collections import defaultdict
from zipfile import ZipFile
from datetime import datetime
from itertools import izip
import logging
import sys
import shelve
from backtest import constants
def main():
    """Compute daily returns for every security in the prices archive and
    persist them, keyed by security code, into a shelve cache."""
    PRICES_DATA = constants.PRICES_DATA
    # protocol=2 keeps the shelve readable by Python 2 pickle consumers
    performances = shelve.open(constants.CACHE_PERFS, protocol=2)
    with ZipFile(PRICES_DATA, 'r') as prices_data:
        securities = prices_data.namelist()
        for index, dataset_name in enumerate(securities):
            #if index == 100: break
            batch_count = index / 100 + 1
            if index % 100 == 0:
                logging.info('processing batch %d/%d' % (batch_count, len(securities) / 100 + 1))
            # archive entries look like "<dir>/<code>.csv"; strip dir and extension
            security_code = dataset_name.split('/')[-1][:-4]
            security_performances = dict()
            dataset = prices_data.open(dataset_name).readlines()
            dates = list()
            prices = list()
            for row in dataset:
                # CSV layout: date in column 0, last price in column 4
                items = row.strip().split(',')
                px_date = datetime.strptime(items[0], '%Y-%m-%d')
                # skip spreadsheet-style missing values
                if items[4].startswith('#N/A'):
                    continue
                px_last = float(items[4])
                dates.append(px_date)
                prices.append(px_last)
            # simple return between consecutive observations
            for date, price, price_prev in izip(dates[1:], prices[1:], prices[:-1]):
                perf = (price / price_prev) - 1.0
                security_performances[date.strftime('%Y%m%d')] = perf
            performances[security_code] = security_performances
    performances.close()
if __name__ == '__main__':
    # Verbose logging so long archive runs show per-batch progress.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)s %(asctime)s %(module)s - %(message)s'
    )
    main()
main()
|
acil-bwh/SlicerCIP | Scripted/CIP_/CIP/ui/CollapsibleMultilineText.py | Python | bsd-3-clause | 595 | 0.005042 | import qt
class Collapsibl | eMultilineText(qt.QTextEdit):
"""Text field that expands when it gets the focus and remain collapsed otherwise"""
def __init__(self):
super(CollapsibleMultilineText, self).__init__()
self.minHeight = 20
self.maxHeight = 50
self.setFixedHeight(self.minHeight)
def focusInEvent(self, event):
# super(MyLineEdit, self).focusInEvent(event)
self.setFixedHeight(self.maxHeight)
def | focusOutEvent(self, event):
# super(MyLineEdit, self).focusOutEvent(event)
self.setFixedHeight(self.minHeight) |
zach-king/Python-Miscellaneous | Cryptography/transpositionCipher.py | Python | gpl-2.0 | 1,466 | 0.004775 | import sys
import pyperclip
import math
def encrypt(key, message):
    """Columnar-transposition encrypt *message* with *key* columns.

    Column *c* of the grid is exactly every *key*-th character of the
    message starting at offset *c*, so the cipher is the concatenation of
    the slices message[c::key] for c = 0 .. key-1.
    """
    columns = (message[start::key] for start in range(key))
    return ''.join(columns)
def decrypt(key, ciphertext):
    """Invert encrypt(): rebuild the plaintext from a columnar cipher.

    The encryption grid had *key* rows and ceil(len/key) columns, with the
    final (num_cols*num_rows - len) cells of the last column unused. We
    refill the plaintext columns one character at a time, wrapping one cell
    early on the rows that end in an unused ("shaded") cell.
    """
    num_cols = math.ceil(len(ciphertext) / key)
    num_rows = key
    num_shaded = (num_cols * num_rows) - len(ciphertext)
    columns = [''] * num_cols
    col = 0
    row = 0
    for symbol in ciphertext:
        columns[col] += symbol
        col += 1
        # Wrap at the right edge, or one column early on the shaded rows.
        if col == num_cols or (col == num_cols - 1 and row >= num_rows - num_shaded):
            col = 0
            row += 1
    return ''.join(columns)
def main(key=None, mes | sage=None, mode=None):
if key == None:
key = int(input("Enter a Key: "))
if message == None:
message = input("Enter a Message: ")
if mode == None:
mode = int(input("Encrypt(1) or Decrypt(0): "))
if mode:
encrypted = encrypt(key, message)
print(encrypted)
pyperclip.copy(encrypted)
else:
decrypted = decrypt(key, message)
print(decrypted)
if __name__ == '__main__':
    # BUG FIX: sys.argv values are strings; key and mode must be ints or
    # encrypt/decrypt raise TypeError and "0" (truthy) always encrypted.
    if len(sys.argv) == 4:
        main(int(sys.argv[1]), sys.argv[2], int(sys.argv[3]))
    elif len(sys.argv) == 3:
        main(int(sys.argv[1]), sys.argv[2])
    elif len(sys.argv) == 2:
        main(int(sys.argv[1]))
    else:
        main()
firekillice/python-mysql-flushdata | gamemysql.py | Python | gpl-2.0 | 2,372 | 0.067454 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb
import MySQLdb.cursors
class GameMysql():
def __init__(self,host,port,user,pwd):
try:
print host,port,user,pwd
self.__conn = MySQLdb.connect(host = host,port = port,user = user,passwd = pwd)
self.__cursor = self.__conn.cursor()
except Exception,e:
print Exception,e
def try_create_db(self,dbname):
self.__cursor.execute('create database if not exists ' + dbname)
def clear_table(self,dbname,tablename):
try:
self.__before_excute__(dbname)
return self.__cursor.execute("truncate %s"%(dbname))
except Exception,e:
print Exception,e
def check_table(self,dbname,tablename):
self.__before_excute__(dbname)
exe_sql = "show tables like '%s'" %(tablename)
self.__cursor.execute(exe_sql)
result = self.__cursor.fetchone()
if result:
return True
else:
return False
def create_table(self,dbname,createsql):
try:
self.__before_excute__(dbname)
return self.__cursor.execute(createsql)
except Exception,e:
print Exception,e
def alert_table(self,dbname,alertsql):
try:
self.__before_excute__(dbname)
return self.__cursor.execute(alertsql)
except Exception,e:
print Exception,e
def __before_excute__(self,dbname):
self.__conn.select_db(dbname)
#insert table
def select_sql(self,dbname,mysql):
try:
self.__before_excute__(dbname)
count = self.__cursor.execute(mysql)
results=self.__cursor.fetchall()
return results
except Exception,e:
print Exception,e
#select data beyond db
def select_sql_up_db(self,mysql):
try:
count = self.__cursor.execute(mysql)
results=self.__cursor.fetchall()
return results
except Exception,e:
print Exception,e
#insert table
def insert_more(self,dbname,mysql,data):
try:
self.__before_excute__(dbname)
self.__cursor.executemany(mysql,data)
self.__conn.commit()
except Exception,e:
print Exception,e
def del_data(self,dbname,mysql):
try:
self.__before_excute__(dbname)
self.__cursor.execute(mysql)
self.__conn.commit()
except Exception,e:
print Exception,e
def update_data(self,dbname,mysql):
try:
se | lf.__befo | re_excute__(dbname)
self.__cursor.execute(mysql)
self.__conn.commit()
except Exception,e:
print Exception,e
#close this mysql conn
def close(self):
self.__cursor.close()
self.__conn.close()
|
rcgee/oq-hazardlib | openquake/hazardlib/gsim/akkar_2013.py | Python | agpl-3.0 | 1,189 | 0.000841 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU A | ffer | o General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AkkarEtAl2013`.
"""
from __future__ import division
import warnings
from openquake.hazardlib.gsim.akkar_2014 import AkkarEtAlRjb2014
class AkkarEtAl2013(AkkarEtAlRjb2014):
    """
    To ensure backwards compatibility with existing seismic hazard models,
    the call AkkarEtAl2013 is retained as legacy. The AkkarEtAl2013 GMPE
    is now implemented as AkkarEtAlRjb2014
    """
    # Marks this alias as deprecated; presumably consumed by the GSIM
    # machinery to warn users of the legacy name -- TODO confirm.
    deprecated = True
|
amanharitsh123/zulip | zerver/migrations/0069_realmauditlog_extra_data.py | Python | apache-2.0 | 416 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-27 20:00
from django.db import migrations, models
class Migration(migrations.Migration) | :
dependencies | = [
('zerver', '0068_remove_realm_domain'),
]
operations = [
migrations.AddField(
model_name='realmauditlog',
name='extra_data',
field=models.TextField(null=True),
),
]
|
mrcaps/rainmon | code/ui/rain/tasks.py | Python | bsd-3-clause | 5,117 | 0.012116 | #Copyright (c) 2012, Carnegie Mellon University.
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#Celery data worker backend
#@author ishafer
try:
    from celery.decorators import task
except ImportError:
    # BUG FIX: narrowed a bare "except:" to ImportError. Without celery
    # installed, fall back to a no-op decorator so @task()-decorated
    # functions simply run synchronously.
    def task():
        def decorator(target):
            return target
        return decorator
import sys
import traceback
# Make the project source root importable so the star-imports of the
# pipeline and cache modules below resolve when run from this directory.
IMPORTPATH = "../../"
if IMPORTPATH not in sys.path:
    sys.path.append(IMPORTPATH)
from pipeline import *
from rescache import *
def getconfig():
    """Load and return the worker configuration from ./config.json.

    Uses a context manager so the file handle is closed even if parsing
    raises (the original leaked the handle on a parse error).
    """
    with open("config.json") as fp:
        return json.load(fp)
#@param savename the name of the subdirectory save name
def getcachedir(savename):
    """Return the cache directory path for the given save name."""
    cfg = getconfig()
    # tmpdir comes from config.json; its first 3 chars are stripped
    # (presumably a "../" prefix) -- TODO confirm against the config format.
    cachedir = os.path.join(cfg["tmpdir"][3:], "cache")
    return os.path.join(cachedir, savename)
@task()
def add(x, y):
    """Trivial smoke-test task: return the sum tagged with a marker string."""
    total = x + y
    return str(total) + "was a test."
#@param outname the output directory name for the save cache
#@param machines the list of nodes to run the analysis over
#@param attributes the list of metrics to run the analysis over
#@param startt starting time for analysis
#@param endt ending time for analysis
#@param tstep optional step time to pass to backends that accept a step
#@param tsdbhost timeseries database host (overrides default in pipeline)
#@param tsdbport timeseries database port (overrides default in pipeline)
#@param skipstages a list of stages to skip (e.g. pipeline.KalmanStage)
@task()
def run_pipeline(outname, machines, attributes, startt, endt, \
        tstep=None, sourcename=None, tsdbhost=None, tsdbport=None, skipstages=None):
    """Run the analysis pipeline over the given nodes/metrics/time range and
    cache the results (and status log) under the *outname* save directory."""
    print ">>>Running pipeline on ", machines, attributes
    print ">>>In time range", startt, endt, tstep
    print ">>>Source", sourcename, str(tsdbhost) + ":" + str(tsdbport)
    print ">>>Skipping stages", skipstages
    print ">>>Saving to", outname
    cdir = getcachedir(outname)
    try:
        os.makedirs(cdir)
    except:
        # directory may already exist; best-effort creation
        pass
    pipeline = get_current_pipeline()
    if skipstages != None:
        pipeline.set_skipstages(skipstages)
    # Assemble the pipeline input dict; optional keys are only set when given.
    input = {}
    input['hosts'] = machines
    input['metrics'] = attributes
    input['start'] = startt #'2011/11/01-00:00:00'
    input['end'] = endt #'2011/11/01-23:30:00'
    if sourcename is not None:
        input['sourcename'] = sourcename
    if tsdbhost is not None:
        input['tsdbhost'] = tsdbhost
    if tsdbport is not None:
        input['tsdbport'] = tsdbport
    if tstep is not None:
        input['tstep'] = tstep
    print "Starting with input", input
    dump = Cache(cdir)
    def statuswriter(txt):
        # Mirror status lines to stdout and to the save's status log.
        withnl = txt + "\n"
        sys.stdout.write(withnl)
        dump.printstatus(withnl)
    output = None
    try:
        output = pipeline.run(input,statuscb=statuswriter)
    except:
        # Record the failure (last traceback line + frames) in the status log.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        dump.printstatus( \
            traceback.format_exc().splitlines()[-1],
            traceback.format_tb(exc_traceback))
    #print "Got output: ", output.keys()
    if output != None:
        dump.write(output, input)
        # Remember the most recent successful save.
        set_savemeta({
            "lastsave": outname
        })
    print "Analysis Done"
    return "Done"
def getmetaname():
    """Path of the JSON file holding metadata about all saves."""
    return os.path.join(getcachedir(""), "meta.json")
@task()
def get_saveinfo():
    """Get save metadata and a list of saves."""
    root = getcachedir("")
    saves = []
    meta = dict()
    if os.path.exists(root):
        # every subdirectory of the cache root is a save
        saves = [entry for entry in os.listdir(root)
                 if os.path.isdir(os.path.join(root, entry))]
        metaname = getmetaname()
        if os.path.exists(metaname):
            with open(metaname, "r") as fp:
                meta = json.load(fp)
    return {
        "saves": saves,
        "meta": meta
    }
@task()
def set_savemeta(data):
    """Overwrite the saves-metadata JSON file with *data*."""
    serialized = json.dumps(data)
    with open(getmetaname(), "w") as fp:
        fp.write(serialized)
@task()
def get_status(savename):
    """Return the pipeline status log recorded for *savename*."""
    return Cache(getcachedir(savename)).getstatus()
@task()
def get_summary(savename):
    """Return the result summary stored in the *savename* cache."""
    return Cache(getcachedir(savename)).getsummary()
@task()
def get_file(savename,fname):
    """Load the named artifact from the *savename* cache."""
    return Cache(getcachedir(savename)).load(fname)
#get timeseries from the specified save name
@task()
def get_ts(savename, key, tmin, tmax):
    """Return (time, value) pairs for series *key* in save *savename*.

    NOTE(review): tmin/tmax are currently unused; the full series is returned.
    """
    store = Cache(getcachedir(savename))
    series = store.load(key)
    times = store.load("tsample")
    return zip(times, series)
koxudaxi/BacklogPy | tests/backlog/test_base.py | Python | mit | 3,123 | 0.00064 | from __future__ import absolute_import
import six
if six.PY3:
from unittest import TestCase, mock
else:
import sys
if sys.version_info < (2, 7, 0):
from unittest2 import TestCase
else:
from unittest import TestCase
import mock
from BacklogPy.base import BacklogBase
class TestBacklogBase(TestCase):
    """Tests for BacklogBase: API URL construction and request dispatch."""

    def test_api_url(self):
        """The API root is built from the space id and the domain suffix."""
        backlog = BacklogBase('space-id', 'api-key')
        self.assertEqual(backlog._api_url,
                         'https://space-id.backlog.jp/api/v2')
        backlog = BacklogBase('space-id', 'api-key', suffix='com')
        self.assertEqual(backlog._api_url,
                         'https://space-id.backlog.com/api/v2')

    def test_request(self):
        """_request forwards method, URL, query/form params and headers."""
        url = 'https://space-id.backlog.jp/api/v2/path'
        backlog = BacklogBase('space-id', 'api-key')

        # default method is GET with only the api key as a query parameter
        with mock.patch('requests.request') as m:
            backlog._request('/path')
            args, kwargs = m.call_args_list[0]
            self.assertTupleEqual(args, ('GET', url))
            self.assertDictEqual(kwargs,
                                 {'params': {'apiKey': 'api-key'},
                                  'data': {},
                                  'headers': {}})

        # an explicit method is passed straight through
        with mock.patch('requests.request') as m:
            backlog._request('/path', method='POST')
            args, kwargs = m.call_args_list[0]
            self.assertTupleEqual(args, ('POST', url))
            self.assertDictEqual(kwargs,
                                 {'params': {'apiKey': 'api-key'},
                                  'data': {},
                                  'headers': {}})

        # extra query parameters are merged with the api key
        with mock.patch('requests.request') as m:
            backlog._request('/path', method='POST',
                             query_parameters={'id': 123})
            args, kwargs = m.call_args_list[0]
            self.assertTupleEqual(args, ('POST', url))
            self.assertDictEqual(kwargs,
                                 {'params': {'apiKey': 'api-key', 'id': 123},
                                  'data': {},
                                  'headers': {}})

        # form parameters switch on the urlencoded content type
        with mock.patch('requests.request') as m:
            backlog._request('/path', method='POST',
                             query_parameters={'id': 123},
                             form_parameters={'name': 'abc'})
            args, kwargs = m.call_args_list[0]
            self.assertTupleEqual(args, ('POST', url))
            self.assertDictEqual(kwargs,
                                 {'params': {'apiKey': 'api-key', 'id': 123},
                                  'data': {'name': 'abc'},
                                  'headers': {
                                      'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}})
|
takeflight/wagtailvideos | wagtailvideos/views/videos.py | Python | bsd-3-clause | 6,698 | 0.001941 | from django.http import HttpResponseNotAllowed
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.admin import messages
from wagtail.admin.forms import SearchForm
from wagtail.admin.utils import PermissionPolicyChecker, popular_tags_for_model
from wagtail.core.models import Collection
from wagtail.search.backends import get_search_backends
from wagtail.utils.pagination import paginate
from wagtailvideos import ffmpeg
from wagtailvideos.forms import VideoTranscodeAdminForm, get_video_form
from wagtailvideos.models import Video
from wagtailvideos.permissions import permission_policy
permission_checker = PermissionPolicyChecker(permission_policy)
@permission_checker.require_any('add', 'change', 'delete')
@vary_on_headers('X-Requested-With')
def index(request):
    """Video listing view: supports search ('q'), collection filtering and
    AJAX partial rendering of just the results list."""
    # Get Videos (filtered by user permission)
    videos = Video.objects.all()
    # Search
    query_string = None
    if 'q' in request.GET:
        form = SearchForm(request.GET, placeholder=_("Search videos"))
        if form.is_valid():
            query_string = form.cleaned_data['q']
            videos = videos.search(query_string)
    else:
        form = SearchForm(placeholder=_("Search videos"))
    # Filter by collection
    current_collection = None
    collection_id = request.GET.get('collection_id')
    if collection_id:
        try:
            current_collection = Collection.objects.get(id=collection_id)
            videos = videos.filter(collection=current_collection)
        except (ValueError, Collection.DoesNotExist):
            # bad or unknown id: silently fall back to the unfiltered listing
            pass
    paginator, videos = paginate(request, videos)
    # Create response
    if request.is_ajax():
        response = render(request, 'wagtailvideos/videos/results.html', {
            # NOTE(review): 'vidoes' looks like a typo for 'videos' (the
            # non-AJAX branch uses 'videos') -- confirm what results.html
            # actually references before renaming this key.
            'vidoes': videos,
            'query_string': query_string,
            'is_searching': bool(query_string),
        })
        return response
    else:
        response = render(request, 'wagtailvideos/videos/index.html', {
            'videos': videos,
            'query_string': query_string,
            'is_searching': bool(query_string),
            'search_form': form,
            'popular_tags': popular_tags_for_model(Video),
            'current_collection': current_collection,
        })
        return response
@permission_checker.require('change')
def edit(request, video_id):
    """Edit an existing video; replacing the source file deletes the old one.

    NOTE(review): uses truthiness of request.POST rather than checking
    request.method == 'POST' -- an empty POST would render the form instead.
    """
    VideoForm = get_video_form(Video)
    video = get_object_or_404(Video, id=video_id)
    if request.POST:
        original_file = video.file
        form = VideoForm(request.POST, request.FILES, instance=video)
        if form.is_valid():
            if 'file' in form.changed_data:
                # if providing a new video file, delete the old one and all renditions.
                # NB Doing this via original_file.delete() clears the file field,
                # which definitely isn't what we want...
                original_file.storage.delete(original_file.name)
                # Set new video file size
                video.file_size = video.file.size
            video = form.save()
            video.save()
            # Reindex the image to make sure all tags are indexed
            for backend in get_search_backends():
                backend.add(video)
            messages.success(request, _("Video '{0}' updated.").format(video.title), buttons=[
                messages.button(reverse('wagtailvideos:edit', args=(video.id,)), _('Edit again'))
            ])
            return redirect('wagtailvideos:index')
        else:
            messages.error(request, _("The video could not be saved due to errors."))
    else:
        form = VideoForm(instance=video)
    if not video._meta.get_field('file').storage.exists(video.file.name):
        # Give error if image file doesn't exist
        # NOTE(review): .format(video.title) is a no-op here -- the message
        # string contains no placeholder.
        messages.error(request, _(
            "The source video file could not be found. Please change the source or delete the video."
        ).format(video.title), buttons=[
            messages.button(reverse('wagtailvideos:delete', args=(video.id,)), _('Delete'))
        ])
    return render(request, "wagtailvideos/videos/edit.html", {
        'video': video,
        'form': form,
        'filesize': video.get_file_size(),
        'can_transcode': ffmpeg.installed(),
        'transcodes': video.transcodes.all(),
        'transcode_form': VideoTranscodeAdminForm(video=video),
        'user_can_delete': permission_policy.user_has_permission_for_instance(request.user, 'delete', video)
    })
def create_transcode(request, video_id):
    """Kick off a new transcode for the given video (POST only)."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    video = get_object_or_404(Video, id=video_id)
    form = VideoTranscodeAdminForm(data=request.POST, video=video)
    if form.is_valid():
        form.save()
    # An invalid form falls through silently; either way, return to the
    # video's edit page.
    return redirect('wagtailvideos:edit', video_id)
@permission_checker.require('delete')
def delete(request, video_id):
    """Delete a video after confirmation (GET shows the confirm page).

    NOTE(review): tests truthiness of request.POST, not
    request.method == 'POST'; works because the confirm form posts a CSRF
    token, but the explicit method check would be safer.
    """
    video = get_object_or_404(Video, id=video_id)
    if request.POST:
        video.delete()
        messages.success(request, _("Video '{0}' deleted.").format(video.title))
        return redirect('wagtailvideos:index')
    return render(request, "wagtailvideos/videos/confirm_delete.html", {
        'video': video,
    })
@permission_checker.require('add')
def add(request):
    """Upload a new video, record its file size and index it for search."""
    VideoForm = get_video_form(Video)
    if request.POST:
        video = Video(uploaded_by_user=request.user)
        form = VideoForm(request.POST, request.FILES, instance=video, user=request.user)
        if form.is_valid():
            # Save
            video = form.save(commit=False)
            video.file_size = video.file.size
            video.save()
            # Success! Send back an edit form
            for backend in get_search_backends():
                backend.add(video)
            messages.success(request, _("Video '{0}' added.").format(video.title), buttons=[
                messages.button(reverse('wagtailvideos:edit', args=(video.id,)), _('Edit'))
            ])
            return redirect('wagtailvideos:index')
        else:
            messages.error(request, _("The video could not be created due to errors."))
    else:
        form = VideoForm(user=request.user)
    return render(request, "wagtailvideos/videos/add.html", {
        'form': form,
    })
def usage(request, image_id):
    """List the pages that use the given video.

    NOTE(review): the parameter and template context keep the legacy
    'image' naming (copied from wagtailimages); renaming them would break
    the URL conf and template.
    """
    video = get_object_or_404(Video, id=image_id)
    paginator, used_by = paginate(request, video.get_usage())
    return render(request, "wagtailvideos/videos/usage.html", {
        'image': video,
        'used_by': used_by
    })
|
ei-grad/django-environ | environ/environ.py | Python | mit | 22,397 | 0.001339 | """
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import json
import logging
import os
import re
import sys
import warnings
from django.core.exceptions import ImproperlyConfigured
from six.moves import urllib_parse as urlparse
from six import string_types
logger = logging.getLogger(__file__)
__author__ = 'joke2k'
__version__ = (0, 4, 0)
# return int if possible
def _cast_int(v):
return int(v) if hasattr(v, 'isdigit') and v.isdigit() else v
class NoValue(object):
    """Sentinel type whose instances mark "no default supplied".

    A dedicated type (rather than None) lets callers use None as a
    legitimate default value.
    """

    def __repr__(self):
        name = self.__class__.__name__
        return '<{0}>'.format(name)
class Env(object):
"""Provide scheme-based lookups of environment variables so that each
caller doesn't have to pass in `cast` and `default` parameters.
Usage:::
env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
if env('MAIL_ENABLED'):
...
"""
NOTSET = NoValue()
BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
URL_CLASS = urlparse.ParseResult
DEFAULT_DATABASE_ENV = 'DATABASE_URL'
DB_SCHEMES = {
'postgres': 'django.db.backends.postgresql_psycopg2',
'postgresql': 'django.db.backends.postgresql_psycopg2',
'psql': 'django.db.backends.postgresql_psycopg2',
'pgsql': 'django.db.backends.postgresql_psycopg2',
'postgis': 'django.contrib.gis.db.backends.postgis',
'mysql': 'django.db.backends.mysql',
'mysql2': 'django.db.backends.mysql',
'mysqlgis': 'django.contrib.gis.db.backends.mysql',
'spatialite': 'django.contrib.gis.db.backends.spatialite',
'sqlite': 'django.db.backends.sqlite3',
'ldap': 'ldapdb.backends.ldap',
}
_DB_BASE_OPTIONS = ['CONN_MAX_AGE', 'ATOMIC_REQUESTS', 'AUTOCOMMIT']
DEFAULT_CACHE_ENV = 'CACHE_URL'
CACHE_SCHEMES = {
'dbcache': 'django.core.cache.backends.db.DatabaseCache',
'dummycache': 'django.core.cache.backends.dummy.DummyCache',
'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
'pymemcache': 'django.core.cache.backends.memcached.PyLibMCCache',
'rediscache': 'django_redis.cache.RedisCache',
'redis': 'django_redis.cache.RedisCache',
}
_CACHE_BASE_OPTIONS = ['TIMEOUT', 'KEY_PREFIX', 'VERSION', 'KEY_FUNCTION']
DEFAULT_EMAIL_ENV = 'EMAIL_URL'
EMAIL_SCHEMES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'smtps': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
'consolemail': 'django.core.mail.backends.console.EmailBackend',
'filemail': 'django.core.mail.backends.filebased.EmailBackend',
'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
}
_EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']
DEFAULT_SEARCH_ENV = 'SEARCH_URL'
SEARCH_SCHEMES = {
"elasticsearch": "haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine",
"solr": "haystack.backends.solr_backend.SolrEngine",
"whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
"simple": "haystack.backends.simple_backend.SimpleEngine",
}
def __init__(self, **scheme):
self.scheme = scheme
def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
return self.get_value(var, cast=cast, default=default, parse_default=parse_default)
# Shortcuts
def str(self, var, default=NOTSET):
"""
:rtype: str
"""
return self.get_value(var, default=default)
def unicode(self, var, default=NOTSET):
"""Helper for python2
:rtype: unicode
"""
return self.get_value(var, cast=str, default=default)
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default)
def int(self, var, default=NOTSET):
"""
:rtype: int
"""
return self.get_value(var, cast=int, default=default)
def float(self, var, default=NOTSET):
"""
:rtype: float
"""
return self.get_value(var, cast=float, default=default)
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default)
def list(self, var, cast=None, default=NOTSET):
"""
:rtype: list
"""
return self.get_value(var, cast=list if not cast else [cast], default=default)
def dict(self, var, cast=dict, default=NOTSET):
"""
:rtype: dict
"""
return self.get_value(var, cast=cast, default=default)
def url(self, var, default=NOTSET):
"""
:rtype: urlparse.ParseResult
"""
return self.get_value(var, cast=urlparse.urlparse, default=default, parse_default=True)
def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to DATABASE_URL.
:rtype: dict
"""
return self.db_url_config(self.get_value(var, default=default), engine=engine)
db = db_url
def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to CACHE_URL.
:rtype: dict
"""
return self.cache_url_config(self.url(var, default=default), backend=backend)
cache = cache_url
def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to EMAIL_URL.
:rtype: dict
"""
return self.email_url_config(self.url(var, default=default), backend=backend)
email = email_url
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to SEARCH_URL.
:rtype: dict
"""
return self.search_url_config(self.url(var, default=default), engine=engine)
def path(self, var, default=NOTSET, **kwargs):
    """Return the value of ``var`` wrapped in this package's ``Path``.

    Extra keyword arguments are forwarded to the ``Path`` constructor.

    :rtype: Path
    """
    return Path(self.get_value(var, default=default), **kwargs)
def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
"""Return value for given environment variable.
:param var: Name of variable.
:param cast: Type to cast return value as.
:param default: If var not present in environ, return this instead.
:param parse_default: force to parse default..
:returns: Value from environment or default (if set)
"""
logger.debug("get '{0}' casted as '{1}' with default '{2}'".format(var, cast, default))
if var in self.scheme:
var_info = self.scheme[var]
try:
has_default = len(var_info) == 2
except TypeError:
has_default = False
if has_default:
if not cast:
cast = var_info[0]
if default is self.NOTSET:
try:
default = var_info[1]
except IndexError:
pass
else:
if not cast:
cast = var_info
try:
value = os.environ[var]
except KeyError:
if default is self.NOTSET:
error_msg = "Set the {0} environment variable".format(var)
raise ImproperlyConfigured(error_msg)
value = defa | ult
# Resolve any proxied values
if hasattr(value, 'startswith') and value.startswith('$'):
value = value.lstr | ip('$')
value = self.get_value(value, cast=cast, default=default)
if value != default or parse_default:
value = self.pa |
jcfr/girder | plugins/jobs/server/__init__.py | Python | apache-2.0 | 871 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from . import job_rest
def load(info):
    """Girder plugin entry point: mount the jobs REST resource at /job."""
    info['apiRoot'].job = job_rest.Job()
|
redhat-cip/hardware | hardware/tests/test_benchmark_mem.py | Python | apache-2.0 | 5,424 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import unittest
from unittest import mock
from hardware.benchmark import mem
from hardware.benchmark import utils
SYSBENCH_OUTPUT = """Operations performed: 1957354 (391412.04 ops/sec)
1911.48 MB transferred (382.24 MB/sec)
Test execution summary:
total time: 5.0008s
total number of events: 1957354
total time taken by event execution: 3.0686
per-request statistics:
min: 0.00ms
avg: 0.00ms
max: 0.23ms
approx. 95 percentile: 0.00ms
Threads fairness:
events (avg/stddev): 1957354.0000/0.00
execution time (avg/stddev): 3.0686/0.00"""
EXPECTED_RESULT = [
('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2),
('cpu', 'logical_0', 'bandwidth_1K', '382'),
('cpu', 'logical_0', 'bandwidth_4K', '382'),
('cpu', 'logical_0', 'bandwidth_1M', '382'),
('cpu', 'logical_0', 'bandwidth_16M', '382'),
('cpu', 'logical_0', 'bandwidth_128M', '382'),
('cpu', 'logical_0', 'bandwidth_1G', '382'),
('cpu', 'logical_0', 'bandwidth_2G', '382'),
('cpu', 'logical_1', 'bandwidth_1K', '382'),
('cpu', 'logical_1', 'bandwidth_4K', '382'),
('cpu', 'logical_1', 'bandwidth_1M', '382'),
('cpu', 'logical_1', 'bandwidth_16M', '382'),
('cpu', 'logical_1', 'bandwidth_128M', '382'),
('cpu', 'logical_1', 'bandwidth_1G', '382'),
('cpu', 'logical_1', 'bandwidth_2G', '382'),
('cpu', 'logical', 'threaded_bandwidth_1K', '382'),
('cpu', 'logical', 'threaded_bandwidth_4K', '382'),
('cpu', 'logical', 'threaded_bandwidth_1M', '382'),
('cpu', 'logical', 'threaded_bandwidth_16M', '382'),
('cpu', 'logical', 'threaded_bandwidth_128M', '382'),
('cpu', 'logical', 'threaded_bandwidth_1G', '382'),
('cpu', 'logical', 'threaded_bandwidth_2G', '382'),
('cpu', 'logical', 'forked_bandwidth_1K', '382'),
('cpu', 'logical', 'forked_bandwidth_4K', '382'),
('cpu', 'logical', 'forked_bandwidth_1M', '382'),
('cpu', 'logical', 'forked_bandwidth_16M', '382'),
('cpu', 'logical', 'forked_bandwidth_128M', '382'),
('cpu', 'logical', 'forked_bandwidth_1G', '382'),
('cpu', 'logical', 'forked_bandwidth_2G', '382')
]
@mock.patch.object(mem, 'get_available_memory')
@mock.patch.object(utils, 'get_one_cpu_per_socket')
@mock.patch.object(subprocess, 'Popen')
class TestBenchmarkMem(unittest.TestCase):
    """Unit tests for hardware.benchmark.mem.

    sysbench invocations (subprocess.Popen), the available-memory probe
    and the CPU-per-socket helper are all mocked, so the parsing and
    result-assembly logic is exercised in isolation.
    """

    def setUp(self):
        super(TestBenchmarkMem, self).setUp()
        # Minimal hardware description: two logical and two physical CPUs.
        self.hw_data = [('cpu', 'logical', 'number', 2),
                        ('cpu', 'physical', 'number', 2)]

    def test_mem_perf_bytes(self, mock_popen, mock_cpu_socket,
                            mock_get_memory):
        mock_get_memory.return_value = 123456789012
        mock_popen.return_value = mock.Mock(
            stdout=SYSBENCH_OUTPUT.encode().splitlines())
        mock_cpu_socket.return_value = range(2)
        mem.mem_perf(self.hw_data)
        expected = EXPECTED_RESULT
        # mem_perf appends its measurements to hw_data in place.
        self.assertEqual(sorted(expected), sorted(self.hw_data))

    def test_check_mem_size(self, mock_popen, mock_cpu_socket,
                            mock_get_memory):
        block_size_list = ('1K', '4K', '1M', '16M', '128M', '1G', '2G')
        mock_get_memory.return_value = 123456789012
        for block_size in block_size_list:
            self.assertTrue(mem.check_mem_size(block_size, 2))
        # Low memory: every block size must now be rejected.
        mock_get_memory.return_value = 1
        for block_size in block_size_list:
            self.assertFalse(mem.check_mem_size(block_size, 2))

    def test_run_sysbench_memory_forked_bytes(self, mock_popen,
                                              mock_cpu_socket,
                                              mock_get_memory):
        mock_get_memory.return_value = 123456789012
        mock_popen.return_value = mock.Mock(
            stdout=SYSBENCH_OUTPUT.encode().splitlines())
        hw_data = []
        mem.run_sysbench_memory_forked(hw_data, 10, '1K', 2)
        self.assertEqual([('cpu', 'logical', 'forked_bandwidth_1K', '382')],
                         hw_data)

    def test_run_sysbench_memory_threaded_bytes(self, mock_popen,
                                                mock_cpu_socket,
                                                mock_get_memory):
        mock_get_memory.return_value = 123456789012
        mock_popen.return_value = mock.Mock(
            stdout=SYSBENCH_OUTPUT.encode().splitlines())
        hw_data = []
        mem.run_sysbench_memory_threaded(hw_data, 10, '1K', 2)
        self.assertEqual([('cpu', 'logical', 'threaded_bandwidth_1K', '382')],
                         hw_data)
|
Azure/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py | Python | mit | 28,006 | 0.002287 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import (
VERSION,
DetectLanguageInput,
TextDocumentInput,
TextAnalyticsApiVersion,
)
from testcase import TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils.aio import recorded_by_proxy_async
from testcase import TextAnalyticsTest
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestRecognizeLinkedEntities(TextAnalyticsTest):
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_no_single_input(self, client):
with pytest.raises(TypeError):
response = await client.recognize_linked_entities("hello world")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs, show_stats=True)
for doc in response:
assert len(doc.entities) == 3
assert doc.id is not None
assert doc.statistics is not None
for entity in doc.entities:
assert entity.name is not None
assert entity.language is not None
assert entity.data_source_entity_id is not None
assert entity.url is not None
assert entity.data_source is not None
assert entity.matches is not None
for match in entity.matches:
assert match.offset is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_text_document_input(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen"),
TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen")
]
response = await client.recognize_linked_entities(docs)
for doc in response:
assert len(doc.entities) == 3
for entity in doc.entities:
assert entity.name is not None
assert entity.language is not None
assert entity.data_source_entity_id is not None
assert entity.url is not None
assert entity.data_source is not None
assert entity.matches is not None
for match in entity.matches:
assert match.offset is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_passing_only_string(self, client):
docs = [
"Microsoft was founded by Bill Gates and Paul Allen",
"Microsoft fue fundado por Bill Gates y Paul Allen",
""
]
response = await client.recognize_linked_entities(docs)
assert len(response[0].entities) == 3
assert len(response[1].entities) == 3
assert response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_some_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
assert response[0].is_error
assert not response[1].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_all_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
assert response[0].is_error
assert response[1].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_too_many_documents(self, client):
docs = ["One", "Two", "Three", "Four", "Five", "Six"]
with pytest.raises(HttpResponseError) as excinfo:
await client.recognize_linked_entities(docs)
assert excinfo.value.status_code == 400
assert excinfo.value.error.code == "InvalidDocumentBatch"
assert "Batch request contains too many records" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_output_same_order_as_input(self, cli | ent):
docs = [
TextDocumentInput(id="1", text="one"),
| TextDocumentInput(id="2", text="two"),
TextDocumentInput(id="3", text="three"),
TextDocumentInput(id="4", text="four"),
TextDocumentInput(id="5", text="five")
]
response = await client.recognize_linked_entities(docs)
for idx, doc in enumerate(response):
assert str(idx + 1) == doc.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": ""})
@recorded_by_proxy_async
async def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": "xxxxxxxxxxxx"})
@recorded_by_proxy_async
async def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bad_document_input(self, client):
docs = "This is the wrong type"
with pytest.raises(TypeError):
response = await client.recognize_linked_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
"You cannot mix string input with the above inputs"
]
with pytest.raises(TypeError):
response = await client.recognize_linked_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.recognize_linked_entities(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
assert resp.id == in_order[idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_show_stats_and_model_version(self, client):
def callback(response):
assert respon |
wevoice/wesub | apps/openid_consumer/admin.py | Python | agpl-3.0 | 1,189 | 0.004205 | # Amara, universalsubtitles.org
#
# Copyright (C) 2016 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Softwar | e Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
# Based on: http://www.djangosnippets.org/snippets/73/
#
# Modified by Sean Reifschneider to be smarter about surrounding page
# link context. For usage documentation see:
#
# http://www.tummy.com/Community/Articles/django-pagination/
from django.contrib import admin
from django.conf import settings
from openid_consumer.models import Nonce, Association
admin.site.register(Nonce)
admin.site.register(Association)
|
soumyanishan/azure-linux-extensions | OSPatching/patch/centosPatching.py | Python | apache-2.0 | 770 | 0.001299 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from redhatPatching import redhatPatching


class centosPatching(redhatPatching):
    """CentOS patching support.

    CentOS is binary-compatible with RHEL, so all patching behaviour is
    inherited unchanged from redhatPatching.
    """

    def __init__(self, hutil):
        super(centosPatching, self).__init__(hutil)
|
amboycharlie/Child-Friendly-LCMS | leonardo/__init__.py | Python | apache-2.0 | 520 | 0 |
default_app_config = 'leonardo.apps.LeonardoConfig'

# Register this module as a namespace package for setuptools.
__import__('pkg_resources').declare_namespace(__name__)

try:
    from leonardo.base import leonardo  # noqa
except ImportError:
    # Dependencies may legitimately be missing while the package is being
    # installed; emit a terse warning instead of failing the import.
    import warnings

    def simple_warn(message, category, filename, lineno, file=None, line=None):
        """Compact one-line formatter used in place of the default warning format."""
        return '%s: %s' % (category.__name__, message)

    msg = ("Could not import Leonardo dependencies. "
           "This is normal during installation.\n")
    warnings.formatwarning = simple_warn
    warnings.warn(msg, Warning)
|
rvianello/rdkit | rdkit/ML/BuildComposite.py | Python | bsd-3-clause | 35,920 | 0.0098 | # $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" command line utility for building composite models
#DOC
**Usage**
BuildComposite [optional args] filename
Unless indicated otherwise (via command line arguments), _filename_ is
a QDAT file.
**Command Line Arguments**
- -o *filename*: name of the output file for the pickled composite
- -n *num*: number of separate models to add to the composite
- -p *tablename*: store persistence data in the database
in table *tablename*
- -N *note*: attach some arbitrary text to the persistence data
- -b *filename*: name of the text file to hold examples from the
holdout set which are misclassified
- -s: split the data into training and hold-out sets before building
the composite
- -f *frac*: the fraction of data to use in the training set when the
data is split
- -r: randomize the activities (for testing purposes). This ignores
the initial distribution of activity values and produces each
possible activity value with equal likelihood.
- -S: shuffle the activities (for testing purposes) This produces
a permutation of the input activity values.
- -l: locks the random number generator to give consistent sets
of training and hold-out data. This is primarily intended
for testing purposes.
- -B: use a so-called Bayesian composite model.
- -d *database name*: instead of reading the data from a QDAT file,
pull it from a database. In this case, the _filename_ argument
provides the name of the database table containing the data set.
- -D: show a detailed breakdown of the composite model performance
across the training and, when appropriate, hold-out sets.
- -P *pickle file name*: write out the pickled data set to the file
- -F *filter frac*: filters the data before training to change the
distribution of activity values in the training set. *filter
frac* is the fraction of the training set that should have the
target value. **See note below on data filtering.**
- -v *filter value*: filters the data before training to change the
distribution of activity values in the training set. *filter
value* is the target value to use in filtering. **See note below
on data filtering.**
- --modelFiltFrac *model filter frac*: Similar to filter frac above,
in this case the data is filtered for each model in the composite
rather than a single overall filter for a composite. *model
filter frac* is the fraction of the training set for each model
that should have the target value (*model filter value*).
- --modelFiltVal *model filter value*: target value to use for
filtering data before training each model in the composite.
- -t *threshold value*: use high-confidence predictions for the
final analysis of the hold-out data.
- -Q *list string*: the values of quantization bounds for the
activity value. See the _-q_ argument for the format of *list
string*.
- --nRuns *count*: build *count* composite models
- --prune: prune any models built
- -h: print a usage message and exit.
- -V: print the version number and exit
*-*-*-*-*-*-*-*- Tree-Related Options -*-*-*-*-*-*-*-*
- -g: be less greedy when training the models.
- -G *number*: force trees to be rooted at descriptor *number*.
- -L *limit*: provide an (integer) limit on individual model
complexity
- -q *list string*: Add QuantTrees to the composite and use the list
specified in *list string* as the number of target quantization
bounds for each descriptor. Don't forget to include 0's at the
beginning and end of *list string* for the name and value fields.
For example, if there are 4 descriptors and you want 2 quant
bounds apiece, you would use _-q "[0,2,2,2,2,0]"_.
Two special cases:
1) If you would like to ignore a descriptor in the model
building, use '-1' for its number of quant bounds.
2) If you have integer valued data that should not be quantized
further, enter 0 for that descriptor.
- --recycle: allow descriptors to be used more than once in a tree
- --randomDescriptors=val: toggles growing random forests with val
randomly-selected descriptors available at each node.
*-*-*-*-*-*-*-*- KNN-Related Options -*-*-*-*-*-*-*-*
- --doKnn: use K-Nearest Neighbors models
- --knnK=*value*: the value of K to use in the KNN models
- --knnTanimoto: use the Tanimoto metric in KNN models
- --knnEuclid: use a Euclidean metric in KNN models
*-*-*-*-*-*-*- Naive Bayes Classifier Options -*-*-*-*-*-*-*-*
- --doNaiveBayes : use Naive Bayes classifiers
- --mEstimateVal : the value to be used in the m-estimate formula
If this is greater than 0.0, we use it to compute the conditional
probabilities by the m-estimate
*-*-*-*-*-*-*-*- SVM-Related Options -*-*-*-*-*-*-*-*
**** NOTE: THESE ARE DISABLED ****
# # - --doSVM: use Support-vector machines
# # - --svmKernel=*kernel*: choose the type of kernel to be used for
# # the SVMs. Options are:
# # The default is:
# # - --svmType=*type*: choose the type of support-vector machine
# # to be used. Options are:
# # The default is:
# # - --svmGamma=*gamma*: provide the gamma value for the SVMs. If this
# # is not provided, a grid search will be carried out to determine an
# # optimal *gamma* value for each SVM.
# # - --svmCost=*cost*: provide the cost value for the SVMs. If this is
# # not provided, a grid search will be carried out to determine an
# # optimal *cost* value for each SVM.
# # - --svmWeights=*weights*: provide the weight values for the
# # activities. If provided this should be a sequence of (label,
# # weight) 2-tuples *nActs* long. If not provided, a weight of 1
# # will be used for each activity.
# # - --svmEps=*epsilon*: provide the epsilon value used to determine
# # when the SVM has converged. Defaults to 0.001
# # - --svmDegree=*degree*: provide the degree of the kernel (when
# # sensible) Defaults to 3
# # - --svmCoeff=*coeff*: provide the coefficient for the kernel (when
# # sensible) Defaults to 0
# # - --svmNu=*nu*: provide the nu value for the kernel (when sensible)
# # Defaults to 0.5
# # - --svmDataType=*float*: if the data is contains only 1 and 0 s, specify by
# # using binary. Defaults to float
# # - --svmCache=*cache*: provide the size of the memory cache (in MB)
# # to be used while building the SVM. Defaults to 40
**Notes**
- *Data filtering*: When there is a large disparity between the
numbers of points with various activity levels present in the
training set it is sometimes desirable to train on a more
homogeneous data set. This can be accomplished using filtering.
The filtering process works by selecting a particular target
fraction and target value. For example, in a case where 95% of
the original training set has activity 0 and only 5% activity 1, we
could filter (by randomly removing points with activity 0) so that
30% of the data set used to build the composite has activity 1.
"""
from __future__ import print_function
import sys
import time
import numpy
from rdkit import DataStructs
from rdkit.Dbase import DbModule
from rdkit.ML import CompositeRun
from rdkit.ML import ScreenComposite
from rdkit.ML.Composite import Composite, BayesComposite
from rdkit.ML.Data import DataUtils, SplitData
from rdkit.utils import listutils
from rdkit.six.moves import cPickle
# # from ML.SVM import SVMClassificationModel as SVM
_runDetails = CompositeRun.CompositeRun()
__VERSION_STRING = "3.2.3"
_verbose = 1
def message(msg):
""" emits messages to _sys.stdout_
override this in modules which import this one to redirect output
**Arguments**
- msg | : the string to be d |
evidation-health/ContinuousTimeMarkovModel | src/ContinuousTimeMarkovModel/profilingUtil.py | Python | mit | 2,046 | 0.002444 | import time, inspect
# Optional line-by-line profiling decorator: uses line_profiler when it is
# installed, otherwise degrades to a transparent pass-through wrapper.
try:
    from line_profiler import LineProfiler

    def do_profile(follow=[]):
        """Decorator factory: line-profile the wrapped function.

        :param follow: additional functions whose lines should also be
            recorded in the profile output.
        """
        def inner(func):
            def profiled_func(*args, **kwargs):
                try:
                    profiler = LineProfiler()
                    profiler.add_function(func)
                    for f in follow:
                        profiler.add_function(f)
                    profiler.enable_by_count()
                    return func(*args, **kwargs)
                finally:
                    # Always dump stats, even when func raises.
                    profiler.print_stats()
            return profiled_func
        return inner
except ImportError:
    def do_profile(follow=[]):
        "Helpful if you accidentally leave in production!"
        # line_profiler unavailable: wrap the function without any profiling.
        def inner(func):
            def nothing(*args, **kwargs):
                return func(*args, **kwargs)
            return nothing
        return inner
def timefunc(f):
    """Decorator: print the wall-clock duration of every call to *f*.

    NOTE: uses Python 2 print statements; this module is Python 2 only.
    """
    def f_timer(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        end = time.time()
        # Recover the bound arguments so a method call can be labelled with
        # its class name as well as the function name.
        theArgs = inspect.getcallargs(f,*args)
        if 'self' in theArgs:
            print theArgs['self'].__class__.__name__, f.__name__, 'took', end - start, 'sec'
        else:
            print f.__name__, 'took', end - start, 'sec'
        return result
    return f_timer
class timewith():
    """Context-manager stopwatch.

    Records its creation time; ``elapsed`` gives total seconds since
    construction, ``delta`` gives seconds since the previous ``delta``
    read (or construction), and ``checkpoint`` prints a labelled timing
    line.  On context exit a final 'finished' checkpoint is printed.
    """

    def __init__(self, name=''):
        self.name = name
        self.start = time.time()
        self.prev = time.time()

    @property
    def elapsed(self):
        # Total seconds since construction.
        return time.time() - self.start

    @property
    def delta(self):
        # Seconds since the previous delta read; advances the marker.
        cur = time.time()
        diff = cur - self.prev
        self.prev = cur
        return diff

    def checkpoint(self, name=''):
        # Parenthesized print of a single expression behaves identically
        # under Python 2 and Python 3.
        print('{timer} {checkpoint} at {elapsed} took {delta} seconds'.format(
            timer=self.name,
            checkpoint=name,
            elapsed=self.elapsed,
            delta=self.delta,
        ).strip())

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.checkpoint('finished')
|
soerendip42/rdkit | rdkit/ML/InfoTheory/entropy.py | Python | bsd-3-clause | 2,823 | 0.01842 | #
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
""" Informational Entropy functions
The definitions used are the same as those in Tom Mitchell's
book "Machine Learning"
"""
import numpy
import math
from rdkit.six.moves import xrange
# try to get the C versions of these routines
try:
import rdkit.ML.InfoTheory.rdInfoTheory as cEntropy
except:
hascEntropy=0
else:
hascEntropy= | 1
# it's pretty obvious what this is for ;-)
_log2 = math.log(2)
def PyInfoEntropy(results):
    """ Calculates the informational entropy of a set of results.

    **Arguments**

      results is a 1D Numeric array containing the number of times a
      given set hits each possible result.
      For example, if a function has 3 possible results, and the
      variable in question hits them 5, 6 and 1 times each,
      results would be [5,6,1]

    **Returns**

      the informational entropy (in bits)

    """
    nInstances = float(sum(results))
    if nInstances == 0:
        # no observations -> no information (also avoids divide-by-zero)
        return 0
    probs = results / nInstances
    # Guard against log(0): wherever a probability is zero, substitute 1
    # (whose log is 0); the matching -probs factor is zero anyway, so the
    # summed term is unaffected.
    t = numpy.choose(numpy.greater(probs, 0.0), (1, probs))
    # numpy.log is base e; divide by log(2) to express entropy in bits.
    return sum(-probs * numpy.log(t) / math.log(2))
def PyInfoGain(varMat):
    """ calculates the information gain for a variable

    **Arguments**

      varMat is a Numeric array with the number of possible occurrences
      of each result for each possible value of the given variable.
      So, for a variable which adopts 4 possible values and a result
      which has 3 possible values, varMat would be 4x3

    **Returns**

      The expected information gain

    """
    # Sv in Mitchell's notation: total count for each value of the variable.
    countsPerValue = numpy.sum(varMat, 1)
    # S in Mitchell's notation: total count for each result overall.
    countsPerResult = numpy.sum(varMat, 0)

    # Entropy of each value's result distribution, weighted by |Sv|.
    weightedEntropy = 0
    for valueIdx, valueCount in enumerate(countsPerValue):
        weightedEntropy = weightedEntropy + valueCount * InfoEntropy(varMat[valueIdx])

    totalCount = sum(countsPerResult)
    if totalCount != 0.0:
        weightedEntropy = 1. / totalCount * weightedEntropy
        return InfoEntropy(countsPerResult) - weightedEntropy
    return 0
# if we have the C versions, use them, otherwise use the python stuff
# (public names InfoEntropy / InfoGain always resolve to the fastest
# implementation available)
if hascEntropy:
    InfoEntropy = cEntropy.InfoEntropy
    InfoGain = cEntropy.InfoGain
else:
    InfoEntropy = PyInfoEntropy
    InfoGain = PyInfoGain
|
ityaptin/ceilometer | ceilometer/tests/unit/hardware/pollsters/test_generic.py | Python | apache-2.0 | 7,408 | 0 | #
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import yaml
from oslo_config import fixture as fixture_config
from oslo_utils import fileutils
from oslotest import mockpatch
from ceilometer import declarative
from ceilometer.hardware.inspector import base as inspector_base
from ceilometer.hardware.pollsters import generic
from ceilometer import sample
from ceilometer.tests import base as test_base
class TestMeterDefinition(test_base.BaseTestCase):
    """Tests for generic.MeterDefinition configuration validation."""

    def test_config_definition(self):
        # A fully-specified meter config maps straight onto attributes.
        cfg = dict(name='test',
                   type='gauge',
                   unit='B',
                   snmp_inspector={})
        definition = generic.MeterDefinition(cfg)
        self.assertEqual('test', definition.name)
        self.assertEqual('gauge', definition.type)
        self.assertEqual('B', definition.unit)
        self.assertEqual({}, definition.snmp_inspector)

    def test_config_missing_field(self):
        # Omitting a required field raises with a targeted message.
        cfg = dict(name='test', type='gauge')
        try:
            generic.MeterDefinition(cfg)
        except declarative.MeterDefinitionException as e:
            self.assertEqual("Missing field unit", e.brief_message)

    def test_config_invalid_field(self):
        # Unknown fields are ignored rather than set as attributes.
        cfg = dict(name='test',
                   type='gauge',
                   unit='B',
                   invalid={})
        definition = generic.MeterDefinition(cfg)
        self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar'))

    def test_config_invalid_type_field(self):
        # A type outside the recognized set raises with a targeted message.
        cfg = dict(name='test',
                   type='invalid',
                   unit='B',
                   snmp_inspector={})
        try:
            generic.MeterDefinition(cfg)
        except declarative.MeterDefinitionException as e:
            self.assertEqual("Unrecognized type value invalid",
                             e.brief_message)

    @mock.patch('ceilometer.hardware.pollsters.generic.LOG')
    def test_bad_metric_skip(self, LOG):
        # A bad definition is logged and skipped; the valid ones survive.
        cfg = {'metric': [dict(name='test1',
                               type='gauge',
                               unit='B',
                               snmp_inspector={}),
                          dict(name='test_bad',
                               type='invalid',
                               unit='B',
                               snmp_inspector={}),
                          dict(name='test2',
                               type='gauge',
                               unit='B',
                               snmp_inspector={})]}
        data = generic.load_definition(cfg)
        self.assertEqual(2, len(data))
        LOG.error.assert_called_with(
            "Error loading meter definition: %s",
            "Unrecognized type value invalid")
class FakeInspector(inspector_base.Inspector):
    """Stub inspector that yields canned samples keyed by hostname."""

    # Network metadata attached to the 'test2' sample below.
    net_metadata = dict(name='test.teest',
                        mac='001122334455',
                        ip='10.0.0.2',
                        speed=1000)
    # hostname -> (value, metadata, extra) tuples returned by inspect_generic.
    DATA = {
        'test': (0.99, {}, {}),
        'test2': (90, net_metadata, {}),
    }

    def inspect_generic(self, host, cache,
                        extra_metadata=None, param=None):
        # Yield the single canned sample for this host; cache/param unused.
        yield self.DATA[host.hostname]
class TestGenericPollsters(test_base.BaseTestCase):
    """Tests for the generic declarative hardware pollster."""
    @staticmethod
    def faux_get_inspector(url, namespace=None):
        # Stand-in for ceilometer.hardware.inspector.get_inspector.
        return FakeInspector()
    def setUp(self):
        super(TestGenericPollsters, self).setUp()
        self.conf = self.useFixture(fixture_config.Config()).conf
        # Two resources matching the hostnames FakeInspector knows about.
        self.resources = ["snmp://test", "snmp://test2"]
        self.useFixture(mockpatch.Patch(
            'ceilometer.hardware.inspector.get_inspector',
            self.faux_get_inspector))
        self.conf(args=[])
        self.pollster = generic.GenericHardwareDeclarativePollster()
    def _setup_meter_def_file(self, cfg):
        """Write *cfg* (YAML text) to a temp file and load definitions from it."""
        if six.PY3:
            cfg = cfg.encode('utf-8')
        meter_cfg_file = fileutils.write_to_tempfile(content=cfg,
                                                     prefix="snmp",
                                                     suffix="yaml")
        self.conf.set_override(
            'meter_definitions_file',
            meter_cfg_file, group='hardware')
        cfg = declarative.load_definitions(
            self.conf, {}, self.conf.hardware.meter_definitions_file)
        return cfg
    def _check_get_samples(self, name, definition,
                           expected_value, expected_type, expected_unit=None):
        """Poll with *definition* and assert on the samples produced."""
        self.pollster._update_meter_definition(definition)
        cache = {}
        samples = list(self.pollster.get_samples(None, cache,
                                                 self.resources))
        self.assertTrue(samples)
        # Polling must populate the shared cache for every resource.
        self.assertIn(self.pollster.CACHE_KEY, cache)
        for resource in self.resources:
            self.assertIn(resource, cache[self.pollster.CACHE_KEY])
        self.assertEqual(set([name]),
                         set([s.name for s in samples]))
        match = [s for s in samples if s.name == name]
        self.assertEqual(expected_value, match[0].volume)
        self.assertEqual(expected_type, match[0].type)
        if expected_unit:
            self.assertEqual(expected_unit, match[0].unit)
    def test_get_samples(self):
        param = dict(matching_type='type_exact',
                     oid='1.3.6.1.4.1.2021.10.1.3.1',
                     type='lambda x: float(str(x))')
        meter_def = generic.MeterDefinition(dict(type='gauge',
                                                 name='hardware.test1',
                                                 unit='process',
                                                 snmp_inspector=param))
        # 0.99 is the canned volume FakeInspector yields for 'test'.
        self._check_get_samples('hardware.test1',
                                meter_def,
                                0.99, sample.TYPE_GAUGE,
                                expected_unit='process')
    def test_get_pollsters_extensions(self):
        param = dict(matching_type='type_exact',
                     oid='1.3.6.1.4.1.2021.10.1.3.1',
                     type='lambda x: float(str(x))')
        meter_cfg = yaml.dump(
            {'metric': [dict(type='gauge',
                             name='hardware.test1',
                             unit='process',
                             snmp_inspector=param),
                        dict(type='gauge',
                             name='hardware.test2.abc',
                             unit='process',
                             snmp_inspector=param)]})
        self._setup_meter_def_file(meter_cfg)
        pollster = generic.GenericHardwareDeclarativePollster
        # Clear cached mapping
        pollster.mapping = None
        # One extension per meter definition in the file.
        exts = pollster.get_pollsters_extensions()
        self.assertEqual(2, len(exts))
        self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc'])
        self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc'])
|
20tab/twentytab-sortable | sortable/__init__.py | Python | mit | 80 | 0 | VERSION = (0, 11)
__versio | n__ = '.'.join(map( | str, VERSION))
DATE = "2015-02-06"
|
aileron-split/aileron-web | server/team/views.py | Python | gpl-3.0 | 4,519 | 0.003541 | from rest_framework import viewsets, mixins
from .models import Member
from .serializers import MemberSerializer, MemberDetailSerializer
# Team app views.
# All team members
class MembersViewSet (viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints over all team members."""
    serializer_class = MemberSerializer
    queryset = Member.objects
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedMembersViewSet (mixins.ListModelMixin,
                               viewsets.GenericViewSet):
    """List published members, most recently published first."""
    serializer_class = MemberSerializer
    queryset = Member.objects.filter(published=True).order_by('-published_date')
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedMembersDetailViewSet (mixins.RetrieveModelMixin,
                                     viewsets.GenericViewSet):
    """Retrieve a single published member with the detail serializer."""
    serializer_class = MemberDetailSerializer
    queryset = Member.objects.filter(published=True).order_by('-published_date')
    def retrieve(self, request, *args, **kwargs):
        response = super(__class__, self).retrieve(request, *args, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
# Employees only
class EmployeesViewSet (viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints over full- and part-time employees only."""
    serializer_class = MemberSerializer
    queryset = Member.objects.filter(status__in=(Member.FULLTIME, Member.PARTTIME))
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedEmployeesViewSet (mixins.ListModelMixin,
                                 viewsets.GenericViewSet):
    """List published employees, oldest publication first."""
    serializer_class = MemberSerializer
    # NOTE(review): ascending 'published_date' here vs '-published_date' for
    # members — confirm the differing sort direction is intentional.
    queryset = Member.objects.filter(published=True, status__in=(Member.FULLTIME, Member.PARTTIME)).order_by('published_date')
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedEmployeesDetailViewSet (mixins.RetrieveModelMixin,
                                       viewsets.GenericViewSet):
    """Retrieve a single published employee with the detail serializer."""
    serializer_class = MemberDetailSerializer
    queryset = Member.objects.filter(published=True, status__in=(Member.FULLTIME, Member.PARTTIME)).order_by('published_date')
    def retrieve(self, request, *args, **kwargs):
        response = super(__class__, self).retrieve(request, *args, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
# Associates only
class AssociatesViewSet (viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints over associate members only."""
    serializer_class = MemberSerializer
    queryset = Member.objects.filter(status=Member.ASSOCIATE)
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedAssociatesViewSet (mixins.ListModelMixin,
                                  viewsets.GenericViewSet):
    """List published associates, ordered by last name descending."""
    serializer_class = MemberSerializer
    # NOTE(review): '-last_name' sorts reverse-alphabetically — confirm that
    # is the intended ordering rather than plain 'last_name'.
    queryset = Member.objects.filter(published=True, status=Member.ASSOCIATE).order_by('-last_name')
    def list(self, request, **kwargs):
        response = super(__class__, self).list(request, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
class PublishedAssociatesDetailViewSet (mixins.RetrieveModelMixin,
                                        viewsets.GenericViewSet):
    """Retrieve a single published associate with the detail serializer."""
    serializer_class = MemberDetailSerializer
    queryset = Member.objects.filter(published=True, status=Member.ASSOCIATE).order_by('-last_name')
    def retrieve(self, request, *args, **kwargs):
        response = super(__class__, self).retrieve(request, *args, **kwargs)
        response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
        return response
|
ddd332/presto | presto-docs/target/sphinx/rst2pdf/extensions/vectorpdf/pdfrw/pdfcompress.py | Python | apache-2.0 | 1,656 | 0.001812 | # A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2009 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
'''
Currently, this sad little file only knows how to decompress
using the flate (zlib) algorithm. Maybe more later, but it's
not a priority for me...
'''
from __future__ import generators
try:
set
except NameError:
from sets import Set as set
import zlib
from pdfobjects import PdfDict, PdfName
def streamobjects(mylist):
    """Yield every PdfDict in *mylist* that carries a (non-None) stream."""
    for candidate in mylist:
        if not isinstance(candidate, PdfDict):
            continue
        if candidate.stream is not None:
            yield candidate
def uncompress(mylist, warnings=set()):
    """Decompress, in place, the flate-encoded streams of the PDF objects
    in *mylist*.

    Objects using any other filter (or any DecodeParms) are left untouched
    and a warning is printed once per distinct message.

    NOTE(review): the mutable default for *warnings* appears deliberate —
    it persists across calls so each warning prints only once per process.
    """
    flate = PdfName.FlateDecode
    for obj in streamobjects(mylist):
        ftype = obj.Filter
        if ftype is None:
            # No filter at all: nothing to decompress.
            continue
        if isinstance(ftype, list) and len(ftype) == 1:
            # todo: multiple filters
            ftype = ftype[0]
        parms = obj.DecodeParms
        if ftype != flate or parms is not None:
            # Unsupported filter/parameters: warn (once) and skip.
            msg = 'Not decompressing: cannot use filter %s with parameters %s' % (repr(ftype), repr(parms))
            if msg not in warnings:
                warnings.add(msg)
                print msg
        else:
            obj.stream = zlib.decompress(obj.stream)
            # Clear the filter so the stream is now stored uncompressed.
            obj.Filter = None
obj.Filter = None
def compress(mylist):
    """Flate-compress, in place, every unfiltered stream in *mylist*.

    Objects that already have a Filter are skipped.
    """
    flate = PdfName.FlateDecode
    for obj in streamobjects(mylist):
        ftype = obj.Filter
        if ftype is not None:
            continue
        oldstr = obj.stream
        newstr = zlib.compress(oldstr)
        # NOTE(review): accepts up to 29 bytes of growth, so marginally
        # incompressible streams still get filtered — confirm intended.
        if len(newstr) < len(oldstr) + 30:
            obj.stream = newstr
            obj.Filter = flate
            obj.DecodeParms = None
|
jamesblunt/sympy | sympy/interactive/tests/test_ipythonprinting.py | Python | bsd-3-clause | 5,908 | 0.0022 | """Tests that the IPython printing module is properly loaded. """
import warnings
from sympy.core.compatibility import u
fr | om sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not pr | esent
if not ipython:
disabled = True
def test_ipythonprinting():
    """Plain text output before, and pretty (unicode) output after,
    loading the sympy printing extension in an IPython session."""
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import Symbol")
    # Printing without printing extension
    app.run_cell("a = format(Symbol('pi'))")
    app.run_cell("a2 = format(Symbol('pi')**2)")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        assert app.user_ns['a']['text/plain'] == "pi"
        assert app.user_ns['a2']['text/plain'] == "pi**2"
    else:
        assert app.user_ns['a'][0]['text/plain'] == "pi"
        assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
    # Load printing extension
    app.run_cell("from sympy import init_printing")
    app.run_cell("init_printing()")
    # Printing with printing extension
    app.run_cell("a = format(Symbol('pi'))")
    app.run_cell("a2 = format(Symbol('pi')**2)")
    # Deal with API change starting at IPython 1.0
    # Both ascii and unicode renderings are accepted, since the pretty
    # printer's unicode support depends on the environment.
    if int(ipython.__version__.split(".")[0]) < 1:
        assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
        assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
    else:
        assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
        assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
    """The print_builtin flag controls whether builtin containers (here a
    dict of sympy keys) get pretty/LaTeX rendering."""
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import Symbol")
    app.run_cell("from sympy import init_printing")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0
    # Before init_printing() there must be no latex representation at all.
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        raises(KeyError, lambda: app.user_ns['a']['text/latex'])
    else:
        text = app.user_ns['a'][0]['text/plain']
        raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note : In Python 3 the text is unicode, but in 2 it is a string.
    # XXX: How can we make this ignore the terminal width? This test fails if
    # the terminal is too narrow.
    assert text in ("{pi: 3.14, n_i: 3}",
                    u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
                    "{n_i: 3, pi: 3.14}",
                    u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    # If we enable the default printing, then the dictionary's should render
    # as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
    app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
    app.run_cell("init_printing(use_latex=True)")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        latex = app.user_ns['a']['text/latex']
    else:
        text = app.user_ns['a'][0]['text/plain']
        latex = app.user_ns['a'][0]['text/latex']
    assert text in ("{pi: 3.14, n_i: 3}",
                    u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
                    "{n_i: 3, pi: 3.14}",
                    u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
    # With print_builtin=False the dict must again have no latex form.
    app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
    app.run_cell("init_printing(use_latex=True, print_builtin=False)")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        raises(KeyError, lambda: app.user_ns['a']['text/latex'])
    else:
        text = app.user_ns['a'][0]['text/plain']
        raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note : In Python 3 the text is unicode, but in 2 it is a string.
    # Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
    # Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
    # Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
    assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
    """Rendering a Matrix with use_latex='matplotlib' must not emit a
    FormatterWarning (regression check for bad latex handling)."""
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("import IPython")
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import init_printing, Matrix")
    app.run_cell("init_printing(use_latex='matplotlib')")
    # The png formatter is not enabled by default in this context
    app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
    # Make sure no warnings are raised by IPython
    app.run_cell("import warnings")
    # Escalate FormatterWarning to an error so it would fail the cell.
    app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
    # This should not raise an exception
    app.run_cell("a = format(Matrix([1, 2, 3]))")
|
alexcoplan/p2proj | src/script/dist_comb.py | Python | mit | 4,527 | 0.02342 | # tpye hints
from typing import Dict,Any
# libs
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
import argparse
import json
""" returns (arithmetic, geometric) combinations of input distribution list """
def combine_dists(dist_list, entropy_bias):
    """Combine several probability distributions over the same event set.

    Each source distribution is weighted by its Shannon entropy raised to
    ``-entropy_bias``, so low-entropy (confident) distributions dominate as
    the bias grows; ``entropy_bias == 0`` weights all sources equally.

    Returns a ``(arithmetic, geometric)`` pair: the weighted arithmetic and
    weighted geometric means, each as a plain list summing to 1.
    """
    dists = np.array(dist_list)
    sums = np.sum(dists, axis=1)
    # Tolerant normalisation check: the inputs are built from divisions, so
    # exact float equality with 1.0 (as before) could fail spuriously.
    assert np.allclose(sums, np.ones_like(sums))
    dist_entropies = -np.sum(dists * np.log2(dists), axis=1)
    weights = np.expand_dims(dist_entropies ** (-entropy_bias), axis=0)
    # Weighted arithmetic mean of the source distributions.
    arith = np.squeeze(np.dot(np.transpose(dists), np.transpose(weights)) /
                       np.sum(weights))
    # Weighted geometric mean, renormalised to sum to 1.
    # (np.prod replaces np.product, which was removed in NumPy 2.0.)
    geo_weighted = np.power(dists, np.transpose(weights))
    product = np.prod(geo_weighted, axis=0)
    rooted = np.power(product, 1.0 / np.sum(weights))
    geom = rooted / np.sum(rooted)
    return list(arith), list(geom)
parser = argparse.ArgumentParser()
parser.add_argument("--gen-tests", default=False, action="store_true",
help="if this option is set, then the script will generate test cases for\
both arithmetic and geometric distribution combination. otherwise, we\
will plot some example combinations")
args = parser.parse_args()
# Example 4-event distributions of increasing peakedness.
flat_dist = [0.25]*4
skew_dist = [0.5,0.25,1.0/8.0, 1.0/8.0]
peak_dist = [3.0/4.0, 1.0/8.0, 1.0/16.0, 1.0/16.0]
vpeak_dist = [3.0/4.0, 3.0/16.0, 1.0/32.0, 1.0/32.0]
# Named combinations used to generate the JSON test fixtures below.
distributions = {
    "both_flat" : [flat_dist]*2,
    "both_skew" : [skew_dist,skew_dist],
    "flat+skew" : [flat_dist,skew_dist],
    "skew+flat" : [skew_dist,flat_dist],
    "peak+flat" : [peak_dist,flat_dist],
    "flat+peak" : [flat_dist,peak_dist],
    "opposite_skew" : [skew_dist, list(reversed(skew_dist))],
    "opposite_peak" : [list(reversed(peak_dist)), peak_dist],
    "skew+flat+oppskew" : [skew_dist, flat_dist, list(reversed(skew_dist))]
}
if args.gen_tests:
    # --gen-tests: dump combination results as JSON fixtures for the
    # C++/other-language implementation to check against.
    examples = []
    for name, dists in distributions.items():
        dist_matrix = np.array(dists)
        dist_totals = np.sum(dist_matrix, axis=1)
        # NOTE(review): exact float equality with 1.0 — fragile; works here
        # because the example distributions are exact binary fractions.
        assert np.all(dist_totals == np.ones_like(dist_totals)) # check everything adds to 1
        dlen = len(dists[0])
        for dist in dists:
            assert len(dist) == dlen # check distributions all over n events
        # One example per entropy-bias setting.
        for eb in [0.0, 1.0, 2.0, 6.0]:
            arith, geom = combine_dists(dists, eb)
            example_obj = {
                "example_name" : name,
                "dists" : dists,
                "entropy_bias" : eb,
                "arithmetic_comb" : arith,
                "geometric_comb" : geom
            }
            examples.append(example_obj)
    root_obj = {
        "dist_comb_examples" : examples
    }
    with open("test/combination_examples.json", 'w') as outfile:
        outfile.write(json.dumps(root_obj, indent=2))
else:
    # if we're not generating tests, then we'll just generate an example plot
    dist_a, dist_b = peak_dist, list(reversed(peak_dist))
    dist_c, dist_d = flat_dist, vpeak_dist
    dist_size = len(dist_a)
    assert dist_size == len(dist_b) == len(dist_c) == len(dist_d)
    arith_ab, geom_ab = combine_dists([dist_a, dist_b], 2.0)
    arith_cd, geom_cd = combine_dists([dist_c, dist_d], 2.0)
    bar_width = 0.4
    bar_idxs = np.arange(dist_size)
    # 3x2 grid: inputs A/B and their combination on the left column,
    # inputs C/D and their combination on the right.
    fig = plt.figure(figsize=(7,7))
    ax_a = fig.add_subplot(321)
    ax_b = fig.add_subplot(323)
    ax_ab = fig.add_subplot(325)
    ax_c = fig.add_subplot(322)
    ax_d = fig.add_subplot(324)
    ax_cd = fig.add_subplot(326)
    dista_bars = ax_a.bar(bar_idxs, dist_a, bar_width, color='r')
    distb_bars = ax_b.bar(bar_idxs, dist_b, bar_width, color='r')
    distc_bars = ax_c.bar(bar_idxs, dist_c, bar_width, color='r')
    distd_bars = ax_d.bar(bar_idxs, dist_d, bar_width, color='r')
    # Arithmetic (blue) and geometric (green) combinations side by side.
    arith_ab_bars = ax_ab.bar(bar_idxs, arith_ab, bar_width, color='b')
    geom_ab_bars = ax_ab.bar(bar_idxs + bar_width, geom_ab, bar_width, color='g')
    arith_cd_bars = ax_cd.bar(bar_idxs, arith_cd, bar_width, color='b')
    geom_cd_bars = ax_cd.bar(bar_idxs + bar_width, geom_cd, bar_width, color='g')
    for axes in [ax_a, ax_b, ax_ab, ax_c, ax_d, ax_cd]:
        axes.set_ylabel('Probability')
        axes.set_ylim([0.0,1.0])
    ax_a.set_title('Distribution A')
    ax_b.set_title('Distribution B')
    ax_c.set_title('Distribution C')
    ax_d.set_title('Distribution D')
    ax_ab.set_title('Combination of A and B')
    ax_cd.set_title('Combination of C and D')
    ax_ab.legend(
        (arith_ab_bars[0], geom_ab_bars[0]),
        ('Arithmetic', 'Geometric')
    )
    ax_cd.legend(
        (arith_cd_bars[0], geom_cd_bars[0]),
        ('Arithmetic', 'Geometric')
    )
    plt.tight_layout()
    plt.subplots_adjust(bottom=0.1)
    plt.savefig('comb.svg')
|
purushothamc/myibitsolutions | two_pointers/remove_duplicates_sorted_array.py | Python | gpl-3.0 | 336 | 0.011905 | def removeDuplicates(A):
if not A:
return A
lenA = len(A)
k = 0
A[k] = A[0]
for idx in xrange(lenA):
| if idx+1 < lenA and A[k] == A[idx+1]:
continue
else:
k += 1
if idx + 1 < lenA:
A[k] = A[idx+1]
print k
A | = [1,2,3]
removeDuplicates(A) |
ledatelescope/bifrost | testbench/your_first_block.py | Python | bsd-3-clause | 3,507 | 0.003137 | #!/usr/bin/env python
# Copyright (c) 2017-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# your_first_block.py
This testbench initializes a simple bifrost pipeline that reads from a binary file,
and then writes the data to an output file.
"""
# Python2 compatibility
from __future__ import print_function
import os
import numpy as np
import bifrost.pipeline as bfp
from bifrost.blocks import BinaryFileReadBlock, BinaryFileWriteBlock
import glob
from datetime import datetime
from copy import deepcopy
from pprint import pprint
class UselessAddBlock(bfp.TransformBlock):
    """Transform block that adds a constant offset to every sample."""

    def __init__(self, iring, n_to_add, *args, **kwargs):
        super(UselessAddBlock, self).__init__(iring, *args, **kwargs)
        self.n_to_add = n_to_add

    def on_sequence(self, iseq):
        """Copy the input header, tagging the output sequence's name."""
        header = deepcopy(iseq.header)
        header["name"] += "_with_added_value"
        return header

    def on_data(self, ispan, ospan):
        """Write input-plus-offset into the output span; frame count is
        passed through unchanged."""
        ospan.data[...] = ispan.data + self.n_to_add
        return ispan.nframe
class PrintStuffBlock(bfp.SinkBlock):
    """Sink block that logs the sequence header and every 100th data span."""
    def __init__(self, iring, *args, **kwargs):
        super(PrintStuffBlock, self).__init__(iring, *args, **kwargs)
        # Counts spans seen within the current sequence.
        self.n_iter = 0
    def on_sequence(self, iseq):
        """Print a timestamp, the sequence name and its header; reset the
        span counter for the new sequence."""
        print("[%s]" % datetime.now())
        print(iseq.name)
        pprint(iseq.header)
        self.n_iter = 0
    def on_data(self, ispan):
        """Print the span data once every 100 spans to limit output volume."""
        now = datetime.now()
        if self.n_iter % 100 == 0:
            print("[%s] %s" % (now, ispan.data))
        self.n_iter += 1
i | f __name__ == "__main__":
# Setup pipeline
filenames = sorted(glob.glob('testdata/sin_data*.bin'))
b_read = BinaryFileReadBlock(filenames, 32768, 1, 'f32')
b_add = UselessAddBlock(b_read, n_to_add=100)
b_print = PrintStuffBlock(b_read)
b_print2 = PrintStuffBlock(b_add)
# Run pipeline
pipeline = bfp.get_default_pipeline()
print(pipeline.dot_graph())
pipeline.run()
|
tdyas/pants | contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py | Python | apache-2.0 | 2,841 | 0.002112 | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import requests
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_method
from pants.contrib.go.subsystems.imported_repo import ImportedRepo
class GoImportMetaTagReader(Subsystem):
    """Implements a reader for the <meta name="go-import"> protocol.

    See https://golang.org/cmd/go/#hdr-Remote_import_paths .
    """

    options_scope = "go-import-metatag-reader"

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register(
            "--retries",
            type=int,
            default=1,
            advanced=True,
            help="How many times to retry when fetching meta tags.",
        )

    # Matches e.g. <meta name="go-import" content="root vcs url">, capturing
    # the three whitespace-separated fields of the content attribute.
    _META_IMPORT_REGEX = re.compile(
        r"""
    <meta
        \s+
        name=['"]go-import['"]
        \s+
        content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
        \s*
        /?>""",
        flags=re.VERBOSE,
    )

    @classmethod
    def find_meta_tags(cls, page_html):
        """Returns the content of the meta tag if found inside of the provided HTML."""
        # Each element is a (root, vcs, url) tuple.
        return cls._META_IMPORT_REGEX.findall(page_html)

    @memoized_method
    def get_imported_repo(self, import_path):
        """Looks for a go-import meta tag for the provided import_path.

        Returns an ImportedRepo instance with the information in the meta tag,
        or None if no go-import meta tag is found.
        """
        try:
            session = requests.session()
            # TODO: Support https with (optional) fallback to http, as Go does.
            # See https://github.com/pantsbuild/pants/issues/3503.
            # The retry adapter is mounted for http only, matching the http
            # request below. NOTE(review): no timeout is set on the GET —
            # a hung server would block indefinitely; consider adding one.
            session.mount(
                "http://", requests.adapters.HTTPAdapter(max_retries=self.get_options().retries)
            )
            page_data = session.get(f"http://{import_path}?go-get=1")
        except requests.ConnectionError:
            return None

        # Response truthiness: falsy for 4xx/5xx status codes.
        if not page_data:
            return None

        # Return the first match, rather than doing some kind of longest prefix search.
        # Hopefully no one returns multiple valid go-import meta tags.
        for (root, vcs, url) in self.find_meta_tags(page_data.text):
            if root and vcs and url:
                # Check to make sure returned root is an exact match to the provided import path. If it is
                # not then run a recursive check on the returned and return the values provided by that call.
                if root == import_path:
                    return ImportedRepo(root, vcs, url)
                elif import_path.startswith(root):
                    # Re-resolve at the declared root (memoized, so cheap).
                    return self.get_imported_repo(root)

        return None
|
TheTimmy/spack | var/spack/repos/builtin/packages/font-isas-misc/package.py | Python | lgpl-2.1 | 2,105 | 0.00095 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY o | r FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fon | tIsasMisc(Package):
"""X.org isas-misc font."""
homepage = "http://cgit.freedesktop.org/xorg/font/isas-misc"
url = "https://www.x.org/archive/individual/font/font-isas-misc-1.0.3.tar.gz"
version('1.0.3', 'ecc3b6fbe8f5721ddf5c7fc66f73e76f')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('bdftopcf', type='build')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make()
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
|
zyzyis/monetdb | sql/test/Users/Tests/table.SQL.py | Python | mpl-2.0 | 488 | 0.02459 | ###
# SET a GRANTed ROLE for a USER (possible).
# CREATE TABLE and INSERT (possible).
###
import os, sys
try:
    from MonetDBtesting import process
except ImportError:
    # Fall back to a locally importable 'process' module when running
    # outside the MonetDB testing framework.
    import process

# Run table.sql (from the parent test directory) through the SQL client
# as 'my_user' and echo the client's stdout/stderr for the test harness.
clt = process.client('sql', user = 'my_user', passwd = 'p1',
                     stdin = open(os.path.join(os.getenv('RELSRCDIR'), os.pardir, 'table.sql')),
                     stdout = process.PIPE, stderr = process.PIPE)
out, err = clt.communicate()
sys.stdout.write(out)
sys.stderr.write(err)
|
marianinn/mingus | mingus/containers/track.py | Python | gpl-3.0 | 7,328 | 0.001365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, track module.
# Copyright (C) 2008-2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mt_exceptions import InstrumentRangeError, UnexpectedObjectError
from mingus.containers.bar import Bar
from mingus.containers.note_container import NoteContainer
import mingus.core.value as value
class Track(object):
    """A track object.

    The Track class can be used to store Bars and to work on them.

    The class is also designed to be used with Instruments, but this is
    optional.

    Tracks can be stored together in Compositions.
    """

    # Class-level defaults; bars is rebound to a fresh list per instance
    # in __init__, so instances never share the class-level list.
    bars = []
    instrument = None
    name = 'Untitled'  # Will be looked for when saving a MIDI file.
    tuning = None  # Used by tablature

    def __init__(self, instrument=None):
        self.bars = []
        self.instrument = instrument

    def add_bar(self, bar):
        """Add a Bar to the current track."""
        self.bars.append(bar)
        return self

    def add_notes(self, note, duration=None):
        """Add a Note, note as string or NoteContainer to the last Bar.

        If the Bar is full, a new one will automatically be created.

        If the Bar is not full but the note can't fit in, this method will
        return False. True otherwise.

        An InstrumentRangeError exception will be raised if an Instrument is
        attached to the Track, but the note turns out not to be within the
        range of the Instrument.
        """
        if self.instrument is not None:
            if not self.instrument.can_play_notes(note):
                # Call-style raise: valid in both Python 2 and 3 (the old
                # "raise E, msg" form is Python 2 only).
                raise InstrumentRangeError(
                    "Note '%s' is not in range of the instrument (%s)"
                    % (note, self.instrument))
        if duration is None:
            duration = 4

        # Check whether the last bar is full, if so create a new bar and add
        # the note there.
        if len(self.bars) == 0:
            self.bars.append(Bar())
        last_bar = self.bars[-1]
        if last_bar.is_full():
            self.bars.append(Bar(last_bar.key, last_bar.meter))
            # warning should hold note if it doesn't fit

        return self.bars[-1].place_notes(note, duration)

    def get_notes(self):
        """Return an iterator that iterates through every bar in the this
        track."""
        for bar in self.bars:
            for beat, duration, notes in bar:
                yield beat, duration, notes

    def from_chords(self, chords, duration=1):
        """Add chords to the Track.

        The given chords should be a list of shorthand strings or list of
        list of shorthand strings, etc.

        Each sublist divides the value by 2.

        If a tuning is set, chords will be expanded so they have a proper
        fingering.

        Example:
        >>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
        """
        tun = self.get_tuning()

        def add_chord(chord, duration):
            if isinstance(chord, list):
                # Each nesting level halves the note value of its members.
                for c in chord:
                    add_chord(c, duration * 2)
            else:
                chord = NoteContainer().from_chord(chord)
                if tun:
                    chord = tun.find_chord_fingering(chord,
                            return_best_as_NoteContainer=True)
                if not self.add_notes(chord, duration):
                    # This should be the standard behaviour of add_notes:
                    # fill the remainder of the bar, then carry the rest
                    # of the duration over into the next bar.
                    dur = self.bars[-1].value_left()
                    self.add_notes(chord, dur)
                    # warning should hold note
                    self.add_notes(chord, value.subtract(duration, dur))

        for c in chords:
            if c is not None:
                add_chord(c, duration)
            else:
                # None stands for a rest of the given duration.
                self.add_notes(None, duration)
        return self

    def get_tuning(self):
        """Return a StringTuning object.

        If an instrument is set and has a tuning it will be returned.
        Otherwise the track's one will be used.
        """
        if self.instrument and self.instrument.tuning:
            return self.instrument.tuning
        return self.tuning

    def set_tuning(self, tuning):
        """Set the tuning attribute on both the Track and its instrument (when
        available).

        Tuning should be a StringTuning or derivative object.
        """
        if self.instrument:
            self.instrument.tuning = tuning
        self.tuning = tuning
        return self

    def transpose(self, interval, up=True):
        """Transpose all the notes in the track up or down the interval.

        Call transpose() on every Bar.
        """
        for bar in self.bars:
            bar.transpose(interval, up)
        return self

    def augment(self):
        """Augment all the bars in the Track."""
        for bar in self.bars:
            bar.augment()
        return self

    def diminish(self):
        """Diminish all the bars in the Track."""
        for bar in self.bars:
            bar.diminish()
        return self

    def __add__(self, value):
        """Enable the '+' operator for Tracks.

        Notes, notes as string, NoteContainers and Bars accepted.
        """
        if hasattr(value, 'bar'):
            return self.add_bar(value)
        elif hasattr(value, 'notes'):
            return self.add_notes(value)
        elif hasattr(value, 'name') or isinstance(value, str):
            return self.add_notes(value)

    def test_integrity(self):
        """Test whether all but the last Bars contained in this track are
        full."""
        for b in self.bars[:-1]:
            if not b.is_full():
                return False
        return True

    def __eq__(self, other):
        """Enable the '==' operator for tracks."""
        # Fixed: the original compared only bars[0:len-1] (never the last
        # bar) and ignored length differences, so e.g. [A] == [A, B]
        # evaluated True.
        return self.bars == other.bars

    def __ne__(self, other):
        """Enable the '!=' operator for tracks (Python 2 does not derive
        it from __eq__)."""
        return not self.__eq__(other)

    def __getitem__(self, index):
        """Enable the '[]' notation for Tracks."""
        return self.bars[index]

    def __setitem__(self, index, value):
        """Enable the '[] =' notation for Tracks.

        Throw an UnexpectedObjectError if the value being set is not a
        mingus.containers.Bar object.
        """
        if not hasattr(value, 'bar'):
            # UnexpectedObjectError was previously an undefined name here;
            # it is now imported from mt_exceptions.
            raise UnexpectedObjectError("Unexpected object '%s', "
                    "expecting a mingus.containers.Bar object" % value)
        self.bars[index] = value

    def __repr__(self):
        """Return a string representing the class."""
        return str([self.instrument, self.bars])

    def __len__(self):
        """Enable the len() function for Tracks."""
        return len(self.bars)
|
dtattersall/BookSocialGraph | amazon_db.py | Python | mit | 5,605 | 0.023194 | '''
this script is used to create a sqlite databse for all the book pages we collected. It reads from book_info.txt and writes the data to the book_attribute table in amazon.db. It also creates an edgelist table in the database.
c1 to c10 are copurchases with the book.
'''
import sqlite3 as lite
import re
rPrice=re.compile(r'\d+\.\d+')
amazon_db = lite.connect("amazon.db")
amazon_db.text_factory = str
with amazon_db:
cur=amazon_db.cursor()
cur.execute("drop table if exists book_attribute")
cur.execute("create table book_attribute(id int, lang text, asin text, isbn text, nrevs int, format text, url text, price real, title text, publisher text, rank int, c1 text, c2 text, c3 text, c4 text, c5 text, c6 text, c7 text, c8 text, c9 text, c10 text, stars real, ncps int, listprice real)")
# parse the data into a db table
f = open('book_info.txt','r')
id=0
test_lim=100000000000
for count, line in enumerate(f):
if count%1000000==0:
print count
if count%18==1 and count<test_lim:
id+=1
lang, asin, isbn, nrevs, format, url, price, title, publisher, rank = None, None, None, None, None, None, None, None, None, None
c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 = None, None, None, None, None, None, None, None, None, None
categories, stars, ncps, listprice = None, None, None, None
lang = line.lstrip('lang:--').rstrip('\n')
if count%18==2 and count<test_lim:
asin = line.lstrip('asin:--').rstrip('\n | ')
if count%18==3 and count<test_lim:
isbn = line.lstrip('isbn:--').rstrip('\n')
if count%18==4 and count<test_lim:
nrevs = line.lstrip('nrevs:--').rstrip('\n')
if count%18==5 and count<test_lim:
format = line.lstrip('format:--').rstrip('\n')
if count%18==6 and count<test_li | m:
url = line.lstrip('url:--').rstrip('\n')
if count%18==7 and count<test_lim:
price = line.lstrip('price:--').rstrip('\n').replace(',','')
if count%18==8 and count<test_lim:
title = line.lstrip('title:--').rstrip('\n')
if count%18==9 and count<test_lim:
publisher = line.lstrip('publisher:--').rstrip('\n')
if count%18==10 and count<test_lim:
rank = line.lstrip('rank:--').rstrip('\n')
if count%18==11 and count<test_lim:
categories = line.lstrip('categories:--').rstrip('\n')
if count%18==12 and count<test_lim:
stars = line.lstrip('stars:--').rstrip('\n')
if count%18==14 and count<test_lim:
copurchasing_list = line.lstrip('copurchasing_list:--').rstrip('\n')
if count%18==15 and count<test_lim:
listprice = line.lstrip('listprice:--').rstrip('\n').replace(',','')
if count%18==17 and count<test_lim:
if nrevs!="None": nrevs=int(nrevs)
else: nrevs=0
if price!="None":
try:
price=float(rPrice.findall(price)[0])
except:
price=-1
print "price error!!", isbn
else: price=-1
if listprice!="None":
try:
listprice=float(rPrice.findall(listprice)[0])
except:
listprice=-1
print "listprice error!!", isbn
else: listprice=-1
if rank!='None': rank=int(rank.replace(',',''))
else: rank=-1
categories=categories.lstrip('None').replace(' ','').split('>>--')
try:
c1=categories[0]
c2=categories[1]
c3=categories[2]
c4=categories[3]
c5=categories[4]
c6=categories[5]
c7=categories[6]
c8=categories[7]
c9=categories[8]
c10=categories[9]
except:
a=0
ncps=len(categories)
cur.execute("insert into book_attribute values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
id, lang, asin, isbn, nrevs,
format, url, price, title, publisher, rank,
c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
stars, ncps, listprice))
if count>test_lim:
break
f.close()
# build the cop_list table in which the column entries are book nodes of the copurchasing ties.
amazon_db = lite.connect("amazon.db")
with amazon_db:
cur=amazon_db.cursor()
isbn_=set(cur.execute("select isbn from book_attribute where isbn!='None'"))
isbn_set = set()
for item in isbn_:
isbn_set.add(item[0])
print len(isbn_set)
with amazon_db:
cur=amazon_db.cursor()
cur.execute("drop table if exists cop_list")
cur.execute("create table cop_list(book1 text, book2 text)")
edge_list = list()
f = open('book_info.txt','r')
for count, line in enumerate(f):
if count%1000000==0:
print count
if count%18==3:
book1, book2 = None, None
book1 = line.lstrip('isbn:--').rstrip('\n')
if count%18==14:
copurchaisng=line.lstrip("copurchasing_list:--").rstrip('\n')
copurchaisng=copurchaisng.split(',')
for book2 in copurchaisng:
if book2 in isbn_set:
edge_list.append((book1, book2))
cur.executemany("insert into cop_list values(?,?)", edge_list)
f.close()
|
LikeMyBread/Saylua | saylua/modules/forums/forms/main.py | Python | agpl-3.0 | 632 | 0 | from flask_wtf import FlaskForm
from saylua.utils.form import sl_validators
from saylua.utils.form.fields import SlField, SlTextAreaField
class ForumThreadForm(FlaskForm):
title = SlField('Thread Title', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(3)])
body = SlTextAreaField('Thread Body', [
sl_v | alidators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(2)])
class ForumPostForm(FlaskForm):
body = SlTextAreaField('Post Content', [
sl_validators.Req | uired(),
sl_validators.NotBlank(),
sl_validators.Min(2)])
|
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_feature_filter127.py | Python | mit | 7,271 | 0.000275 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_feature_filter127_all_of
except ImportError:
bt_feature_filter127_all_of = sys.modules[
"onshape_client.oas.models.bt_feature_filter127_all_of"
]
try:
from onshape_client.oas.models import bt_query_filter183
except ImportError:
bt_query_filter183 = sys.modules["onshape_client.oas.models.bt_query_filter183"]
class BTFeatureFilter127(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("exclusion",): {
"EXCLUDE_EVERYTHING_ELSE": "EXCLUDE_EVERYTHING_ELSE",
"EXCLUDE_THIS": "EXCLUDE_THIS",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"exclusion": (str,), # noqa: E501
"feature_id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "b | tType", # noqa: E501
"exclusion": "exclusion", # noqa: E501
"feature_id": "featureId", # noqa: E501
}
required_properties = set(
[
"_data_store", |
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_feature_filter127.BTFeatureFilter127 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
exclusion (str): [optional] # noqa: E501
feature_id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_feature_filter127_all_of.BTFeatureFilter127AllOf,
bt_query_filter183.BTQueryFilter183,
],
"oneOf": [],
}
|
andyeff/skybot | plugins/countdown.py | Python | unlicense | 1,465 | 0.026621 | from util import http, hook
import time
import datetime
@hook.command(autohelp=False)
def countdown(inp, say=None):
".countdown -- countdown timer for game releases"
curtime = time.time()
items=(
('test1','Te | st Event One','25 dec 14 00 00'),
('test2','Test Event Two','22 nov 16 00 00'),
)
dict_date={}
for i in items:
dict_date[i[0]] = i[2]
dict_desc={}
for i in items:
dict_desc[i[0]] = i[1]
tracked = []
for i in items:
tracked.append(i[0])
if len(inp) == 0:
return 'Things I\'m tracking the countdowns of: %s ' % (" ".join(entry[0] for entry in items))
elif inp not in dict_date:
return 'Not tracking that game! :('
rlstime = time.mktime(time.st | rptime(dict_date[inp], "%d %b %y %H %M"))
if curtime > rlstime:
return "it is released!!"
difftime = datetime.timedelta(seconds=(rlstime - curtime))
def countdowner(td):
return td.days, td.seconds//3600, (td.seconds//60)%60
res = countdowner(difftime)
if res[0] == 1:
s_days = "day"
else:
s_days = "days"
if res[1] == 1:
s_hours = "hour"
else:
s_hours = "hours"
if res[2] == 1:
s_mins = "minute"
else:
s_mins = "minutes"
# return '%s days, %s hours, %s minutes until %s is released!' % (res[0], res[1], res[2], dict_desc[inp])
return '{0} {3}, {1} {4}, {2} {5} until {6} is released! ( {7} )'.format(res[0], res[1], res[2], s_days, s_hours, s_mins, dict_desc[inp], dict_date[inp][0:9])
|
bieschke/nuffle | lib/python/sqlobject/index.py | Python | gpl-2.0 | 5,230 | 0.002486 | from types import *
import col
from converters import sqlrepr
class SODatabaseIndex(object):
def __init__(self,
soClass,
name,
columns,
unique=False):
self.soClass = soClass
self.name = name
self.descriptions = self.convertColumns | (columns)
self.unique = unique
def convertColumns(self, columns):
"""
Converts all the columns to dictionary descriptors;
dereferences string column names.
"""
new = []
for desc in columns:
if not isinstance(desc, dict):
desc = {'column': desc}
if desc.has_key('expression'):
assert not desc.has_key('column'), (
'You cannot provide b | oth an expression and a column '
'(for %s in index %s in %s)' %
(desc, self.name, self.soClass))
assert not desc.has_key('length'), (
'length does not apply to expressions (for %s in '
'index %s in %s)' %
(desc, self.name, self.soClass))
new.append(desc)
continue
columnName = desc['column']
if not isinstance(columnName, str):
columnName = columnName.name
colDict = self.soClass.sqlmeta.columns
if not colDict.has_key(columnName):
for possible in colDict.values():
if possible.origName == columnName:
column = possible
break
else:
# None found
raise ValueError, "The column by the name %r was not found in the class %r" % (columnName, self.soClass)
else:
column = colDict[columnName]
desc['column'] = column
new.append(desc)
return new
def getExpression(self, desc, db):
if isinstance(desc['expression'], str):
return desc['expression']
else:
return sqlrepr(desc['expression'], db)
def sqliteCreateIndexSQL(self, soClass):
if self.unique:
uniqueOrIndex = 'UNIQUE INDEX'
else:
uniqueOrIndex = 'INDEX'
spec = []
for desc in self.descriptions:
if desc.has_key('expression'):
spec.append(self.getExpression(desc, 'sqlite'))
else:
spec.append(desc['column'].dbName)
ret = 'CREATE %s %s_%s ON %s (%s)' % \
(uniqueOrIndex,
self.soClass.sqlmeta.table,
self.name,
self.soClass.sqlmeta.table,
', '.join(spec))
return ret
postgresCreateIndexSQL = maxdbCreateIndexSQL = sybaseCreateIndexSQL = firebirdCreateIndexSQL = sqliteCreateIndexSQL
def mysqlCreateIndexSQL(self, soClass):
if self.unique:
uniqueOrIndex = 'UNIQUE'
else:
uniqueOrIndex = 'INDEX'
spec = []
for desc in self.descriptions:
if desc.has_key('expression'):
spec.append(self.getExpression(desc, 'mysql'))
elif desc.has_key('length'):
spec.append('%s(%d)' % (desc['column'].dbName, desc['length']))
else:
spec.append(desc['column'].dbName)
return 'ALTER TABLE %s ADD %s %s (%s)' % \
(soClass.sqlmeta.table, uniqueOrIndex,
self.name,
', '.join(spec))
class DatabaseIndex(object):
"""
This takes a variable number of parameters, each of which is a
column for indexing. Each column may be a column object or the
string name of the column (*not* the database name). You may also
use dictionaries, to further customize the indexing of the column.
The dictionary may have certain keys:
'column':
The column object or string identifier.
'length':
MySQL will only index the first N characters if this is
given. For other databases this is ignored.
'expression':
You can create an index based on an expression, e.g.,
'lower(column)'. This can either be a string or a sqlbuilder
expression.
Further keys may be added to the column specs in the future.
The class also take the keyword argument `unique`; if true then
a UNIQUE index is created.
"""
baseClass = SODatabaseIndex
def __init__(self, *columns, **kw):
kw['columns'] = columns
self.kw = kw
def setName(self, value):
assert self.kw.get('name') is None, "You cannot change a name after it has already been set (from %s to %s)" % (self.kw['name'], value)
self.kw['name'] = value
def _get_name(self):
return kw['name']
def _set_name(self, value):
self.setName(value)
name = property(_get_name, _set_name)
def withClass(self, soClass):
return self.baseClass(soClass=soClass, **self.kw)
def __repr__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
hex(abs(id(self)))[2:],
self.kw)
__all__ = ['DatabaseIndex']
|
Taifxx/xxtrep | context.addtolib/lfm_service.py | Python | gpl-3.0 | 2,631 | 0.016356 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## LAUNCHING FROM MEMORY SERVICE:
### Import modules ...
import context_ex as context
import context as cstart
import deb
### Define ...
ITD_FILE = cstart.ITD_FILE
error_file = cstart.error_file
adnname = context.tl(context.TAG_TTL_NM) % (context.addon.name)
### Messages ...
msgStrat = 'Launching from memory service started ...'
msgEnd = 'Launching from memory service stopped ...'
msgStratVisual = 'LFM service started'
msgEndVisual = 'LFM service stopped'
msgProcessError = 'Process ERROR'
### Base functions ...
log = lambda event : cont | ext.xbmc.log('[%s] >> %s' % (context.addon.id, event))
def starter():
isRaise = False
try:
context.plgMain (importLI=ITD_FILE)
except Exception as exc:
context.DOS.delf(context.DOS.join(context.addon.profile, context.TAG_PAR_STRARTF))
context.GUI.msgf(adnname, msgProcessError, context.GUI.notError)
deb.addraise(context.DOS.join(context.addon.profile, error_file))
isRaise | = True
finally:
## If user try double run ...
if context.xbmcvfs.exists(context.DOS.join(context.addon.profile, ITD_FILE)):
context.DOS.delf(context.DOS.join(context.addon.profile, ITD_FILE))
if isRaise : raise
### Main ...
def service(externalAbort, report):
## Load monitor ...
monitor = context.xbmc.Monitor()
## Log start ...
log(msgStrat)
if report : context.GUI.msg(adnname, msgStratVisual)
## Start service ...
while not monitor.abortRequested():
## Check starter ...
if context.xbmcvfs.exists(context.DOS.join(context.addon.profile, ITD_FILE)) : starter()
## Check exit ...
if monitor.waitForAbort(1) or externalAbort() : break
## End service (log end) ...
del monitor
log(msgEnd)
context.GUI.msg(adnname, msgEndVisual)
|
TwigWorld/Impostor | impostor/forms.py | Python | mit | 252 | 0.007937 | from django.contrib.auth.forms import AuthenticationForm
fr | om django import forms
from django.utils.translation import ugettext_lazy as _
class BigAuthenticationForm(AuthenticationForm):
| username = forms.CharField(label=_("Username"), max_length=70)
|
CiscoSystems/os-sqe | lab/scenarios/server_resume.py | Python | apache-2.0 | 506 | 0 | from lab.test_case_worker import TestCaseWorker
class ServerResume(TestCaseWorker):
def check_arguments(self, | **kwargs):
pass
# noinspection PyAttributeOutsideInit
def setup_worker(self):
self.name = self._kwargs.get('name', '')
def | loop_worker(self):
servers = self._cloud.os_server_list()
for server in servers:
server_name = server['Name']
if self.name in server_name:
self._cloud.os_server_resume(server_name)
|
GoogleCloudPlatform/python-docs-samples | workflows/cloud-client/main.py | Python | apache-2.0 | 2,637 | 0.001138 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def execute_workflow(
project, location="us-central1", workflow="myFirstWorkflow"
):
"""Execute a workflow and print the execution results."""
# [START workflows_api_quickstart]
import time
from google.cloud import workflows_v1beta
from google.cloud.workflows import executions_v1beta
from google.cloud.workflows.executions_v1beta.types import executions
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
# location = 'us-central1'
# workflow = 'myFirstWorkflow'
if not project:
raise Exception('GOOGLE_CLOUD_PROJECT env var is required.')
# Set up API clients.
execution_client = executions_v1beta.ExecutionsClient()
workflows_client = workflows_v1beta.WorkflowsClient()
# Construct the fully qualified loc | ation path.
parent = workflows_client.workflow_path(project, location, workflow)
# Execute the workflow.
response = execution_client.create_execution(request={"parent": parent})
print(f"Created execution: {response.name}")
| # Wait for execution to finish, then print results.
execution_finished = False
backoff_delay = 1 # Start wait with delay of 1 second
print('Poll every second for result...')
while (not execution_finished):
execution = execution_client.get_execution(request={"name": response.name})
execution_finished = execution.state != executions.Execution.State.ACTIVE
# If we haven't seen the result yet, wait a second.
if not execution_finished:
print('- Waiting for results...')
time.sleep(backoff_delay)
backoff_delay *= 2 # Double the delay to provide exponential backoff.
else:
print(f'Execution finished with state: {execution.state.name}')
print(execution.result)
return execution.result
# [END workflows_api_quickstart]
if __name__ == "__main__":
project = os.environ.get('GOOGLE_CLOUD_PROJECT')
execute_workflow(project=project)
|
kitizz/edge3d-paper | src/SandBox.py | Python | gpl-3.0 | 23,475 | 0.004047 | '''
SandBox.py
----------
Code that I was using to experiment with and understand how edges behaved.
Very raw, a bit of a mess. Read at your own peril. I've included it in the
name of being open, and would be a good place to start if anyone wanted to
understand the algorithms with the help of Nutmeg for visualization.
'''
from numpy import empty, zeros, dot, sqrt
from numpy.linalg import norm
from tools import IO, Geom, Util
from tools.FastMath import norm3, cross3
from skimage.morphology import skeletonize
import Contours
import RayCloud
import RayVoxel
from RaySoup import extract_rays, build_voxel_grid
import pynutmeg
import os
import glob
import time
np.set_printoptions(suppress=True, linewidth=160)
@jit(nopython=True)
def to_global(Qs, frames, Rs):
N = len(frames)
out = empty((N,3), np.float32)
for i in range(N):
out[i] = dot( Rs[frames[i]], Qs[i] )
return out
| @jit(no | python=True)
def find_close_rays(Cs, Qs, frames, ind, eps=1e-3, min_angle=np.pi/6, min_depth=0.1, max_depth=np.inf, cone=False):
c = Cs[ind]
q = Qs[ind]
frame = frames[ind]
N = Cs.shape[0]
sel = empty(N, np.int64)
j = 0
for i in range(N):
if frames[i] == frame:
continue
c2 = Cs[i]
q2 = Qs[i]
dist, depth = Geom.line_distance_3d(c, q, c2, q2)
if (not cone and dist > eps) or (cone and dist*depth > eps) or not (0.1 < depth < max_depth):
continue
# # Want to check if it's close enough to plane defined as
# # tangent to q and n
# dc = (c2 - c)
# dc -= dc * dot(dc, q) # Remove q component
# mag = norm3(dc)
# if mag == 0:
# # Other view lies on the same ray
# continue
# dc /= mag
# if abs(dot(dc, n)) < min_cos:
# continue
sel[j] = i
j += 1
return sel[:j]
@jit(nopython=True)
def find_in_cone(cone_center, cone_ray, cone_radius, Cs, Qs):
N = Cs.shape[0]
sel = empty(N, np.int64)
j = 0
for i in range(N):
in_cone = Geom.check_intersect_cone_ray(cone_center, cone_ray, cone_radius, Cs[i], Qs[i])
if in_cone:
sel[j] = i
j += 1
return sel[:j]
@jit(nopython=True)
def find_intersections(Cs, Qs, plucker, ray_ind, plane, sel):
'''
Cs, Qs, plucker: Canonical and Plucker representations of set of rays
ray_ind: Specific inds of the rays/ray triangle that's being intersected
plane: Plane describing the ray triangle
sel: Subset of rays to check
'''
N = Cs.shape[0]
pl1 = plucker[ray_ind: ray_ind+2]
intersects = empty(N, np.int64)
ps = empty((N,3), Cs.dtype)
us = empty((N,3), Cs.dtype)
j = 0
for k, i in enumerate(sel):
pl2 = plucker[i: i+2]
skew = empty(4, np.int16)
for a in range(2):
for b in range(2):
skew[2*a + b] = np.sign( Geom.check_plucker(pl1[a], pl2[b]) )
if abs(skew.sum()) == 4:
# Both rays skew on the same side of the other 2 rays
continue
# Find the intersection of the rays with the plane
c1, c2 = Cs[i], Cs[i+1]
q1, q2 = Qs[i], Qs[i+1]
t1, int1 = Geom.intersect_line_plane(c1, q1, plane)
t2, int2 = Geom.intersect_line_plane(c2, q2, plane)
if not int1 or not int2:
continue
intersects[j] = k
ps[j] = c1 + t1*q1
us[j] = (c2 + t2*q2) - ps[j]
j += 1
return ps[:j], us[:j], intersects[:j]
# @jit(nopython=True)
def cache_segment(Cs, Qs, planes, pluckers, frames, labels, raygrid, inds, eps):
starts = []
deltas = []
depths = []
sideps = []
sideds = []
ray_frames = []
planar_angles = []
sel_inds = []
j = 0
M = len(inds)
for ind in range(M):
ray_ind = inds[ind]
j += 1
print(j)
print("Checking ray_ind:", ray_ind)
# TODO: Check that this is still working correctly for the triangles
in_cone = raygrid.rays_near_ind(ray_ind)
print("Near: {}/{}".format(len(in_cone), raygrid.n_rays))
if len(in_cone) == 0:
print("Total grid for ray:", raygrid.ray2vox_count[ray_ind])
center = Cs[ray_ind]
ray = Qs[ray_ind: ray_ind+2].mean(axis=0)
ray /= Geom.norm3(ray)
plane = planes[ray_ind]
normal = plane[:3]
tangent = cross3(ray, normal)
print("Intersect planes")
# TODO: Use Plucker coords to quickly calculate intersection dual ray segments (infinite triangles?)
# Will need labels. Precalc Plucker before (do same as the planes)
# ps -> us, should describe how the other segment intersects this plane
# in terms of it's width
ps, us, intersecting = find_intersections(Cs, Qs, pluckers, ray_ind, plane, in_cone)
print("Project")
R = empty((3,3), ps.dtype)
R[0] = ray
R[1] = tangent
R[2] = normal
ps2d = dot(R[:2], (ps - center).T)
us2d = dot(R[:2], us.T)
# Solve for the intersection with the center of the segment
# [x, 0] = p + t*u
us2d[1, us2d[1] == 0] = 1e-10
ts = -ps2d[1] / us2d[1]
ds = ps2d[0] + ts*us2d[0]
# Keep only lines that are more vertical
# vert = np.abs(us2d[0] / (np.abs(us2d[1]) + 1e-12)) < 0.4
crossing = ps2d[1] * (ps2d[1] + us2d[1]) < 0
vert = np.arctan(us2d[0] / us2d[1]) < 0.85
# near = np.abs(ps2d[1]) < eps*ps2d[0]
forward = (ds > 0.2) & (ds < 1e6)
intersecting = in_cone[intersecting]
centers = dot(R, (Cs[intersecting] - center).T)
sidep = empty((2, len(intersecting)), Cs.dtype)
sidep[0] = centers[0]
sidep[1] = centers[2]
rays = empty((3, len(intersecting)), Cs.dtype)
rays[0] = ds - centers[0]
rays[1] = -centers[2]
rays[2] = -centers[1]
# rays = dot(R, Qs[intersecting].T)
sided = np.empty_like(sidep)
sided[0] = rays[0]
sided[1] = rays[1]
# hori = np.abs(sided[1] / sided[0]) < 1.5*eps
sel = np.where(forward & crossing)[0]
ps2d = ps2d[:,sel]
us2d = us2d[:,sel]
starts.append( ps2d )
deltas.append( us2d )
depths.append( ds[sel] )
ray_frames.append( frames[ intersecting[sel] ] )
sideps.append( sidep[:, sel] )
sideds.append( 1.5*sided[:, sel] )
planar_angles.append( np.arctan(centers[1, sel]/centers[2, sel]) )
# ray_dot = rays[1, sel] / norm(rays[:,sel], axis=0)
# planar_angles.append( np.arctan(ray_dot) )
sel_inds.append(intersecting[sel])
return starts, deltas, depths, ray_frames, sideps, sideds, sel_inds, planar_angles
def visualize_segments(seq):
edge_dir = os.path.join(seq, 'edges', '*.jpg')
paths = glob.glob(edge_dir)
fig = pynutmeg.figure('segments', 'figs/segments.qml')
fig.set_gui('figs/segments_gui.qml')
fig.set('ax.im', xOffset=-0.5, yOffset=-0.5)
nextframe = fig.parameter('nextframe')
nextframe.wait_changed()
prv = fig.parameter('prev')
nxt = fig.parameter('next')
for p in paths[::50]:
print("\nReading in {}".format(p))
E = IO.imread(p)
pts, labels = Contours.find_contours_edge(E, low=30, high=50, min_length=10)
J, I = pts.T
show = np.empty_like(E)
show[:] = 255
show[I, J] = 255 - E[I, J]
fig.set('ax.im', binary=show)
label = 0
x = empty(0)
y = empty(0)
while True:
if nextframe.changed:
nextframe.read()
break
label_changed = nxt.changed or prv.changed
if nxt.changed:
label += 1
elif prv.changed:
label = max(0, label - 1)
if label_changed:
print("Calc for label {}".format(label))
prv.read()
nxt.read()
sel = np.where(labels == label)[0]
x = J[sel].astype(float)
y = I[sel].astype(float)
|
umitproject/network-admin | netadmin/plugins/utils.py | Python | agpl-3.0 | 1,010 | 0.009901 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is | distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import U | ser
def get_user_objects(user_object,widget):
if user_object==None:
user_name = widget.user
else:
user_name = User.objects.get(username = user_object)
return user_name
|
C4ptainCrunch/libulb | libulb/catalog/course.py | Python | bsd-3-clause | 3,809 | 0.002371 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import requests
from bs4 import BeautifulSoup
import re
import six
from libulb.tools import Slug
class Course:
def __init__(self, slug, year, infos):
self.slug = slug
self.year = year
self._fill(infos)
de | f __repr__(self):
return self.__unicode__().encode('utf-8') if six.PY2 else self.__unicode__()
d | ef __unicode__(self):
if self.credits:
return "<Course {}: {} ({} crédits)>".format(self.slug, self.name, self.credits)
else:
return "<Course {}: {}>".format(self.slug, self.name)
@classmethod
def get_from_slug(cls, slug, year):
slug = Slug.match_all(slug)
response = cls._query_ulb(year, slug)
if not response.ok:
raise Exception("Error with ulb")
soup = BeautifulSoup(response.text, "html.parser")
table = soup.find('table', 'bordertable')
tr_list = table('tr')
infos = {}
for line in tr_list:
if len(line('td')) != 2:
continue
key, val = line('td')
key = key.text.strip().strip("*").strip()
val = val.text.strip().strip("*").strip()
infos[key] = val
slug = "{}-{}-{}".format(slug.domain, slug.faculty, slug.number).lower()
return cls(slug, year, infos)
@classmethod
def _query_ulb(cls, year, slug):
url = "http://banssbfr.ulb.ac.be/PROD_frFR/bzscrse.p_disp_course_detail"
params = {
'cat_term_in': year,
'subj_code_in': slug.domain.upper(),
'crse_numb_in': slug.faculty.upper() + slug.number.upper(),
}
return requests.get(url, params=params)
def _fill(self, d):
self.name = d["Intitulé de l'unité d'enseignement"]
language = d.get("Langue d'enseignement", None)
if "français" in language:
self.language = "french"
elif "anglais" in language:
self.language = "english"
elif "néerlandais" in language:
self.language = "dutch"
else:
if "Enseigné en" in language:
self.language = language.replace("Enseigné en", "").strip()
else:
self.language = None
self.profs = []
prof_str = d.get("Titulaire(s) * [y inclus le coordonnateur]", None)
if not (prof_str is None or prof_str.strip() == ""):
for prof in prof_str.split(','):
prof = prof.replace("(coordonnateur)", "")
prof = prof.strip()
prof = prof.title()
self.profs.append(prof)
self.requirements = d.get("Connaissances et compétences pré-requises", None)
self.sections = []
sections_str = d.get("Programme(s) d'études comprenant l'unité d'enseignement", None)
# import ipdb; ipdb.set_trace()
if sections_str is not None and sections_str.strip() != "":
for section in sections_str.split("\n"):
match_section = re.match(r'^- ([A-Z1-9\-]{2,}) -', section.strip())
if match_section:
search = re.search(r'\(([0-9]+) (crédit|crédits), (optionnel|obligatoire)\)', section)
self.sections.append({
'section': match_section.group(1),
'credits': int(search.group(1)),
'required': search.group(3) == 'obligatoire'
})
if len(self.sections) != 0:
self.credits = max(map(lambda x: x['credits'], self.sections))
else:
self.credits = None
if __name__ == '__main__':
course = Course.get_from_slug('info-f-101', "201516")
|
RedHatQE/cfme_tests | cfme/tests/ansible/test_embedded_ansible_actions.py | Python | gpl-2.0 | 9,360 | 0.003205 | import fauxfactory
import pytest
from cfme import test_requirements
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.services.myservice import MyService
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.conf import cfme_data
from cfme.utils.conf import credentials
from cfme.utils.update import update
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.ignore_stream("upstream"),
pytest.mark.long_running,
pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE, scope="module"),
test_requirements.ansible,
]
@pytest.fixture(scope="module")
def wait_for_ansible(appliance):
appliance.server.settings.enable_server_roles("embedded_ansible")
appliance.wait_for_embedded_ansible()
yield
appliance.server.settings.disable_server_roles("embedded_ansible")
@pytest.fixture(scope="module")
def ansible_repository(appliance, wait_for_ansible):
repositories = appliance.collections.ansible_repositories
try:
repository = repositories.create(
name=fauxfactory.gen_alpha(),
url=cfme_data.ansible_links.playbook_repositories.embedded_ansible,
description=fauxfactory.gen_alpha())
except KeyError:
pytest.skip("Skipping since no such key found in yaml")
view = navigate_to(repository, "Details")
wait_for(
lambda: view.entities.summary("Properties").get_text_of("Status") == "successful",
timeout=60,
fail_func=view.toolbar.refresh.click
)
yield repository
repository.delete_if_exists()
@pytest.fixture(scope="module")
def ansible_catalog_item(appliance, ansible_repository):
cat_item = appliance.collections.catalog_items.create(
appliance.collections.catalog_items.ANSIBLE_PLAYBOOK,
fauxfactory.gen_alphanumeric(),
fauxfactory.gen_alphanumeric(),
display_in_catalog=True,
provisioning={
"repository": ansible_repository.name,
"playbook": "dump_all_variables.yml",
"machine_credential": "CFME Default Credential",
"create_new": True,
"provisioning_dialog_name": fauxfactory.gen_alphanumeric()
}
)
yield cat_item
cat_item.delete_if_exists()
@pytest.fixture(scope="module")
def ansible_action(appliance, ansible_catalog_item):
action_collection = appliance.collections.actions
action = action_collection.create(
fauxfactory.gen_alphanumeric(),
action_type="Run Ansible Playbook",
action_values={
"run_ansible_playbook": {
"playbook_catalog_item": ansible_catalog_item.name
}
}
)
yield action
action.delete_i | f_exists()
@pytest.fixture(scope="module")
def policy_for_testing(appliance, full_template_vm_modscope, provider, ansible_action):
vm = full_template_vm_modscope
policy = appliance.collections.policies.create(
VMControlPolicy,
fauxfactory.gen_alpha( | ),
scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vm.name)
)
policy.assign_actions_to_event("Tag Complete", [ansible_action.description])
policy_profile = appliance.collections.policy_profiles.create(
fauxfactory.gen_alpha(), policies=[policy])
provider.assign_policy_profiles(policy_profile.description)
yield
if policy.exists:
policy.assign_events()
provider.unassign_policy_profiles(policy_profile.description)
policy_profile.delete()
policy.delete()
@pytest.fixture(scope="module")
def ansible_credential(wait_for_ansible, appliance, full_template_modscope):
credential = appliance.collections.ansible_credentials.create(
fauxfactory.gen_alpha(),
"Machine",
username=credentials[full_template_modscope.creds]["username"],
password=credentials[full_template_modscope.creds]["password"]
)
yield credential
credential.delete_if_exists()
@pytest.fixture
def service_request(appliance, ansible_catalog_item):
request_desc = "Provisioning Service [{0}] from [{0}]".format(ansible_catalog_item.name)
_service_request = appliance.collections.requests.instantiate(request_desc)
yield _service_request
_service_request.delete_if_exists()
@pytest.fixture
def service(appliance, ansible_catalog_item):
service_ = MyService(appliance, ansible_catalog_item.name)
yield service_
if service_.exists:
service_.delete()
@pytest.mark.tier(3)
def test_action_run_ansible_playbook_localhost(request, ansible_catalog_item, ansible_action,
policy_for_testing, full_template_vm_modscope, ansible_credential, service_request,
service):
"""Tests a policy with ansible playbook action against localhost.
Polarion:
assignee: sbulage
initialEstimate: 1/6h
casecomponent: Ansible
"""
with update(ansible_action):
ansible_action.run_ansible_playbook = {"inventory": {"localhost": True}}
added_tag = full_template_vm_modscope.add_tag()
request.addfinalizer(lambda: full_template_vm_modscope.remove_tag(added_tag))
wait_for(service_request.exists, num_sec=600)
service_request.wait_for_request()
view = navigate_to(service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == "localhost"
assert view.provisioning.results.get_text_of("Status") == "successful"
@pytest.mark.tier(3)
def test_action_run_ansible_playbook_manual_address(request, ansible_catalog_item, ansible_action,
policy_for_testing, full_template_vm_modscope, ansible_credential, service_request,
service):
"""Tests a policy with ansible playbook action against manual address.
Polarion:
assignee: sbulage
initialEstimate: 1/6h
casecomponent: Ansible
"""
vm = full_template_vm_modscope
with update(ansible_catalog_item):
ansible_catalog_item.provisioning = {"machine_credential": ansible_credential.name}
with update(ansible_action):
ansible_action.run_ansible_playbook = {
"inventory": {
"specific_hosts": True,
"hosts": vm.ip_address
}
}
added_tag = vm.add_tag()
request.addfinalizer(lambda: vm.remove_tag(added_tag))
wait_for(service_request.exists, num_sec=600)
service_request.wait_for_request()
view = navigate_to(service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == vm.ip_address
assert view.provisioning.results.get_text_of("Status") == "successful"
@pytest.mark.tier(3)
def test_action_run_ansible_playbook_target_machine(request, ansible_catalog_item, ansible_action,
policy_for_testing, full_template_vm_modscope, ansible_credential, service_request,
service):
"""Tests a policy with ansible playbook action against target machine.
Polarion:
assignee: sbulage
initialEstimate: 1/6h
casecomponent: Ansible
"""
vm = full_template_vm_modscope
with update(ansible_action):
ansible_action.run_ansible_playbook = {"inventory": {"target_machine": True}}
added_tag = vm.add_tag()
request.addfinalizer(lambda: vm.remove_tag(added_tag))
wait_for(service_request.exists, num_sec=600)
service_request.wait_for_request()
view = navigate_to(service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == vm.ip_address
assert view.provisioning.results.get_text_of("Status") == "successful"
@pytest.mark.tier(3)
def test_action_run_ansible_playbook_unavailable_address(request, ansible_catalog_item,
full_template_vm_modscope, ansible_action, policy_for_testing, ansible_credential,
service_request, service):
"""Tests a policy with ansible playbook action against unavailable address.
Polarion:
assignee: sbulage
initialEstimate: 1/6h
casecomponent: Ansible
"""
vm = full_template_vm_modscope
with update(ansible_catalog_item):
ansible |
Stanford-Online/edx-ora2 | openassessment/fileupload/backends/swift.py | Python | agpl-3.0 | 3,388 | 0.001476 | '''
Add in /edx/app/edxapp/edx-platform/lms/envs/aws.py:
ORA2_SWIFT_URL = AUTH_TOKENS["ORA2_SWIFT_URL"]
ORA2_SWIFT_KEY = AUTH_TOKENS["ORA2_SWIFT_KEY"]
Add in /edx/app/edxapp/lms.auth.json
"ORA2_SWIFT_URL": "https://EXAMPLE",
"ORA2_SWIFT_KEY": "EXAMPLE",
ORA2_SWIFT_KEY should correspond to Meta Temp-Url-Key configure in swift. Run
'swift stat -v' to get it.
'''
import logging
import urlparse
import requests
import swiftclient
from django.conf import settings
from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api")
# prefix paths with current version, in case we need to roll it at some point
SWIFT_BACKEND_VERSION = 1
class Backend(BaseBackend):
"""
Upload openassessment student files to swift
"""
def get_upload_url(self, key, content_type):
bucket_name, key_name = self._retrieve_parameters(key)
key, url = get_s | ettings()
try:
temp_url = swiftclient.utils.generate_temp_url(
path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
ke | y=key,
method='PUT',
seconds=self.UPLOAD_URL_TIMEOUT
)
return '%s://%s%s' % (url.scheme, url.netloc, temp_url)
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating an upload URL."
)
raise FileUploadInternalError(ex)
def get_download_url(self, key):
bucket_name, key_name = self._retrieve_parameters(key)
key, url = get_settings()
try:
temp_url = swiftclient.utils.generate_temp_url(
path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
key=key,
method='GET',
seconds=self.DOWNLOAD_URL_TIMEOUT
)
download_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
response = requests.get(download_url)
return download_url if response.status_code == 200 else ""
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating a download URL."
)
raise FileUploadInternalError(ex)
def remove_file(self, key):
bucket_name, key_name = self._retrieve_parameters(key)
key, url = get_settings()
try:
temp_url = swiftclient.utils.generate_temp_url(
path='%s/%s/%s' % (url.path, bucket_name, key_name),
key=key,
method='DELETE',
seconds=self.DOWNLOAD_URL_TIMEOUT)
remove_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
response = requests.delete(remove_url)
return response.status_code == 204
except Exception as ex:
logger.exception(
u"An internal exception occurred while removing object on swift storage."
)
raise FileUploadInternalError(ex)
def get_settings():
"""
Returns the swift key and a parsed url.
Both are generated from django settings.
"""
url = getattr(settings, 'ORA2_SWIFT_URL', None)
key = getattr(settings, 'ORA2_SWIFT_KEY', None)
url = urlparse.urlparse(url)
return key, url
|
zhaomr13/fruit | tests/test_this_module.py | Python | gpl-2.0 | 247 | 0 | import fruit
print locals()
print fruit.utils.pathformat.prefix("hello/good")
print fruit.utils.pathformat.prefix("good")
print fruit.utils.pat | hforma | t.suffix("hello/good")
print fruit.utils.pathformat.suffix("good")
a = 'hello my name is zhaomr'
|
xkcd1253/Mimi | flask/lib/python2.7/site-packages/flask_wtf/i18n.py | Python | gpl-2.0 | 1,803 | 0 | # coding: utf-8
"""
flask_wtf.i18n
~~~~~~~~~~~~~~
Internationalization support for Flask WTF.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
from flask import _request_ctx_stack
from wtforms.ext.i18n.utils import messages_path
from flask.ext.babel import get_locale
from speaklater import make_lazy_string
from babel import support
__all__ = ('Translations', 'translations')
def _get_tra | nslations():
"""Returns the correct gettext translations.
Copy from flask-babel with some modifications.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
# babel should be in extensions for get_l | ocale
if 'babel' not in ctx.app.extensions:
return None
translations = getattr(ctx, 'wtforms_translations', None)
if translations is None:
dirname = messages_path()
translations = support.Translations.load(
dirname, [get_locale()], domain='wtforms'
)
ctx.wtforms_translations = translations
return translations
def _gettext(string):
t = _get_translations()
if t is None:
return string
if hasattr(t, 'ugettext'):
return t.ugettext(string)
# Python 3 has no ugettext
return t.gettext(string)
def _ngettext(singular, plural, n):
t = _get_translations()
if t is None:
if n == 1:
return singular
return plural
if hasattr(t, 'ungettext'):
return t.ungettext(singular, plural, n)
# Python 3 has no ungettext
return t.ngettext(singular, plural, n)
class Translations(object):
def gettext(self, string):
return make_lazy_string(_gettext, string)
def ngettext(self, singular, plural, n):
return make_lazy_string(_ngettext, singular, plural, n)
translations = Translations()
|
SeaFalcon/Musicool_Pr | apps/music_database.py | Python | apache-2.0 | 622 | 0.037162 | # -*- coding: utf-8 -*-
# [{time: value(button)}{}]
#time 함수 해서time 저장
class Database(object):
def __init__(self):
self.music_database = []
#button은 키코드, timing은 현재시간.
def start_music(self,button, timing):
self.music_database.append({timing: button})
def put(self, storage):
self.database.append(storage)
def
def out(self):
return self.database
def start_music(button, timing, db):
db.append({timing:button})
music_database=[ | ]
start_music(85,"10: | 00:01",music_database)
start_music(90,"10:00:04",music_database)
print music_database |
cdubz/timestrap | core/migrations/0004_auto_20180204_1439.py | Python | bsd-2-clause | 553 | 0 | # Generated by Django 2.0.1 on 2018-02-04 19:39
from django.db import migr | ations, models
class Migration(migrations.Migration):
dependencies = [("core", "0003_auto_20171001_1018")]
operations = [
migrations.AddField(
model_name="entry",
name="datetime_end",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="entry",
name="datetime_start",
field=models.DateTimeField(bla | nk=True, null=True),
),
]
|
tchellomello/home-assistant | tests/components/bond/test_fan.py | Python | apache-2.0 | 9,312 | 0.001933 | """Tests for the Bond fan device."""
from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
SERVICE_SET_SPEED,
SPEED_OFF,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def turn_fan_on(
hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
"""Turn the fan on at the specified speed."""
service_data = {ATTR_ENTITY_ID: fan_id}
if speed:
service_data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_ON,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["fan.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_non_standard_speed_list(hass: core.HomeAssistant):
"""T | ests that the device is registered with custom speed list if number of supported speeds differs form 3."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
assert actual_speeds == [
fan.SPEED_OFF,
fan.SPEED_LOW,
fan.SPEED_MEDIUM,
fan.SPEED_HIGH | ,
]
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(1)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_MEDIUM)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(3)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_HIGH)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_fan_speed_with_no_max_seed(hass: core.HomeAssistant):
"""Tests that fans without max speed (increase/decrease controls) map speed to HA standard."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"no": "max_speed"},
state={"power": 1, "speed": 14},
)
assert hass.states.get("fan.name_1").attributes["speed"] == fan.SPEED_HIGH
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_on_fan_with_off_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_OFF)
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_set_speed_off(hass: core.HomeAssistant):
"""Tests that set_speed(off) command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
service_data={ATTR_ENTITY_ID: "fan.name_1", ATTR_SPEED: SPEED_OFF},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_turn_off_fan(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "fan.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is on."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is off."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is forward."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
|
qqalexqq/monkeys | app.py | Python | mit | 737 | 0 | """Monkeys app exercise."""
import os
import logging
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.menu import Menu
import views
def create_app(name_handler, config_object):
"""Application factory."" | "
app = Flask(name_handler)
app.config.from_object(config_object)
# Imports db and all models-related things
from models import db
db.init_app(app)
Bootstrap(app)
Menu(app)
app.register_blueprint(views.bp_monkey)
if os.environ.get('HEROKU') is not None:
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Application startup')
return app | |
euphorie/Euphorie | src/euphorie/client/tests/test_model.py | Python | gpl-2.0 | 20,931 | 0.001386 | # coding=utf-8
from AccessControl.PermissionRole import _what_not_even_god_should_do
from datetime import timedelta
from euphorie.client import config
from euphorie.client import model
from euphorie.client.tests.database import DatabaseTests
from euphorie.testing import EuphorieIntegrationTestCase
from plone import api
from plone.app.event.base import localized_now
from sqlalchemy.exc import StatementError
from z3c.saconfig import Session
try:
from unittest import mock
except ImportError:
import mock
def createSurvey():
session = Session()
account = model.Account(loginname=u"jane", password=u"secret")
session.add(account)
survey = model.SurveySession(titl | e=u"Session", zodb_path="survey", account=account)
session.add(survey)
session.flush()
return (session, survey)
class SurveySessionTests(EuphorieIntegrationTestCase):
def test_iface(self):
"""SurveySessions are marked by the ISurveySession interface"""
survey = createSurvey()[-1]
self.assertTrue(mode | l.ISurveySession.providedBy(survey))
def test_is_archived(self):
"""Verify that a session is archived when the archived attribute
is set and it is in the past
"""
session = model.SurveySession()
self.assertIsNone(session.archived)
self.assertFalse(session.is_archived)
session.archived = localized_now()
self.assertTrue(session.is_archived)
session.archived += timedelta(days=1)
self.assertFalse(session.is_archived)
def test_get_context_filter(self):
with api.env.adopt_user("admin"):
eu = api.content.create(
type="euphorie.clientcountry", id="eu", container=self.portal.client
)
eusector = api.content.create(
type="euphorie.clientsector", id="eusector", container=eu
)
api.content.create(
type="euphorie.survey", id="eusurvey", container=eusector
)
nl = api.content.create(
type="euphorie.clientcountry", id="nl", container=self.portal.client
)
nlsector = api.content.create(
type="euphorie.clientsector", id="nlsector", container=nl
)
api.content.create(
type="euphorie.survey", id="nlsurvey", container=nlsector
)
context_filter = model.SurveySession.get_context_filter(self.portal)
self.assertSetEqual(
{clause.value for clause in context_filter.right.element.clauses},
{u"eu/eusector/eusurvey", u"nl/nlsector/nlsurvey"},
)
context_filter = model.SurveySession.get_context_filter(self.portal.client)
self.assertSetEqual(
{clause.value for clause in context_filter.right.element.clauses},
{u"eu/eusector/eusurvey", u"nl/nlsector/nlsurvey"},
)
context_filter = model.SurveySession.get_context_filter(
self.portal.client.eu
)
self.assertSetEqual(
{clause.value for clause in context_filter.right.element.clauses},
{u"eu/eusector/eusurvey"},
)
context_filter = model.SurveySession.get_context_filter(self.portal.sectors)
self.assertFalse(context_filter)
def testNoChildren(self):
(ses, survey) = createSurvey()
root = survey.addChild(
model.Module(title=u"Root", module_id="1", zodb_path="1")
)
ses.add(root)
ses.flush()
self.assertEqual(root.children().count(), 0)
def testAddChild(self):
(ses, survey) = createSurvey()
root = survey.addChild(
model.Module(title=u"Root", module_id="1", zodb_path="1")
)
ses.add(root)
root.addChild(model.Module(title=u"Module", module_id="1", zodb_path="1/1"))
ses.flush()
self.assertEqual(root.children().count(), 1)
def testChildOrder(self):
(ses, survey) = createSurvey()
root = survey.addChild(
model.Module(title=u"Root", module_id="1", zodb_path="1")
)
ses.add(root)
ses.flush()
root.addChild(model.Module(title=u"Profile 5", module_id="5", zodb_path="1/5"))
root.addChild(model.Module(title=u"Profile 1", module_id="1", zodb_path="1/1"))
root.addChild(model.Module(title=u"Profile 3", module_id="3", zodb_path="1/3"))
ses.flush()
self.assertEqual([c.module_id for c in list(root.children())], [u"5", "1", "3"])
def testReset_NoChildren(self):
(ses, survey) = createSurvey()
survey.reset()
children = ses.query(model.SurveyTreeItem.id).filter(
model.SurveyTreeItem.session == survey
)
self.assertEqual(children.count(), 0)
def testReset_SingleChild(self):
(ses, survey) = createSurvey()
root = survey.addChild(
model.Module(title=u"Root", module_id="1", zodb_path="1")
)
ses.add(root)
children = ses.query(model.SurveyTreeItem.id).filter(
model.SurveyTreeItem.session == survey
)
self.assertEqual(children.count(), 1)
survey.reset()
self.assertEqual(children.count(), 0)
def testHasTree_NoChildren(self):
(ses, survey) = createSurvey()
self.assertEqual(survey.hasTree(), False)
def testHasTree_SingleChild(self):
(ses, survey) = createSurvey()
root = survey.addChild(
model.Module(title=u"Root", module_id="1", zodb_path="1")
)
ses.add(root)
self.assertEqual(survey.hasTree(), True)
class RiskPresentFilterTests(EuphorieIntegrationTestCase):
def createData(self):
(self.session, self.survey) = createSurvey()
self.mod1 = model.Module(
title=u"Module 1", module_id="1", zodb_path="1", skip_children=False
)
self.survey.addChild(self.mod1)
self.q1 = model.Risk(
title=u"Risk 1",
risk_id="1",
zodb_path="1/1",
type="risk",
identification="no",
)
self.mod1.addChild(self.q1)
def query(self):
return self.session.query(model.SurveyTreeItem).filter(
model.RISK_PRESENT_FILTER
)
def testValidRisk(self):
self.createData()
self.assertEqual(self.query().count(), 1)
def testIncludeTop5(self):
self.createData()
self.q1.risk_type = "top5"
self.assertEqual(self.query().count(), 1)
def testNoPresentRisk(self):
self.createData()
self.q1.identification = "n/a"
self.assertEqual(self.query().count(), 0)
class RiskPresentNoTop5FilterTests(EuphorieIntegrationTestCase):
def createData(self):
(self.session, self.survey) = createSurvey()
self.mod1 = model.Module(
title=u"Module 1", module_id="1", zodb_path="1", skip_children=False
)
self.survey.addChild(self.mod1)
self.q1 = model.Risk(
title=u"Risk 1",
risk_id="1",
zodb_path="1/1",
type="risk",
identification="no",
)
self.mod1.addChild(self.q1)
def query(self):
return self.session.query(model.SurveyTreeItem).filter(
model.RISK_PRESENT_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER
)
def testValidRisk(self):
self.createData()
self.assertEqual(self.query().count(), 1)
def testSkipTop5(self):
self.createData()
self.q1.risk_type = "top5"
self.assertEqual(self.query().count(), 0)
def testSkipPolicy(self):
self.createData()
self.q1.risk_type = "policy"
self.assertEqual(self.query().count(), 0)
def testNoPresentRisk(self):
self.createData()
self.q1.identification = "n/a"
self.assertEqual(self.query().count(), 0)
class ModuleWithRiskFilterTests(EuphorieIntegrationTestCase):
def createData(self):
(self.session, self.survey) = createSurvey()
self.mod1 = mod |
saltstack/salt | salt/modules/ebuildpkg.py | Python | apache-2.0 | 39,670 | 0.001235 | """
Support for Portage
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
:optdepends: - portage Python adapter
For now all package names *MUST* include the package category,
i.e. ``'vim'`` will not work, ``'app-editors/vim'`` will.
"""
import copy
import datetime
import logging
import os
import re
import salt.utils.args
import salt.utils.compat
import salt.utils.data
import salt.utils.functools
import salt.utils.path
import salt.utils.pkg
import salt.utils.systemd
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError
HAS_PORTAGE = False
try:
import portage
HAS_PORTAGE = True
except ImportError:
import os
import sys
if os.path.isdir("/usr/lib/portage/pym"):
try:
# In a virtualenv, the portage python path needs to be manually added
sys.path.insert(0, "/usr/lib/portage/pym")
import portage
HAS_PORTAGE = True
except ImportError:
pass
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
"""
Confirm this module is on a Gentoo based system
"""
if HAS_PORTAGE and __grains__["os"] == "Gentoo":
return __virtualname__
return (
False,
"The ebuild execution module cannot be loaded: either the system is not Gentoo"
" or the portage python library is not available.",
)
def _vartree():
import portage # pylint: disable=3rd-party-module-not-gated
portage = salt.utils.compat.reload(portage)
return portage.db[portage.root]["vartree"]
def _porttree():
import portage # pylint: disable=3rd-party-module-not-gated
portage = salt.utils.compat.reload(portage)
return portage.db[portage.root]["porttree"]
def _p_to_cp(p):
try:
ret = portage.dep_getkey(p)
if ret:
return ret
except portage.exception.InvalidAtom:
pass
try:
ret = _porttree().dbapi.xmatch("bestmatch-visible", p)
if ret:
return portage.dep_getkey(ret)
except portage.exception.InvalidAtom:
pass
try:
ret = _porttree().dbapi.xmatch("match-all", p)
if ret:
return portage.cpv_getkey(ret[0])
except portage.exception.InvalidAtom:
pass
return None
def _allnodes():
if "portage._allnodes" in __context__:
return __context__["portage._allnodes"]
else:
ret = _porttree().getallnodes()
__context__["portage._allnodes"] = ret
return ret
def _cpv_to_cp(cpv):
try:
ret = portage.dep_getkey(cpv)
if ret:
return ret
except portage.exception.InvalidAtom:
pass
try:
ret = portage.cpv_getkey(cpv)
if ret:
return ret
except portage.exception.InvalidAtom:
pass
return cpv
def _cpv_to_version(cpv):
return portage.versions.cpv_getversion(cpv)
def _process_emerge_err(stdout, stderr):
"""
Used to parse emerge output to provide meaningful output when emerge fails
"""
ret = {}
rexp = re.compile(r"^[<>=][^ ]+/[^ ]+ [^\n]+", re.M)
slot_conflicts = re.compile(r"^[^ \n]+/[^ ]+:[^ ]", re.M).findall(stderr)
if slot_conflicts:
ret["slot conflicts"] = slot_conflicts
blocked = re.compile(
r"(?m)^\[blocks .+\] " r"([^ ]+/[^ ]+-[0-9]+[^ ]+)" r".*$"
).findall(stdout)
unsatisfied = re.compile(r"Error: The above package list contains").findall(stderr)
# If there were blocks and emerge could not resolve it.
if blocked and unsatisfied:
ret["blocked"] = blocked
sections = re.split("\n\n", stderr)
for section in sections:
if "The following keyword changes" in section:
ret["keywords"] = rexp.findall(section)
elif "The following license changes" in section:
ret["license"] = rexp.findall(section)
elif "The following USE changes" in section:
ret["use"] = rexp.findall(section)
elif "The following mask changes" in section:
ret["mask"] = rexp.findall(section)
return ret
def check_db(*names, **kwargs):
"""
.. versionadded:: 0.17.0
Returns a dict containing the following information for each specified
package:
1. A key ``found``, which will be a boolean value denoting if a match was
found in the package database.
2. If ``found`` is ``False``, then a second key called ``suggestions`` will
be present, which will contain a list of possible matches. This list
will be empty if the package name was specified in ``category/pkgname``
format, since the suggestions are only intended to disambiguate
ambiguous package names (ones submitted without a category).
CLI Examples:
.. code-block:: bash
salt '*' pkg.check_db <package1> <package2> <package3>
"""
### NOTE: kwargs is not used here but needs to be present due to it being
### required in the check_db function in other package providers.
ret = {}
for name in names:
if name in ret:
log.warning("pkg.check_db: Duplicate package name '%s' submitted", name)
continue
if "/" not in name:
ret.setdefault(name, {})["found"] = False
ret[name]["suggestions"] = porttree_matches(name)
else:
ret.setdefault(name, {})["found"] = name in _allnodes()
if ret[name]["found"] is False:
ret[name]["suggestions"] = []
return ret
def ex_mod_init(low):
"""
If the config option ``ebuild.enforce_nice_config`` is set to True, this
module will enforce a nice tree structure for /etc/portage/package.*
configuration files.
.. versionadded:: 0.17.0
Initial automatic enforcement added when pkg is used on a Gentoo system.
.. versionchanged:: 2014.7.0
Configure option a | dded to make this behaviour optional, defaulting to
off.
.. seealso::
``ebuild.ex_mod_init`` is called automatically when a state invokes a
pkg state on a Gentoo system.
:py:func:`salt.states.pkg.mod_init`
``ebuild.ex_mod_init`` uses ``portage_config.enforce_nice_config`` to do
the lifting.
:py:func:`salt.modules.portage_config.enforce_nice_config`
| CLI Example:
.. code-block:: bash
salt '*' pkg.ex_mod_init
"""
if __salt__["config.get"]("ebuild.enforce_nice_config", False):
__salt__["portage_config.enforce_nice_config"]()
return True
def latest_version(*names, **kwargs):
"""
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
"""
refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
if not names:
return ""
# Refresh before looking for the latest version available
if refresh:
refresh_db()
ret = {}
# Initialize the dict with empty strings
for name in names:
ret[name] = ""
installed = _cpv_to_version(_vartree().dep_bestmatch(name))
avail = _cpv_to_version(_porttree().dep_bestmatch(name))
if avail and (
not installed
or salt.utils.versions.compare(
ver1=installed, oper="<", ver2=avail, cmp_func=version_cmp
)
):
ret[name] = avail
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(
latest_version, "available_version"
)
def _get_upgradable(backtrack=3):
"""
Utility fu |
odoo-arg/odoo_l10n_ar | l10n_ar_account_check_sale/__manifest__.py | Python | agpl-3.0 | 1,674 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# b | y the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or F | ITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'l10n_ar_account_check_sale',
'version': '1.0',
'summary': 'Venta de cheques de terceros',
'description': """
Cheques
==================================
Venta de cheques de terceros.
""",
'author': 'OPENPYME S.R.L.',
'website': 'http://www.openpyme.com.ar',
'category': 'Accounting',
'depends': [
'l10n_ar_account_check',
],
'data': [
'data/sold_check_data.xml',
'views/account_third_check_view.xml',
'views/account_sold_check_view.xml',
'wizard/wizard_sell_check_view.xml',
'security/ir.model.access.csv',
'data/security.xml',
],
'active': False,
'application': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mbuhot/mbuhot-euler-solutions | python/problem-001.py | Python | mit | 857 | 0.01867 | #! /usr/bin/env python
#if we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
def BruteForceSolution():
multiplesOf3 = range(3, 1000, 3)
multiplesOf5 = range(5, 1000, 5)
multiplesOfBoth = range(3*5, 1000, 3 | *5)
result = sum(multiplesOf3) + sum(multiplesOf5) - sum(multiplesOfBoth)
print("Brute force result: %d" % result)
def | MathSolution():
def SumOfArithmeticSequence(a, n):
return a * (n + 1) * n // 2
multiplesOf3 = SumOfArithmeticSequence(3, 999 // 3)
multiplesOf5 = SumOfArithmeticSequence(5, 999 // 5)
multiplesOfBoth = SumOfArithmeticSequence(15, 999 // 15)
result = multiplesOf3 + multiplesOf5 - multiplesOfBoth
print("Math result: %d" % result)
BruteForceSolution()
MathSolution()
|
N3evin/AmiiboAPI | routes/amiibo.py | Python | mit | 5,303 | 0.001131 | from flask import Blueprint, request, jsonify, abort
from amiibo.amiibo import AmiiboHex, GameSeriesHex, CharacterHex, VariantHex, AmiiboTypeHex, AmiiboModelHex, AmiiboSeriesHex, Hex
from amiibo.manager import AmiiboManager
from amiibo.filterable import AmiiboCollection
amiiboApp = Blueprint("amiibo", __name__)
amiibo_manager = AmiiboManager.getInstance()
# Get the amiibo
@amiiboApp.route('/api/amiibo/', methods=['GET'])
def route_api_amiibo():
args = request.args
if 'id' in args:
try:
id_ = AmiiboHex(args['id'].strip())
except ValueError:
abort(400)
result = amiibo_manager.amiibos.get(id_)
else:
filters = {}
if 'head' in args:
try:
filters['head'] = Hex(args['head'].strip())
except ValueError:
abort(400)
if 'tail' in args:
try:
filters['tail'] = Hex(args['tail'].strip())
except ValueError:
abort(400)
if 'name' in args:
filters['name'] = args['name'].strip()
if 'gameseries' in args:
game_series = args['gameseries'].strip()
if game_series.startswith('0x'):
try:
filters['game_series_id'] = GameSeriesHex(game_series)
except ValueError:
abort(400)
else:
filters['game_series_name'] = game_series
if 'switch_titleid' in args:
filters['switch_titleid'] = args['switch_titleid']
if 'wiiu_titleid' in args:
filters['wiiu_titleid'] = args['wiiu_titleid']
if '3ds_titleid' in args:
filters['3ds_titleid'] = args['3ds_titleid']
if 'character' in args:
character = args['character'].strip()
if character.startswith('0x'):
try:
filters['character_id'] = CharacterHex(character)
except ValueError:
abort(400)
else:
filters['character_name'] = character
if 'variant' in args:
try:
filters['variant_id'] = VariantHex(args['variant'].strip())
except ValueError:
abort(400)
if 'type' in args:
amiibo_type = args['type'].strip()
if amiibo_type.startswith('0x'):
try:
filters['amiibo_type_id'] = AmiiboTypeHex(amiibo_type)
except ValueError:
abort(400)
else:
filters['amiibo_type_name'] = amiibo_type
if 'amiibo_model' in args:
filters['amiibo_model_id'] = AmiiboModelHex(args['amiibo_model'].strip())
if 'amiiboSeries' in args:
amiibo_series = args['amiiboSeries'].strip()
if amiibo_series.startswith('0x'):
try:
filters['amiibo_series_id'] = AmiiboSeriesHex(amiibo_series)
except ValueError:
abort(400)
else:
filters['amiibo_series_name'] = amiibo_series
result = AmiiboCollection()
print(args)
if 'showusage' in args:
if filters != {}:
result = amiibo_manager.amiibosfull.filter(**filters)
else:
result = amiibo_manager.a | miibosfull
elif 'showgames' in args:
if filters != {}:
result = amiibo_manager.amiibosfullwithoutusage.filter(**fil | ters)
else:
result = amiibo_manager.amiibosfullwithoutusage
else:
if filters != {}:
for amiibo in amiibo_manager.amiibosfull.filter(**filters):
result.add(amiibo_manager.amiibos[amiibo.id])
else:
result = amiibo_manager.amiibos
if 'sort' in args:
values = {
'id': 'id',
'head': 'head',
'tail': 'tail',
'name': 'name',
'gameseries': 'gameseries',
'gameseries_id': 'game_series_id',
'gameseries_name': 'game_series_name',
'character': 'character_name',
'character_id': 'character_id',
'character_name': 'character_name',
'variant': 'variant_id',
'variant_id': 'variant_id',
'type': 'amiibo_type_id',
'type_id': 'amiibo_type_id',
'type_name': 'amiibo_type_name',
'amiibo_model': 'amiibo_model_id',
'amiibo_model_id': 'amiibo_model_id',
'series': 'amiibo_series_name',
'series_id': 'amiibo_series_id',
'series_name': 'amiibo_series_name',
'release_na': 'release_na',
'release_jp': 'release_jp',
'release_eu': 'release_eu',
'release_au': 'release_au',
}
result = result.sort(*[
values[value]
for value in args['sort'].split(',')
if value in values
])
if not result:
abort(404)
respond = jsonify({'amiibo': result})
return respond |
pprett/statsmodels | statsmodels/tools/eval_measures.py | Python | bsd-3-clause | 8,338 | 0.003118 | # -*- coding: utf-8 -*-
"""some measures for evaluation of prediction, tests and model selection
Created on Tue Nov 08 15:23:20 2011
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
def mse(x1, x2, axis=0):
'''mean squared error
'''
return np.mean((x1-x2)**2, axis=axis)
def rmse(x1, x2, axis=0):
'''root mean squared error
'''
return np.sqrt(mse(x1, x2, axis=axis))
def maxabs(x1, x2, axis=0):
'''maximum absolute error
'''
return np.max(np.abs(x1-x2), axis=axis)
def meanabs(x1, x2, axis=0):
'''mean absolute error
'''
return np.mean(np.abs(x1-x2), axis=axis)
def medianabs(x1, x2, axis=0):
'''median absolute error
'''
return np.median(np.abs(x1-x2), axis=axis)
def bias(x1, x2, axis=0):
'''bias, mean error
'''
return np.mean(x1-x2, axis=axis)
def medianbias(x1, x2, axis=0):
'''median bias, median error
'''
return np.median(x1-x2, axis=axis)
def vare(x1, x2, ddof=0, axis=0):
'''variance of error
'''
return np.var(x1-x2, ddof=0, axis=axis)
def stde(x1, x2, ddof=0, axis=0):
'''variance of error
'''
return np.std(x1-x2, ddof=0, axis=axis)
def iqr(x1, x2, axis=0):
'''interquartile range of error
rounded index, no interpolations
this could use newer numpy function instead
'''
if axis is None:
x1 = np.ravel(x1)
x2 = np.ravel(x2)
axis = 0
xdiff = np.sort(x1 - x2)
nobs = x1.shape[axis]
idx = np.round((nobs-1) * np.array([0.25, 0.75])).astype(int)
sl = [slice(None)] * xdiff.ndim
sl[axis] = idx
iqr = np.diff(xdiff[sl], axis=axis)
iqr = np.squeeze(iqr) #drop reduced dimension
if iqr.size == 1:
return iqr #[0]
else:
return iqr
# Information Criteria
#---------------------
def aic(llf, nobs, df_modelwc):
'''Akaike information criterion
Parameters
----------
llf : float
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
References
----------
http://en.wikipedia.org/wiki/Akaike_information_criterion
'''
return -2. * llf + 2. * df_modelwc
def aicc(llf, nobs, df_modelwc):
'''Akaike information criterion (AIC) with small sample correction
Parameters
----------
llf : float
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
References
----------
http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
'''
return -2. * llf + 2. * df_modelwc * nobs / (nobs - df_modelwc - 1.)
#float division
def bic(llf, nobs, df_modelwc):
'''Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
llf : float
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
References
----------
http://en.wikipedia.org/wiki/Bayesian_information_criterion
'''
return -2. * llf + np.log(nobs) * df_modelwc
def hqic(llf, nobs, df_modelwc):
'''Hannan-Quinn information criterion (HQC)
Parameters
----------
llf : float
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
References
----------
Wikipedia doesn't say much
'''
return -2. * llf + 2 * np.log(np.log(nobs)) * df_modelwc
#IC based on residual sigma
def aic_sigma(sigma2, nobs, df_modelwc, islog=False):
'''Akaike information criterion
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. The information criteria should be used to compare
only comparable models.
For example, AIC is defined in terms of the loglikelihood as
-2 llf + 2 k
in terms of sigma_hat
log(sigma_hat^2) + 2 k / n
in terms of the determinant of Sigma_hat
log(|sigma_hat|) + 2 k / n
Note: In our definition we do not divide by n in the log-likelihood
version.
TODO: Latex math
reference for example lecture notes by Herman Bierens
References
----------
http://en.wikipedia.org/wiki/Akaike_information_criterion
'''
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + aic(0, nobs, df_modelwc) / nobs
def aicc_sigma(sigma2, nobs, df_modelwc, islog=False):
'''Akaike information criterion (AIC) with small sample correction
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
| number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should be used to compare for comparable models.
References
----------
http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
'''
if not islog:
sigma2 = | np.log(sigma2)
return sigma2 + aicc(0, nobs, df_modelwc) / nobs
#float division
def bic_sigma(sigma2, nobs, df_modelwc, islog=False):
'''Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should be used to compare for comparable models.
References
----------
http://en.wikipedia.org/wiki/Bayesian_information_criterion
'''
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + bic(0, nobs, df_modelwc) / nobs
def hqic_sigma(sigma2, nobs, df_modelwc, islog=False):
'''Hannan-Quinn information criterion (HQC)
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should be used to compare for comparable models.
References
----------
xxx
'''
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + hqic(0, nobs, df_modelwc) / nobs
#from var_model.py, VAR only? separates neqs and k_vars per equation
#def fpe_sigma():
# ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
__all__ = [maxabs |
Josowsky/Simple-Site-Monitor | scanner_engine/apps.py | Python | mit | 143 | 0 | from __future__ import unicode_literals
from django.apps import AppConfig
| class ScannerEngineConfig(AppConfig):
na | me = 'scanner_engine'
|
Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/azure/servicebus/aio/__init__.py | Python | mit | 725 | 0 | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------- | -----------------
from ._servicebus_sender_async import ServiceBusSender
from ._servicebus_receiver_async import ServiceBusReceiver
from ._servicebus_session_async import ServiceBusSession
from ._servicebus_client_async import ServiceBusClient
from ._async_auto_lock_renewer import AutoLockRenewer
__all__ = [
"ServiceBusClient",
"ServiceBusSender",
"ServiceBusReceiver",
"ServiceBus | Session",
"AutoLockRenewer",
]
|
zhangxq5012/sky_engine | mojo/devtools/common/android_gdb/install_remote_file_reader.py | Python | bsd-3-clause | 903 | 0.006645 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import android_gdb.config as config
import subprocess
import tempfile
def install(gsutil, adb='adb'):
verification_call_output = subprocess.check_output(
[adb, 'shell', 'ls', config.REMO | TE_FILE_READER_DEVICE | _PATH])
if config.REMOTE_FILE_READER_DEVICE_PATH != verification_call_output.strip():
with tempfile.NamedTemporaryFile() as temp_file:
subprocess.check_call([gsutil, 'cp', config.REMOTE_FILE_READER_CLOUD_PATH,
temp_file.name])
subprocess.check_call([adb, 'push', temp_file.name,
config.REMOTE_FILE_READER_DEVICE_PATH])
subprocess.check_call([adb, 'shell', 'chmod', '777',
config.REMOTE_FILE_READER_DEVICE_PATH])
|
arokem/scipy | scipy/io/harwell_boeing/tests/test_hb.py | Python | bsd-3-clause | 2,366 | 0.000845 | from __future__ import division, print_function, absolute_import
from io import StringIO
import tempfile
import numpy as np
from numpy.testing import assert_equal, \
assert_array_almost_equal_nulp
from scipy.sparse import coo_matrix, csc_matrix, rand
from scipy.io import hb_read, hb_write
SIMPLE = """\
No Title |No Key
9 4 1 4
RUA 100 100 10 0
(26I3) (26I3) (3E23.15)
1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
37 71 89 18 30 45 70 19 25 52
2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
6.912334991524289e-01
"""
SIMPLE_MATRIX = coo_matrix(
((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
0.0661749042483, 0.887037034319, 0.419647859016,
0.564960307211, 0.993442388709, 0.691233499152,),
(np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
[0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
def assert_csc_almost_equal(r, l):
r = csc_matri | x(r)
l = csc_matrix(l)
assert_equal(r.indptr, l.indptr)
assert_equal(r.indices, l.indices)
assert_array_almost_equal_nulp(r.data, l.data, 10000)
class TestHBReader(object):
def test_simple(self):
m = hb_read(StringIO | (SIMPLE))
assert_csc_almost_equal(m, SIMPLE_MATRIX)
class TestHBReadWrite(object):
def check_save_load(self, value):
with tempfile.NamedTemporaryFile(mode='w+t') as file:
hb_write(file, value)
file.file.seek(0)
value_loaded = hb_read(file)
assert_csc_almost_equal(value, value_loaded)
def test_simple(self):
random_matrix = rand(10, 100, 0.1)
for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
matrix = random_matrix.asformat(matrix_format, copy=False)
self.check_save_load(matrix)
|
fadushin/esp8266 | micropython/neolamp/neolamp/scheduler.py | Python | bsd-2-clause | 5,621 | 0.009251 | #
# Copyright (c) dushin.net All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of dushin.net nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import math
import utime
import logging
import uasyncio
import core.util
import core.task
def ensure_schedule(schedule) :
assert type(schedule) is dict
assert 'dow' in schedule
ensure_dow(schedule['dow'])
assert 'seq' in schedule
ensure_seq(schedule['seq'])
def ensure_dow(dow) :
assert type(dow) is list
assert all(map(lambda i : type(i) is int and i in [1,2,3,4,5,6,7], dow))
def ensure_seq(seq) :
assert type(seq) is list
assert all(map(lambda e : ensure_element(e), seq))
def ensure_element(e) :
assert type(e) is dict
assert 'time' in e
ensure_time(e['time'])
assert 'color_name' in e
ensure_color_name(e['color_name'])
return True
def ensure_time(t) :
assert type(t) is dict
assert 'h' in t and type(t['h']) is int and 0 <= t['h'] and t['h'] <= 23
assert 'm' in t and type(t['m']) is int and 0 <= t['m'] and t['m'] <= 59
assert 's' in t and type(t['s']) is int and 0 <= t['s'] and t['s'] <= 59
def ensure_color_name(color_name) :
assert type(color_name) is str
class Scheduler(core.task.TaskBase) :
def __init__(self, lamp, tzd, sche | dules, color_specs, sleep_ms=1342, verbose=False) :
core.task.TaskBase.__init__(self, sleep_ms)
self.lamp = lamp
self.tzd = tzd
self.schedules = schedules
self.color_specs = color_specs
self.verbose = verbose
self.current_schedule_name = None
self.current_seq = None
def set_schedul | es(self, schedules) :
self.schedules = schedules
def perform(self) :
localtime = self.get_localtime()
secs = Scheduler.secs_since_midnight(localtime)
(_year, _month, _mday, _hour, _minute, _second, wday, _yday) = localtime
seq = self.get_current_seq(secs, wday + 1)
if seq :
if seq != self.current_seq :
color_name = seq['color_name']
logging.info("Scheduler: Setting lamp color to {}", color_name)
self.lamp.set_colorspec(self.color_specs[color_name])
self.current_seq = seq
else :
self.lamp.set_colorspec(self.color_specs['black'])
self.current_seq = None
return True
##
## Internal operations
##
def get_localtime(self) :
secs = utime.time()
secs += self.tzd.get_offset_hours() * 60 * 60
return utime.localtime(secs)
def get_current_seq(self, secs, dow) :
for (schedule_name, schedule) in self.schedules.items() :
if dow in schedule['dow'] :
seq = schedule['seq']
i = Scheduler.find_index_in_range(secs, seq)
if i != -1 :
if self.current_schedule_name != schedule_name :
logging.info("Entering schedule {}", schedule_name)
self.current_schedule_name = schedule_name
return seq[i]
if self.current_schedule_name :
logging.info("Leaving schedule {}", self.current_schedule_name)
self.current_schedule_name = None
return None
@staticmethod
def find_index_in_range(secs, seq) :
n = len(seq)
for i in range(n) :
seq_i = seq[i]
seq_i_secs = Scheduler.get_secs(seq_i['time'])
if secs < seq_i_secs :
return -1
elif i < n - 1 : # and seq_i <= secs
seq_iplus1 = seq[i+1]
seq_iplus1_secs = Scheduler.get_secs(seq_iplus1['time'])
if secs < seq_iplus1_secs :
return i
i += 1
return -1
@staticmethod
def secs_since_midnight(localtime) :
secs = utime.mktime(localtime)
return secs - Scheduler.midnight_epoch_secs(localtime)
@staticmethod
def midnight_epoch_secs(localtime) :
(year, month, mday, hour, minute, second, wday, yday) = localtime
return utime.mktime((year, month, mday, 0, 0, 0, wday, yday))
@staticmethod
def get_secs(time) :
return time["h"]*60*60 + time["m"]*60 + time["s"]
|
dataxu/ansible-modules-core | network/eos/eos_command.py | Python | gpl-3.0 | 5,314 | 0.002635 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter sprygada (@privateip)"
short_description: Run arbitrary command on EOS device
description:
- Sends an aribtrary set of commands to an EOS node and returns the results
read from the device. The M(eos_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: eos
options:
commands:
description:
- The commands to send to the remote EOS device over the
configured provider. The resulting output from the command
is returned. If the I(waitfor) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has been exceeded.
required: true
waitfor:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
| default: null
retries:
description:
- Specifies the number of retries a command should be tried
| before it is considered failed. The command is run on the
target device every retry and evaluated against the waitfor
conditionals
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how to long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- eos_command:
commands: "{{ lookup('file', 'commands.txt') }}"
- eos_command:
commands:
- show interface {{ item }}
with_items: interfaces
- eos_command:
commands:
- show version
waitfor:
- "result[0] contains 4.15.0F"
- eos_command:
commands:
- show version | json
- show interfaces | json
- show version
waitfor:
- "result[2] contains '4.15.0F'"
- "result[1].interfaces.Management1.interfaceAddress[0].primaryIp.maskLen eq 24"
- "result[0].modelName == 'vEOS'"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: the value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
retured: failed
type: list
sample: ['...', '...']
"""
import time
import shlex
import re
INDEX_RE = re.compile(r'(\[\d+\])')
def iterlines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def main():
spec = dict(
commands=dict(type='list'),
waitfor=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = get_module(argument_spec=spec,
supports_check_mode=True)
commands = module.params['commands']
retries = module.params['retries']
interval = module.params['interval']
try:
queue = set()
for entry in (module.params['waitfor'] or list()):
queue.add(Conditional(entry))
except AttributeError, exc:
module.fail_json(msg=exc.message)
result = dict(changed=False)
while retries > 0:
response = module.execute(commands)
result['stdout'] = response
for index, cmd in enumerate(commands):
if cmd.endswith('json'):
response[index] = module.from_json(response[index])
for item in list(queue):
if item(response):
queue.remove(item)
if not queue:
break
time.sleep(interval)
retries -= 1
else:
failed_conditions = [item.raw for item in queue]
module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
result['stdout_lines'] = list(iterlines(result['stdout']))
return module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.eos import *
if __name__ == '__main__':
main()
|
wlerin/streamlink | src/streamlink/plugins/okru.py | Python | bsd-2-clause | 3,774 | 0.001325 | # -*- coding: utf-8 -*-
import logging
import re
from streamlink.compat import html_unescape, unquote
from streamlink.exceptions import PluginError
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents, validate
from streamlink.stream import HLSStream, HTTPStream, RTMPStream
from streamlink.utils import parse_json
log = logging.getLogger(__name__)
class OKru(Plugin):
    """Streamlink plugin that extracts HLS/HTTP/RTMP streams from ok.ru pages."""

    # Matches the JSON blob embedded in the page's data-options attribute.
    _data_re = re.compile(r'''data-options=(?P<q>["'])(?P<data>{[^"']+})(?P=q)''')
    _url_re = re.compile(r'''https?://(?:www\.)?ok\.ru/''')
    # Validates the player metadata: progressive video variants plus
    # optional HLS/DASH/RTMP manifest URLs.
    _metadata_schema = validate.Schema(
        validate.transform(parse_json),
        validate.any({
            'videos': validate.any(
                [],
                [
                    {
                        'name': validate.text,
                        'url': validate.text,
                    }
                ]
            ),
            validate.optional('hlsManifestUrl'): validate.text,
            validate.optional('hlsMasterPlaylistUrl'): validate.text,
            validate.optional('liveDashManifestUrl'): validate.text,
            validate.optional('rtmpUrl'): validate.text,
        }, None)
    )
    # Extracts flashvars from the page HTML: either inline 'metadata' or a
    # 'metadataUrl' to POST to for the metadata (URL is percent-encoded).
    _data_schema = validate.Schema(
        validate.all(
            validate.transform(_data_re.search),
            validate.get('data'),
            validate.transform(html_unescape),
            validate.transform(parse_json),
            validate.get('flashvars'),
            validate.any({
                'metadata': _metadata_schema
            }, {
                'metadataUrl': validate.transform(unquote)
            }, None)
        )
    )
    # Maps ok.ru quality labels to pixel heights for stream weighting.
    QUALITY_WEIGHTS = {
        'full': 1080,
        '1080': 1080,
        'hd': 720,
        '720': 720,
        'sd': 480,
        '480': 480,
        '360': 360,
        'low': 360,
        'lowest': 240,
        'mobile': 144,
    }

    @classmethod
    def can_handle_url(cls, url):
        """Return True when *url* points at ok.ru."""
        return cls._url_re.match(url) is not None

    @classmethod
    def stream_weight(cls, key):
        """Weight stream names via QUALITY_WEIGHTS, else defer to Plugin."""
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'okru'
        return Plugin.stream_weight(key)

    def _get_streams(self):
        """Scrape the page metadata and yield all available streams."""
        self.session.http.headers.update({
            'User-Agent': useragents.FIREFOX,
            'Referer': self.url,
        })
        try:
            data = self.session.http.get(self.url, schema=self._data_schema)
        except PluginError:
            log.error('unable to validate _data_schema for {0}'.format(self.url))
            return
        metadata = data.get('metadata')
        metadata_url = data.get('metadataUrl')
        # When only a metadataUrl is present, fetch the metadata separately.
        if metadata_url and not metadata:
            metadata = self.session.http.post(metadata_url,
                                              schema=self._metadata_schema)
        if metadata:
            log.trace('{0!r}'.format(metadata))
            # HLS variant playlists (either manifest key may be present).
            for hls_url in [metadata.get('hlsManifestUrl'),
                            metadata.get('hlsMasterPlaylistUrl')]:
                if hls_url is not None:
                    for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
                        yield s
            # Progressive HTTP variants; label with pixel height when known.
            if metadata.get('videos'):
                for http_stream in metadata['videos']:
                    http_name = http_stream['name']
                    http_url = http_stream['url']
                    try:
                        http_name = '{0}p'.format(self.QUALITY_WEIGHTS[http_name])
                    except KeyError:
                        pass
                    yield http_name, HTTPStream(self.session, http_url)
            if metadata.get('rtmpUrl'):
                yield 'live', RTMPStream(self.session, params={'rtmp': metadata['rtmpUrl']})


__plugin__ = OKru
|
ckolumbus/mikidown | mikidown/highlighter.py | Python | mit | 4,883 | 0.001024 | import re
from PyQt4.QtGui import QSyntaxHighlighter, QColor, QFont, QTextCharFormat
from PyQt4.QtCore import Qt
from .mdx_strkundr import DEL_RE, INS_RE, STRONG_RE, EMPH_RE
class MikiHighlighter(QSyntaxHighlighter):
    """Markdown syntax highlighter with spell checking and fenced-block support."""

    # Word pattern used by the spell checker (unicode, apostrophes allowed).
    WORDS = r'(?iu)[\w\']+'

    def __init__(self, parent=None):
        """Build the (regex, QTextCharFormat) pattern table.

        Each index below pairs a markdown construct's regex with the font
        and colour used to render it.
        """
        super(MikiHighlighter, self).__init__(parent)
        baseFontSize = 12
        NUM = 15
        self.patterns = []
        regexp = [0] * NUM
        font = [0]*NUM
        color = [0]*NUM
        # 0: html tags - <pre></pre>
        regexp[0] = '</?[^>]+>'
        font[0] = QFont("monospace", baseFontSize, -1)
        color[0] = QColor("#A40000")
        # 1: h1 - #
        regexp[1] = '^#[^#]+'
        color[1] = QColor("#4E9A06")
        font[1] = QFont("decorative", 2*baseFontSize, QFont.Bold)
        # 2: h2 - ##
        regexp[2] = '^##[^#]+'
        color[2] = QColor("#4E9A06")
        font[2] = QFont("serif", 5.0/3*baseFontSize, QFont.Bold)
        # 3: h3 - ###
        regexp[3] = '^###[^#]+'
        color[3] = QColor("#4E9A06")
        font[3] = QFont("serif", 4.0/3*baseFontSize, QFont.Bold)
        # 4: h4 and more - ####
        regexp[4] = '^####.+'
        color[4] = QColor("#4E9A06")
        font[4] = QFont("serif", baseFontSize, QFont.Bold)
        # 5: html symbols - >
        regexp[5] = '&[^; ].+;'
        color[5] = QColor("#A40000")
        font[5] = QFont("monospace", baseFontSize, -1)
        # 6: html comments - <!-- -->
        regexp[6] = '<!--.+-->'
        color[6] = QColor("#888A85")
        font[6] = QFont(None, baseFontSize, -1)
        # 7: delete - ~~delete~~
        regexp[7] = DEL_RE
        color[7] = QColor("#888A85")
        font[7] = QFont(None, baseFontSize, -1)
        # 8: insert - __insert__
        regexp[8] = INS_RE
        font[8] = QFont(None, baseFontSize, -1)
        font[8].setUnderline(True)
        # 9: strong - **strong**
        regexp[9] = STRONG_RE
        color[9] = QColor("#F57900")
        font[9] = QFont(None, baseFontSize, QFont.Bold)
        # 10: emphasis - //emphasis//
        regexp[10] = EMPH_RE
        color[10] = QColor("#F57900")
        font[10] = QFont(None, baseFontSize, -1, True)
        # 11: links - (links) after [] or links after []:
        regexp[11] = r'(?<=(\]\())[^\(\)]*(?=\))'
        font[11] = QFont(None, baseFontSize, -1, True)
        font[11].setUnderline(True)
        #.setUnderlineColor("#204A87")
        # 12: link/image references - [] or ![]
        regexp[12] = r'!?\[[^\[\]]*\]'
        color[12] = QColor("#204A87")
        font[12] = QFont(None, baseFontSize, -1)
        # 13: blockquotes and lists - > or - or *
        regexp[13] = r'(^>+)|(^- )|(^\* )'
        color[13] = QColor("#F57900")
        font[13] = QFont(None, baseFontSize, -1)
        # 14: fence - ``` or ~~~
        regexp[14] = '^(?:~{3,}|`{3,}).*$'
        color[14] = QColor("#F57900")
        font[14] = QFont(None, baseFontSize, QFont.Bold)
        # Compile each pattern and attach its char format; 0 means "no
        # explicit font/colour for this slot".
        for i in range(NUM):
            p = re.compile(regexp[i])
            f = QTextCharFormat()
            if font[i] != 0:
                f.setFont(font[i])
            if color[i] != 0:
                f.setForeground(color[i])
            self.patterns.append((p, f))
        # Spell checker is shared with the parent editor widget.
        self.speller = parent.speller
        fenced_font = QFont("monospace", baseFontSize, -1)
        self.fenced_block = re.compile("^(?:~{3,}|`{3,}).*$")
        self.fenced_format = QTextCharFormat()
        self.fenced_format.setFont(fenced_font)

    def highlightSpellcheck(self, text):
        """Underline in red every word the spell checker rejects."""
        for word_object in re.finditer(self.WORDS, str(text)):
            if not word_object.group():
                # don't bother with empty words
                continue
            if self.speller and not self.speller.check(word_object.group()):
                current_format = self.format(word_object.start())
                current_format.setUnderlineColor(Qt.red)
                current_format.setUnderlineStyle(QTextCharFormat.SpellCheckUnderline)
                self.setFormat(word_object.start(),
                               word_object.end() - word_object.start(), current_format)

    def highlightBlock(self, text):
        """Apply markdown formats to one block, tracking fenced-code state.

        Block state 1 means "inside a fenced code block"; state is carried
        between blocks by QSyntaxHighlighter.
        """
        # highlight patterns
        for i in range(0, len(self.patterns)):
            p = self.patterns[i]
            for match in p[0].finditer(text):
                self.setFormat(
                    match.start(), match.end() - match.start(), p[1])

        # escape highlights in fenced_block
        m = self.fenced_block.match(text)
        self.setCurrentBlockState(0)
        if self.previousBlockState() != 1:
            # Outside a fence: an opening fence line enters state 1.
            if m:
                self.setCurrentBlockState(1)
        else:
            # Inside a fence: a fence line closes it, otherwise stay in
            # state 1; either way the whole line keeps the monospace look.
            if m:
                self.setCurrentBlockState(0)
            else:
                self.setCurrentBlockState(1)
            self.setFormat(0, len(text), self.fenced_format)
        self.highlightSpellcheck(text)
|
Schnouki/git-annex-remote-hubic | setup.py | Python | gpl-3.0 | 1,813 | 0.001655 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 Thomas Jost and the Contributors
#
# This file is part of git-annex-remote-hubic.
#
# git-annex-remote-hubic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# git-annex-remote-hubic is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# git-annex-remote-hubic. If not, see <http://www.gnu.org/lice | nses/>.
from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed promptly
# (the previous inline open(...).read() leaked the handle until interpreter
# shutdown, triggering a ResourceWarning on modern Pythons).
with open("README.md", "r") as readme:
    long_description = readme.read()

setup(name="git-annex-remote-hubic",
      version="0.3.2",
      description="A git-annex special remote for hubiC",
      long_description=long_description,
      author="Thomas Jost",
      author_email="schnouki@schnouki.net",
      url="https://github.com/Schnouki/git-annex-remote-hubic",
      packages=find_packages(),
      install_requires=[
          "python-dateutil",
          "python-swiftclient>=2.1.0",
          "rauth>=0.7",
      ],
      entry_points={
          "console_scripts": [
              "git-annex-remote-hubic = hubic_remote.main:main",
              "git-annex-remote-hubic-migrate = hubic_remote.migrate:main",
          ],
      },
      classifiers=[
          "Development Status :: 4 - Beta",
          "Environment :: Plugins",
          "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
          "Programming Language :: Python :: 2",
          "Topic :: System :: Archiving",
      ],
      )
|
rectory-school/rectory-apps | paw/migrations/0002_auto_20150119_1938.py | Python | mit | 525 | 0.001905 | # -*- coding: utf-8 -*- |
from __future__ import unicode_literals
from django.db import models, migrations
import paw.models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines PageIcon.display_icon so its pixel
    # dimensions are stored on the model (icon_height/icon_width) and uploads
    # are routed through paw.models.iconUploadTo.

    dependencies = [
        ('paw', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pageicon',
            name='display_icon',
            field=models.ImageField(height_field=b'icon_height', width_field=b'icon_width', upload_to=paw.models.iconUploadTo),
            preserve_default=True,
        ),
    ]
|
bob123bob/Sick-Beard | sickbeard/show_name_helpers.py | Python | gpl-3.0 | 9,869 | 0.008613 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard.common import countryList, showLanguages
from sickbeard.helpers import sanitizeSceneName
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard import logger
from sickbeard import db
import re
import datetime
import string
from name_parser.parser import NameParser, InvalidNameException
# Regex fragments that mark a release name as unwanted (subtitle packs,
# dir/sample/nfo fixes, samples, DVD extras); matched case-insensitively
# against the release's extra info in filterBadReleases().
resultFilters = ["sub(pack|s|bed)", "nlsub(bed|s)?", "swesub(bed)?",
                 "(dir|sample|nfo)fix", "sample", "(dvd)?extras"]
def filterBadReleases(name,showLang=u"en"):
    """
    Filters out non-english and just all-around stupid releases by comparing them
    to the resultFilters contents.

    name: the release name to check

    Returns: True if the release name is OK, False if it's bad.
    """

    # For English shows, dubbed releases are also unwanted.
    additionalFilters = []

    if showLang == u"en":
        additionalFilters.append("dub(bed)?")

    try:
        fp = NameParser()
        parse_result = fp.parse(name)
    except InvalidNameException:
        logger.log(u"Unable to parse the filename "+name+" into a valid episode", logger.WARNING)
        return False

    # use the extra info and the scene group to filter against
    check_string = ''
    if parse_result.extra_info:
        check_string = parse_result.extra_info
    if parse_result.release_group:
        if check_string:
            check_string = check_string + '-' + parse_result.release_group
        else:
            check_string = parse_result.release_group

    # if there's no info after the season info then assume it's fine
    if not check_string:
        check_string = name

    # if any of the bad strings are in the name then say no
    if sickbeard.IGNORE_WORDS == "":
        # placeholder token that should never match a real release name,
        # so an empty user setting effectively ignores nothing
        ignore_words="ztreyfgut"
    else:
        ignore_words=sickbeard.IGNORE_WORDS
    for x in resultFilters + ignore_words.split(',') + additionalFilters:
        # never filter on the show's own language name
        if x == showLanguages.get(showLang):
            continue
        # the filter must match as a whole word (non-word chars or
        # string boundaries on both sides), case-insensitively
        if re.search('(^|[\W_])'+x+'($|[\W_])', check_string, re.I):
            logger.log(u"Invalid scene release: "+name+" contains "+x+", ignoring it", logger.DEBUG)
            return False

    return True
def sceneToNormalShowNames(name):
    """
    Takes a show name from a scene dirname and converts it to a more "human-readable" format.

    name: The show name to convert

    Returns: a list of all the possible "normal" names (deduplicated, order
    not guaranteed since a set is used for dedup)
    """

    if not name:
        return []

    name_list = [name]

    # use both "and" and "&"; case-insensitivity comes from the inline (?i)
    # flag. (The previous code passed re.I as re.sub()'s positional *count*
    # argument by mistake, which silently capped the substitution at
    # re.I == 2 occurrences instead of replacing them all.)
    new_name = re.sub('(?i)([\. ])and([\. ])', '\\1&\\2', name)
    if new_name not in name_list:
        name_list.append(new_name)

    results = []

    # loop-invariant: alternation of all known country names
    country_match_str = '|'.join(countryList.values())

    for cur_name in name_list:
        # add brackets around the year
        results.append(re.sub('(\D)(\d{4})$', '\\1(\\2)', cur_name))

        # add brackets around the country
        results.append(re.sub('(?i)([. _-])('+country_match_str+')$', '\\1(\\2)', cur_name))

    results += name_list

    return list(set(results))
def makeSceneShowSearchStrings(show):
    """Return a scene-sanitized search string for every possible show name."""
    possible_names = allPossibleShowNames(show)
    # scenify the names
    return map(sanitizeSceneName, possible_names)
def makeSceneSeasonSearchString (show, segment, extraSearchType=None):
    """Build provider search strings for a whole season (*segment*).

    For air-by-date shows *segment* is the date string itself; otherwise it
    is the season number. extraSearchType == "nzbmatrix" produces that
    provider's special quoted multi-term syntax.
    """

    myDB = db.DBConnection()

    if show.air_by_date:
        numseasons = 0

        # the search string for air by date shows is just
        seasonStrings = [segment]

    else:
        numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [show.tvdbid])
        numseasons = int(numseasonsSQlResult[0][0])

        seasonStrings = ["S%02d" % segment]
        # since nzbmatrix allows more than one search per request we search SxEE results too
        if extraSearchType == "nzbmatrix":
            seasonStrings.append("%ix" % segment)

    showNames = set(makeSceneShowSearchStrings(show))

    toReturn = []
    term_list = []

    # search each show name
    for curShow in showNames:
        # most providers all work the same way
        if not extraSearchType:
            # if there's only one season then we can just use the show name straight up
            if numseasons == 1:
                toReturn.append(curShow)
            # for providers that don't allow multiple searches in one request we only search for Sxx style stuff
            else:
                for cur_season in seasonStrings:
                    toReturn.append(curShow + "." + cur_season)

        # nzbmatrix is special, we build a search string just for them
        elif extraSearchType == "nzbmatrix":
            if numseasons == 1:
                toReturn.append('"'+curShow+'"')
            elif numseasons == 0:
                toReturn.append('"'+curShow+' '+str(segment).replace('-',' ')+'"')
            else:
                term_list = [x+'*' for x in seasonStrings]
                if show.air_by_date:
                    term_list = ['"'+x+'"' for x in term_list]

                toReturn.append('"'+curShow+'"')

    if extraSearchType == "nzbmatrix":
        # collapse everything into nzbmatrix's +(a,b,...) grouping syntax
        toReturn = ['+('+','.join(toReturn)+')']

        if term_list:
            toReturn.append('+('+','.join(term_list)+')')

    return toReturn
def makeSceneSearchString (episode):
    """Build search strings for a single episode.

    Uses the air date for air-by-date shows, otherwise SxxEyy and NxNN
    forms; short single-season shows search on the bare show name.
    """

    myDB = db.DBConnection()
    numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [episode.show.tvdbid])
    numseasons = int(numseasonsSQlResult[0][0])
    numepisodesSQlResult = myDB.select("SELECT COUNT(episode) as numepisodes FROM tv_episodes WHERE showid = ? and season != 0", [episode.show.tvdbid])
    numepisodes = int(numepisodesSQlResult[0][0])

    # see if we should use dates instead of episodes
    # (ordinal 1 is the placeholder "no airdate" value)
    if episode.show.air_by_date and episode.airdate != datetime.date.fromordinal(1):
        epStrings = [str(episode.airdate)]
    else:
        epStrings = ["S%02iE%02i" % (int(episode.season), int(episode.episode)),
                     "%ix%02i" % (int(episode.season), int(episode.episode))]

    # for single-season shows just search for the show name -- if total ep count (exclude s0) is less than 11
    # due to the amount of qualities and releases, it is easy to go over the 50 result limit on rss feeds otherwise
    if numseasons == 1 and numepisodes < 11:
        epStrings = ['']

    showNames = set(makeSceneShowSearchStrings(episode.show))

    toReturn = []

    for curShow in showNames:
        for curEpString in epStrings:
            toReturn.append(curShow + '.' + curEpString)

    return toReturn
def isGoodResult(name, show, log=True):
"""
Use an automatically-created regex to make sure the result actually is the show it claims to be
"""
all_show_names = allPossibleShowNames(show)
showNames = map(sanitizeSceneName, all_show_names) + all_show_names
for curName in set(showNames):
escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
if show.startyear:
escaped_name += "(?:\W+"+str(show.startyear)+")?"
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|(Sea|sai)son\W+\d+\W+|E\d+\W+)'
if log:
logger.log(u"Checking if show "+name+" matches " + curRegex, logger.DEBUG)
match = re.search(curRegex, name, re.I)
if match:
l |
vrcmarcos/elasticmock | tests/fake_elasticsearch/test_index.py | Python | mit | 2,017 | 0.001487 | # -*- coding: utf-8 -*-
from tests import | TestElasticmock, INDEX_NAME, DOC_TYPE, BODY
# Replacement document body used when re-indexing an existing document.
UPDATED_BODY = {
    'author': 'vrcmarcos',
    'text': 'Updated Text'
}
class TestIndex(TestElasticmock):
    """Tests for the fake Elasticsearch ``index`` operation."""

    def test_should_index_document(self):
        data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY)

        self.assertEqual(DOC_TYPE, data.get('_type'))
        self.assertTrue(data.get('created'))
        self.assertEqual(1, data.get('_version'))
        self.assertEqual(INDEX_NAME, data.get('_index'))

    def test_should_index_document_without_doc_type(self):
        # Omitting doc_type should fall back to the default '_doc'.
        data = self.es.index(index=INDEX_NAME, body=BODY)

        self.assertEqual('_doc', data.get('_type'))
        self.assertTrue(data.get('created'))
        self.assertEqual(1, data.get('_version'))
        self.assertEqual(INDEX_NAME, data.get('_index'))

    def test_doc_type_can_be_list(self):
        # search() should accept a list of doc_types and count across them.
        doc_types = ['1_idx', '2_idx', '3_idx']
        count_per_doc_type = 3

        for doc_type in doc_types:
            for _ in range(count_per_doc_type):
                self.es.index(index=INDEX_NAME, doc_type=doc_type, body={})

        result = self.es.search(doc_type=[doc_types[0]])
        self.assertEqual(count_per_doc_type, result.get('hits').get('total').get('value'))

        result = self.es.search(doc_type=doc_types[:2])
        self.assertEqual(count_per_doc_type * 2, result.get('hits').get('total').get('value'))

    def test_update_existing_doc(self):
        # Re-indexing with the same id should bump _version and replace _source.
        data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY)
        document_id = data.get('_id')
        self.es.index(index=INDEX_NAME, id=document_id, doc_type=DOC_TYPE, body=UPDATED_BODY)
        target_doc = self.es.get(index=INDEX_NAME, id=document_id)

        expected = {
            '_type': DOC_TYPE,
            '_source': UPDATED_BODY,
            '_index': INDEX_NAME,
            '_version': 2,
            'found': True,
            '_id': document_id
        }

        self.assertDictEqual(expected, target_doc)
|
opendatapress/open_data_press | tests/dummy.py | Python | mit | 2,439 | 0.00082 | # -*- coding: utf-8 -*-
#
# Dummy parameters for models created in unit tests
#
from datetime import datetime
# Serialized (dummy) Google OAuth2 credential blob stored on the user record.
credentials = '''{
    "invalid": false,
    "token_uri": "https://accounts.google.com/o/oauth2/token",
    "refresh_token": "",
    "client_id": "351298984682.apps.googleusercontent.com",
    "token_expiry": "2050-11-14T11:26:32Z",
    "user_agent": null,
    "access_token": "ya29.1.AADtN_VSDGUctyiNf8Ls6ZAHvLlmti1OIYbxAUalbUWN7N0ooFMeSl03AqlC8SeoacLgNA",
    "client_secret": "m_QLqYSZHe_GqlqS22rEtwDq"
}'''

# Params for a valid user record
user = {
    'created_at': datetime.now(),
    'credentials': credentials,
    'google_birthday': u'0000-01-01',
    'google_email': u'test.user@gmail.com',
    'google_gender': u'male',
    'google_id': u'123456789',
    'google_locale': u'en-GB',
    'google_name': u'Test User',
    'google_picture_url': u'https://lh3.googleusercontent.com/image.png',
    'last_login_at': datetime.now(),
    'modified_at': datetime.now(),
    'profile_email': u'test.user@email.com',
    'profile_description': u'This is a test user account',
    'profile_name': u'Test User',
    'profile_slug': 'test-user',
    'profile_web_address': 'http://test-user.com',
}

# Params for a valid data source record
# Except for DataSource.user which must be defined within tests
data_source = {
    'created_at': datetime.now(),
    'description': u'A Dummy Data Source',
    'google_spreadsheet': u'dummy_key',
    'google_worksheet': u'dummy_id',
    'licence': u'Do What You Like',
    'modified_at': datetime.now(),
    'slug': u'data-1',
    'tags': u'Apples, Oranges, Pears',
    'tbl_stars': 5,
    'title': u'Dummy Data',
}

# Same record as above but without the timestamp fields (JSON payload form)
data_source_json = {
    'description': u'A Dummy Data Source',
    'google_spreadsheet': u'dummy_key',
    'google_worksheet': u'dummy_id',
    'licence': u'Do What You Like',
    'slug': u'data-1',
    'tags': u'Apples, Oranges, Pears',
    'tbl_stars': 5,
    'title': u'Dummy Data',
}

# Params for valid data view model
data_view = {
    'created_at': datetime.now(),
    'modified_at': datetime.now(),
    'extension': u'txt',
    'mimetype': u'text/plain'
}
angelsanz/linkstore | linkstore/links.py | Python | mit | 6,633 | 0.001055 | import os
from os import path
import sqlite3
from .link import Link
class Links(object):
    """In-memory link store keyed by each link's url."""

    def __init__(self):
        self._links = {}

    def add(self, link):
        """Store *link*, replacing any previous link with the same url."""
        self._links[link.url] = link

    def find_by_tag(self, tag):
        """Return every stored link that carries *tag*."""
        return [stored for stored in self._links.values() if tag in stored.tags]

    def get_all(self):
        """Return all stored links."""
        return self._links.values()

    def remove(self, link):
        """Forget the stored link with *link*'s url (KeyError if absent)."""
        del self._links[link.url]

    def find_by_url(self, url):
        """Return the link stored under *url* (KeyError if absent)."""
        return self._links[url]
class SqliteLinks(object):
    """Link store backed by sqlite via the links/tags table gateways."""

    def __init__(self, table_gateways):
        # table_gateways maps 'links' -> LinksTable, 'tags' -> TagsTable
        self._links_table = table_gateways['links']
        self._tags_table = table_gateways['tags']

    def add(self, link):
        """Persist *link*, replacing its tag set with the link's tags."""
        self._links_table.save(link.url, link.date)
        self._tags_table.reset_tags(link.url, link.tags)

    def find_by_tag(self, tag):
        """Return Link objects for every stored url carrying *tag*."""
        found = []
        for url in self._tags_table.get_urls_of_links_with_tag(tag):
            date = self._links_table.get_date(url)
            tags = self._tags_table.get_tags(url)

            found.append(Link(url, tags, date))

        return found

    def get_all(self):
        """Return a Link object for every stored url."""
        all_links = []
        for url, date in self._links_table.get_all():
            tags = self._tags_table.get_tags(url)
            all_links.append(Link(url, tags, date))

        return all_links

    def remove(self, link):
        """Delete *link*'s tags first (FK constraint), then the link row."""
        self._tags_table.remove_tags(link.url)
        self._links_table.remove_url_and_date(link.url)

    def find_by_url(self, url):
        """Return the stored Link for *url*."""
        date = self._links_table.get_date(url)
        tags = self._tags_table.get_tags(url)

        return Link(url, tags, date)
class SqliteTable(object):
    """Base table gateway: holds a connection and creates its table on init.

    Subclasses must define SQL_COMMAND_FOR_TABLE_CREATION.
    """

    def __init__(self, sqlite_connection):
        self._connection = sqlite_connection
        self._set_up()

    def _set_up(self):
        # "create table if not exists ..." makes this idempotent
        with self._connection as connection:
            connection.execute(self.SQL_COMMAND_FOR_TABLE_CREATION)
class LinksTable(SqliteTable):
    """Gateway for the ``links`` table (url -> date_saved)."""

    SQL_COMMAND_FOR_TABLE_CREATION = '''
        create table if not exists links(
            url
                primary key
                not null,
            date_saved
                not null
        )
    '''

    def get_all(self):
        """Return every (url, date_saved) row."""
        with self._connection as db:
            return db.execute('select url, date_saved from links').fetchall()

    def save(self, url, date):
        """Insert a link row; an already-saved url is silently ignored."""
        statement = 'insert or ignore into links(url, date_saved) values(?, ?)'
        with self._connection as db:
            db.execute(statement, (url, date))

    def get_date(self, url):
        """Return the date_saved stored for *url*."""
        query = 'select date_saved from links where url = ?'
        with self._connection as db:
            (stored_date,) = db.execute(query, (url,)).fetchone()
            return stored_date

    def remove_url_and_date(self, url):
        """Delete the row stored for *url*."""
        with self._connection as db:
            db.execute('delete from links where url = ?', (url,))
class TagsTable(SqliteTable):
    """Gateway for the ``tags`` table (url -> tag name, FK to links.url)."""

    SQL_COMMAND_FOR_TABLE_CREATION = '''
        create table if not exists tags(
            url
                not null,
            name
                not null,
            foreign key(url) references links(url)
                on delete restrict
                on update restrict
        )
    '''

    def get_urls_of_links_with_tag(self, tag):
        """Return a tuple of every url tagged with *tag*."""
        query = 'select url from tags where name = ?'
        with self._connection as db:
            rows = db.execute(query, (tag,)).fetchall()
            return tuple(stored_url for (stored_url,) in rows)

    def get_tags(self, url):
        """Return a tuple of every tag attached to *url*."""
        query = 'select name from tags where url = ?'
        with self._connection as db:
            rows = db.execute(query, (url,)).fetchall()
            return tuple(tag_name for (tag_name,) in rows)

    def reset_tags(self, url, tags):
        """Replace *url*'s tag set with *tags*."""
        self.remove_tags(url)
        self.add_tags(url, tags)

    def remove_tags(self, url):
        """Delete every tag row attached to *url*."""
        with self._connection as db:
            db.execute('delete from tags where url = ?', (url,))

    def add_tags(self, url, tags):
        """Insert one row per tag for *url*."""
        rows = [(url, tag) for tag in tags]
        with self._connection as db:
            db.executemany('insert into tags(url, name) values(?, ?)', rows)
class SqliteConnectionFactory(object):
    """Factory for the three connection flavours used by the app."""

    @staticmethod
    def create_autoclosing_on_disk():
        """On-disk connection that is reopened/closed per with-block."""
        return AutoclosingSqliteConnection()

    @classmethod
    def create_in_memory(cls):
        """Fresh in-memory database (used by tests)."""
        connection_to_in_memory_database = sqlite3.connect(':memory:')
        cls._enable_enforcement_of_foreign_key_constraints(connection_to_in_memory_database)

        return connection_to_in_memory_database

    @staticmethod
    def _enable_enforcement_of_foreign_key_constraints(sqlite_connection):
        # sqlite ships with FK checks off; turn them on per-connection
        sqlite_connection.execute('pragma foreign_keys = on')

    @classmethod
    def create_on_disk(cls, data_directory):
        """Connection to the database file inside *data_directory*."""
        connection_to_on_disk_database = sqlite3.connect(data_directory.path_to_database_file)
        cls._enable_enforcement_of_foreign_key_constraints(connection_to_on_disk_database)

        return connection_to_on_disk_database
class AutoclosingSqliteConnection(object):
    """Context manager that opens a fresh sqlite connection per with-block
    and closes it on exit (after delegating commit/rollback to it)."""

    def __init__(self, provider_of_sqlite_connection=None):
        # default provider opens the on-disk application database
        self._provider_of_sqlite_connection = provider_of_sqlite_connection if provider_of_sqlite_connection is not None \
            else ProviderOfConnectionToOnDiskSqliteDatabase()

    def __enter__(self):
        self._current_connection = self._provider_of_sqlite_connection.get()
        # enter the connection's own context so it commits/rolls back on exit
        self._current_connection.__enter__()

        return self._current_connection

    def __exit__(self, type_, value, traceback):
        # let the connection finish its transaction, then close the handle
        self._current_connection.__exit__(type_, value, traceback)
        self._current_connection.close()

        # False: never suppress exceptions raised inside the with-block
        return False
class ProviderOfConnectionToOnDiskSqliteDatabase(object):
    """Supplies connections to the database file in the app data directory."""

    def __init__(self):
        self._directory = ApplicationDataDirectory()

    def get(self):
        """Open and return a new on-disk sqlite connection."""
        return SqliteConnectionFactory.create_on_disk(self._directory)
class ApplicationDataDirectory(object):
    """Locates (and lazily creates) the per-user ~/.linkstore/ directory."""

    @property
    def path(self):
        """Absolute path of the application data directory."""
        return path.expanduser('~/.linkstore/')

    @property
    def name_of_database_file(self):
        """Filename of the sqlite database inside the data directory."""
        return 'linkstore.sqlite'

    @property
    def path_to_database_file(self):
        """Full database file path; creates the directory if missing."""
        self._ensure_data_directory_exists()

        return path.join(self.path, self.name_of_database_file)

    def _ensure_data_directory_exists(self):
        # create the directory only on first use
        if not path.exists(self.path):
            os.mkdir(self.path)
|
DanielDeychakiwsky/python-machine-learning | src/linear_regression/gradient_descent/calculations.py | Python | mit | 633 | 0 | from math import pow
# use weights for slope and intercept
def predict(x, slope, intercept):
    """Apply the linear model to a single input: slope * x + intercept."""
    return (slope * x) + intercept


def calculate_y_hats(inputs, slope, intercept):
    """Return the predicted y value for every x in *inputs*, in order."""
    return [predict(x, slope, intercept) for x in inputs]


def calculate_total_mean_squared_error(inputs, outputs, slope, intercept, n):
    """Mean squared error of the line (slope, intercept) over the first *n* points.

    Raises IndexError when *n* exceeds the number of points, matching the
    original index-based loop.
    """
    y_hats = calculate_y_hats(inputs, slope, intercept)
    # sum over a generator replaces the manual accumulator; math.pow is kept
    # so the total stays a float (and the final division is true division
    # even on Python 2).
    total_error = sum(pow(outputs[i] - y_hats[i], 2) for i in range(n))
    return total_error / n
def calculate_k(x, y):
    """Return the shared length of *x* and *y* (the sample count "n").

    Raises:
        ValueError: when the two sequences differ in length. ValueError is
            more specific than the bare Exception previously raised, and is
            still caught by any ``except Exception`` caller.
    """
    if len(x) != len(y):
        raise ValueError(
            'could not calculate "n": len(x)=%d != len(y)=%d' % (len(x), len(y)))
    return len(x)
|
DrClockwork/H5PP | h5pp/models.py | Python | gpl-2.0 | 9,008 | 0.004218 | from django.core.urlresolvers import reverse
from django.db import models
# Stores information about what h5p uses what libraries
class h5p_contents_libraries(models.Model):
    """Join table linking a piece of H5P content to the libraries it uses."""
    content_id = models.PositiveIntegerField(null=False)
    library_id = models.PositiveIntegerField(null=False)
    # How the library is loaded for this content (default 'preloaded')
    dependency_type = models.CharField(
        null=False, default='preloaded', max_length=31)
    drop_css = models.PositiveSmallIntegerField(null=False, default=0)
    weight = models.PositiveIntegerField(null=False, default=999999,)

    class Meta:
        db_table = 'h5p_contents_libraries'
        unique_together = (('content_id', 'library_id', 'dependency_type'))
# Stores information about libraries
class h5p_libraries(models.Model):
    """Catalog of installed H5P libraries and their metadata."""
    library_id = models.AutoField(primary_key=True,
                                  help_text='Identifier of the library')
    machine_name = models.CharField(null=False, default='', max_length=127,
                                    help_text='Full name of the library')
    title = models.CharField(null=False, default='', max_length=255,
                             help_text='Short name of the library')
    major_version = models.PositiveIntegerField(null=False)
    minor_version = models.PositiveIntegerField(null=False)
    patch_version = models.PositiveIntegerField(null=False)
    runnable = models.PositiveSmallIntegerField(null=False, default=1,
                                                help_text='If the library can be started alone (not a dependency) ?')
    fullscreen = models.PositiveSmallIntegerField(null=False, default=0,
                                                  help_text='Display fullscreen button')
    embed_types = models.CharField(null=False, blank=True, default='', max_length=255)
    preloaded_js = models.TextField(null=True,
                                    help_text='List of JavaScript files needed by the library')
    preloaded_css = models.TextField(null=True,
                                     help_text='List of Stylesheet files needed by the library')
    drop_library_css = models.TextField(null=True, blank=True,
                                        help_text='List of Libraries that should not have CSS included if this library is used')
    semantics = models.TextField(null=False, blank=True,
                                 help_text='The semantics definition in JSON format')
    restricted = models.PositiveSmallIntegerField(null=False, default=0,
                                                  help_text='If this library can be used to create new content')
    tutorial_url = models.CharField(null=True, max_length=1000, blank=True,
                                    help_text='URL to a tutorial for this library')

    class Meta:
        db_table = 'h5p_libraries'
        ordering = ['machine_name', 'major_version', 'minor_version']
        verbose_name = 'Library'
        verbose_name_plural = 'Libraries'

    def __unicode__(self):
        return self.machine_name

    def __str__(self):
        return "%s" % (self.machine_name)
# Stores information about library dependencies
class h5p_libraries_libraries(models.Model):
    """Dependency edge: a library requiring another library."""
    library_id = models.PositiveIntegerField(null=False)
    required_library_id = models.PositiveIntegerField(null=False)
    dependency_type = models.CharField(null=False, max_length=31)

    class Meta:
        db_table = 'h5p_libraries_libraries'
        unique_together = (('library_id', 'required_library_id'))
# Stores translations for the languages
class h5p_libraries_languages(models.Model):
    """Per-language JSON translation blob attached to a library."""
    library_id = models.PositiveIntegerField(null=False)
    language_code = models.CharField(null=False, max_length=31)
    language_json = models.TextField(null=False,
                                     help_text='The translations defined in json format')

    class Meta:
        db_table = 'h5p_libraries_languages'
        ordering = ['language_code', 'library_id']
        verbose_name = 'Library-language'
        verbose_name_plural = 'Libraries-languages'
        unique_together = (('library_id', 'language_code'))

    def __unicode__(self):
        return self.language_code

    def __str__(self):
        return "%s" % (self.language_code)
# Stores information about where the h5p content is stored
class h5p_contents(models.Model):
    """A piece of H5P content plus the library used to render it."""
    content_id = models.AutoField(primary_key=True,
                                  help_text='Identifier of the content')
    title = models.CharField(null=False, max_length=255)
    json_contents = models.TextField(null=False,
                                     help_text='The content in JSON format')
    embed_type = models.CharField(null=False, default='', max_length=127)
    disable = models.PositiveIntegerField(null=False, default=0)
    main_library_id = models.PositiveIntegerField(null=False,
                                                  help_text='The library we first instanciate for this content')
    content_type = models.CharField(null=True, max_length=127,
                                    help_text='Content type as defined in h5p.json')
    author = models.CharField(null=True, max_length=127)
    license = models.CharField(null=True, blank=True, max_length=7)
    meta_keywords = models.TextField(null=True, blank=True)
    meta_description = models.TextField(null=True, blank=True)
    filtered = models.TextField(null=False,
                                help_text='Filtered version of json_contents')
    slug = models.CharField(null=False, max_length=127,
                            help_text='Human readable content identifier that is unique')

    class Meta:
        db_table = 'h5p_contents'
        ordering = ['title', 'author', 'content_id']
        verbose_name = 'Content'
        verbose_name_plural = 'Contents'

    def __unicode__(self):
        return 'Title:%s - Author:%s - Type:%s' % (self.title, self.author, self.content_type)

    def __str__(self):
        return "%s - %s" % (self.content_id, self.title)

    def get_absolute_url(self):
        # Viewer URL: the 'h5pcontent' view plus a contentId query parameter.
        return '%s?contentId=%s' % (reverse('h5pcontent'), self.content_id)
# Stores user statistics
class h5p_points(models.Model):
    """Score record for one (content, user) pair."""
    content_id = models.PositiveIntegerField(null=False,
                                             help_text='Identifier of the content having a score')
    uid = models.PositiveIntegerField(null=False,
                                      help_text='Identifier of the user with this score')
    started = models.PositiveIntegerField(null=False,
                                          help_text='Timestamp. Indicates when the user started watching the video')
    finished = models.PositiveIntegerField(null=False, default=0,
                                           help_text='Timestamp. Indicates when the user finished watching the video')
    points = models.PositiveIntegerField(null=True, blank=True,
                                         help_text='Current point of the user')
    max_points = models.PositiveIntegerField(null=True, blank=True,
                                             help_text='Maximum point that the user can have')

    class Meta:
        db_table = 'h5p_points'
        ordering = ['content_id', 'uid']
        verbose_name = 'Score'
        verbose_name_plural = 'Scores'
        unique_together = (('content_id', 'uid'))
# Stores user data about the content
class h5p_content_user_data(models.Model):
    """Per-user saved state for a content item (and optional sub-content)."""
    user_id = models.PositiveIntegerField(null=False)
    content_main_id = models.PositiveIntegerField(null=False)
    sub_content_id = models.PositiveIntegerField(null=False)
    data_id = models.CharField(null=False, max_length=127)
    timestamp = models.PositiveIntegerField(null=False)
    data = models.TextField(null=False)
    preloaded = models.PositiveSmallIntegerField(null=True)
    delete_on_content_change = models.PositiveSmallIntegerField(null=True)

    class Meta:
        db_table = 'h5p_content_user_data'
        unique_together = (('user_id', 'content_main_id',
                            'sub_content_id', 'data_id'))
# Keeps track of what happens in the H5p system
class h5p_events(models.Model):
user_id = models.PositiveIntegerField(null=False,
help_text='Identifier of the user who caused this event')
created_at = models.IntegerField(null=False)
type = models.CharField(null=False, max_length=63,
help_text='Type of the event. If it concerns a library, a content or a user')
sub_type = models.CharField(null=False, max_length=63,
help_text='Action of the event. Example : Create, Delete, Edit...')
content_id = models.PositiveIntegerField(null=False,
help_text='If not 0, identifier of the content affected by this event')
content_title = models.CharField(null=False, max_length=255,
help_text='If not blank, title of the content affected by this event')
library_name = models.CharField(null=False, max_length=127,
|
desihub/desisim | py/desisim/pixsim.py | Python | bsd-3-clause | 30,517 | 0.006455 | """
desisim.pixsim
==============
Tools for DESI pixel level simulations using specter
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import os.path
import random
from time import asctime
import socket
import astropy.units as u
import numpy as np
import desimodel.io
import desispec.io
from desispec.image import Image
import desispec.cosmics
from . import obs, io
from desiutil.log import get_logger
log = get_logger()
# Inhibit download of IERS-A catalog, even from a good server.
# Note that this is triggered by a call to astropy.time.Time(),
# which is subsequently used to compute sid | ereal_time().
# It's the initialization of astropy.time.Time() itself that makes the call.
from desiutil.iers import freeze_iers
from astropy.time import Time
def simulate_exposure(simspecfile, rawfile, cameras=None,
ccdshape=None, simpixfile=None, addcosmics=None, comm= | None,
**kwargs):
"""
Simulate frames from an exposure, including I/O
Args:
simspecfile: input simspec format file with spectra
rawfile: output raw data file to write
Options:
cameras: str or list of str, e.g. b0, r1, .. z9
ccdshape: (npix_y, npix_x) primarily used to limit memory while testing
simpixfile: output file for noiseless truth pixels
addcosmics: if True (must be specified via command input), add cosmics from real data
comm: MPI communicator object
Additional keyword args are passed to pixsim.simulate()
For a lower-level pixel simulation interface that doesn't perform I/O,
see pixsim.simulate()
Note: call desi_preproc or desispec.preproc.preproc to pre-process the
output desi*.fits file for overscan subtraction, noise estimation, etc.
"""
#- Split communicator by nodes; each node processes N frames
#- Assumes / requires equal number of ranks per node
if comm is not None:
rank, size = comm.rank, comm.size
num_nodes = mpi_count_nodes(comm)
comm_node, node_index, num_nodes = mpi_split_by_node(comm, 1)
node_rank = comm_node.rank
node_size = comm_node.size
else:
log.debug('Not using MPI')
rank, size = 0, 1
comm_node = None
node_index = 0
num_nodes = 1
node_rank = 0
node_size = 1
if rank == 0:
log.debug('Starting simulate_exposure at {}'.format(asctime()))
if cameras is None:
if rank == 0:
from astropy.io import fits
fibermap = fits.getdata(simspecfile, 'FIBERMAP')
cameras = io.fibers2cameras(fibermap['FIBER'])
log.debug('Found cameras {} in input simspec file'.format(cameras))
if len(cameras) % num_nodes != 0:
raise ValueError('Number of cameras {} should be evenly divisible by number of nodes {}'.format(
len(cameras), num_nodes))
if comm is not None:
cameras = comm.bcast(cameras, root=0)
#- Fail early if camera alreaady in output file
if rank == 0 and os.path.exists(rawfile):
from astropy.io import fits
err = False
with fits.open(rawfile) as fx:
for camera in cameras:
if camera in fx:
log.error('Camera {} already in {}'.format(camera, rawfile))
err = True
if err:
raise ValueError('Some cameras already in output file')
#- Read simspec input; I/O layer handles MPI broadcasting
if rank == 0:
log.debug('Reading simspec at {}'.format(asctime()))
mycameras = cameras[node_index::num_nodes]
if node_rank == 0:
log.info("Assigning cameras {} to comm_exp node {}".format(mycameras, node_index))
simspec = io.read_simspec(simspecfile, cameras=mycameras,
readflux=False, comm=comm)
night = simspec.header['NIGHT']
expid = simspec.header['EXPID']
if rank == 0:
log.debug('Reading PSFs at {}'.format(asctime()))
psfs = dict()
#need to initialize previous channel
previous_channel = 'a'
for camera in mycameras:
#- Note: current PSF object can't be pickled and thus every
#- rank must read it instead of rank 0 read + bcast
channel = camera[0]
if channel not in psfs:
log.info('Reading {} PSF at {}'.format(channel, asctime()))
psfs[channel] = desimodel.io.load_psf(channel)
#- Trim effective CCD size; mainly to limit memory for testing
if ccdshape is not None:
psfs[channel].npix_y, psfs[channel].npix_x = ccdshape
psf = psfs[channel]
cosmics=None
#avoid re-broadcasting cosmics if we can
if previous_channel != channel:
if (addcosmics is True) and (node_rank == 0):
cosmics_file = io.find_cosmics(camera, simspec.header['EXPTIME'])
log.info('Reading cosmics templates {} at {}'.format(
cosmics_file, asctime()))
shape = (psf.npix_y, psf.npix_x)
cosmics = io.read_cosmics(cosmics_file, expid, shape=shape)
if (addcosmics is True) and (comm_node is not None):
if node_rank == 0:
log.info('Broadcasting cosmics at {}'.format(asctime()))
cosmics = comm_node.bcast(cosmics, root=0)
else:
log.debug("Cosmics not requested")
if node_rank == 0:
log.info("Starting simulate for camera {} on node {}".format(camera,node_index))
image, rawpix, truepix = simulate(camera, simspec, psf, comm=comm_node, preproc=False, cosmics=cosmics, **kwargs)
#- Use input communicator as barrier since multiple sub-communicators
#- will write to the same output file
if rank == 0:
log.debug('Writing outputs at {}'.format(asctime()))
tmprawfile = rawfile + '.tmp'
if comm is not None:
for i in range(comm.size):
if (i == comm.rank) and (comm_node.rank == 0):
desispec.io.write_raw(tmprawfile, rawpix, image.meta,
camera=camera)
if simpixfile is not None:
io.write_simpix(simpixfile, truepix, camera=camera,
meta=image.meta)
comm.barrier()
else:
desispec.io.write_raw(tmprawfile, rawpix, image.meta, camera=camera)
if simpixfile is not None:
io.write_simpix(simpixfile, truepix, camera=camera,
meta=image.meta)
if rank == 0:
log.info('Wrote {}'.format(rawfile))
log.debug('done at {}'.format(asctime()))
previous_channel = channel
#- All done; rename temporary raw file to final location
if comm is None or comm.rank == 0:
os.rename(tmprawfile, rawfile)
def simulate(camera, simspec, psf, nspec=None, ncpu=None,
cosmics=None, wavemin=None, wavemax=None, preproc=True, comm=None):
"""Run pixel-level simulation of input spectra
Args:
camera (string) : b0, r1, .. z9
simspec : desispec.io.SimSpec object from desispec.io.read_simspec()
psf : subclass of specter.psf.psf.PSF, e.g. from desimodel.io.load_psf()
Options:
nspec (int): number of spectra to simulate
ncpu (int): number of CPU cores to use in parallel
cosmics (desispec.image.Image): e.g. from desisim.io.read_cosmics()
wavemin (float): minimum wavelength range to simulate
wavemax (float): maximum wavelength range to simulate
preproc (boolean, optional) : also preprocess raw data (default True)
Returns:
(image, rawpix, truepix) tuple, where image is the preproc Image object
(only header is meaningful if preproc=False), rawpix is a 2D
ndarray of unprocessed raw pixel data, and truepix is a 2D ndarray
of truth for image.pix
"""
freeze_iers()
if (comm is None) or (comm.rank == 0):
log.info('Starting pixsim.simulate camera {} at |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/tumblr.py | Python | gpl-3.0 | 6,399 | 0.030023 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata
)
class TumblrIE(InfoExtractor):
_VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_NETRC_MACHINE = 'tumblr'
_LOGIN_URL = 'https://www.tumblr.com/login'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': r're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': r're:http://.*\.jpg',
}
}, {
'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video',
'md5': '7ae503065ad150122dc3089f8cf1546c',
'info_dict': {
'id': '130323439814',
'ext': 'mp4',
'title': 'HD Video Testing \u2014 Test description for my HD video',
'description': 'md5:97cc3ab5fcd27ee4af6356701541319c',
'thumbnail': r're:http://.*\.jpg',
},
'params' | : {
'format': 'hd',
},
}, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'upload_date': '20150506',
't | imestamp': 1430931613,
'age_limit': 18,
'uploader_id': '1638622',
'uploader': 'naked-yogi',
},
'add_ie': ['Vidme'],
}, {
'url': 'http://camdamage.tumblr.com/post/98846056295/',
'md5': 'a9e0c8371ea1ca306d6554e3fecf50b6',
'info_dict': {
'id': '105463834',
'ext': 'mp4',
'title': 'Cam Damage-HD 720p',
'uploader': 'John Moyer',
'uploader_id': 'user32021558',
},
'add_ie': ['Vimeo'],
}, {
'url': 'http://sutiblr.tumblr.com/post/139638707273',
'md5': '2dd184b3669e049ba40563a7d423f95c',
'info_dict': {
'id': 'ir7qBEIKqvq',
'ext': 'mp4',
'title': 'Vine by sutiblr',
'alt_title': 'Vine by sutiblr',
'uploader': 'sutiblr',
'uploader_id': '1198993975374495744',
'upload_date': '20160220',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
'add_ie': ['Vine'],
}, {
'url': 'http://vitasidorkina.tumblr.com/post/134652425014/joskriver-victoriassecret-invisibility-or',
'md5': '01c12ceb82cbf6b2fe0703aa56b3ad72',
'info_dict': {
'id': '-7LnUPGlSo',
'ext': 'mp4',
'title': 'Video by victoriassecret',
'description': 'Invisibility or flight…which superpower would YOU choose? #VSFashionShow #ThisOrThat',
'uploader_id': 'victoriassecret',
'thumbnail': r're:^https?://.*\.jpg'
},
'add_ie': ['Instagram'],
}]
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'user[email]': username,
'user[password]': password
})
response, urlh = self._download_webpage_handle(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(login_form), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self._LOGIN_URL,
})
# Successful login
if '/dashboard' in urlh.geturl():
return
login_errors = self._parse_json(
self._search_regex(
r'RegistrationForm\.errors\s*=\s*(\[.+?\])\s*;', response,
'login errors', default='[]'),
None, fatal=False)
if login_errors:
raise ExtractorError(
'Unable to login: %s' % login_errors[0], expected=True)
self.report_warning('Login has probably failed')
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage, urlh = self._download_webpage_handle(url, video_id)
redirect_url = compat_str(urlh.geturl())
if 'tumblr.com/safe-mode' in redirect_url or redirect_url.startswith('/safe-mode'):
raise ExtractorError(
'This Tumblr may contain sensitive media. '
'Disable safe mode in your account settings '
'at https://www.tumblr.com/settings/account#safe_mode',
expected=True)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url', default=None)
if iframe_url is None:
return self.url_result(redirect_url, 'Generic')
iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page')
duration = None
sources = []
sd_url = self._search_regex(
r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe,
'sd video url', default=None, group='url')
if sd_url:
sources.append((sd_url, 'sd'))
options = self._parse_json(
self._search_regex(
r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe,
'hd video url', default='', group='options'),
video_id, fatal=False)
if options:
duration = int_or_none(options.get('duration'))
hd_url = options.get('hdUrl')
if hd_url:
sources.append((hd_url, 'hd'))
formats = [{
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'height': int_or_none(self._search_regex(
r'/(\d{3,4})$', video_url, 'height', default=None)),
'quality': quality,
} for quality, (video_url, format_id) in enumerate(sources)]
self._sort_formats(formats)
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
}
|
spacetelescope/stsci.tools | lib/stsci/tools/fitsdiff.py | Python | bsd-3-clause | 1,825 | 0.000548 | """fitsdiff is now a part of Astropy.
Now this module just provides a wrapper around astropy.io.fits.diff for backwards
compatibility with the old interface in case anyone uses it.
"""
import os
import sys
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.scripts.fitsdiff import log, main
def fitsdiff(input1, input2, comment_excl_list='', value_excl_list='',
field_excl_list='', maxdiff=10, delta=0.0, neglect_blanks=True,
output=None):
if isinstance(comment_excl_list, str):
comment_excl_list = list_parse(comment_excl_list)
if isinstance(value_excl_list, str):
value_excl_list = list_parse(value_excl_list)
if isinstance(field_excl_list, str):
field_excl_list = list_parse(field_excl_list)
diff = FITSDiff(input1, input2, ignore_keywords=value_excl_list,
ignore_comments=comment_excl_list,
ignore_fields=field_excl_list, numdiffs=maxdiff,
tolerance=delta, ignore_blanks=neglect_blanks)
if output is None:
output = sys.stdout
diff.report(output)
return diff.identical
def list_parse(nam | e_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
| try:
return [v.strip() for v in open(value, 'r').readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')]
if __name__ == "__main__":
sys.exit(main())
|
rishig/zulip | zerver/views/messages.py | Python | apache-2.0 | 69,921 | 0.003661 | from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection, IntegrityError
from django.http import HttpRequest, HttpResponse
from typing import Dict, List, Set, Any, Iterable, \
Optional, Tuple, Union, Sequence, cast
from zerver.lib.exceptions import JsonableError, ErrorCode
from zerver.lib.html_diff import highlight_html_differences
from zerver.decorator import has_request_variables, \
REQ, to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.zcommand import process_zcommands
from zerver.lib.actions import recipient_for_user_profiles, do_update_message_flags, \
compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message, do_delete_messages, \
do_mark_all_as_read, do_mark_stream_messages_as_read, \
get_user_info_for_message_updates, check_schedule_message
from zerver.lib.addressee import get_user_profiles, get_user_profiles_by_ids
from zerver.lib.queue import queue_json_publish
from zerver.lib.message import (
access_message,
messages_for_ids,
render_markdown,
get_first_visible_message_id,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.streams import access_stream_by_id, can_access_stream_history_by_name
from zerver.lib.timestamp import datetime_to_timestamp, convert_to_UTC
from zerver.lib.timezone import get_timezone
from zerver.lib.topic import (
topic_column_sa,
topic_match_sa,
user_message_exists_for_topic,
DB_TOPIC_NAME,
LEGACY_PREV_TOPIC,
MATCH_TOPIC,
REQ_topic,
)
from zerver.lib.topic_mutes import exclude_topic_mutes
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool, check_string_or_int_list
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import Message, UserProfile, Stream, Subscription, Client,\
Realm, RealmDomain, Recipient, UserMessage, bulk_get_recipients, get_personal_recipient, \
get_stream, email_to_domain, get_realm, get_active_streams, \
get_user_including_cross_realm, get_stream_recipient
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, ColumnElement, table
from dateutil.parser import parse as dateparser
import re
import ujson
import datetime
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000
MAX_MESSAGES_PER_FETCH = 5000
class BadNarrowOperator(JsonableError):
code = ErrorCode.BAD_NARROW
data_fields = ['desc']
def __init__(self, desc: str) -> None:
self.desc = desc # type: str
@staticmethod
def msg_format() -> str:
return _('Invalid narrow operator: {desc}')
# TODO: Should be Select, but sqlalchemy stubs are busted
Query = Any
# TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
ConditionTransform = Any
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder:
'''
Build up a SQLAlchemy query to find messages matching a narrow.
'''
# This class has an important security invariant:
#
# None of these methods ever *add* messages to a query's result.
#
# That is, the `add_term` method, and its helpers the `by_*` methods,
# are passed a Query object representing a query for messages; they may
# call some methods on it, and then they return a resulting Query
# object. Things these methods may do to the queries they handle
# include
# * add conditions to filter out rows (i.e., messages), with `query.where`
# * add columns for more information on the same message, with `query.column`
# * add a join for more information on the same message
#
# Things they may not do include
# * anything that would pull in additional rows, or information on
# other messages.
def __init__(self, user_profile: UserProfile, msg_id_column: str) -> None:
self.user_profile = user_profile
self.msg_id_column = msg_id_column
self.user_realm = user_profile.realm
def add_term(self, query: Query, term: Dict[str, Any]) -> Query:
"""
Extend the given query to one narrowed by the given term, and return the result.
This method satisfies an important security property: the returned
query never includes a message that the given query didn't. In
particular, if the given query will only find messages that a given
user can legitimately see, then so will the returned query.
"""
# To maintain the security pr | operty, we hold all the `by_*`
# methods to the same criterion. See the class's block comment
# for details.
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.re | place('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand == 'private':
cond = column("flags").op("&")(UserMessage.flags.is_private.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'unread':
cond = column("flags").op("&")(UserMessage.flags.read.mask) == 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned':
cond1 = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
cond2 = column("flags").op("&")(UserMessage.flags.wildcard_mentioned.mask) != 0
cond = or_(cond1, cond2)
return query.where(maybe_negate(cond))
elif operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.has_alert_word.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern: str) -> str:
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, '\u03bb' to '\\\u03bb'. This function will correctly escape
them for postgres, '\u03bb' to '\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
|
DragonRoman/rhevm-utils | monitoring/rhev-nagios-table-host-mem.py | Python | gpl-3.0 | 2,283 | 0.003504 | #!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez (Pablo.Iranzo@redhat.com)
#
# Description: Script for monitoring host Memory status and VM's rhevm-sdk
# api and produce NAGIOS valid output
#
# Requires rhevm-sdk to work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software | Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import optparse
from ovirtsdk.xml import params
description = | """
RHEV-nagios-table-host-mem output is a script for querying RHEVM via API to get host status
It's goal is to output a table of host/vm status for simple monitoring via external utilities
"""
# Option parsing
p = optparse.OptionParser("rhev-nagios-table-host-mem.py [arguments]", description=description)
p.add_option('-v', "--verbosity", dest="verbosity", help="Show messages while running", metavar='[0-n]', default=0,
type='int')
p.add_option("--host", dest="host", help="Show messages while running", metavar='host')
p.add_option("-t", "--table", dest="table", help="Input file in CSV format", metavar='table')
(options, args) = p.parse_args()
# MAIN PROGRAM
if not options.host:
print("Host not defined, exiting")
sys.exit(1)
if not options.table:
print("CSV table not defined, exiting")
sys.exit(1)
try:
f = file(options.table) # fichero a procesar
except:
print("Problem opening the file %s" % options.table)
sys.exit(1)
# NAGIOS PRIOS:
# 0 -> ok
# 1 -> warning
# 2 -> critical
# 3 -> unknown
# By default, return unknown
# TYPE;HOST;STATE;CPU;MEM
# host;rhev01.lab.local;up;16;0.0
for line in f:
if line.split(";")[0] == "host":
if line.split(";")[1] == options.host:
usage = int(line.split(";")[4])
retorno = 3
if usage >= 90:
retorno = 1
if usage >= 95:
retorno = 2
else:
retorno = 0
print(usage)
sys.exit(retorno) |
django-json-api/rest_framework_ember | rest_framework_json_api/django_filters/backends.py | Python | bsd-2-clause | 5,902 | 0.003219 | import re
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.exceptions import ValidationError
from rest_framework.settings import api_settings
from rest_framework_json_api.utils import undo_format_field_name
class DjangoFilterBackend(DjangoFilterBackend):
"""
A Django-style ORM filter implementation, using `django-filter`.
This is not part of the JSON:API standard per-se, other than the requirement
to use the `filter` keyword: This is an optional implementation of style of
filtering in which each filter is an ORM expression as implemented by
DjangoFilterBackend and seems to be in alignment with an interpretation of
https://jsonapi.org/recommendations/#filtering, including relationship
chaining. It also returns a 400 error for invalid filters.
Filters can be:
- A resource field
equality test:
``?filter[qty]=123``
- Apply other
https://docs.djangoproject.com/en/stable/ref/models/querysets/#field-lookups
operators:
``?filter[name.icontains]=bar`` or ``?filter[name.isnull]=true...``
- Membership in
a list of values:
``?filter[name.in]=abc,123,zzz`` (name in ['abc','123','zzz'])
- Filters can be combined
for intersection (AND):
``?filter[qty]=123&filter[name.in]=abc,123,zzz&filter[...]``
- A related resource path
can be used:
``?filter[inventory.item.partNum]=123456`` (where `inventory.item` is the relationship path)
If you are also using rest_framework.filters.SearchFilter you'll want to customize
the name of the query parameter for searching to make sure it doesn't conflict
with a field name defined in the filterset.
The recommended value is: `search_param="filter[search]"` but just make sure it's
`filter[<something>]` to comply with the JSON:API spec requirement to use the filter
keyword. The default is "search" unless overriden but it's used here just to make sure
we don't complain about it being an invalid filter.
"""
search_param = api_settings.SEARCH_PARAM
# Make this regex check for 'filter' as well as 'filter[...]'
# See https://jsonapi.org/format/#document-member-names for allowed characters
# and https://jsonapi.org/format/#document-member-names-reserved-characters for reserved
# characters (for use in paths, lists or as delimiters).
# regex `\w` matches [a-zA-Z0-9_].
# TODO: U+0080 and above allowed but not recommended. Leave them out for now.e
# Also, ' ' (space) is allowed within a member name but not recommended.
filter_regex = re.compile(
r"^filter(?P<ldelim>\[?)(?P<assoc>[\w\.\-]*)(?P<rdelim>\]?$)"
)
def _validate_filter(self, keys, filterset_class):
"""
Check that all the filter[key] are valid.
:param keys: li | st of FilterSet keys
:param filterset_class: :py:class:`django_filters.rest_framework.FilterSet`
:raises ValidationError: if key not in FilterSet keys or no FilterSet.
"""
for k in keys:
if (not filterset_class) or (k not in filterset_class.base_filters):
raise ValidationError(f"invalid filter[{k}]")
def get_f | ilterset(self, request, queryset, view):
"""
Sometimes there's no `filterset_class` defined yet the client still
requests a filter. Make sure they see an error too. This means
we have to `get_filterset_kwargs()` even if there's no `filterset_class`.
"""
# TODO: .base_filters vs. .filters attr (not always present)
filterset_class = self.get_filterset_class(view, queryset)
kwargs = self.get_filterset_kwargs(request, queryset, view)
self._validate_filter(kwargs.pop("filter_keys"), filterset_class)
if filterset_class is None:
return None
return filterset_class(**kwargs)
def get_filterset_kwargs(self, request, queryset, view):
"""
Turns filter[<field>]=<value> into <field>=<value> which is what
DjangoFilterBackend expects
:raises ValidationError: for bad filter syntax
"""
filter_keys = []
# rewrite filter[field] query params to make DjangoFilterBackend work.
data = request.query_params.copy()
for qp, val in request.query_params.lists():
m = self.filter_regex.match(qp)
if m and (
not m.groupdict()["assoc"]
or m.groupdict()["ldelim"] != "["
or m.groupdict()["rdelim"] != "]"
):
raise ValidationError(f"invalid query parameter: {qp}")
if m and qp != self.search_param:
if not all(val):
raise ValidationError(f"missing value for query parameter {qp}")
# convert JSON:API relationship path to Django ORM's __ notation
key = m.groupdict()["assoc"].replace(".", "__")
key = undo_format_field_name(key)
data.setlist(key, val)
filter_keys.append(key)
del data[qp]
return {
"data": data,
"queryset": queryset,
"request": request,
"filter_keys": filter_keys,
}
def get_schema_operation_parameters(self, view):
"""
Convert backend filter `name` to JSON:API-style `filter[name]`.
For filters that are relationship paths, rewrite ORM-style `__` to our preferred `.`.
For example: `blog__name__contains` becomes `filter[blog.name.contains]`.
This is basically the reverse of `get_filterset_kwargs` above.
"""
result = super().get_schema_operation_parameters(view)
for res in result:
if "name" in res:
res["name"] = "filter[{}]".format(res["name"]).replace("__", ".")
return result
|
texttheater/produce | t/test_pretend_up_to_date.py | Python | mit | 763 | 0.003932 | from prodtest import ProduceTestCase
class PretendUpToDateTest(ProduceTestCase):
def test_pretend_up_to_date(self):
| # a
# |
# b
# / \
# c d
normal = lambda: self.produce('a')
pretending = lambda: self.produce('a', **{'-u': | 'b'})
self.assertDirectoryContents(('produce.ini',))
self.assertUpdates((), normal, ('a', 'b', 'c', 'd'), ())
self.assertUpdates(('c',), normal, ('a', 'b'), ('c', 'd'))
self.assertUpdates(('c',), pretending, (), ('a', 'b', 'c', 'd'))
self.assertUpdates((), normal, ('a', 'b'), ('c', 'd'))
self.assertUpdates(('b',), normal, ('a',), ('b', 'c', 'd'))
self.assertUpdates(('b',), pretending, ('a',), ('b', 'c', 'd'))
|
firebitsbr/ZCR-Shellcoder | lib/encoder/linux_x86/xor_random.py | Python | gpl-3.0 | 2,327 | 0.037387 | #!/usr/bin/env python
'''
ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
import random,binascii,string
chars = string.digits + string.ascii_letters
def start(shellcode,job):
if 'chmod(' in job:
t = True
eax = str('0x0f')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if '00' not in eax_1 and '00' not in eax_2:
t = False
eax = 'push $%s'%(str(eax))
eax_xor = 'push $0x%s\npop %%eax\npush $0x%s\npop %%ebx\nxor %%eax,%%ebx\npush %%ebx\n'%(eax_1,eax_2)
shellcode = shellcode.replace(eax,eax_xor)
ecx = str(shellcode.rsplit('\n')[8])
ecx_value = str(shellcode.rsplit('\n')[8].rsplit()[1][1:])
t = True
while t:
ecx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ecx_2 = "%x" % (int(ecx_value, 16) ^ int(ecx_1, 16))
if '00' not in ecx_1 and '00' not in ecx_2:
t = False
ecx_xor = 'push $0x%s\npop %%ebx\npush $0x%s\npop %%ecx\nxor %%ecx,%%ebx\npush %%ebx\n_z3r0d4y_\n'%(str(ecx_1),str(ecx_2))
shellcode = shellcode.replace(ecx,ecx_xor)
n = 0
start = ''
middle = ''
end = ''
add = 0
for l in shellcode.rsplit('\n'):
n += 1
if add is 0:
if '_z3r0d4y_' not in l:
start += l + '\n'
else:
add = 1
if add is 1:
if '_z3r0d4y_' not in l:
if '%esp,%ebx' not in l:
middle += l + '\n'
else:
add = 2
if add is 2:
end += l + '\n'
for l in middle.rsplit('\n'):
if 'push $0x' in l:
ebx = l.rsplit()[1][1:]
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(ebx, 16) ^ int(ebx_1, 16))
command = 'push $0x%s\npop %%ebx\npush $0x%s\npop %%edx\nxor %%ebx,%%edx\npush %%edx'%(str(ebx_1),str(ebx_2))
middle = middle.replace | (l,command)
shellcode = start + middle + end
if 'd | ir_create(' in job:
shellcode = 'N' + shellcode
if 'download_execute(' in job:
shellcode = 'N' + shellcode
if 'download(' in job:
shellcode = 'N' + shellcode
if 'exec(' in job:
shellcode = 'N' + shellcode
if 'file_create(' in job:
shellcode = 'N' + shellcode
if 'script_executor(' in job:
shellcode = 'N' + shellcode
if 'system(' in job:
shellcode = 'N' + shellcode
if 'write(' in job:
shellcode = 'N' + shellcode
return shellcode
|
jobliz/solid-state-kinetics | ssk/models/theoretical.py | Python | mit | 1,630 | 0.025767 | from __future__ import division
import numpy as np
# Integral forms g(alpha) of common solid-state kinetic models, where
# `a` is the conversion fraction alpha (scalar or numpy array, 0 < a < 1).
# Naming convention: P = power law, A = Avrami-Erofeev, B1 = Prout-Tompkins,
# R = contracting geometry, D = diffusion, F = reaction order.
def iP2(a):
    # Power law: g(a) = a^(1/2)
    return a**(1/2)
def iP3(a):
    # Power law: g(a) = a^(1/3)
    return a**(1/3)
def iP4(a):
    # Power law: g(a) = a^(1/4)
    return a**(1/4)
def iPN(a, n=None):
    # Generalized power law: g(a) = a^(1/n); `n` must be supplied.
    return a**(1/n)
def iA2(a):
    # Avrami-Erofeev, n=2
    return (-np.log(1-a))**(1/2)
def iA3(a):
    # Avrami-Erofeev, n=3
    return (-np.log(1-a))**(1/3)
def iA4(a):
    # Avrami-Erofeev, n=4
    return (-np.log(1-a))**(1/4)
def iAN(a, n=None):
    # Generalized Avrami-Erofeev; `n` must be supplied.
    return (-np.log(1-a))**(1/n)
def iB1(a):
    # Prout-Tompkins (autocatalytic)
    return np.log(a/(1-a))
def iR2(a):
    # Contracting area
    return 1 - (1 - a)**(1/2)
def iR3(a):
    # Contracting volume
    return 1 - (1 - a)**(1/3)
def iD1(a):
    # 1-D diffusion
    return a**2
def iD2(a):
    # 2-D diffusion
    return ((1-a) * np.log(1-a)) + a
def iD3(a):
    # 3-D diffusion (Jander)
    return (1 - (1-a)**(1/3)) ** 2
def iD4(a):
    # 3-D diffusion (Ginstling-Brounshtein)
    return 1 - (2*a/3) - (1-a)**(2/3)
def iF1(a):
    # First order
    return -np.log(1-a)
def iF2(a):
    # Second order
    return (1-a)**-1 - 1
def iF3(a):
    # Third order
    return 0.5 * ((1-a)**-2 - 1)
# Differential forms f(alpha) = 1/g'(alpha) corresponding to the integral
# models above; `a` is the conversion fraction alpha.
def dP2(a):
    return 2*a**(1/2)
def dP3(a):
    return 3*a**(2/3)
def dP4(a):
    return 4*a**(3/4)
def dA2(a):
    return 2 * (1-a) * (-np.log(1-a))**(1/2)
def dA3(a):
    return 3 * (1-a) * (-np.log(1-a))**(2/3)
def dA4(a):
    return 4 * (1-a) * (-np.log(1-a))**(3/4)
def dB1(a):
    return a * (1-a)
def dR2(a):
    return 2 * (1-a)**(1/2)
def dR3(a):
    return 3 * (1-a)**(2/3)
def dD1(a):
    return 0.5 * a**-1
def dD2(a):
    return (-np.log(1-a))**-1
# In doubt for Combined Kinetic Analysis...
def dD3_old(a):
    # Legacy variant kept for reference; note it is identical to dR3 and
    # is NOT 1/iD3'(a) -- see dD3 below for the consistent form.
    return 3 * (1-a)**(2.0/3)
def dD3(a):
    # Jander 3-D diffusion: f(a) = 3(1-a)^(2/3) / (2 * (1 - (1-a)^(1/3)))
    t1 = 3 * (1-a)**(2.0/3)
    t2 = 2 * (1 - (1-a)**(1.0/3))
    return t1 / t2
def dD4(a):
    """Ginstling-Brounshtein (D4) differential form f(a).

    Every other dXX function in this module satisfies
    f(a) = 1 / g'(a) for the matching integral model iXX.  For
    g(a) = 1 - 2a/3 - (1-a)^(2/3) (see iD4) that gives
    f(a) = (3/2) * ((1-a)^(-1/3) - 1)^(-1).

    Bug fix: the previous implementation returned
    (3/2) * ((1-a)^(-1/3) - 1), i.e. the reciprocal of the correct
    rate expression, breaking consistency with dD1-dD3.
    """
    return (3/2) * ((1 - a)**(-1/3) - 1)**-1
def dF1(a):
    # First order: f(a) = 1 - a
    return (1-a)
def dF2(a):
    # Second order: f(a) = (1 - a)^2
    return (1-a)**2
def dF3(a):
    # Third order: f(a) = (1 - a)^3
    return (1-a)**3
|
UB-Heidelberg/UBHD-OMPArthistorikum | languages/plural-ru.py | Python | gpl-3.0 | 743 | 0.001869 | #!/usr/bin/env python
{
    # web2py Russian plural-forms table.  Key is the form used with counts
    # ending in 1 (except 11); value[0] is the "few" form (counts ending in
    # 2-4, except 12-14), value[1] is the "many" form (everything else).
    # "singular form (0)": ["first plural form (1)", "second plural form (2)", ...],
    'выбрана': ['выбраны', 'выбрано'],
    'запись': ['записи', 'записей'],
    'изменена': ['изменены', 'изменено'],
    'строка': ['строки', 'строк'],
    'удалена': ['удалены', 'удалено'],
    'день': ['дня', 'дней'],
    'месяц': ['месяца', 'месяцев'],
    'неделю': ['недели', 'недель'],
    'год': ['года', 'лет'],
    'час': ['часа', 'часов'],
    'минуту': ['минуты', 'минут'],
    'секунду': ['секунды', 'секунд'],
}
|
hastef88/andes | modules/andes-core/systests/etc/bin/fail.py | Python | apache-2.0 | 3,242 | 0.005244 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import datetime
from optparse import OptionParser
BASE_CMD = "mvn -Dskip.python.test=true %s test"
def main():
    """Run the maven test suite in a loop, archiving artifacts of failing runs.

    A run is considered failed when "BUILD SUCCESSFUL" does not appear in
    its combined stdout/stderr.  For each failure, the captured output plus
    all surefire-reports directories are bundled into
    test-failures-<iteration>.tar.gz.  With -t/--test only the named tests
    run; with -c/--continuous the loop keeps going after failures instead
    of stopping at the first one.

    NOTE(review): Python-2-only code -- ``os.popen4`` and the ``file``
    builtin were removed in Python 3.
    """
    parser = OptionParser()
    parser.add_option("-t", "--test", dest="test",
                      action="store", type="string",
                      help="run specific tests")
    parser.add_option("-c", "--continuous", dest="continuous",
                      action="store_true", default=False,
                      help="run tests after failures, don't stop")
    (options, args) = parser.parse_args()
    # Determine command to run: restrict to one test if -t was given.
    if (options.test != None):
        cmd = (BASE_CMD % ("-Dtest="+options.test))
    else:
        cmd = (BASE_CMD % (""))
    run_forever = options.continuous
    failed_runs = []
    iteration = 0
    # Success marker: absence of this string in the output means failure.
    fail_match = re.compile("BUILD SUCCESSFUL")
    done = False  # NOTE(review): unused variable, never read or updated
    # Loop forever in continuous mode; otherwise stop after first failure.
    while (run_forever or not (len(failed_runs) > 0)):
        iteration = iteration + 1
        if (run_forever):
            extra_text = (", %d failures so far: %s:" % (len(failed_runs), failed_runs))
        else:
            extra_text = ""
        print ("%s Test run %d%s" % (datetime.datetime.today().isoformat(), iteration, extra_text))
        # popen4 merges the child's stdout and stderr into one stream.
        (child_stdin, child_stdout_and_stderr) = os.popen4(cmd)
        output = child_stdout_and_stderr.read()
        child_stdin.close()
        child_stdout_and_stderr.close()
        matches = fail_match.search(output)
        if (matches == None):
            failed_runs.append(iteration)
            output_name = ("test-run-%d.out" % (iteration))
            # Write captured test output to disk.
            test_output = file(output_name, "w")
            test_output.write(output)
            test_output.close()
            # Bundle the test output and surefire reports together.
            find_stdout = os.popen("find . -type d -name surefire-reports")
            surefire_dirs = find_stdout.read().replace('\n', ' ')
            find_stdout.close()
            tarcmd = ("tar -zcf test-failures-%d.tar.gz %s %s" % (iteration, output_name, surefire_dirs))
            tar_stdout = os.popen(tarcmd)
            tar_output = tar_stdout.read()
            # popen().close() returns None on exit status 0, else the status.
            tar_exitstatus = tar_stdout.close()
            print ("Something failed! Check %s" % (output_name))
            if (tar_exitstatus != None):
                print ("tar exited abornmally, aborting\n %s" % (tar_output))
                run_forever = False
if __name__ == "__main__":
main() |
antoniodemora/git-cola | qtpy/__init__.py | Python | gpl-2.0 | 5,141 | 0.000778 | # -*- coding: utf-8 -*-
#
# Copyright © 2009- The Spyder Development Team
# Copyright © 2014-2015 Colin Duquesnoy
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
**QtPy** is a shim over the various Python Qt bindings. It is used to write
Qt-binding-independent libraries or applications.
If one of the APIs has already been imported, then it will be used.
Otherwise, the shim will automatically select the first available API (PyQt5,
PySide2, PyQt4 and finally PySide); in that case, you can force the use of one
specific bindings (e.g. if your application is using one specific bindings and
you need to use library that use QtPy) by setting up the ``QT_API`` environment
variable.
PyQt5
=====
For PyQt5, you don't have to set anything as it will be used automatically::
>>> from qtpy import QtGui, QtWidgets, QtCore
>>> print(QtWidgets.QWidget)
PySide2
======
Set the QT_API environment variable to 'pyside2' before importing other
packages::
>>> import os
>>> os.environ['QT_API'] = 'pyside2'
>>> from qtpy import QtGui, QtWidgets, QtCore
>>> print(QtWidgets.QWidget)
PyQt4
=====
Set the ``QT_API`` environment variable to 'pyqt' before importing any python
package::
>>> import os
>>> os.environ['QT_API'] = 'pyqt'
>>> from qtpy import QtGui, QtWidgets, QtCore
>>> print(QtWidgets.QWidget)
PySide
======
Set the QT_API environment variable to 'pyside' before importing other
packages::
>>> import os
>>> os.environ['QT_API'] = 'pyside'
>>> from qtpy import QtGui, QtWidgets, QtCore
>>> print(QtWidgets.QWidget)
"" | "
import os
import platform
import sys
# Version of QtPy
f | rom ._version import __version__
class PythonQtError(RuntimeError):
    """Raised when no suitable Qt bindings could be selected or imported."""
class PythonQtWarning(Warning):
    """Warning category used when a binding does not implement a feature."""
# Qt API environment variable name
QT_API = 'QT_API'
# Names of the expected PyQt5 api
PYQT5_API = ['pyqt5']
# Names of the expected PyQt4 api
PYQT4_API = [
    'pyqt',   # name used in IPython.qt
    'pyqt4'   # pyqode.qt original name
]
# Names of the expected PySide api
PYSIDE_API = ['pyside']
# Names of the expected PySide2 api
PYSIDE2_API = ['pyside2']
# Detecting if a binding was specified by the user
binding_specified = QT_API in os.environ
# Setting a default value for QT_API
os.environ.setdefault(QT_API, 'pyqt5')
API = os.environ[QT_API].lower()
initial_api = API
# NOTE(review): assert is stripped under `python -O`; raising PythonQtError
# explicitly would be a more robust way to validate QT_API.
assert API in (PYQT5_API + PYQT4_API + PYSIDE_API + PYSIDE2_API)
is_old_pyqt = is_pyqt46 = False
# Optimistically assume PyQt5; the import blocks below flip these flags
# if a fallback binding ends up being used.
PYQT5 = True
PYQT4 = PYSIDE = PYSIDE2 = False
# A binding that is already imported takes precedence over the requested
# API (checked in priority order: PyQt5, PySide2, PyQt4, PySide).
if 'PyQt5' in sys.modules:
    API = initial_api if initial_api in PYQT5_API else 'pyqt5'
elif 'PySide2' in sys.modules:
    API = initial_api if initial_api in PYSIDE2_API else 'pyside2'
elif 'PyQt4' in sys.modules:
    API = initial_api if initial_api in PYQT4_API else 'pyqt4'
elif 'PySide' in sys.modules:
    API = initial_api if initial_api in PYSIDE_API else 'pyside'
# Try to import the selected binding; on ImportError each block rewrites
# API (and QT_API in the environment) so the next block tries the fallback.
# Fallback chain: pyqt5 -> pyside2 -> pyqt4 -> pyside -> PythonQtError.
if API in PYQT5_API:
    try:
        from PyQt5.QtCore import PYQT_VERSION_STR as PYQT_VERSION  # analysis:ignore
        from PyQt5.QtCore import QT_VERSION_STR as QT_VERSION  # analysis:ignore
        PYSIDE_VERSION = None
    except ImportError:
        API = os.environ['QT_API'] = 'pyside2'
if API in PYSIDE2_API:
    try:
        from PySide2 import __version__ as PYSIDE_VERSION  # analysis:ignore
        from PySide2.QtCore import __version__ as QT_VERSION  # analysis:ignore
        PYQT_VERSION = None
        PYQT5 = False
        PYSIDE2 = True
    except ImportError:
        API = os.environ['QT_API'] = 'pyqt'
if API in PYQT4_API:
    try:
        import sip
        # Select sip API v2 so PyQt4 behaves like PyQt5 (QString -> str,
        # QVariant -> native types, etc.).
        try:
            sip.setapi('QString', 2)
            sip.setapi('QVariant', 2)
            sip.setapi('QDate', 2)
            sip.setapi('QDateTime', 2)
            sip.setapi('QTextStream', 2)
            sip.setapi('QTime', 2)
            sip.setapi('QUrl', 2)
        except (AttributeError, ValueError):
            # PyQt < v4.6
            pass
        from PyQt4.Qt import PYQT_VERSION_STR as PYQT_VERSION  # analysis:ignore
        from PyQt4.Qt import QT_VERSION_STR as QT_VERSION  # analysis:ignore
        PYSIDE_VERSION = None
        PYQT5 = False
        PYQT4 = True
    except ImportError:
        API = os.environ['QT_API'] = 'pyside'
    else:
        # Flag old PyQt4 releases so callers can work around missing features.
        is_old_pyqt = PYQT_VERSION.startswith(('4.4', '4.5', '4.6', '4.7'))
        is_pyqt46 = PYQT_VERSION.startswith('4.6')
if API in PYSIDE_API:
    try:
        from PySide import __version__ as PYSIDE_VERSION  # analysis:ignore
        from PySide.QtCore import __version__ as QT_VERSION  # analysis:ignore
        PYQT_VERSION = None
        PYQT5 = PYSIDE2 = False
        PYSIDE = True
    except ImportError:
        raise PythonQtError('No Qt bindings could be found')
# Human-readable name of the binding that was finally selected.
API_NAME = {'pyqt5': 'PyQt5', 'pyqt': 'PyQt4', 'pyqt4': 'PyQt4',
            'pyside': 'PySide', 'pyside2':'PySide2'}[API]
if PYQT4:
    import sip
    try:
        API_NAME += (" (API v{0})".format(sip.getapi('QString')))
    except AttributeError:
        pass
|
PaulWay/insights-core | insights/core/specs.py | Python | apache-2.0 | 5,582 | 0.001971 | import re
import os
import logging
from collections import defaultdict
from insights.config.static import get_config
from insights.config import AnalysisTarget, META_FILE_LIST, CommandSpec
logger = logging.getLogger(__name__)
# NOTE(review): hard-coding FATAL here silences nearly all diagnostics from
# this module for every consumer; log-level policy normally belongs to the
# application, not library code.
logger.setLevel(logging.FATAL)
class SpecMapper(object):
    """
    This class wraps a tarfile-like object with spec mapping of names.

    Maps each symbolic spec name (from the data spec config) to the archive
    member paths whose names match that spec's pattern.

    NOTE(review): Python 2 idioms throughout -- ``dict.iteritems`` and the
    truthiness checks on ``filter(...)`` results assume a list-returning
    ``filter``.
    """

    def __init__(self, tf_object, data_spec_config=None):
        # `tf_object` must expose getnames/isdir/issym/extractfile (the
        # methods used below); typically a tarfile adapter.
        self.tf = tf_object
        self.all_names = [f for f in self.tf.getnames() if self._name_filter(f)]
        # Common leading path shared by all members, e.g. the archive's
        # top-level directory.
        self.root = os.path.commonprefix(self.all_names)
        logger.debug("SpecMapper.root: %s", self.root)
        self.data_spec_config = data_spec_config if data_spec_config else get_config()
        # symbolic name -> list of matching archive paths
        self.symbolic_files = defaultdict(list)
        self.analysis_target = self._determine_analysis_target()
        self.create_symbolic_file_list()

    def _name_filter(self, name):
        # Drop directories and nested tarballs from the working name set.
        return not (self.tf.isdir(name) or name.endswith(".tar.gz"))

    def _get_first_matching(self, pattern):
        # Return the first archive name matching root + pattern anchored at
        # the end, or None when nothing matches.
        for match in filter(
                re.compile(self.root + "?" + pattern + "$").match,
                self.all_names):
            return match

    def _determine_analysis_target(self):
        # Read the analysis_target meta file (if present) and resolve it to
        # an AnalysisTarget; returns None when the file is absent.
        path = self._get_first_matching(META_FILE_LIST["analysis_target"])
        if path:
            section = self.get_content(path, symbolic=False)[0].strip()
            return AnalysisTarget.get(section)

    def _extend_symbolic_files(self, symbolic_name, matches):
        # Append matches (if any) under the given symbolic name.
        if matches:
            self.symbolic_files[symbolic_name].extend(matches)

    def filter_commands(self, files):
        # Yield only names that look like captured command output.
        for f in files:
            if "sos_commands" in f or "insights_commands" in f or "commands/" in f:
                yield f

    def add_files(self, file_map):
        """Match every spec in `file_map` against the archive contents and
        record the winning paths in self.symbolic_files."""
        logger.debug("ROOT: %s", self.root)
        # Map root-relative name -> full archive name for reverse lookup.
        unrooted_map = {
            f.split(self.root)[1]: f
            for f in self.all_names
            if f != self.root
        }
        unrooted_files = set(unrooted_map)
        commands = set(self.filter_commands(unrooted_files))
        non_commands = unrooted_files - commands
        if logger.level == logging.DEBUG:
            logger.debug("\n".join(uf for uf in sorted(unrooted_files)))
        for symbolic_name, spec_group in file_map.iteritems():
            for spec in spec_group.get_all_specs():  # Usually just one item in paths
                is_command = isinstance(spec, CommandSpec)
                # foreman-debug archives contain flat structures of commands
                # that can be confused with other command outputs easily so
                # we'll add a ^ to the beginning of the pattern if it is not an
                # insights archive
                if '/' in spec.get_path() or self.analysis_target is not None:
                    prefix = ''
                else:
                    prefix = '^'
                r = spec.get_regex(prefix=prefix, analysis_target=self.analysis_target)
                # Commands are only matched against command-output names, and
                # vice versa, to avoid cross-category false positives.
                if is_command or "_commands/" in r.pattern:
                    filter_set = commands
                else:
                    filter_set = non_commands
                logger.debug("Pattern: %s", r.pattern)
                matches = filter(r.search, filter_set)
                if matches:
                    matches = [unrooted_map[m] for m in matches]
                    # In order to prevent matching *dumb* symlinks in some
                    # archive formats, we are going to filter out symlinks when
                    # calculating matches for CommandSpecs
                    if is_command:
                        matches = filter(lambda n: not self.tf.issym(n), matches)
                    # filter out directories that match
                    matches = [m for m in matches if not self.tf.isdir(m)]
                    if not matches:
                        continue
                    # In order to prevent accidental duplication when matching
                    # files, we only allow the first matched file to be added
                    # to the working set for non-pattern file specs.
                    if not spec.is_multi_output() and len(matches) > 1:
                        logger.debug("Non multi-output file had multiple matches: %s", matches)
                        self._extend_symbolic_files(symbolic_name, [matches[0]])
                    else:
                        self._extend_symbolic_files(symbolic_name, matches)
                    break  # only add the first matching pattern

    def _add_meta_files(self):
        # Record one archive path (if present) for each known meta file.
        for symbolic_name, suffix in META_FILE_LIST.items():
            archive_path = self._get_first_matching(suffix)
            if archive_path:
                self._extend_symbolic_files(symbolic_name, [archive_path])

    def create_symbolic_file_list(self):
        # Build the full symbolic-name mapping: data specs always, then
        # meta specs via pattern matching (no analysis target) or via the
        # fixed meta-file suffix list (analysis target known).
        self.add_files(self.data_spec_config.get_spec_lists())
        if not self.analysis_target:
            self.add_files(self.data_spec_config.get_meta_specs())
        else:
            self._add_meta_files()

    def get_content(self, path, split=True, symbolic=True, default=""):
        """Returns file content from path, where path is the full pathname inside
        the archive.

        When `symbolic` is True, `path` is a symbolic name and the first
        mapped archive path is used.  Returns `default` when the path is not
        in the archive; returns a list of lines when `split` is True.
        """
        if symbolic:
            path = self.symbolic_files.get(path, [""])[0]
        content = self.tf.extractfile(path) if path in self.all_names else default
        return list(content.splitlines()) if split else content

    def exists(self, path, symbolic=True):
        # Membership test against symbolic names or raw archive names.
        return path in self.symbolic_files if symbolic else path in self.all_names
|
masom/doorbot-api-python | doorbot/views/dashboard/forms/login.py | Python | mit | 333 | 0 | # -*- coding: utf-8 -*-
from wtforms import Form, PasswordField, StringField, validators
class PasswordLoginForm(Form):
    """Email/password login form.

    Bug fix: ``validators.Email`` was passed as the class object rather
    than an instance; WTForms validators must be instantiated (compare
    ``validators.Length(min=4)`` on the password field), so it is now
    called as ``validators.Email()``.
    """
    email = StringField('Email', [validators.Email()])
    password = PasswordField('Password', [validators.Length(min=4)])
class ForgotPasswordForm(Form):
    """Password-reset request form (email address only).

    Bug fix: instantiate ``validators.Email()`` instead of passing the
    class object -- WTForms expects validator instances.
    """
    email = StringField('Email', [validators.Email()])
|
openmash/medsphere-test | select_provider.sikuli/select_provider.py | Python | apache-2.0 | 214 | 0.009346 | doubleClick("1370381210737.png")
click("1370381239650.png")
wheel(Pattern("1 | 370381239650.png").targetOffset(-1,30), WHEEL_DOWN, 7)
click(Pattern("mary_hager.png").targetOffset(97, | -2))
find("1370381955817.png")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.