Dataset schema (one row per sampled source file; the prefix, middle, and suffix columns together hold the file's text):
- repo_name: string (length 5 to 100)
- path: string (length 4 to 231)
- language: string (1 class: Python)
- license: string (15 values)
- size: int64 (6 to 947k)
- score: float64 (0 to 0.34)
- prefix: string (length 0 to 8.16k)
- middle: string (length 3 to 512)
- suffix: string (length 0 to 8.17k)
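A dump with this schema can also be read programmatically. The sketch below is a minimal, hypothetical example of loading such a dataset through the Hugging Face `datasets` library and reassembling one file from its three text columns; the dataset identifier and split name are placeholders, not taken from this document.

```python
# Minimal sketch, assuming the dump follows the schema above and is hosted as a
# Hugging Face dataset. "org/python-fim-corpus" and "train" are placeholders.
from datasets import load_dataset

ds = load_dataset("org/python-fim-corpus", split="train")  # hypothetical id

row = ds[0]
# Repository metadata for the sampled file.
print(row["repo_name"], row["path"], row["language"], row["license"], row["size"], row["score"])

# prefix + middle + suffix concatenate back into the original source file.
source = row["prefix"] + row["middle"] + row["suffix"]
print(source[:200])
```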

snnn/tensorflow | tensorflow/contrib/distribute/__init__.py | Python | apache-2.0 | size 2,737 | score 0.001827
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A distributed computation library for TF.
See [tensorflow/contrib/distribute/README.md](
https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)
for overview and examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.distribute.python.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.contrib.distribute.python.cross_tower_ops import *
from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrategy
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.parameter_server_strategy import ParameterServerStrategy
from tensorflow.contrib.distribute.python.step_fn import *
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.distribute.distribute_coordinator import run_standard_tensorflow_server
from tensorflow.python.training.distribute import *
from tensorflow.python.training.distribution_strategy_context import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'AllReduceCrossTowerOps',
'CollectiveAllReduceStrategy',
'CrossTowerOps',
'DistributeConfig',
'DistributionStrategy',
'MirroredStrategy',
'Monitor',
'OneDeviceStrategy',
'ParameterServerStrategy',
'ReductionToOneDeviceCrossTowerOps',
'Step',
'StandardInputStep',
'StandardSingleLossStep',
'TowerContext',
'TPUStrategy',
'get_cross_tower_context',
'get_distribution_strategy',
'get_loss_reduction',
'get_tower_context',
'has_distribution_strategy',
'require_tower_context',
'run_standard_tensorflow_server',
'UpdateContext',
]
remove_undocumented(__name__, _allowed_symbols)

edx/course-discovery | course_discovery/apps/course_metadata/migrations/0150_curriculum_program.py | Python | agpl-3.0 | size 545 | score 0.001835
# Generated by Django 1.11.15 on 2019-01-29 15:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0149_auto_20190201_1515'),
]
operations = [
migrations.AddField(
model_name='curriculum',
name='program',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='curricula', to='course_metadata.Program'),
),
]

simone-campagna/rubik | rubik/application/rubik.py | Python | apache-2.0 | size 37,260 | score 0.004321
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
import os
import sys
import numpy as np
import logging
import warnings
import itertools
from collections import OrderedDict
from . import log
from . import config
from ..py23 import irange
from ..units import Memory
from ..errors import RubikError, RubikMemoryError, RubikExpressionError
from ..shape import Shape
from ..filename import InputFilename, OutputFilename, InputMode, OutputMode
from ..application.argdict import ResultArgDict, InputArgDict, OutputArgDict
from ..application.arglist import ArgList
from ..application.logo import RUBIK
from ..extractor import Extractor
from ..cubes.utilities import interpolate_filename
from ..visualizer.controller_builder import controller_builder
from ..visualizer.visualizer_builder import visualizer_builder
from .. import conf
from .. import utils
from ..cubes import internals as cubes_internals
from ..cubes import dtypes as cubes_dtypes
from ..cubes import api as cubes_api
class Rubik(object):
def __init__(self):
self.name = "{} {}".format(self.__class__.__name__, conf.VERSION)
self._log_header = "{}: ".format(self.__class__.__name__.lower())
self.config = config.get_config()
self.PRINT = log.get_print()
self.input_filenames = InputArgDict(InputFilename)
self.input_modes = InputArgDict(InputMode, default=InputMode("rb"))
self.input_offsets = InputArgDict(Memory, default=None)
self.input_dtypes = InputArgDict(cubes_api.get_dtype, default=None)
self.input_formats = InputArgDict(str, default=self.config.default_file_format)
self.input_csv_separators = InputArgDict(str, default=conf.FILE_FORMAT_CSV_SEPARATOR)
self.input_text_delimiters = InputArgDict(str, default=conf.FILE_FORMAT_TEXT_DELIMITER)
self.shapes = InputArgDict(Shape)
self.extractors = InputArgDict(Extractor, default=None)
self.output_filenames = OutputArgDict(OutputFilename)
self.output_modes = OutputArgDict(OutputMode)
self.output_offsets = OutputArgDict(Memory, default=None)
self.output_dtypes = OutputArgDict(cubes_api.get_dtype, default=None)
self.output_formats = OutputArgDict(str, default=self.config.default_file_format)
self.output_csv_separators = OutputArgDict(str, default=conf.FILE_FORMAT_CSV_SEPARATOR)
self.output_text_delimiters = OutputArgDict(str, default=conf.FILE_FORMAT_TEXT_DELIMITER)
self.output_text_newlines = OutputArgDict(str, default=conf.FILE_FORMAT_TEXT_NEWLINE)
self.output_text_converters = OutputArgDict(str, default=conf.FILE_FORMAT_TEXT_CONVERTER)
self.expressions = ArgList(str)
default_dtype = cubes_api.get_dtype(self.config.default_data_type)
self.logger = log.LOGGER
self.set_accept_bigger_raw_files(False)
self.set_read_threshold_size(self.config.default_read_threshold_size)
self.set_memory_limit(self.config.default_memory_limit)
self.set_split_dimensions(None)
self.set_clobber(self.config.default_clobber)
self.set_visualizer_options()
self.set_print_report(False)
self.set_histogram_options(False)
self.set_dry_run(False)
self.set_dtype(default_dtype)
self.total_read_bytes = 0
self.input_cubes = OrderedDict()
self._used_input_filenames = set()
self._used_output_filenames = set()
self._result = None
self._locals = {}
self._pointless_expressions = []
cubes_internals.set_output_mode_callback(self.notify_output_mode)
self._controller = None
self._stats_infos = []
self._diff_cubes = []
def log(self, level, message):
if level > logging.INFO:
format = "{header}{level}: {message}"
else:
format = "{header}{message}"
self.logger.log(level, format.format(header=self._log_header, level=logging.getLevelName(level), message=message))
def log_debug(self, message):
self.log(logging.DEBUG, message)
def log_info(self, message):
self.log(logging.INFO, message)
def log_warning(self, message):
self.log(logging.WARNING, message)
def log_error(self, message):
self.log(logging.ERROR, message)
def log_critical(self, message):
self.log(logging.CRITICAL, message)
def show_logo(self):
self.PRINT(RUBIK)
def set_dry_run(self, dry_run):
self.dry_run = dry_run
def set_dtype(self, dtype):
cubes_dtypes.set_default_dtype(dtype)
self.dtype = cubes_dtypes.get_default_dtype()
self.dtype_bytes = self.dtype().itemsize
self._cache_dtype_bytes = {self.dtype: self.dtype_bytes}
def set_logger(self, logger, report_logger=None):
if report_logger is None:
report_logger = logger
self.logger = logger
self.report_logger = report_logger
def set_accept_bigger_raw_files(self, accept_bigger_raw_files):
self.accept_bigger_raw_files = accept_bigger_raw_files
def set_read_threshold_size(self, read_threshold_size):
self.read_threshold_size = read_threshold_size
def set_memory_limit(self, memory_limit):
self.memory_limit = memory_limit
self.memory_limit_bytes = memory_limit.get_bytes()
def set_split_dimensions(self, split_dimensions):
if split_dimensions is None:
split_dimensions = ()
self.split_dimensions = split_dimensions
def set_clobber(self, clobber):
self.clobber = clobber
def set_visualizer_options(self,
controller_type=None,
visualizer_type=None,
visualizer_attributes=None,
visualizer_attribute_files=None):
self.controller_type = controller_type
self.visualizer_type = visualizer_type
self.visualizer_attributes = visualizer_attributes
self.visualizer_attribute_files = visualizer_attribute_files
def set_histogram_options(self, bins=10, length=80, range=None, mode=None, decimals=None):
self.histogram_bins = bins
self.histogram_range = range
self.histogram_length = length
self.histogram_mode = mode
fmt_base = "{b}{s_start:{l_start}s}, {s_end:{l_end}s}{k}|{h}|"
fmt_num = fmt_base + "{s_num:>{l_num}s}"
fmt_percentage = fmt_base + "{s_percentage:>{l_percentage}s}"
if mode is None:
mode = 'num'
self.histogram_mode = mode
if self.histogram_mode == "num":
self.histogram_fmt = fmt_num
elif self.histogram_mode == "percentage":
self.histogram_fmt = fmt_percentage
else:
raise ValueError("invalid histogram mode {!r}".format(mode))
self.histogram_decimals = decimals
def set_print_report(self, print_report):
self.print_report = print_report
def run(self):
if self.print_report:
self.impl_print_report()
if not self.dry_run:
self.initialize()
self.evaluate_expressions(*self.expressions)
self.finalize()
return 0
def get_dtype_bytes(self, dtype):
if not dtype in self._cache_dtype_bytes:
self._cache_dtype_bytes[dtype] = dtype().itemsize
return self._cache_dtype_bytes[dtype]
def register_input_cube(self, input_label, input_filename, cube):
self.input_cubes[input_label] = cube
def result(self):
return self._result
def set_labeled_attributes(self, label, attributes):

ngokevin/zamboni | mkt/developers/tests/test_views_versions.py | Python | bsd-3-clause | size 30,543 | score 0
import datetime
import os
from django.conf import settings
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.tests import req_factory_factory
from mkt.comm.models import CommunicationNote
from mkt.constants.applications import DEVICE_TYPES
from mkt.developers.models import ActivityLog, AppLog, PreloadTestPlan
from mkt.developers.views import preload_submit, status
from mkt.files.models import File
from mkt.reviewers.models import EditorSubscription, EscalationQueue
from mkt.site.fixtures import fixture
from mkt.submit.tests.test_views import BasePackagedAppTest
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps.models import AddonUser, Webapp
class TestVersion(amo.tests.TestCase):
fixtures = fixture('group_admin', 'user_999', 'user_admin',
'user_admin_group', 'webapp_337141')
def setUp(self):
self.client.login(username='admin@mozilla.com', password='password')
self.webapp = self.get_webapp()
self.url = self.webapp.get_dev_url('versions')
def get_webapp(self):
return Webapp.objects.get(id=337141)
def test_nav_link(self):
r = self.client.get(self.url)
eq_(pq(r.content)('.edit-addon-nav li.selected a').attr('href'),
self.url)
def test_items(self):
doc = pq(self.client.get(self.url).content)
eq_(doc('#version-status').length, 1)
eq_(doc('#version-list').length, 0)
eq_(doc('#delete-addon').length, 1)
eq_(doc('#modal-delete').length, 1)
eq_(doc('#modal-disable').length, 1)
eq_(doc('#modal-delete-version').length, 0)
def test_delete_link(self):
# Hard "Delete App" link should be visible for only incomplete apps.
self.webapp.update(status=amo.STATUS_NULL)
doc = pq(self.client.get(self.url).content)
eq_(doc('#delete-addon').length, 1)
eq_(doc('#modal-delete').length, 1)
def test_pending(self):
self.webapp.update(status=amo.STATUS_PENDING)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('#version-status .status-pending').length, 1)
eq_(doc('#rejection').length, 0)
def test_public(self):
eq_(self.webapp.status, amo.STATUS_PUBLIC)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('#version-status .status-public').length, 1)
eq_(doc('#rejection').length, 0)
def test_blocked(self):
self.webapp.update(status=amo.STATUS_BLOCKED)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('#version-status .status-blocked').length, 1)
eq_(doc('#rejection').length, 0)
assert 'blocked by a site administrator' in doc.text()
def test_rejected(self):
comments = "oh no you di'nt!!"
amo.set_user(UserProfile.objects.get(username='admin'))
amo.log(amo.LOG.REJECT_VERSION, self.webapp,
self.webapp.current_version, user_id=999,
details={'comments': comments, 'reviewtype': 'pending'})
self.webapp.update(status=amo.STATUS_REJECTED)
amo.tests.make_rated(self.webapp)
(self.webapp.versions.latest()
.all_files[0].update(status=amo.STATUS_DISABLED))
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#version-status')
eq_(doc('.status-rejected').length, 1)
eq_(doc('#rejection').length, 1)
eq_(doc('#rejection blockquote').text(), comments)
my_reply = 'fixed just for u, brah'
r = self.client.post(self.url, {'notes': my_reply,
'resubmit-app': ''})
self.assertRedirects(r, self.url, 302)
webapp = self.get_webapp()
eq_(webapp.status, amo.STATUS_PENDING,
'Reapplied apps should get marked as pending')
eq_(webapp.versions.latest().all_files[0].status, amo.STATUS_PENDING,
'Files for reapplied apps should get marked as pending')
action = amo.LOG.WEBAPP_RESUBMIT
assert AppLog.objects.filter(
addon=webapp, activity_log__action=action.id).exists(), (
"Didn't find `%s` action in logs." % action.short)
def test_no_ratings_no_resubmit(self):
self.webapp.update(status=amo.STATUS_REJECTED)
r = self.client.post(self.url, {'notes': 'lol',
'resubmit-app': ''})
eq_(r.status_code, 403)
self.webapp.content_ratings.create(ratings_body=0, rating=0)
r = self.client.post(self.url, {'notes': 'lol',
'resubmit-app': ''})
self.assert3xx(r, self.webapp.get_dev_url('versions'))
def test_comm_thread_after_resubmission(self):
self.webapp.update(status=amo.STATUS_REJECTED)
amo.tests.make_rated(self.webapp)
amo.set_user(UserProfile.objects.get(username='admin'))
(self.webapp.versions.latest()
.all_files[0].update(status=amo.STATUS_DISABLED))
my_reply = 'no give up'
self.client.post(self.url, {'notes': my_reply,
'resubmit-app': ''})
notes = CommunicationNote.objects.all()
eq_(notes.count(), 1)
eq_(notes[0].body, my_reply)
def test_rejected_packaged(self):
self.webapp.update(is_packaged=True)
comments = "oh no you di'nt!!"
amo.set_user(UserProfile.objects.get(username='admin'))
amo.log(amo.LOG.REJECT_VERSION, self.webapp,
self.webapp.current_version, user_id=999,
details={'comments': comments, 'reviewtype': 'pending'})
self.webapp.update(status=amo.STATUS_REJECTED)
(self.webapp.versions.latest()
.all_files[0].update(status=amo.STATUS_DISABLED))
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#version-status')
eq_(doc('.status-rejected').length, 1)
eq_(doc('#rejection').length, 1)
eq_(doc('#rejection blockquote').text(), comments)
class BaseAddVersionTest(BasePackagedAppTest):
def setUp(self):
super(BaseAddVersionTest, self).setUp()
self.app = amo.tests.app_factory(
complete=True, is_packaged=True, app_domain='app://hy.fr',
version_kw=dict(version='1.0'))
self.url = self.app.get_dev_url('versions')
self.user = UserProfile.objects.get(username='regularuser')
AddonUser.objects.create(user=self.user, addon=self.app)
def _post(self, expected_status=200):
res = self.client.post(self.url, {'upload': self.upload.pk,
'upload-version': ''})
eq_(res.status_code, expected_status)
return res
@mock.patch('mkt.webapps.tasks.update_cached_manifests.delay', new=mock.Mock)
class TestAddVersion(BaseAddVersionTest):
def setUp(self):
super(TestAddVersion, self).setUp()
# Update version to be < 1.0 so we don't throw validation errors.
self.app.current_version.update(version='0.9',
created=self.days_ago(1))
def test_post(self):
self._post(302)
version = self.app.versions.latest()
eq_(version.version, '1.0')
eq_(version.all_files[0].status, amo.STATUS_PENDING)
def test_post_subscribers(self):
# Same test as above, but add a subscriber. We only want to make sure
# we are not causing a traceback because of that.
reviewer = UserProfile.objects.create(email='foo@example.com')
self.grant_permission(reviewer, 'Apps:Review')
EditorSubscription.objects.create(addon=self.app, user=reviewer)
self._post(302)
version = self.app.versions.latest()
eq_(version.version, '1.0')
eq_(version.all_files[0].status, amo.STATUS_PENDING)
def test_unique_version(

mpdehaan/func | func/overlord/base_command.py | Python | gpl-2.0 | size 1,892 | score 0.006871
"""
Copyright 2008, Red Hat, Inc
Adrian Likins <alikins@redhat.com>
also see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import command
import client
from certmaster.config import read_config, BaseConfig, ListOption
from func import commonconfig
DEFAULT_PORT = 51234
DEFAULT_MAPLOC = "/var/lib/func/map"
# FIXME
CONFIG_FILE="/etc/func/minion.conf"
class BaseCommand(command.Command):
""" wrapper class for commands with some convience functions, namely
|
getOverlord() for getting a overlord client api handle"""
interactive = False
verbose=0
port=DEFAULT_PORT
async=False
forks=1
delegate=False
mapfile=DEFAULT_MAPLOC
# temporary work around FIXME
# we really need a way to store what port each minion is
# listening on, though this is probably workable for most
# cases. Though it should probably be a different config
# file, since FuncdConfig is for the minion server, not
config = read_config(CONFIG_FILE, commonconfig.FuncdConfig)
port = config.listen_port
def getOverlord(self):
self.overlord_obj = client.Overlord(self.server_spec,
port=self.port,
interactive=self.interactive,
verbose=self.verbose,
config=self.config,
async=self.async,
nforks=self.forks,
delegate=self.delegate,
mapfile=self.mapfile)

akegan/plasmoids | pplot.py | Python | mit | size 13,037 | score 0.04794
import numpy
import tables
import scipy
import matplotlib
import matplotlib.pyplot as plt
import collections
import scipy.signal
import csv
import egan_vorpalUtil as egan
import os
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import sys
#sys.path.append('/scr_verus/wernerg/vrun/relRecon/relReconRepo')
import calcStats
import vorpalUtil
####switch to function that will read into arrays passed out, then update 1d dists to have stage info
import pplot
def read_plasmoids(width, \
pathName,\
resultsDir, \
simLabel,\
NX_TOTS,\
DELTA,\
LX,\
B_0):
runName="relRecon2p_"
widths=[]
deltapsi=[]
dump=[]
xwidth=[]
time=[]
xpos=[]
stage=[]
wT=[] #top width
wB=[]#bottom width
for location in ["b","t"]:
if location=="b":
filename=resultsDir+runName+"width_"+str(width)+"_extended.csv"
else:
filename=resultsDir+runName+"width_"+str(width)+"T_extended.csv"
print location
print filename
with open(filename,'r') as csvfile:
freader=csv.reader(csvfile,dialect='excel')
head1=next(freader)
columns=next(freader)
for row in freader:
widths.append(float(row[11])/2+float(row[12])/2)
deltapsi.append(abs(float(row[4])-float(row[9])))
xwidth.append(abs(float(row[3])-float(row[10])))
dump.append(row[0])
xpos.append(row[3])
time.append(row[13])
stage.append(row[14])
wT.append(row[11])
wB.append(row[12])
####################################
# Convert arrays to proper form ###
####################################
#print dump
#execfile(pathName+"relReconVars.py")
dump= numpy.array(dump,dtype=int)
wT=numpy.array(wT,dtype=float)
wB=numpy.array(wB,dtype=float)
time=numpy.array(time,dtype=float)
xpos=numpy.array(xpos,dtype=int)
stage=numpy.array(stage,dtype=int)
widthNorm=[abs(float(i))/NX_TOTS[0] for i in widths]
fluxNorm=[abs(float(i))/(B_0*LX) for i in deltapsi]
xwidthNorm=[abs(float(i)/NX_TOTS[0]) for i in xwidth]
return fluxNorm,widthNorm,xpos,dump,time,stage,wT,wB
def dist_2d(width, \
pathName,\
resultsDir,\
simLabel,\
NX_TOTS,\
DELTA,\
LX,\
B_0,\
STAGES=False,\
TIME_PLOT=False,\
SAVE_TOGETHER=False):
print "2D Dist Plotting"
plotDir=resultsDir+"distplots/"
if SAVE_TOGETHER:
plotDir="today_distplots/"
runName="relRecon2p_"
origfilename=pathName+runName+'yeeB_'+str(1)+".h5"
if not os.path.exists(plotDir):
os.makedirs(plotDir)
# print filename
fluxNorm,widthNorm,xpos,dump,time,stage,wT,wB=pplot.read_plasmoids(width,pathName,resultsDir,simLabel,NX_TOTS,DELTA, LX,B_0)
#find points that I want to exclude
shift=[]
# print wT[1]
# print max(wT[1],wB[1])
for i in range(len(wT)):
shift.append(abs(wT[i]-wB[i])/max(wT[i],wB[i]))
# print shift
#########################
# Get shorter time steps
########################
if TIME_PLOT:
simName=pathName+"relRecon2p"
dnByName = "layerDnByLine"
(dnBy, c, dnByLbs, dnByUbs) = vorpalUtil.getFieldArrayHistory(simName, dnByName)
byHistTimes = vorpalUtil.getHistoryTimes(simName, dnByName)[:,0]
(ndim, numPhysCells, startCell, lowerBounds, upperBounds) = vorpalUtil.getSimGridInfo(simName)
#print "same as NX_TOTS?"
#print numPhysCells
dxs = (upperBounds-lowerBounds) / numPhysCells
dx=dxs
dz=dxs
test=dnBy.cumsum(axis=1)*-dx[0]*dz[0]
Az=numpy.array([test[i,:,0]+.1 for i in numpy.arange(len(dnBy))])
#######################
## SET UP PLOT #####
#######################
if STAGES:
plotfile="2Ddist_stages_"+simLabel+"_width_"+str(width)
title="2D Plasmoid Dist-by Stages,"+simLabel+", width="+str(width)
else:
title="2D Plasmoid Dist,"+simLabel+", width="+str(width)
plotfile="2Ddist"+simLabel+"_width_"+str(width)
ylabel="Normalized y half-widths (wy/L)" #default val
xlabel="Normalized enclosed flux (deltaPsi/B_0*L)" #default val
if TIME_PLOT:
plotfile="time_evolution_"+simLabel+"_width_"+str(width)
title="'O' pts in time, "+simLabel+", width="+str(width)
ylabel="Time (1E-6 s)"
xlabel="X-position of 'O' pts"
colors=["b","g","r","c","m","y"]
markerstyle=['.','o','v','^','<','>','s','p','*','d','D']
markerstyle=markerstyle*(max(dump)/len(markerstyle)+1)
colors=colors*(max(dump)/len(colors)+1)
#print colors
ax = host_subplot(111, axes_class=AA.Axes)
y2=ax.twinx() #For 2nd axes
ax.grid(True)
if TIME_PLOT:
ax.set_xlim(0,800)
ax.set_ylim(0,max(time)*1.2)
y2.set_ylabel("Dump number")
y2.set_ylim(0,max(dump)*1.2)
y2.axis["right"].label.set_color('k')
else:
ax.set_xlim(10**-5,1)
ax.set_ylim(10**-5,1)
y2.set_ylabel("Number of Cells")
y2.set_yscale("log")
y2.set_ylim(10**(-5)*NX_TOTS[0],1*NX_TOTS[0])
y2.axis["right"].label.set_color('k')
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
#SET RIGHT AXIS
#plot extra lines
if TIME_PLOT==False:
x=numpy.arange(1,10**5,10**2)
x=x*(10**-5)
ax.loglog([.000005,1],[NX_TOTS[0]**-1,NX_TOTS[0]**-1],linestyle='--',color='k') ##Plot line of cell siz
ax.loglog([.000005,1],[DELTA/LX,DELTA/LX],linestyle='-',color='k') ##Plot line of reconnection layer width
ax.loglog(B_0*DELTA*numpy.log(numpy.cosh(x*NX_TOTS[0]/DELTA)),x,linestyle='-') #plot of background mag profile
##########
# Plot!
##########
shift=numpy.array(shift, dtype=float)
#print max(stage)
#print range(1,max(stage))
test2=numpy.where(shift<.1)
print len(test2[0])
print len(shift)
print float(len(test2[0]))/float(len(shift))
if STAGES:
for i in range(1,max(stage)+1):
stageWhere=numpy.where(numpy.logical_and(stage==i,shift<.05))
# stageWhere=numpy.where(shift<.1)
# print stageWhere
for j in stageWhere[0]:
ax.loglog(fluxNorm[j],widthNorm[j],color=colors[i],marker=markerstyle[i],linestyle="")
ax.plot(10,10, color=colors[i],marker=markerstyle[i],linestyle='',label="Stage "+str(i)) #plot pts for legend
#print "i=%d"%i
#print len(colors)
elif TIME_PLOT:
for i in numpy.arange(len(byHistTimes)):
maxima=egan.findAzMax(Az[i,:],0,width,AVG=False,SHORTSTEP=True,DEBUG=False)
minima=egan.findAzMin(Az[i,:],0,width,AVG=False, SHORTSTEP=True)
#print minima
times=numpy.empty(len(maxima));times.fill(byHistTimes[i]*1E6)
times2=numpy.empty(len(minima));times2.fill(byHistTimes[i]*1E6)
ax.plot(minima,times2,linestyle='',marker='+',markersize=3,color='b')
ax.plot(maxima,times,linestyle='',marker='.',markersize=1.5,color='k')
for i in stageTimes:
#print i*1e6
ax.plot([0,800],[i*1e6, i*1e6],color='k',linestyle='-')
for i in range(1,max(dump)+1):
dumpwhere=numpy.where(numpy.logical_and(dump==i,shift<.01))
#print "len xpos="
#print len(xpos)
for j in dumpwhere[0]:
#print "i=%d,"%i
#print " j=%d"%j
ax.plot(xpos[j],time[j],color=colors[i],marker=markerstyle[i],linestyle='')
ax.plot(1e4,1e4, color=colors[i],marker=markerstyle[i],linestyle='',label=str(i)) #plot pts for legend
else:
for i in range(1,max(dump)+1):
dumpwhere=numpy.where(numpy.logical_and(dump==i,shift<.05))
for j in dumpwhere[0]:
ax.loglog(fluxNorm[j],widthNorm[j],color=colors[i],marker=markerstyle[i],linestyle="")
ax.plot(10,10, color=colors[i],marker=markerstyle[i],linestyle='',label=str(i)) #plot pts for legend later
ax.set_title(title)
plt.legend(loc=4,numpoints=1,fontsize='small',title="Step #")
plt.draw()
plt.savefig(plotDir+plotfile+".eps")
plt.show()
plt.close()
def dist_1d(width, \
pathName,\
resultsDir,\
simLabel,\
NX_TOTS,\

nikitamarchenko/open-kilda | services/topology-engine-rest/app/app/tests/get_links.py | Python | apache-2.0 | size 694 | score 0.004323
"""
get_links.py - get all the links
The path is NB -> TER -> NEO4J.
"""
import requests
from base64 import b64encode
url = "http://localhost:8088/api/v1/links"
headers = {
'Content-Type': 'application/json',
'Authorization': 'Basic %s' % b64encode(b"kilda:kilda").decode("ascii")
}
#
# This models one of the first flows used by ATDD. It sends the request to the NB API so that
# kilda will construct the flow path rules.
# TODO: would be better to pull from the same data, ensure code bases on synchronized..
# at the moment, this is hardcoded here, and ATDD has a separate source.
#
result = requests.get(url, headers=headers)
print result.status_code
print result.text

jameswenzel/billboard-charts | tests/test_greatest_hot_100_singles.py | Python | mit | size 1,451 | score 0
import json
import unittest
import billboard
class TestCurrentGreatestHot100Singles(unittest.TestCase):
"""Checks that the ChartData object for the current Greatest Hot 100
Singles chart has entries and instance variables that are valid and
reasonable. Does not test whether the data is actually correct.
The Greatest Hot 100 Singles chart is special in that there are no past
charts (i.e., there is no historical data).
"""
def setUp(self):
self.chart = billboard.ChartData('greatest-hot-100-singles')
def test_date(self):
self.assertIsNone(self.chart.date) # This chart has no dates
def test_ranks(self):
ranks = list(entry.rank for entry in self.chart)
self.assertEqual(ranks, list(range(1, 101)))
def test_entries_validity(self):
self.assertEqual(len(self.chart), 100)
for entry in self.chart:
self.assertGreater(len(entry.title), 0)
self.assertGreater(len(entry.artist), 0)
self.assertIsNone(entry.peakPos)
self.assertIsNone(entry.lastPos)
self.assertIsNone(entry.weeks)
# Redundant because of test_ranks
self.assertTrue(1 <= entry.rank <= 100)
self.assertIsInstance(entry.isNew, bool)
def test_json(self):
self.assertTrue(json.loads(self.chart.json()))
for entry in self.chart:
self.assertTrue(json.loads(entry.json()))

ParrotPrediction/pyalcs | lcs/agents/xcs/Condition.py | Python | mit | size 616 | score 0
from __future__ import annotations
from .. import ImmutableSequence
class Condition(ImmutableSequence):
def subsumes(self, other) -> bool:
for ci, oi in zip(self, other):
if ci != self.WILDCARD and oi != self.WILDCARD and ci != oi:
return False
return True
@property
def wildcard_number(self) -> int:
return sum(1 for c in self if c == self.WILDCARD)
def is_more_general(self, other: Condition) -> bool:
for ci, oi in zip(self, other):
if ci != self.WILDCARD and ci != oi:
return False
return True

mdhaman/superdesk-core | apps/archive/commands.py | Python | agpl-3.0 | size 19,764 | score 0.004453
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import functools as ft
import logging
import superdesk
from flask import current_app as app
from eve.utils import config, ParsedRequest
from copy import deepcopy
from apps.packages import PackageService
from superdesk.celery_task_utils import get_lock_id
from superdesk.utc import utcnow
from .archive import SOURCE as ARCHIVE
from superdesk.metadata.item import ITEM_STATE, CONTENT_STATE, ITEM_TYPE, CONTENT_TYPE, ASSOCIATIONS, MEDIA_TYPES
from superdesk.lock import lock, unlock, remove_locks
from superdesk.notification import push_notification
from superdesk import get_resource_service
from bson.objectid import ObjectId
from datetime import timedelta
logger = logging.getLogger(__name__)
def log_exeption(fn):
@ft.wraps(fn)
def inner(*a, **kw):
try:
return fn(*a, **kw)
except Exception as e:
logger.exception(e)
return inner
class RemoveExpiredContent(superdesk.Command):
"""Remove expired content from Superdesk.
It removes expired items from production, published and archived collections.
Example:
::
$ python manage.py archive:remove_expired
"""
log_msg = ''
def run(self):
now = utcnow()
self.log_msg = 'Expiry Time: {}.'.format(now)
logger.info('{} Starting to remove expired content at.'.format(self.log_msg))
lock_name = get_lock_id('archive', 'remove_expired')
if not lock(lock_name, expire=610):
logger.info('{} Remove expired content task is already running.'.format(self.log_msg))
return
logger.info('{} Removing expired content for expiry.'.format(self.log_msg))
# both functions should be called, even the first one throw exception,
# so they are wrapped with log_exeption
self._remove_expired_publish_queue_items()
self._remove_expired_items(now)
unlock(lock_name)
push_notification('content:expired')
logger.info('{} Completed remove expired content.'.format(self.log_msg))
remove_locks()
@log_exeption
def _remove_expired_publish_queue_items(self):
expire_interval = app.config.get('PUBLISH_QUEUE_EXPIRY_MINUTES', 0)
if expire_interval:
expire_time = utcnow() - timedelta(minutes=expire_interval)
logger.info('{} Removing publish queue items created before {}'.format(self.log_msg, str(expire_time)))
get_resource_service('publish_queue').delete({'_id': {'$lte': ObjectId.from_datetime(expire_time)}})
@log_exeption
def _remove_expired_items(self, expiry_datetime):
"""Remove the expired items.
:param datetime expiry_datetime: expiry datetime
:param str log_msg: log message to be prefixed
"""
logger.info('{} Starting to remove published expired items.'.format(self.log_msg))
archive_service = get_resource_service(ARCHIVE)
published_service = get_resource_service('published')
items_to_remove = set()
items_to_be_archived = dict()
items_having_issues = dict()
preserve_published_desks = {desk.get(config.ID_FIELD): 1 for desk in
get_resource_service('desks').find(where={'preserve_published_content': True})}
for expired_items in archive_service.get_expired_items(expiry_datetime):
if len(expired_items) == 0:
logger.info('{} No items found to expire.'.format(self.log_msg))
return
# delete spiked items
self.delete_spiked_items(expired_items)
# get killed items
killed_items = {item.get(config.ID_FIELD): item
for item in expired_items
if item.get(ITEM_STATE) in {CONTENT_STATE.KILLED, CONTENT_STATE.RECALLED}}
# check if killed items imported to legal
items_having_issues.update(self.check_if_items_imported_to_legal_archive(killed_items))
# filter out the killed items not imported to legal.
killed_items = {item_id: item for item_id, item in killed_items.items()
if item_id not in items_having_issues}
# Get the not killed and spiked items
not_killed_items = {item.get(config.ID_FIELD): item for item in expired_items
if item.get(ITEM_STATE) not in {
CONTENT_STATE.KILLED, CONTENT_STATE.SPIKED, CONTENT_STATE.RECALLED}}
log_msg_format = "{{'_id': {_id}, 'unique_name': {unique_name}, 'version': {_current_version}, " \
"'expired_on': {expiry}}}."
# Processing items to expire
for item_id, item in not_killed_items.items():
item.setdefault(config.VERSION, 1)
item.setdefault('expiry', expiry_datetime)
item.setdefault('unique_name', '')
expiry_msg = log_msg_format.format(**item)
logger.info('{} Processing expired item. {}'.format(self.log_msg, expiry_msg))
processed_items = dict()
if item_id not in items_to_be_archived and item_id not in items_having_issues and \
self._can_remove_item(item, processed_items, preserve_published_desks):
# item can be archived and removed from the database
logger.info('{} Removing item. {}'.format(self.log_msg, expiry_msg))
logger.info('{} Items to be removed. {}'.format(self.log_msg, processed_items))
issues = self.check_if_items_imported_to_legal_archive(processed_items)
if issues:
items_having_issues.update(processed_items)
else:
items_to_be_archived.update(processed_items)
# all items to expire
items_to_expire = deepcopy(items_to_be_archived)
# check once again in items imported to legal
items_having_issues.update(self.check_if_items_imported_to_legal_archive(items_to_expire))
if items_having_issues:
# remove items not imported to legal
items_to_expire = {item_id: item for item_id, item in items_to_expire.items()
if item_id not in items_having_issues}
# remove items not imported to legal from archived items
items_to_be_archived = {item_id: item for item_id, item in items_to_be_archived.items()
if item_id not in items_having_issues}
# items_to_be_archived might contain killed items
for item_id, item in items_to_be_archived.items():
if item.get(ITEM_STATE) in {CONTENT_STATE.KILLED, CONTENT_STATE.RECALLED}:
killed_items[item_id] = item
# remove killed items from the items_to_be_archived
items_to_be_archived = {item_id: item for item_id, item in items_to_be_archived.items()
if item.get(ITEM_STATE) not in {CONTENT_STATE.KILLED, CONTENT_STATE.RECALLED}}
# add killed items to items to expire
items_to_expire.update(killed_items)
# get the filter conditions
logger.info('{} filter conditions.'.format(self.log_msg))
req = ParsedRequest()
filter_conditions = list(get_resource_service('content_filters').get(req=req,
lookup={'is_archived_filter': True}))
# move to archived collection
logger.info('{} Archiving items.'.format(self.log_msg))
for item_id, item in items_to_be_archived.items():

anhstudios/swganh | data/scripts/templates/object/ship/shared_rebel_gunboat_tier5.py | Python | mit | size 416 | score 0.050481
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_rebel_gunboat_tier5.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result

hackerspace-ntnu/website | news/migrations/0020_auto_20190512_1744.py | Python | mit | size 780 | score 0.002564
# Generated by Django 2.0.10 on 2019-05-12 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0020_auto_20190507_0150'),
('news', '0019_auto_20190512_1608'),
]
operations = [
migrations.AddField(
model_name='article',
name='author',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.Profile'),
),
migrations.AddField(
model_name='event',
name='author',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.Profile'),
),
]

bssrdf/leetcode-7 | solutions/038.Count_and_Say/AC_simulation_n.py | Python | gpl-2.0 | size 737 | score 0.005427
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_simulation_n.py
# Create Date: 2015-07-30 09:41:51
# Usage: AC_simulation_n.py
# Description:
class Solution:
# @param {integer} n
# @return {string}
def countAndSay(self, n):
res = '1'
for _ in xrange(n - 1):
res = self.helper(res)
return res
def helper(self, s):
cnt, res = 1, ''
for i in xrange(len(s) - 1):
if s[i] == s[i + 1]:
cnt += 1
else:
res += str(cnt) + s[i]
cnt = 1
res += str(cnt) + s[-1]
return res
# test
s = Solution()
print(s.countAndSay(4))

kylef/pyppp | test.py | Python | bsd-2-clause | size 362 | score 0
#!/usr/bin/env python
import pyppp
if __name__ == '__main__':
print 'PyPPP Version: %s' % pyppp.__version__
p = pyppp.PyPPP()
print 'PPP Specification Version: %s' % p.__version__
p.generate_random_sequence_key()
print 'PPP Key: %s' % p.key
passcode = 5
print 'Passcode: %d' % passcode
print '%s' % p.retrieve_passcode(passcode)

binoculars/osf.io | osf/models/nodelog.py | Python | apache-2.0 | size 7,092 | score 0.002679
from include import IncludeManager
from django.apps import apps
from django.db import models
from django.utils import timezone
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField
from website.util import api_v2_url
class NodeLog(ObjectIDMixin, BaseModel):
FIELD_ALIASES = {
# TODO: Find a better way
'node': 'node__guids___id',
'user': 'user__guids___id',
'original_node': 'original_node__guids___id'
}
objects = IncludeManager()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants -- NOTE: templates stored in log_templates.mako
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = NODE_LINK_CREATED = 'pointer_created'
POINTER_FORKED = NODE_LINK_FORKED = 'pointer_forked'
POINTER_REMOVED = NODE_LINK_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
MADE_WIKI_PUBLIC = 'made_wiki_public'
MADE_WIKI_PRIVATE = 'made_wiki_private'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
CHECKED_IN = 'checked_in'
CHECKED_OUT = 'checked_out'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
FILE_TAG_ADDED = 'file_tag_added'
FILE_TAG_REMOVED = 'file_tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
CHANGED_LICENSE = 'license_changed'
UPDATED_FIELDS = 'updated_fields'
FILE_MOVED = 'addon_file_moved'
FILE_COPIED = 'addon_file_copied'
FILE_RENAMED = 'addon_file_renamed'
FOLDER_CREATED = 'folder_created'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
COMMENT_RESTORED = 'comment_restored'
CITATION_ADDED = 'citation_added'
CITATION_EDITED = 'citation_edited'
CITATION_REMOVED = 'citation_removed'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
EXTERNAL_IDS_ADDED = 'external_ids_added'
EMBARGO_APPROVED = 'embargo_approved'
EMBARGO_CANCELLED = 'embargo_cancelled'
EMBARGO_COMPLETED = 'embargo_completed'
EMBARGO_INITIATED = 'embargo_initiated'
EMBARGO_TERMINATED = 'embargo_terminated'
RETRACTION_APPROVED = 'retraction_approved'
RETRACTION_CANCELLED = 'retraction_cancelled'
RETRACTION_INITIATED = 'retraction_initiated'
REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
PREREG_REGISTRATION_INITIATED = 'prereg_registration_initiated'
AFFILIATED_INSTITUTION_ADDED = 'affiliated_institution_added'
AFFILIATED_INSTITUTION_REMOVED = 'affiliated_institution_removed'
PREPRINT_INITIATED = 'preprint_initiated'
PREPRINT_FILE_UPDATED = 'preprint_file_updated'
PREPRINT_LICENSE_UPDATED = 'preprint_license_updated'
SUBJECTS_UPDATED = 'subjects_updated'
VIEW_ONLY_LINK_ADDED = 'view_only_link_added'
VIEW_ONLY_LINK_REMOVED = 'view_only_link_removed'
actions = ([CHECKED_IN, CHECKED_OUT, FILE_TAG_REMOVED, FILE_TAG_ADDED, CREATED_FROM, PROJECT_CREATED,
PROJECT_REGISTERED, PROJECT_DELETED, NODE_CREATED, NODE_FORKED, NODE_REMOVED,
NODE_LINK_CREATED, NODE_LINK_FORKED, NODE_LINK_REMOVED, WIKI_UPDATED,
WIKI_DELETED, WIKI_RENAMED, MADE_WIKI_PUBLIC,
MADE_WIKI_PRIVATE, CONTRIB_ADDED, CONTRIB_REMOVED, CONTRIB_REORDERED,
PERMISSIONS_UPDATED, MADE_PRIVATE, MADE_PUBLIC, TAG_ADDED, TAG_REMOVED, EDITED_TITLE,
EDITED_DESCRIPTION, UPDATED_FIELDS, FILE_MOVED, FILE_COPIED,
FOLDER_CREATED, FILE_ADDED, FILE_UPDATED, FILE_REMOVED, FILE_RESTORED, ADDON_ADDED,
ADDON_REMOVED, COMMENT_ADDED, COMMENT_REMOVED, COMMENT_UPDATED, COMMENT_RESTORED,
MADE_CONTRIBUTOR_VISIBLE,
MADE_CONTRIBUTOR_INVISIBLE, EXTERNAL_IDS_ADDED, EMBARGO_APPROVED, EMBARGO_TERMINATED,
EMBARGO_CANCELLED, EMBARGO_COMPLETED, EMBARGO_INITIATED, RETRACTION_APPROVED,
RETRACTION_CANCELLED, RETRACTION_INITIATED, REGISTRATION_APPROVAL_CANCELLED,
REGISTRATION_APPROVAL_INITIATED, REGISTRATION_APPROVAL_APPROVED,
PREREG_REGISTRATION_INITIATED,
CITATION_ADDED, CITATION_EDITED, CITATION_REMOVED,
AFFILIATED_INSTITUTION_ADDED, AFFILIATED_INSTITUTION_REMOVED, PREPRINT_INITIATED,
PREPRINT_FILE_UPDATED, PREPRINT_LICENSE_UPDATED, VIEW_ONLY_LINK_ADDED, VIEW_ONLY_LINK_REMOVED] + list(sum([
config.actions for config in apps.get_app_configs() if config.name.startswith('addons.')
], tuple())))
action_choices = [(action, action.upper()) for action in actions]
date = NonNaiveDateTimeField(db_index=True, null=True, blank=True, default=timezone.now)
# TODO build action choices on the fly with the addon stuff
action = models.CharField(max_length=255, db_index=True) # , choices=action_choices)
params = DateTimeAwareJSONField(default=dict)
should_hide = models.BooleanField(default=False)
user = models.ForeignKey('OSFUser', related_name='logs', db_index=True,
null=True, blank=True, on_delete=models.CASCADE)
foreign_user = models.CharField(max_length=255, null=True, blank=True)
node = models.ForeignKey('AbstractNode', related_name='logs',
db_index=True, null=True, blank=True, on_delete=models.CASCADE)
original_node = models.ForeignKey('AbstractNode', db_index=True,
null=True, blank=True, on_delete=models.CASCADE)
def __unicode__(self):
return ('({self.action!r}, user={self.user!r},, node={self.node!r}, params={self.params!r}) '
'with id {self.id!r}').format(self=self)
class Meta:
ordering = ['-date']
get_latest_by = 'date'
@property
def absolute_api_v2_url(self):
path = '/logs/{}/'.format(self._id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def absolute_url(self):
return self.absolute_api_v2_url
def _natural_key(self):
return self._id

plotly/plotly.py | packages/python/plotly/plotly/validators/funnel/marker/colorbar/_nticks.py | Python | mit | size 466 | score 0.002146
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="funnel.marker.colorbar", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)

monkeypants/MAVProxy | MAVProxy/modules/mavproxy_map/mp_tile.py | Python | gpl-3.0 | size 23,042 | score 0.004427
#!/usr/bin/env python
'''
access satellite map tile database
some functions are based on code from mapUtils.py in gmapcatcher
Andrew Tridgell
May 2012
released under GNU GPL v3 or later
'''
import collections
import errno
import hashlib
import sys
import math
import threading
import os
import string
import time
import cv2
import numpy as np
if sys.version_info.major < 3:
from urllib2 import Request as url_request
from urllib2 import urlopen as url_open
from urllib2 import URLError as url_error
else:
from urllib.request import Request as url_request
from urllib.request import urlopen as url_open
from urllib.error import URLError as url_error
from MAVProxy.modules.lib import mp_util
class TileException(Exception):
'''tile error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
TILE_SERVICES = {
# thanks to http://go2log.com/2011/09/26/fetching-tiles-for-offline-map/
# for the URL mapping info
"GoogleSat" : "https://khm${GOOG_DIGIT}.google.com/kh/v=812&hl=pt-PT&x=${X}&y=${Y}&z=${ZOOM}&s=${GALILEO}",
"GoogleMap" : "https://mt${GOOG_DIGIT}.google.com/vt/lyrs=m@132&hl=pt-PT&x=${X}&y=${Y}&z=${ZOOM}&s=${GALILEO}",
"GoogleTer" : "https://mt${GOOG_DIGIT}.google.com/vt/v=t@132,r@249&hl=pt-PT&x=${X}&y=${Y}&z=${ZOOM}&s=${GALILEO}",
"GoogleChina" : "http://mt${GOOG_DIGIT}.google.cn/vt/lyrs=m@121&hl=en&gl=cn&x=${X}&y=${Y}&z=${ZOOM}&s=${GALILEO}",
"MicrosoftBrMap" : "http://imakm${MS_DIGITBR}.maplink3.com.br/maps.ashx?v=${QUAD}|t&call=2.2.4",
"MicrosoftHyb" : "http://ecn.t${MS_DIGIT}.tiles.virtualearth.net/tiles/h${QUAD}.png?g=441&mkt=en-us&n=z",
"MicrosoftSat" : "http://ecn.t${MS_DIGIT}.tiles.virtualearth.net/tiles/a${QUAD}.png?g=441&mkt=en-us&n=z",
"MicrosoftMap" : "http://ecn.t${MS_DIGIT}.tiles.virtualearth.net/tiles/r${QUAD}.png?g=441&mkt=en-us&n=z",
"MicrosoftTer" : "http://ecn.t${MS_DIGIT}.tiles.virtualearth.net/tiles/r${QUAD}.png?g=441&mkt=en-us&shading=hill&n=z",
"OpenStreetMap" : "http://tile.openstreetmap.org/${ZOOM}/${X}/${Y}.png",
"OSMARender" : "http://tah.openstreetmap.org/Tiles/tile/${ZOOM}/${X}/${Y}.png",
"OpenAerialMap" : "http://tile.openaerialmap.org/tiles/?v=mgm&layer=openaerialmap-900913&x=${X}&y=${Y}&zoom=${OAM_ZOOM}",
"OpenCycleMap" : "http://andy.sandbox.cloudmade.com/tiles/cycle/${ZOOM}/${X}/${Y}.png",
"Eniro DK,NO,SE,FI,PL" : "http://map.eniro.com/geowebcache/service/tms1.0.0/map/${ZOOM}/${X}/${ENI_Y}.png",
"StatKartTopo2" : "http://opencache.statkart.no/gatekeeper/gk/gk.open_gmaps?layers=topo4&zoom=${ZOOM}&x=${X}&y=${Y}"
}
# these are the md5sums of "unavailable" tiles
BLANK_TILES = set(["d16657bbee25d7f15c583f5c5bf23f50",
"c0e76e6e90ff881da047c15dbea380c7",
"d41d8cd98f00b204e9800998ecf8427e"])
# all tiles are 256x256
TILES_WIDTH = 256
TILES_HEIGHT = 256
class TileServiceInfo:
'''a lookup object for the URL templates'''
def __init__(self, x, y, zoom):
self.X = x
self.Y = y
self.Z = zoom
quadcode = ''
for i in range(zoom - 1, -1, -1):
quadcode += str((((((y >> i) & 1) << 1) + ((x >> i) & 1))))
self.ZOOM = zoom
self.QUAD = quadcode
self.OAM_ZOOM = 17 - zoom
self.GOOG_DIGIT = (x + y) & 3
self.MS_DIGITBR = (((y & 1) << 1) + (x & 1)) + 1
self.MS_DIGIT = (((y & 3) << 1) + (x & 1))
self.Y_DIGIT = (x + y + zoom) % 3 + 1
self.GALILEO = "Galileo"[0:(3 * x + y) & 7]
self.ENI_Y = (1<<zoom)-1-y
def __getitem__(self, a):
return str(getattr(self, a))
class TileInfo:
'''description of a tile'''
def __init__(self, tile, zoom, service, offset=(0,0)):
self.tile = tile
(self.x, self.y) = tile
self.zoom = zoom
self.service = service
(self.offsetx, self.offsety) = offset
self.refresh_time()
def key(self):
'''tile cache key'''
return (self.tile, self.zoom, self.service)
def refresh_time(self):
'''reset the request time'''
self.request_time = time.time()
def coord(self, offset=(0,0)):
'''return lat,lon within a tile given (offsetx,offsety)'''
(tilex, tiley) = self.tile
(offsetx, offsety) = offset
world_tiles = 1<<self.zoom
x = ( tilex + 1.0*offsetx/TILES_WIDTH ) / (world_tiles/2.) - 1
y = ( tiley + 1.0*offsety/TILES_HEIGHT) / (world_tiles/2.) - 1
lon = x * 180.0
y = math.exp(-y*2*math.pi)
e = (y-1)/(y+1)
lat = 180.0/math.pi * math.asin(e)
return (lat, lon)
def size(self):
'''return tile size as (width,height) in meters'''
(lat1, lon1) = self.coord((0,0))
(lat2, lon2) = self.coord((TILES_WIDTH,0))
width = mp_util.gps_distance(lat1, lon1, lat2, lon2)
(lat2, lon2) = self.coord((0,TILES_HEIGHT))
height = mp_util.gps_distance(lat1, lon1, lat2, lon2)
return (width,height)
def distance(self, lat, lon):
'''distance of this tile from a given lat/lon'''
(tlat, tlon) = self.coord((TILES_WIDTH/2,TILES_HEIGHT/2))
return mp_util.gps_distance(lat, lon, tlat, tlon)
def path(self):
'''return relative path of tile image'''
(x, y) = self.tile
return os.path.join('%u' % self.zoom,
'%u' % y,
'%u.img' % x)
def url(self, service):
'''return URL for a tile'''
if service not in TILE_SERVICES:
raise TileException('unknown tile service %s' % service)
url = string.Template(TILE_SERVICES[service])
(x,y) = self.tile
tile_info = TileServiceInfo(x, y, self.zoom)
return url.substitute(tile_info)
class TileInfoScaled(TileInfo):
'''information on a tile with scale information and placement'''
def __init__(self, tile, zoom, scale, src, dst, service):
TileInfo.__init__(self, tile, zoom, service)
self.scale = scale
(self.srcx, self.srcy) = src
(self.dstx, self.dsty) = dst
class MPTile:
'''map tile object'''
def __init__(self, cache_path=None, download=True, cache_size=500,
service="MicrosoftSat", tile_delay=0.3, debug=False,
max_zoom=19, refresh_age=30*24*60*60):
if cache_path is None:
try:
cache_path = os.path.join(os.environ['HOME'], '.tilecache')
except Exception:
import tempfile
cache_path = os.path.join(tempfile.gettempdir(), 'MAVtilecache')
if not os.path.exists(cache_path):
mp_util.mkdir_p(cache_path)
self.cache_path = cache_path
self.max_zoom = max_zoom
self.min_zoom = 1
self.download = download
self.cache_size = cache_size
self.tile_delay = tile_delay
self.service = service
self.debug = debug
self.refresh_age = refresh_age
if service not in TILE_SERVICES:
raise TileException('unknown tile service %s' % service)
# _download_pending is a dictionary of TileInfo objects
self._download_pending = {}
self._download_thread = None
self._loading = mp_icon('loading.jpg')
self._unavailable = mp_icon('unavailable.jpg')
try:
self._tile_cache = collections.OrderedDict()
except AttributeError:
# OrderedDicts in python 2.6 come from the ordereddict module
# which is a 3rd party package, not in python2.6 distribution
import ordereddict
self._tile_cache = ordereddict.OrderedDict()
def set_service(self, service):
'''set tile service'''
self.service = service
def get_service(self):
'''get tile service'''
return self.service
def get_service_list(self):
'''return list of available services'''
service_list = TILE_SERVICES.keys()
return sorted(service_list)
def set_download(self, download):
'''set download enable'''
self.download = download
def coord_to

heolin123/day_or_night | mainapp/migrations/0008_auto_20151023_1317.py | Python | gpl-2.0 | size 650 | score 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0007_auto_20151023_1012'),
]
operations = [
migrations.AddField(
model_name='documentclassification',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='documentclassification',
name='ip',
field=models.CharField(default=b'', max_length=100),
),
]

tiborsimko/zenodo | zenodo/modules/deposit/config.py | Python | gpl-2.0 | size 1,816 | score 0
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Records."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext
from speaklater import make_lazy_gettext
_ = make_lazy_gettext(lambda: gettext)
ZENODO_COMMUNITIES_AUTO_ENABLED = True
"""Automatically add and request to communities upon publishing."""
ZENODO_COMMUNITIES_AUTO_REQUEST = ['zenodo', ]
"""Communities which are to be auto-requested upon first publishing."""
ZENODO_COMMUNITIES_REQUEST_IF_GRANTS = ['ecfunded', ]
"""Communities which are
|
to be auto-requested if record has g
|
rants."""
ZENODO_COMMUNITIES_ADD_IF_GRANTS = []
"""Communities which are to be auto-added if record has grants."""
ZENODO_BUCKET_QUOTA_SIZE = 50 * 1000 * 1000 * 1000 # 50 GB
"""Maximum quota per bucket."""
ZENODO_MAX_FILE_SIZE = ZENODO_BUCKET_QUOTA_SIZE
"""Maximum file size accepted."""

manasapte/pants | contrib/python/tests/python/pants_test/contrib/python/checks/tasks/checkstyle/test_new_style_classes.py | Python | apache-2.0 | size 1,026 | score 0.004873
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.contrib.python.checks.tasks.checkstyle.plugin_test_base import \
CheckstylePluginTestBase
from pants.contrib.python.checks.tasks.checkstyle.new_style_classes import NewStyleClasses
class NewStyleClassesTest(CheckstylePluginTestBase):
plugin_type = NewStyleClasses
def test_new_style_classes(self):
statement = """
class OldStyle:
pass
class NewStyle(object):
pass
"""
self.assertNit(statement, 'T606')
statement = """
class NewStyle(OtherThing, ThatThing, WhatAmIDoing):
pass
"""
self.assertNoNits(statement)
statement = """
class OldStyle(): # unspecified mro
pass
"""
self.assertNit(statement, 'T606')

brendan-w/piHud | pihud/Page.py | Python | lgpl-2.1 | size 846 | score 0.004728
from PyQt4 import QtCore, QtGui
class Page(QtGui.QWidget):
""" A container and dropevent catcher for widgets """
def __init__(self, parent, pihud):
super(Page, self).__init__(parent)
self.setAcceptDrops(True)
self.pihud = pihud # normally, this would simply be the parent()
self.widgets = []
self.show()
def dragEnterEvent(self, e):
e.accept()
def dropEvent(self, e):
# get relative position of mouse from mimedata
mime = e.mimeData().text()
x, y = map(int, mime.split(','))
e.source().move(e.pos() - QtCore.QPoint(x, y))
e.setDropAction(QtCore.Qt.MoveAction)
e.accept()
def delete_widget(self, widget):
# refer all deletion requests to the main window (PiHud.py)
self.pihud.delete_widget(self, widget)

sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/translate/__init__.py | Python | mit | size 913 | score 0
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Machine Translation
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>, Tah Wei Hoon <hoon.tw@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Experimental features for machine translation.
These interfaces are prone to change.
"""
from nltk.translate.api import AlignedSent, Alignment, PhraseTable
from nltk.translate.ibm_model import IBMModel
from nltk.translate.ibm1 import IBMModel1
from nltk.translate.ibm2 import IBMModel2
from nltk.translate.ibm3 import IBMModel3
from nltk.translate.ibm4 import IBMModel4
from nltk.translate.ibm5 import IBMModel5
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.ribes_score import sentence_ribes as ribes
from nltk.translate.metrics import alignment_error_rate
from nltk.translate.stack_d
|
ecoder import StackDecoder
|
letuananh/intsem.fx
|
coolisf/dao/textcorpus.py
|
Python
|
mit
| 5,974
| 0.002009
|
# -*- coding: utf-8 -*-
"""
Raw Text Corpus manager
"""
# This code is a part of coolisf library: https://github.com/letuananh/intsem.fx
# :copyright: (c) 2014 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
import os
import json
from texttaglib.chirptext import FileHelper
from texttaglib.chirptext.chio import CSV
# ----------------------------------------------------------------------
# Configuration
# ----------------------------------------------------------------------
class MetaObject(object):
def __init__(self, path):
self.__path = FileHelper.abspath(path)
self.__metadata = {}
@property
def path(self):
return self.__path
def folder_meta_path(self):
return os.path.join(self.path, 'isf.metadata.json')
def file_meta_path(self):
return self.path + '.isf.metadata.json'
@property
def metadata(self):
if not self.__metadata:
if os.path.isfile(self.path):
meta_path = self.file_meta_path()
elif os.path.isdir(self.path):
meta_path = self.folder_meta_path()
else:
raise Exception("Invalid path {}".format(self.path))
# if meta file exists, read it
if os.path.isfile(meta_path):
with open(meta_path, encoding='utf-8') as metafile:
metadata = json.load(metafile)
if metadata:
self.__metadata = metadata
return self.__metadata
def getinfo(self, metadata_key, default=None):
if metadata_key in self.metadata:
return self.metadata[metadata_key]
else:
return default
def write_folder_meta(self, metadata):
with open(self.folder_meta_path(), 'w', encoding='utf-8') as outfile:
metadata_string = json.dumps(metadata, ensure_ascii=False, indent=2)
outfile.write(metadata_string)
def write_file_meta(self, metadata):
with open(self.file_meta_path(), 'w', encoding='utf-8') as outfile:
metadata_string = json.dumps(metadata, ensure_ascii=False, indent=2)
outfile.write(metadata_string)
class RawCollection(MetaObject):
def __init__(self, path, name='', title='', *args, **kwargs):
super().__init__(path, *args, **kwargs)
self.name = name if name else self.getinfo('name', os.path.basename(self.path))
self.title = title if title else self.getinfo('title', '')
def get_corpuses(self):
corpuses = RawCorpusCollection()
corpus_names = self.getinfo('corpuses', FileHelper.get_child_folders(self.path))
for corpus_name in corpus_names:
corpus_path = os.path.join(self.path, corpus_name)
corpuses.add(RawCorpus(corpus_path))
return corpuses
def write_meta(self, name, title, corpuses):
self.write_folder_meta({'name': name, 'title': title, 'corpuses': corpuses})
class RawCorpusCollection():
def __init__(self, corpuses=None):
self.__corpuses = list(corpuses) if corpuses else []
self.__corpus_map = {c.name: c for c in
|
self.__corpuses}
def add(self, corpus):
self.__corpuses.append(corpus)
self.__corpus_map[corpus.name] = corpus
def __iter__(self):
return iter(self.__corpuses)
def __len__(self):
return len(self.__
|
corpuses)
def __getitem__(self, key):
return self.__corpus_map[key]
def __contains__(self, key):
return key in self.__corpus_map
class RawCorpus(MetaObject):
def __init__(self, path, name='', title='', *args, **kwargs):
super().__init__(path, *args, **kwargs)
self.name = name if name else self.getinfo('name', os.path.basename(self.path))
self.title = title if title else self.getinfo('title', '')
self.format = self.getinfo('format', RawDocument.TXT_FORMAT)
def get_documents(self):
docs = []
docnames = self.getinfo('documents', FileHelper.get_child_files(self.path))
for docname in docnames:
docs.append(RawDocument(os.path.join(self.path, docname), format=self.format))
return docs
def write_meta(self, name, title, documents, format='tsv'):
self.write_folder_meta({'name': name, 'title': title, 'documents': documents, 'format': format})
class RawDocument(MetaObject):
TXT_FORMAT = 'txt'
TSV_FORMAT = 'tsv'
def __init__(self, path, name='', title='', format=TXT_FORMAT, *args, **kwargs):
super().__init__(path, *args, **kwargs)
self.name = name if name else self.getinfo('name', FileHelper.getfilename(self.path))
self.title = title if title else self.getinfo('title', '')
if format:
self.format = format
else:
self.format = self.getinfo('format', RawDocument.TXT_FORMAT)
def read_sentences(self):
if self.format == RawDocument.TXT_FORMAT:
sents = enumerate(FileHelper.read(self.path).splitlines())
elif self.format == RawDocument.TSV_FORMAT:
sents = CSV.read_tsv(self.path)
return (RawSentence(ident, text) for ident, text in sents)
class RawDocumentCollection():
def __init__(self, documents=None):
self.__docs = list(documents) if documents else []
        self.__doc_map = {c.name: c for c in self.__docs}
def add(self, document):
self.__docs.append(document)
self.__doc_map[document.name] = document
def __iter__(self):
return iter(self.__docs)
def __len__(self):
return len(self.__docs)
def __getitem__(self, key):
return self.__doc_map[key]
def __contains__(self, key):
return key in self.__doc_map
class RawSentence(object):
def __init__(self, ident, text):
self.ident = ident
self.text = text
def __str__(self):
return "[{}] {}".format(self.ident, self.text)
|
talipovm/terse
|
terse/Tools/file2.py
|
Python
|
mit
| 5,523
| 0.00163
|
import io
import logging
import os
import re
log = logging.getLogger(__name__)
class file2(io.TextIOWrapper):
"""
Provides tools useful for navigating through and finding data
in the output files
"""
def __init__(self, name, mode='r', *args, **kwargs):
# super().__init__(*args, **kwargs)
self.s = ''
self.lnum = 0
self.ssplit = []
self.sstrip = ''
self.fname = name
try:
self.f = open(name, mode)
log.debug('%s was opened for reading' % name)
except FileNotFoundError:
log.error('Cannot open %s for reading' % name)
def __iter__(self):
return self
def __next__(self):
"""
Read the next line.
Return a string and update self.s
"""
self.s = next(self.f) # it will raise StopIteration at EOF
#self.lnum += 1
return self.s
def skip_n(self, n=1):
"""
Skip the next (n-1) lines and store the n-th line as self.s
If n = 0, do nothing
"""
for _ in range(n):
next(self)
def skip_until_string(self, pattern):
while not (pattern in self.s):
next(self)
def skip_until_regex(self, pattern):
z = re.compile(pattern)
while not z.search(self.s):
next(self)
def skip_until(self, pattern, offset=0, regexp=False):
"""
Skip the file lines until a pattern is found.
Pattern might be a single substring or a list of them.
If pattern is a list, then a value returned is an index of the matching string
        If pattern is an empty string, list, or tuple, skip lines until a line
        that is empty or contains only white spaces is reached.
The first matching (and offset) line will be stored as self.s
The search will be started from the current line.
:param pattern: Pattern to search
:param offset: Int; how many lines to skip_n after the pattern is found.
:param regexp: Boolean; treat patterns as regexps.
"""
instance_hit = None
# next(self)
if pattern:
if not isinstance(pattern, (list, tuple)):
ps = [pattern, ]
else:
ps = pattern
while instance_hit is None:
for i,p in enumerate(ps):
if regexp:
hit = re.search(p, self.s)
else:
hit = (p in self.s)
if hit:
instance_hit = i
break
else: # not found
next(self)
else:
# pattern is an empty string
while self.s.strip() != '':
next(self)
self.skip_n(offset)
return instance_hit
def nstrip(self):
"""
Read and strip the next line
Return a string and update self.sstrip
"""
self.sstrip = next(self).strip()
return self.sstrip
def nrstrip(self):
"""
Read and right strip the next line
Return a string and update self.sstrip
"""
self.sstrip = next(self).rstrip()
return self.sstrip
def nsplit(self):
"""
        Read, strip, and split by white spaces the next line
Return a string and update self.sstrip and self.ssplit
"""
self.ssplit = self.nstrip().split()
return self.ssplit
def find_text_block(self, start_match='', start_offset=0, end_match='', end_offset=-1):
"""
reads a textblock from the file
#
#
# Say, start_offset = 3, and end_offset = -1:
# start_match : 0 : -
# Junk :
|
1 : -
# Junk : 2 : -
# Info : 3 : +
# Info : : +
# Info
|
:-1 : +
# end_match : 0 : -
# Other : 1 : -
# Other : 2 : -
#
# In this case, only strings marked by '+' sign will be extracted
# If start_match is not defined, block will start at current position+start_offset
# If end_match is not defined, block will end at an empty string
"""
# Positioning
if start_match:
self.skip_until(start_match)
self.skip_n(start_offset)
# Define criteria for stop
def find_second_match(match, s):
# noinspection PyRedundantParentheses
if (match) and (match in s): return True
if (match == '') and (s.strip() == ''): return True
return False
# Fill array
ss = [self.s.strip(), ]
while not find_second_match(end_match, self.s):
ss.append(self.nstrip())
if end_offset < 1:
ss = ss[0:(len(ss) + end_offset)]
else:
# Add lines after the stop match
for i in range(end_offset):
ss.append(self.nstrip())
return ss
def close(self):
self.f.close()
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
fname = "test-test2.txt"
text_file = open(fname, "w")
text_file.write("""
X
Info0
X
Top1
---
Info1
Info2
---
Top2
Info3
""")
text_file.close()
f = file2(fname)
print(f.find_text_block('Top1', 2, 'Top2', 1))
f.close()
os.remove(fname)
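# Illustrative sketch (not part of the original terse module): skip_until()
# also accepts a list of patterns and returns the index of the alternative that
# matched first, which is handy for branching on whichever marker appears next.
# The file name and markers below are made-up examples.
def _example_skip_until(path='some_output.txt'):
    f = file2(path)
    next(f)  # prime self.s with the first line
    hit = f.skip_until(['Normal termination', 'Error termination'])
    f.close()
    return 'ok' if hit == 0 else 'failed'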
|
CanonicalLtd/landscape-client
|
landscape/client/manager/tests/test_store.py
|
Python
|
gpl-2.0
| 2,217
| 0
|
from landscape.client.tests.helpers import LandscapeTest
from landscape.client.manager.store import ManagerStore
class ManagerStoreTest(LandscapeTest):
def setUp(self):
super(ManagerStoreTest, self).setUp()
self.filename = self.makeFile()
self.store = ManagerStore(self.filename)
self.store.add_graph(1, u"file 1", u"user1")
self.store.set_graph_accumulate(1, 1234, 1.0)
def test_get_unknown_graph(self):
graph = self.store.get_graph(1000)
self.assertIdentical(graph, None)
def test_get_graph(self):
graph = self.store.get_graph(1)
self.assertEqual(graph, (1, u"file 1", u"user1"))
def test_get_graphs(self):
graphs = self.store.get_graphs()
self.assertEqual(graphs, [(1, u"file 1", u"user1")])
def test_get_no_graphs(self):
self.store.remove_graph(1)
graphs = self.store.get_graphs()
self.assertEqual(graphs, [])
def test_add_graph(self):
self.store.add_graph(2, u"file 2", u"user2")
graph = self.store.get_graph(2)
self.assertEqual(graph, (2, u"file 2", u"user2"))
def test_add_update_graph(self):
self.store.add_graph(1, u"file 2", u"user2")
|
graph = self.store.get_graph(1)
self.assertEqual(graph, (1, u"file 2", u"user2"))
def test_remove_graph(self):
se
|
lf.store.remove_graph(1)
graphs = self.store.get_graphs()
self.assertEqual(graphs, [])
def test_remove_unknown_graph(self):
self.store.remove_graph(2)
graphs = self.store.get_graphs()
self.assertEqual(graphs, [(1, u"file 1", u"user1")])
def test_get_accumulate_unknown_graph(self):
accumulate = self.store.get_graph_accumulate(2)
self.assertIdentical(accumulate, None)
def test_set_accumulate_graph(self):
self.store.set_graph_accumulate(2, 1234, 2.0)
accumulate = self.store.get_graph_accumulate(2)
self.assertEqual(accumulate, (2, 1234, 2.0))
def test_update_accumulate_graph(self):
self.store.set_graph_accumulate(1, 4567, 2.0)
accumulate = self.store.get_graph_accumulate(1)
self.assertEqual(accumulate, (1, 4567, 2.0))
|
openstack/octavia
|
octavia/amphorae/backends/agent/api_server/amphora_info.py
|
Python
|
apache-2.0
| 7,469
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import socket
import subprocess
import pyroute2
import webob
from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import util
from octavia.amphorae.backends.utils import network_utils
from octavia.common import constants as consts
from octavia.common import exceptions
class AmphoraInfo(object):
def __init__(self, osutils):
self._osutils = osutils
def compile_amphora_info(self, extend_lvs_driver=None):
extend_body = {}
if extend_lvs_driver:
extend_body = self._get_extend_body_from_lvs_driver(
extend_lvs_driver)
body = {'hostname': socket.gethostname(),
'haproxy_version':
self._get_version_of_installed_package('haproxy'),
'api_version': api_server.VERSION}
if extend_body:
body.update(extend_body)
return webob.Response(json=body)
def compile_amphora_details(self, extend_lvs_driver=None):
haproxy_listener_list = sorted(util.get_listeners())
extend_body = {}
lvs_listener_list = []
if extend_lvs_driver:
lvs_listener_list = util.get_lvs_listeners()
extend_data = self._get_extend_body_from_lvs_driver(
extend_lvs_driver)
lvs_count = self._count_lvs_listener_processes(
extend_lvs_driver,
lvs_listener_list)
extend_body['lvs_listener_process_count'] = lvs_count
extend_body.update(extend_data)
|
meminfo = self._get_meminfo()
cpu = self._cpu()
st = os.statvfs('/')
body = {'hostname': socket.gethostname(),
'haproxy_version':
self._get_version_of_installed_package('haproxy'),
'api_version': api_server.VERSION,
'networks': self._get_networks(),
'active': True,
'haproxy_count':
self._count_hapro
|
xy_processes(haproxy_listener_list),
'cpu': {
'total': cpu['total'],
'user': cpu['user'],
'system': cpu['system'],
'soft_irq': cpu['softirq'], },
'memory': {
'total': meminfo['MemTotal'],
'free': meminfo['MemFree'],
'buffers': meminfo['Buffers'],
'cached': meminfo['Cached'],
'swap_used': meminfo['SwapCached'],
'shared': meminfo['Shmem'],
'slab': meminfo['Slab'], },
'disk': {
'used': (st.f_blocks - st.f_bfree) * st.f_frsize,
'available': st.f_bavail * st.f_frsize},
'load': self._load(),
'topology': consts.TOPOLOGY_SINGLE,
'topology_status': consts.TOPOLOGY_STATUS_OK,
'listeners': sorted(list(
set(haproxy_listener_list + lvs_listener_list)))
if lvs_listener_list else haproxy_listener_list,
'packages': {}}
if extend_body:
body.update(extend_body)
return webob.Response(json=body)
def _get_version_of_installed_package(self, name):
cmd = self._osutils.cmd_get_version_of_installed_package(name)
version = subprocess.check_output(cmd.split())
return version
def _count_haproxy_processes(self, lb_list):
num = 0
for lb_id in lb_list:
if util.is_lb_running(lb_id):
# optional check if it's still running
num += 1
return num
def _count_lvs_listener_processes(self, lvs_driver, listener_list):
num = 0
for listener_id in listener_list:
if lvs_driver.is_listener_running(listener_id):
# optional check if it's still running
num += 1
return num
def _get_extend_body_from_lvs_driver(self, extend_lvs_driver):
extend_info = extend_lvs_driver.get_subscribed_amp_compile_info()
extend_data = {}
for extend in extend_info:
package_version = self._get_version_of_installed_package(extend)
extend_data['%s_version' % extend] = package_version
return extend_data
def _get_meminfo(self):
re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
result = {}
with open('/proc/meminfo', 'r', encoding='utf-8') as meminfo:
for line in meminfo:
match = re_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value = match.groups(['key', 'value'])
result[key] = int(value)
return result
def _cpu(self):
with open('/proc/stat', encoding='utf-8') as f:
cpu = f.readline()
vals = cpu.split(' ')
return {
'user': vals[2],
'nice': vals[3],
'system': vals[4],
'idle': vals[5],
'iowait': vals[6],
'irq': vals[7],
'softirq': vals[8],
'total': sum([int(i) for i in vals[2:]])
}
def _load(self):
with open('/proc/loadavg', encoding='utf-8') as f:
load = f.readline()
vals = load.split(' ')
return vals[:3]
def _get_networks(self):
networks = {}
with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns:
for interface in netns.get_links():
interface_name = None
for item in interface['attrs']:
if (item[0] == 'IFLA_IFNAME' and
not item[1].startswith('eth')):
break
if item[0] == 'IFLA_IFNAME':
interface_name = item[1]
if item[0] == 'IFLA_STATS64':
networks[interface_name] = dict(
network_tx=item[1]['tx_bytes'],
network_rx=item[1]['rx_bytes'])
return networks
def get_interface(self, ip_addr):
try:
interface = network_utils.get_interface_name(
ip_addr, net_ns=consts.AMPHORA_NAMESPACE)
except exceptions.InvalidIPAddress:
return webob.Response(json=dict(message="Invalid IP address"),
status=400)
except exceptions.NotFound:
return webob.Response(
json=dict(message="Error interface not found for IP address"),
status=404)
return webob.Response(json=dict(message='OK', interface=interface),
status=200)
|
CloudServer/cinder
|
cinder/volume/drivers/san/hp/hpmsa_client.py
|
Python
|
apache-2.0
| 931
| 0
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is
|
distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cinder.volume.drivers.dothill import dothill_client
class HPMSAClient(dothill_client.DotHillClient):
def __init__(self, host, login, password, protocol):
super(HPMSAClient, self).__init__(host, login, password,
|
protocol)
|
CDE-UNIBE/qcat
|
apps/accounts/authentication.py
|
Python
|
apache-2.0
| 589
| 0
|
from django.contrib.auth.backends import ModelBackend
from .client import remote_user_client
class WocatCMSAuthenticationBackend(ModelBackend):
"""
Authentication against new (2017) wocat website.
"""
def authenticate(self, username=None, password=None, **kwargs):
"""
Custom authentication. Returns a user if authentication
        is successful.
|
"""
user_data = remote_user_client.remote_login(username, password)
if not user_da
|
ta:
return None
return remote_user_client.get_and_update_django_user(**user_data)
|
salman-bhai/DS-Algo-Handbook
|
Algorithms/Graph_Algorithms/Bellman_Ford/bellman_ford.py
|
Python
|
mit
| 1,400
| 0.042857
|
from sys import maxin
|
t
class BellmanFord( object ):
def __init__( self ):
'''
Constructor
'''
def singleSourceShortestPath( self, weight, source ) :
# auxiliary constants
SIZE = len( weight )
EVE = -1; # to indicate no predecessor
INFINITY = maxint
# declare and initialize pred to EVE and minDist to INFINITY
pred = [EVE] * SIZE
minDist = [INFINITY] * SIZE
# set minDist[source] = 0 because sour
|
ce is 0 distance from itself.
minDist[source] = 0
# relax the edge set V-1 times to find all shortest paths
        for i in range( SIZE - 1 ):
for v in range( SIZE ):
for x in self.adjacency( weight, v ):
if minDist[x] > minDist[v] + weight[v][x]:
minDist[x] = minDist[v] + weight[v][x]
pred[x] = v
# detect cycles if any
for v in range( SIZE ):
for x in self.adjacency( weight, v ):
if minDist[x] > minDist[v] + weight[v][x]:
raise Exception( "Negative cycle found" )
return [pred, minDist]
#=====================================================================
# Retrieve all the neighbors of vertex v.
#=====================================================================
def adjacency( self, G, v ) :
result = []
for x in range( len( G ) ):
if G[v][x] is not None:
result.append( x )
return result;
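# Illustrative usage sketch (not part of the original handbook file): weight is
# an adjacency matrix where weight[u][v] is the cost of edge u -> v and None
# marks a missing edge; the small graph below is only an example.
def _example_bellman_ford():
    weight = [
        [None,    4,    2, None],   # 0 -> 1 (4), 0 -> 2 (2)
        [None, None, None,    3],   # 1 -> 3 (3)
        [None,    1, None,    6],   # 2 -> 1 (1), 2 -> 3 (6)
        [None, None, None, None],
    ]
    pred, minDist = BellmanFord().singleSourceShortestPath(weight, 0)
    return minDist  # shortest distances from vertex 0: [0, 3, 2, 6]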
|
Andrew-McNab-UK/DIRAC
|
AccountingSystem/scripts/dirac-accounting-report-cli.py
|
Python
|
gpl-3.0
| 788
| 0.013959
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-accounting-report-cli
# Author : Adria Casajus
########################################################################
"""
Command line i
|
nterface to DIRAC Accounting ReportGenerator Service.
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.localCfg.addDefaultEntry( "LogLevel", "info" )
Script.setUsageMessage('\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName, ] ) )
Script.parseCommandLine()
from DIRAC.AccountingSystem.Client.ReportCLI import ReportCLI
if __name__=="_
|
_main__":
reli = ReportCLI()
reli.start()
|
radjkarl/dataArtist
|
dataArtist/gui.py
|
Python
|
gpl-3.0
| 20,228
| 0.000395
|
#!/usr/bin/env python
# coding=utf-8
import sys
import os
from qtpy import QtGui, QtWidgets, QtCore
from appbase.MultiWorkspaceWindow import MultiWorkspaceWindow
from appbase.Application import Application
from fancytools.os.PathStr import PathStr
from imgProcessor.reader.qImageToArray import qImageToArray
# from interactiveTutorial.TutorialMenu import TutorialMenu
# OWN
import dataArtist
from dataArtist.input.html2data import html2data
from dataArtist.widgets.preferences \
import PreferencesView, PreferencesImport, PreferencesCommunication
from dataArtist.widgets.Workspace import Workspace
from dataArtist.widgets.UndoRedo import UndoRedo
from dataArtist.widgets.ProgressBar import ProgressBar
from dataArtist.widgets.dialogs.FirstStartDialog import FirstStartDialog
from dataArtist.widgets.GlobalTools import GlobalTools
from dataArtist.widgets.StatusBar import StatusBar
# by default pyqtgraph is still col-major, so:
import pyqtgraph_karl
pyqtgraph_karl.setConfigOptions(imageAxisOrder='row-major')
del pyqtgraph_karl
##########
# to allow to execute py code from a frozen environment
# type e.g. gui.exe -exec print(4+4)
if '-exec' in sys.argv:
try:
exec(sys.argv[-1])
except Exception as err:
input('-exec failed! --> %s' % err)
sys.exit()
##########
MEDIA_FOLDER = PathStr(dataArtist.__file__).dirname().join('media')
HELP_FILE = MEDIA_FOLDER.join('USER_MANUAL.pdf')
def _showActionToolTipInMenu(menu, action):
# show tooltip on the right side of [menu]
    # QMenu normally doesn't allow QActions to show tooltips...
tip = action.toolTip()
p = menu.pos()
p.setX(p.x() + 105)
p.setY(p.y() - 21)
if tip != action.text():
QtWidgets.QToolTip.showText(p, tip)
class Gui(MultiWorkspaceWindow):
'''
The main class to be called to create an instance of dataArtist
'''
def __init__(self, title='dataArtist', workspaceCls=Workspace):
MultiWorkspaceWindow.__init__(self, workspaceCls, title)
s = self.app.session
# cannot resize to px size anymore since there
# are high dpi screens around, therefore rescale relative:
PX_FACTOR = QtWidgets.QApplication.instance().PX_FACTOR = QtGui.QPaintDevice.logicalDpiY(
self) / 96
self.resize(620 * PX_FACTOR, 550 * PX_FACTOR)
# ALLOW DRAGnDROP
self.setAcceptDrops(True)
# INIT CHILD PARTS:
self.dialogs = s.dialogs
self.pref_import = PreferencesImport(self)
self._appendMenubarAndPreferences()
# CONNECT OWN SAVE/RESTORE FUNCTIONS TO THE SESSION:
s.sigSave.connect(self._save)
s.sigRestore.connect(self._restore)
st = StatusBar()
self.setStatusBar(st)
# connect output to status bar:
s.streamOut.message.connect(st.showMessage)
s.streamErr.message.connect(st.showError)
# dict that contains all displays that are unbounded
# from the main window and showed frameless:
self.framelessDisplays = {}
self.addWorkspace()
# PROGRESS BAR:
self.progressBar = ProgressBar(st)
st.setSizeGripEnabled(False)
def isEmpty(self):
return (self.centralWidget().count() == 1
and not self.currentWorkspace().displays())
def _save(self, state):
state['gui'] = self.saveState()
def _restore(self, state):
return self.restoreState(state['gui'])
def saveState(self):
l = {}
i = self.size()
p = self.pos()
l['geometry'] = (p.x(), p.y(), i.width(), i.height())
# l['desktop'] = QtGui.QApplication.desktop().screenNumber(self)
c = self.centralWidget()
l['nWorkspaces'] = c.count()
l['currentWorkspace'] = c.indexOf(self.currentWorkspace())
l['maximized'] = self.isMaximized()
l['fullscreen'] = self.menuBar().ckBox_fullscreen.isChecked()
l['showTools'] = self.menu_toolbars.a_show.isChecked()
# WORKSPACES
sw = l['workspaces'] = {}
for w in self.workspaces():
sw[w.number()] = w.saveState()
l['undoRedo'] = self.undoRedo.saveState()
# GLOBAL TOOLS
tt = []
for t in self.gTools:
tt.append(t.saveState())
l['globalTools'] = tt
return l
def restoreState(self, l):
self.setGeometry(*l['geometry'])
self.menuBar().setFullscreen(l['fullscreen'])
if l['maximized']:
self.showMaximized()
else:
self.showNormal()
# WORKSPACES
c = self.centralWidget()
n = l['nWorkspaces']
# CLOSE OLD:
for w in self.workspaces():
self.closeWorkspace(w)
# ADD NEW:
for _ in range(n - c.count()):
self.addWorkspace()
# RESTORE:
self.showWorkspace(l['currentWorkspace'])
lw = l['workspaces']
for number, w in zip(lw, self.workspaces()):
w.restoreState(lw[number])
self.menu_toolbars.a_show.setChecked(l['showTools'])
self._toggleShowSelectedToolbars(l['showTools'])
self.undoRedo.restoreState(l['undoRedo'])
# GLOBAL TOOLS
for t, tt in zip(self.gTools, l['globalTools']):
t.restoreState(tt)
def addFilePath(self, filepath):
'''
        create a new display for one or more given file paths
INPUT: "Path/To/File.txt"
'''
if filepath:
return self.currentWorkspace().addFiles([PathStr(filepath)])
def openFile(self):
'''
        create a new display for one or more files
'''
filepaths = self.dialogs.getOpenFileNames()
if filepaths:
return self.currentWorkspace().addFiles(filepaths)
def changeActiveDisplay(self, arg):
'''
change the active display
INPUT: "[displaynumber]"
e.g.:
"4" --> make display 4 active display
'''
number = int(arg)
self.currentWorkspace().changeDisplayNumber(number)
def showDisplay(self, arg):
'''
show display as frame-less window
INPUT: "[displaynumber], [(x,y,width,height)]'
e.g.:
"4, (0,0,100,200)" --> show display 4 at position 0,0 with size 100,200
"3, False" --> hide display 3
'''
displaynumber, pos = eval(arg)
if not pos:
return self.hideDisplay(displaynumber)
else:
(x, y, width, height) = pos
d = self.framelessDisplays.get(displaynumber, None)
if not d:
try:
d = self.currentWorkspace().displaydict()[displaynumber]
d.release()
d.hideTitleBar()
d.setWindowFlags(QtCore.Qt.FramelessWindowHint |
QtCore.Qt.WindowStaysOnTopHint)
self.framelessDisplays[displaynumber] = d
except KeyError:
print('displaynumber [%s] not known' % displaynumber)
return
d.move(QtCore.QPoint(x, y))
d.resize(width, height)
d.show()
def hideDisplay
|
(self, displaynumber):
'''
close frame-less dis
|
play
[displaynumber]
'''
d = self.framelessDisplays.pop(displaynumber, None)
if d:
d.showTitleBar()
d.embedd()
else:
print('displaynumber [%s] not known' % displaynumber)
def runScriptFromName(self, name):
'''
run an open script, identified by name
in the current display
INPUT: "[scriptname]"
e.g.:
'New' --> run a script, called 'New' in the current active display
'''
d = self.currentWorkspace().getCurrentDisplay()
w = d.tab.automation.tabs.widgetByName(name.decode("utf-8"))
if not w:
raise Exception(
                "couldn't find script [%s] in the current display" % name)
w.thread.start()
def _appendMenubarAndPreferences(self):
m = self.menuBar()
m.aboutWidget.setModule(dataArtist)
m.aboutWidget.setInstitutio
|
light940929/omics_api
|
bio/tools.py
|
Python
|
mit
| 2,096
| 0.000477
|
from oauth2_provider.settings import oauth2_settings
from oauthlib.common import generate_token
from django.http
|
import JsonResponse
from oauth2_provider.models import AccessToken, Application, RefreshToken
from django.utils.timezone import now, timedelta
def get_token_json(access_token):
"""
Takes an AccessToken instance as an argument
and returns a Jso
|
nResponse instance from that
AccessToken
"""
token = {
'access_token': access_token.token,
'expires_in': oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
'token_type': 'Bearer',
'refresh_token': access_token.refresh_token.token,
'scope': access_token.scope
}
return JsonResponse(token)
def get_access_token(user):
"""
    Takes a user instance and returns an access_token as a JsonResponse
instance.
"""
# our oauth2 app
app = Application.objects.get(name="omics")
# We delete the old access_token and refresh_token
try:
old_access_token = AccessToken.objects.get(
user=user, application=app)
old_refresh_token = RefreshToken.objects.get(
user=user, access_token=old_access_token
)
except:
pass
else:
old_access_token.delete()
old_refresh_token.delete()
# we generate an access token
token = generate_token()
# we generate a refresh token
refresh_token = generate_token()
expires = now() + timedelta(seconds=oauth2_settings.
ACCESS_TOKEN_EXPIRE_SECONDS)
scope = "read write"
# we create the access token
access_token = AccessToken.objects.\
create(user=user,
application=app,
expires=expires,
token=token,
scope=scope)
# we create the refresh token
RefreshToken.objects.\
create(user=user,
application=app,
token=refresh_token,
access_token=access_token)
# we call get_token_json and returns the access token as json
return get_token_json(access_token)
|
aertoria/MiscCode
|
stack.py
|
Python
|
apache-2.0
| 585
| 0.080342
|
#!/usr/bin/python
class Solution:
# @param {string} s
# @return {boolean}
def isValid(self, s):
slist=' '.join(s).split(' ')
print slist
stack=[]
fo
|
r item in slist:
if item in ('[','{','('):
stack.append(item)
else:
if len(stack)==0:
return False
elif stack[-1:][0]==self.rev(item):
stack = stack[:-1]
else:
return False
if len(stack)==0:
return True
else:
return False
def rev(self,item):
if item == ']':
return '['
elif item
|
== '}':
return '{'
else:
return '('
s=Solution()
print s.isValid(']')
|
yunify/qingcloud-sdk-python
|
qingcloud/iaas/actions/instance_groups.py
|
Python
|
apache-2.0
| 6,153
| 0.002438
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.iaas import constants as const
from qingcloud.misc.utils import filter_out_none
class InstanceGroupsAction(object):
def __init__(self, conn):
self.conn = conn
def create_instance_groups(self, relation,
instance_group_name=None,
description=None,
**ignore):
""" Create an instance group.
@param relation: Define the relation between instances in the same group.
"repel" means these instances prefer distributing on the different physical units.
"attract" means these instances prefer converging on the same physical unit.
@param instance_group_name: The name of this group.
@param description: The description of this group.
"""
action = const.ACTION_CREATE_INSTANCE_GROUPS
valid_keys = ['relation', 'instance_group_name', 'description']
body = filter_out_none(locals(), valid_keys)
if not self.conn.req_checker.check_params(body,
required_params=['relation'],
):
return None
return self.conn.send_request(action, body)
def delete_instance_groups(self, instance_groups,
**ignore):
""" Delete the specific instance group.
        @param instance_groups: An id list containing the group id(s) to be deleted.
"""
action = const.ACTION_DELETE_INSTANCE_GROUPS
valid_keys = ['instance_groups']
body = filter_out_none(locals(), valid_keys)
if not self.conn.req_checker.check_params(body,
required_params=['instance_groups'],
list_params=['instance_groups']
):
return None
return self.conn.send_request(action, body)
def join_instance_group(self, instances,
instance_group,
**ignore):
""" Add the instance(s) to the instance group.
        @param instances: An id list containing the instance(s) to be added to the specified group.
@param instance_group: The group id.
"""
action = const.ACTION_JOIN_INSTANCE_GROUP
valid_keys = ['instances', 'instance_group']
body = filter_out_none(locals(), valid_keys)
if not self.conn.req_checker.check_params(body,
required_params=['instances', 'instance_group'],
list_params=['instances']
):
return None
return self.conn.send_request(action, body)
def leave_instance_group(self, instances,
instance_group,
**ignore):
""" Delete the specific instance(s) from the group.
        @param instances: An id list containing the instance(s) that will leave the instance group.
@param instance_group: The instance group id.
"""
action = const.ACTION_LEAVE_INSTANCE_GROUP
valid_keys = ['instances', 'instance_group']
body = filter_out_none(locals(), valid_keys)
if not self.conn.req_checker.check_params(body,
required_params=['instances', 'instance_group'],
list_params=['instances']
):
return None
return self.conn.send_request(action, body)
def describe_instance_groups(self, instance_groups=[],
relation=None,
tags=None,
owner=None,
verbose=0,
offset=0,
limit=20,
**ignore):
""" Describe the instance groups filtered by conditions.
        @param instance_groups: If this param is given, only return info for the group(s) in this list.
@param relation: Filter by the relation type.
@param tags: Filter by the tag id.
@param owner: Filter by the owner id.
@param verbose: Whether return the verbose information.
@param offset: The offset of the item cursor and its default value is 0.
@param limit: The number of items that will be displayed. Default is 20, maximum is 100.
"""
action = const.AC
|
TION_DESCRIBE_INSTANCE_GROUPS
valid_keys = ['instance_groups', 'relation', 'tags', 'owner',
'verbose', 'offset', 'limit']
body = filter_out_none(locals(), valid_keys)
if not self.conn.req_checker.check_params(body,
list_params=['instance_groups', 'tags'],
integer_params=['limit', 'verbose', 'offset']
|
):
return None
return self.conn.send_request(action, body)
|
romanz/trezor-agent
|
libagent/tests/test_server.py
|
Python
|
lgpl-3.0
| 3,380
| 0
|
import io
import os
import socket
import tempfile
import threading
import mock
import pytest
from .. import server, util
from ..ssh import protocol
def test_socket():
path = tempfile.mktemp()
with server.unix_domain_socket_server(path):
pass
assert not os.path.isfile(path)
class FakeSocket:
def __init__(self, data=b''):
self.rx = io.BytesIO(data)
self.tx = io.BytesIO()
def sendall(self, data):
self.tx.write(data)
def recv(self, size):
return self.rx.read(size)
def close(self):
pass
def settimeout(self, value):
pass
def empty_device():
c = mock.Mock(spec=['parse_public_keys'])
c.parse_public_keys.return_value = []
return c
def test_handle():
mutex = threading.Lock()
handler = protocol.Handler(conn=empty_device())
conn = FakeSocket()
server.handle_connection(conn, handler, mutex)
msg = bytearray([protocol.msg_code('SSH_AGENTC_REQUEST_RSA_IDENTITIES')])
conn = FakeSocket(util.frame(msg))
server.handle_connection(conn, handler, mutex)
assert conn.tx.getvalue() == b'\x00\x00\x00\x05\x02\x00\x00\x00\x00'
msg = bytearray([protocol.msg_code('SSH2_AGENTC_REQUEST_IDENTITIES')])
conn = FakeSocket(util.frame(msg))
server.handle_connection(conn, handler, mutex)
assert conn.tx.getvalue() == b'\x00\x00\x00\x05\x0C\x00\x00\x00\x00'
msg = bytearray([protocol.msg_code('SSH2_AGENTC_ADD_IDENTITY')])
conn = FakeSocket(util.frame(msg))
server.handle_connection(conn, han
|
dler, mutex)
conn.
|
tx.seek(0)
reply = util.read_frame(conn.tx)
assert reply == util.pack('B', protocol.msg_code('SSH_AGENT_FAILURE'))
conn_mock = mock.Mock(spec=FakeSocket)
conn_mock.recv.side_effect = [Exception, EOFError]
server.handle_connection(conn=conn_mock, handler=None, mutex=mutex)
def test_server_thread():
sock = FakeSocket()
connections = [sock]
quit_event = threading.Event()
class FakeServer:
def accept(self): # pylint: disable=no-self-use
if not connections:
raise socket.timeout()
return connections.pop(), 'address'
def getsockname(self): # pylint: disable=no-self-use
return 'fake_server'
def handle_conn(conn):
assert conn is sock
quit_event.set()
server.server_thread(sock=FakeServer(),
handle_conn=handle_conn,
quit_event=quit_event)
quit_event.wait()
def test_spawn():
obj = []
def thread(x):
obj.append(x)
with server.spawn(thread, dict(x=1)):
pass
assert obj == [1]
def test_run():
assert server.run_process(['true'], environ={}) == 0
assert server.run_process(['false'], environ={}) == 1
assert server.run_process(command=['bash', '-c', 'exit $X'],
environ={'X': '42'}) == 42
with pytest.raises(OSError):
server.run_process([''], environ={})
def test_remove():
path = 'foo.bar'
def remove(p):
assert p == path
server.remove_file(path, remove=remove)
def remove_raise(_):
raise OSError('boom')
server.remove_file(path, remove=remove_raise, exists=lambda _: False)
with pytest.raises(OSError):
server.remove_file(path, remove=remove_raise, exists=lambda _: True)
|
superpenshine/RipMyProfessor
|
gethint/urls.py
|
Python
|
mit
| 150
| 0.013333
|
from django.conf
|
.urls import url
from . import views
urlpatterns = [
url(r'^(?P<typed_letter>[a-zA-Z]+[0-9]{0,3})$', views.gethint, name='hint'),
]
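# Illustrative note (not part of the original module): the pattern accepts a
# letter prefix followed by at most three digits, so 'CS', 'CS101' and 'MATH20'
# resolve to the gethint view, while 'CS1011' (four digits) or '101CS'
# (digits first) do not match.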
| |
sodhancha/Godown
|
purchase_orders/models.py
|
Python
|
gpl-3.0
| 480
| 0.027083
|
from django.db import models
from inven
|
tory.models import Product_variant
from suppliers.models import Supplier
# Create your models here.
class Purchase_order( models.Model ):
supplier_id = models.ForeignKey(Supplier)
created_at = models.DateTimeField('date published')
class Item( models.Model ):
purchase = models.ForeignKey(Purchase_order)
product = models.ForeignKey(Product_va
|
riant)
quantity = models.PositiveIntegerField()
price = models.PositiveIntegerField()
|
waynesun09/tp-libvirt
|
libvirt/tests/src/virsh_cmd/virsh_connect.py
|
Python
|
gpl-2.0
| 8,185
| 0
|
import logging
import os
import re
import shutil
from autotest.client.shared import error
from avocado.utils import process
from virttest import libvirt_vm
from virttest import utils_libvirtd
from virttest import virsh
from virttest import utils_conn
def do_virsh_connect(uri, options):
"""
Execute connect command in a virsh session and return the uri
of this virsh session after connect.
    Raise a process.CmdError if the virsh connect command fails.
:param uri: argument of virsh connect command.
:param options: options pass to command connect.
:return: the uri of the virsh session after connect.
"""
virsh_instance = virsh.VirshPersistent()
virsh_instance.connect(uri, options)
uri_result = virsh_instance.canonical_uri()
del virsh_instance
logging.debug("uri after connect is %s.", (uri_result))
return uri_result
def run(test, params, env):
"""
Test command: virsh connect.
"""
def unix_transport_setup():
"""
        Set up a unix socket connection to the local libvirtd.
"""
shutil.copy(libvirtd_conf_path, libvirtd_conf_bak_path)
libvirtdconf_file = open(libvirtd_conf_path, 'r')
line_list = libvirtdconf_file.readlines()
conf_dict = {r'auth_unix_rw\s*=': 'auth_unix_rw="none"\n', }
for key in conf_dict:
pattern = key
conf_line = conf_dict[key]
flag = False
for index in range(len(line_list)):
line = line_list[index]
if not re.search(pattern, line):
continue
else:
line_list[index] = conf_line
flag = True
break
if not flag:
line_list.append(conf_line)
libvirtdconf_file.close()
libvirtdconf_file = open(libvirtd_conf_path, 'w')
libvirtdconf_file.writelines(line_list)
libvirtdconf_file.close()
# restart libvirtd service
utils_libvirtd.libvirtd_restart()
def unix_transport_recover():
"""
        Restore the local libvirtd configuration.
"""
if os.path.exists(libvirtd_conf_bak_path):
shutil.copy(libvirtd_conf_bak_path, libvirtd_conf_path)
utils_libvirtd.libvirtd_restart()
# get the params from subtests.
# params for general.
connect_arg = params.get("connect_arg", "")
connect_opt = params.get("connect_opt", "")
status_error = params.get("status_error", "no")
# params for transport connect.
local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
transport_type = params.get("connect_transport_type", "local")
transport = params.get("connect_transport", "ssh")
client_ip = local_ip
client_pwd = local_pwd
server_ip = local_ip
server_pwd = local_pwd
# params special for tls connect.
server_cn = params.get("connect_server_cn", "TLSServer")
client_cn = params.get("connect_client_cn", "TLSClient")
# params special for tcp connect.
tcp_port = params.get("tcp_port", '16509')
# params special for unix transport.
libvirtd_conf_path = '/etc/libvirt/libvirtd.conf'
libvirtd_conf_bak_path = '%s/libvirtd.conf.bak' % test.tmpdir
# check the config
if (connect_arg == "transport" and
transport_type == "remote" and
local_ip.count("ENTER")):
        raise error.TestNAError("Parameter local_ip is not configured "
"in remote test.")
if (connect_arg == "transport" and
transport_type == "remote" and
local_pwd.count("ENTER")):
        raise error.TestNAError("Parameter local_pwd is not configured "
"in remote test.")
if (connect_arg.count("lxc") and
(not os.path.exists("/usr/libexec/libvirt_lxc"))):
raise error.TestNAError("Connect test of lxc:/// is not suggested on "
"the host with no lxc driver.")
if connect_arg.count("xen") and (not os.path.exists("/var/run/xend")):
raise error.TestNAError("Connect test of xen:/// is not suggested on "
"the host with no xen driver.")
if connect_arg.count("qemu") and (not os.path.exists("/dev/kvm")):
        raise error.TestNAError("Connect test of qemu:/// is not suggested "
"on the host with no qemu driver.")
if connect_arg == "transport":
canonical_uri_type = virsh.driver()
if transport == "ssh":
ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
server_pwd=server_pwd,
client_ip=client_ip,
client_pwd=client_pwd)
try:
ssh_connection.conn_check()
except utils_conn.ConnectionError:
ssh_connection.conn_setup()
ssh_connection.conn_check()
connect_uri = libvirt_vm.get_uri_with_transport(
uri_type=canonical_uri_type,
transport=transport, dest_ip=server_ip)
elif transport == "tls":
tls_connection = utils_conn.TLSConnection(server_ip=server_ip,
server_pwd=server_pwd,
client_ip=client_ip,
client_pwd=client_pwd,
server_cn=server_cn,
client_cn=client_cn)
tls_connection.conn_setup()
connect_uri = libvirt_vm.get_uri_with_transport(
uri_type=canonical_uri_type,
transport=transport, dest_ip=server_cn)
elif transport == "tcp":
tcp_connection = utils_conn.TCPConnection(server_ip=server_ip,
server_pwd=server_pwd,
tcp_port=tcp_port)
tcp_connection.conn_setup()
connect_uri = libvirt_vm.get_uri_with_transport(
uri_type=canonical_uri_type,
transport=transport,
dest_ip="%s:%s"
% (server_ip, tcp_port))
elif transport == "unix":
unix_transport_setup()
connect_uri = libvirt_vm.get_uri_with_transport(
uri_type=canonical_uri_type,
transport=transport,
dest_ip="")
else:
raise error.TestNAError("Configuration of transport=%s is "
"not recognized." % transport)
else:
connect_uri = connect_arg
try:
try:
uri = do_virsh_connect(connect_uri, connect_opt)
# connect successfully
if status_error == "yes":
raise error.TestFail("Connect successfully in the "
"case expected to fail.")
# get the expect uri when connect argument is ""
if connect_uri == "":
connect_uri = virsh.canonical_uri().split()[-1]
logging.debug("expected uri
|
is: %s", connect_uri)
logging.debug("actual uri after connect is: %s", uri)
if not uri == connect_uri:
                raise error.TestFail("Command exited normally but the uri is "
"
|
not set as expected.")
except process.CmdError, detail:
if status_error == "no":
                raise error.TestFail("Connect failed in the case expected "
                                     "to succeed.\n"
"Error: %s" % detail)
finally:
if transport == "unix":
unix_transport_recover()
if transport == "tcp":
tcp_connection.conn_recover()
if transport == "tls":
tls_connection.conn_recov
|
oskgeek/liftpass
|
tests/pricing.py
|
Python
|
apache-2.0
| 10,307
| 0.036286
|
import unittest
import json
import requests
import sys
import string
import random
import config
import core.content.errors as errors
import core.api.main as main
import core.content.content as content
import core.pricing.pricing as pricing
import core.storage as storage
from core.util.test import *
class TestPricingEngine(unittest.TestCase):
def testLoadEmptyPrices(self):
backend = content.Content()
a = backend.addApplication('Test')
p = pricing.PricingEngine.getApplicationPricing(a.key)
self.assertEqual(p.groupAPrices, None)
self.assertEqual(p.groupBPrices, None)
def testLoadPrices(self):
backend = content.Content()
a = backend.addApplication('Test')
jsonPrices = backend.addPrices(a.key, 'JSON', json.dumps({'sword':1000}), None)
backend.setABTest(a.key, {'groupAPrices_key': jsonPrices.key})
p = pricing.PricingEngine.getApplicationPricing(a.key)
self.assertNotEqual(p.groupAPrices, None)
self.assertEqual(p.groupBPrices, None)
def testPricesWithProgress(self):
backend = content.Content()
a = backend.addApplication('Test')
jsonPricesA = backend.addPrices(a.key, 'JSON', json.dumps({'sword':1000}), None)
jsonPricesB = backend.addPrices(a.key, 'JSON', json.dumps({'sword':2000}), None)
backend.setABTest(a.key, {'groupAPrices_key': jsonPricesA.key})
backend
|
.setABTest(a.key, {'groupBPrices_key': jsonPricesB.key})
prici
|
ng = backend.getPricingEngine(a.key)
pricesA = pricing.getPrices('a', [0]*32)
pricesB = pricing.getPrices('b', [0]*32)
self.assertEqual(pricesA[0], jsonPricesA.key)
self.assertEqual(pricesB[0], jsonPricesB.key)
self.assertEqual(pricesA[1]['sword'][0], 1000)
self.assertEqual(pricesB[1]['sword'][0], 2000)
def testPricesWithBadApplicationKey(self):
backend = content.Content()
with self.assertRaises(pricing.ApplicationNotFoundException):
applicationPrices = backend.getPricingEngine('12345')
class TestJSONEngine(unittest.TestCase):
def testEngine(self):
p = pricing.JSONDataEngine.validate({'data': json.dumps({'sword':1000})})
self.assertIsInstance(p, dict)
p = pricing.JSONDataEngine.validate({'data': json.dumps({'sword':[1000]+[None]*7})})
self.assertIsInstance(p, dict)
with self.assertRaises(pricing.DataEngineException):
p = pricing.JSONDataEngine.validate({'data': json.dumps({'sword':'1000'})})
with self.assertRaises(pricing.DataEngineException):
p = pricing.JSONDataEngine.validate({'data': json.dumps({'sword':[1,2,3,4,5,6,7,8,9]})})
with self.assertRaises(pricing.DataEngineException):
p = pricing.JSONDataEngine.validate({'data': json.dumps({'sword':[1,2,3,'4',5,6,7,8,9]})})
class TestCSVEngine(unittest.TestCase):
def testEngine(self):
data = """
sword, 1000
saber, 2000
knife, 500
"""
p = pricing.CSVDataEngine.validate({'data': data})
self.assertIsInstance(p, dict)
data = """
sword, 1000, 300
saber, 2000, 400
knife, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.CSVDataEngine.validate({'data': data})
data = """
sword,
saber, 2000, 400
knife, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.CSVDataEngine.validate({'data': data})
data = """
sword, "100"
saber, 2000, 400
knife, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.CSVDataEngine.validate({'data': data})
def testPath(self):
backend = content.Content()
a = backend.addApplication('Test')
data = """
sword, 1000
saber, 2000
knife, 500
"""
fileStorage = storage.getStorage(config.PricingStorage)
fileStorage.save('test.csv', data)
csvPrices = backend.addPrices(a.key, 'CSV', None, 'test.csv')
backend.setABTest(a.key, {'groupAPrices_key': csvPrices.key})
backend.setABTest(a.key, {'groupBPrices_key': csvPrices.key})
p = pricing.PricingEngine.getApplicationPricing(a.key)
self.assertNotEqual(p.groupAPrices, None)
self.assertNotEqual(p.groupBPrices, None)
prices = p.getPrices('1'*32, [None]*32)
class TestMetricCSVEngine(unittest.TestCase):
def __makeProgress(self, p, v):
return [None]*(p-1) + [v] + [None]*(32-p)
def testEngine(self):
data = """
metricString5, Default, US, BR, DE
sword, 100, 200, 300, 500
saber, 200, 300, 400, 600
knife, 300, 400, 500, 700
"""
p = pricing.MetricCSVDataEngine({'data':data})
self.assertEqual(p.getPrices(self.__makeProgress(5, 'US'))['sword'][0], 200)
self.assertEqual(p.getPrices(self.__makeProgress(5, 'BR'))['saber'][0], 400)
self.assertEqual(p.getPrices(self.__makeProgress(5, 'DE'))['knife'][0], 700)
self.assertEqual(p.getPrices(self.__makeProgress(5, 'JP'))['knife'][0], 300)
# Check number metric conversion
data = """
metricString8, Default, A, B, C
sword, 100, 200, 300, 500
"""
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Check number metric conversion
data = """
metricNumber1, Default, 1, 2, 3
sword, 100, 200, 300, 500
"""
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Check number metric conversion
data = """
metricNumber5, Default, 10, 20, 30
sword, 100, 200, 300, 500
"""
p = pricing.MetricCSVDataEngine.validate({'data':data})
self.assertEqual(p['metric'], 12)
# Metric number out of bound
data = """
metricNumber40, Default, 10, 20, 30
sword, 100, 200, 300, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Unrecognized metric
data = """
something12, Default, US, BR, DE
sword, 100, 200, 300, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Row with missing column
data = """
metricNumber5, Default, 10, 20, 30
sword, 100, 200, 300
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Row with too many elements
data = """
metricNumber5, Default, 10, 20, 30
sword, 100, 200, 300, 400, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Row with wrong type of value
data = """
metricNumber5, Default, US, BR, DE
sword, 100, 200, 'abc', 400, 500
"""
with self.assertRaises(pricing.DataEngineException):
p = pricing.MetricCSVDataEngine.validate({'data':data})
# Test country
data = """
country, Default, US, BR, DE
sword, 100, 200, 400, 500
"""
p = pricing.MetricCSVDataEngine({'data':data})
r = p.getPrices(self.__makeProgress(0,''), country='US')
self.assertEqual(r['sword'][0], 200)
r = p.getPrices(self.__makeProgress(0,''), country='DE')
self.assertEqual(r['sword'][0], 500)
r = p.getPrices(self.__makeProgress(0,''), country='JP')
self.assertEqual(r['sword'][0], 100)
class TestDTJSONEngine(unittest.TestCase):
def __makeProgress(self, p, v):
prog = [None] * 32
for i in range(len(p)):
prog[p[i]] = v[i]
return prog
def testLookup(self):
data = {
'metric': 12,
'method': 'lookup',
'keys': [[1,2,3], [5,6,7]],
'values': [{'sword': 1000}, {'sword': 500}]
}
data = json.dumps(data)
p = pricing.DTJSONData({'data':data})
a = p.getPrices(self.__makeProgress([12], [2]))
self.assertEqual(a['sword'][0], 1000)
b = p.getPrices(self.__makeProgress([12], [7]))
self.assertEqual(b['sword'][0], 500)
def testRange(self):
data = {
'metric': 12,
'method': 'range',
'keys': [[0,10], [11, 20]],
'values': [{'sword': 1000}, {'sword': 500}]
}
data = json.dumps(data)
p = pricing.DTJSONData({'data':data})
a = p.getPrices(self.__makeProgress([12], [8]))
self.assertEqual(a['sword'][0], 1000)
b = p.getPrices(self.__makeProgress([12], [13]))
self.assertEqual(b['sword'][0], 500)
def testLookupRange(self):
data = {
'metric': 12,
'method': 'range',
'keys': [[0,10], [11, 20]],
'values': [{
'metric': 5,
'method': 'lookup',
'keys': [['US', 'BR'], ['JP', 'IT']],
'values':[{'sword': 1000}, {'sword': 500}]
},{
'metric': 5,
'method': 'lookup',
'keys': [['US', 'BR']
|
nachtmaar/androlyze
|
androlyze/loader/exception/__init__.py
|
Python
|
mit
| 2,440
| 0.007377
|
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Loader exceptions
'''
from androlyze.error.WrapperException import WrapperException
from androlyze.model.android.Cons
|
tants import MANIFEST_FILENAME
from androlyze.error.AndroLyzeLabError import AndroLyzeLabError
############################################################
#---Apk
############################################################
class ApkImportErro
|
r(WrapperException):
''' Base class for import errors '''
pass
class CouldNotOpenFile(ApkImportError):
def __init__(self, file_path, caused_by = None):
'''
Parameters
----------
file_path : str
The path to the file that could not be opened.
caused_by : Exception
'''
super(CouldNotOpenFile, self).__init__(caused_by = caused_by)
self.file_path = file_path
def _msg(self):
return 'Could not open file: %s' % self.file_path
class CouldNotOpenApk(CouldNotOpenFile):
def _msg(self):
return 'Could not open apk file: %s' % self.file_path
class CouldNotOpenManifest(CouldNotOpenFile):
def _msg(self):
return 'Could not open %s from file: %s' % (MANIFEST_FILENAME, self.file_path)
############################################################
#---AndroScript
############################################################
class NoAndroScriptSubclass(AndroLyzeLabError):
def __init__(self, class_name):
Exception.__init__(self)
self._class_name = class_name
def __str__(self):
from androlyze.model.script import AndroScript
return "%s is no subclass of %s !" % (self._class_name, AndroScript.__name__)
class ModuleClassNameException(AndroLyzeLabError):
''' Exception for the case that the module does not have the specified class '''
def __init__(self, module_name, class_name):
self.module_name = module_name
self.class_name = class_name
def __str__(self):
return 'The module "%s" does not have the specified class "%s"!' % (self.module_name, self.class_name)
class ModuleNotSameClassNameException(ModuleClassNameException):
''' Exception for the case that the module has a different name than the class '''
def __str__(self):
return super(ModuleNotSameClassNameException, self).__str__() + ' The module name has to equal the class name !'
|
OCA/l10n-italy
|
l10n_it_fatturapa_out_sp/tests/__init__.py
|
Python
|
agpl-3.0
| 75
| 0
|
from . import fatturapa_common
|
from . import test_fattur
|
apa_xml_validation
|
lzkelley/zcode
|
zcode/inout/inout_core.py
|
Python
|
mit
| 30,163
| 0.00315
|
"""Functions for Input/Output (IO) Operations.
Classes
-------
- Keys - Provide convenience for classes used as enumerated dictionary keys.
- StreamCapture - Class to capture/redirect output to stdout and stderr.
Functions
---------
- bytes_string - Return a humanized string representation of a number of bytes.
- get_file_size - Return a human-readable size of a file or set of files.
- countLines - Count the number of lines in the given file.
- environment_is_jupyter - Determine if current environment is a jupyter notebook.
- estimateLines - Estimate the number of lines in the given file.
- check_path - Create the given filepath if it doesn't already exist.
- dictToNPZ - Save a dictionary to the given NPZ filename.
- npzToDict - Convert an NPZ file to a dictionary with the same keys and values.
- getProgressBar - Wrapper to create a progressbar object with default settings.
- combineFiles - Concatenate the contents of input files into a single output file.
- checkURL - Check that the given url exists.
- promptYesNo - Prompt the user (via CLI) for yes or no.
- modify_filename - Modify the given filename.
- mpiError - Raise an error through MPI and exit all processes.
- ascii_table - Print a table with the given contents to output.
- modify_exists - Modify the given filename if it already exists.
- python_environment - Tries to determine the current python environment.
- iterable_notstring - Return 'True' if the argument is an iterable and not a string type.
- str_format_dict - Pretty-format a dict into a nice looking string.
- par_dir - Get parent (absolute) directory name from given file/directory.
- top_dir - Get the top level directory name from the given path.
- underline - Add a new line of characters appended to the given string.
- warn_with_traceback - Include traceback information in warnings.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from datetime import datetime
import os
import sys
import shutil
from datetime import datetime
import re
import warnings
import numpy as np
import collections
from zcode import utils
__all__ = ['Keys', 'MPI_TAGS', 'StreamCapture',
'backup_existing', 'bytes_string', 'get_file_size',
'count_lines', 'environment_is_jupyter', 'estimateLines', 'modify_filename',
'check_path', 'dictToNPZ', 'npzToDict', 'checkURL',
'combine_files',
'promptYesNo', 'mpiError', 'ascii_table', 'modify_exists', 'python_environment',
'iterable_notstring', 'str_format_dict', 'top_dir', 'underline', 'warn_with_traceback',
'tqdm', 'unzip',
# === DEPRECATED ===
'frac_str', 'countLines', 'combineFiles']
class _Keys_Meta(type):
"""Metaclass for the ``Keys`` class. See, ``InOut.Keys``.
To-Do
-----
- Modify the attribute getter to yield more unique responses than
the values given in the user-defined class.
e.g.
class TestKeys(Keys):
one = 1
Then the actual value used should be something like
``"TestKeys.one"`` or ``"TestKeys.1"``, to make them more unique
than just ``"one"`` or ``1``.
"""
    # Store all attribute values to list ``__values__`` (if they don't start with '__')
def __init__(self, name, bases, dict):
self.__init__(self)
self.__values__ = [list(self.__dict__.values())[ii] for ii, ent in enumerate(self.__dict__)
                              if not ent.startswith('__')]
# Iterate over the list of attributes values
    def __iter__(self):
for val in self.__values__:
yield val
class Keys(six.with_metaclass(_Keys_Meta)):
"""Super class to provide convenience for classes used as enumerated dictionary keys.
Uses the metaclass ``_Key_Meta`` to override the ``__iter__`` and ``__init__`` methods. The
initializer simply stores a list of the *VALUES* of each attribute (not starting with '__'),
    for later use. The iterator yields each element of the attribute-values list.
Note:
- The ordering of entries is *not* preserved, and has *no* meaning.
Example
-------
>>> from InOut import Keys
>>> class Test(Keys):
>>> one = '1'
>>> two = 'two'
>>> three = '3.0'
>>> for tt in Test:
>>> print tt
>>> if(tt == Test.two): print "Found two!"
1
3.0
two
Found two!
"""
class MPI_TAGS(Keys):
"""Commonly used MPI tags for master-slave paradigm.
"""
READY = 0
START = 1
DONE = 2
EXIT = 3
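# Added illustration (not in the original module): thanks to the ``Keys``
# metaclass, the tag values can be iterated directly, e.g. to validate a tag
# received from a worker process in the master-slave loop.
def _tag_is_known(tag):
    """Return True if ``tag`` is one of the values defined in ``MPI_TAGS``."""
    return tag in list(MPI_TAGS)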
class StreamCapture(list):
"""Class to capture/redirect output to stdout and stderr.
See: stackoverflow.com/questions/16571150
Usage:
>>> with Capturing() as output:
>>> do_something(my_object)
>>> print output
"""
try:
# import for python3
from io import StringIO
except ImportError:
# import for python2
from cStringIO import StringIO
def __init__(self, out=True, err=True):
self.out = out
self.err = err
def __enter__(self):
if(self.out):
self._stdout = sys.stdout
sys.stdout = self._stringio = StreamCapture.StringIO()
if(self.err):
self._stderr = sys.stderr
sys.stderr = self._stringio = StreamCapture.StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
if(self.out): sys.stdout = self._stdout
if(self.err): sys.stderr = self._stderr
def backup_existing(fname, prepend='_', append=None, append_type=None, verbose=True):
"""If the given file already exists, move it to a backup named with the given components.
"""
types = ['date']
if append_type is not None and append_type not in types:
        raise ValueError("`append_type` must be one of {}, got '{}'!".format(types, append_type))
if not os.path.exists(fname):
return
if (append is None) and (append_type is not None):
if append_type == 'date':
append = '_' + datetime.strftime(datetime.now(), '%Y-%m-%d')
else:
raise ValueError("Unrecognized `append_type` = '{}'!".format(append_type))
backup = modify_filename(fname, prepend=prepend, append=append)
if os.path.normpath(fname.lower()) == os.path.normpath(backup.lower()):
raise ValueError("Input '{}' is degenerate with backup '{}'!".format(fname, backup))
if verbose:
print("Backing up existing evolution save to '{}'".format(backup))
if os.path.exists(backup):
os.remove(backup)
shutil.move(fname, backup)
if not os.path.exists(backup):
err = "Failed to move existing backup '{}' ==> '{}'!".format(fname, backup)
raise ValueError(err)
return backup
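# Added usage sketch: back up an existing file with a date suffix before it
# gets overwritten. The filename below is purely illustrative.
def _example_backup(fname='results.npz'):
    if os.path.exists(fname):
        # e.g. 'results.npz' --> '_results_2024-01-01.npz' (date of the run)
        return backup_existing(fname, append_type='date')
    return None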
def bytes_string(bytes, precision=1):
"""Return a humanized string representation of a number of bytes.
Arguments
---------
bytes : <scalar>, number of bytes
precision : <int>, target precision in number of decimal places
Returns
-------
strSize : <string>, human readable size
Examples
--------
>> humanize_bytes(1024*12342,2)
'12.05 MB'
"""
abbrevs = (
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'KB'),
(1, 'bytes')
)
for factor, suffix in abbrevs:
if bytes >= factor:
break
# size_str = '%.*f %s' % (precision, 1.0*bytes / factor, suffix)
size_str = '{size:.{prec:}f} {suff}'.format(
prec=precision, size=1.0*bytes / factor, suff=suffix)
return size_str
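# Added usage sketch: report a file's size in human-readable form. The helper
# name and the path handling are illustrative only.
def _print_size(path):
    nbytes = os.path.getsize(path)
    print("{}: {}".format(path, bytes_string(nbytes, precision=2)))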
def get_file_size(fnames, precision=1):
|
lintusj1/elfi
|
elfi/methods/bo/gpy_regression.py
|
Python
|
bsd-3-clause
| 12,384
| 0.001534
|
"""This module contains an interface for using the GPy library in ELFI."""
# TODO: make own general GPRegression and kernel classes
import copy
import logging
import GPy
import numpy as np
logger = logging.getLogger(__name__)
logging.getLogger("GP").setLevel(logging.WARNING) # GPy library logger
class GPyRegression:
"""Gaussian Process regression using the GPy library.
GPy API: https://sheffieldml.github.io/GPy/
"""
def __init__(self,
parameter_names=None,
bounds=None,
optimizer="scg",
max_opt_iters=50,
gp=None,
**gp_params):
"""Initialize GPyRegression.
Parameters
----------
        parameter_names : list of str, optional
Names of parameter nodes. If None, sets dimension to 1.
bounds : dict, optional
            The region where to estimate the posterior for each parameter in
model.parameters.
`{'parameter_name':(lower, upper), ... }`
If not supplied, defaults to (0, 1) bounds for all dimensions.
optimizer : string, optional
Optimizer for the GP hyper parameters
Alternatives: "scg", "fmin_tnc", "simplex", "lbfgsb", "lbfgs", "sgd"
See also: paramz.Model.optimize()
max_opt_iters : int, optional
gp : GPy.model.GPRegression instance, optional
**gp_params
kernel : GPy.Kern
noise_var : float
mean_function
"""
if parameter_names is None:
input_dim = 1
elif isinstance(parameter_names, (list, tuple)):
input_dim = len(parameter_names)
else:
raise ValueError("Keyword `parameter_names` must be a list of strings")
if bounds is None:
logger.warning('Parameter bounds not specified. Using [0,1] for each parameter.')
bounds = [(0, 1)] * input_dim
elif len(bounds) != input_dim:
raise ValueError(
'Length of `bounds` ({}) does not match the length of `parameter_names` ({}).'
.format(len(bounds), input_dim))
elif isinstance(bounds, dict):
if len(bounds) == 1: # might be the case parameter_names=None
bounds = [bounds[n] for n in bounds.keys()]
else:
# turn bounds dict into a list in the same order as parameter_names
bounds = [bounds[n] for n in parameter_names]
else:
raise ValueError("Keyword `bounds` must be a dictionary "
"`{'parameter_name': (lower, upper), ... }`")
self.input_dim = input_dim
self.bounds = bounds
self.gp_params = gp_params
self.optimizer = optimizer
self.max_opt_iters = max_opt_iters
self._gp = gp
self._rbf_is_cached = False
self.is_sampling = False # set to True once in sampling phase
def __str__(self):
"""Return GPy's __str__."""
return self._gp.__str__()
def __repr__(self):
"""Return GPy's __str__."""
return self.__str__()
def predict(self, x, noiseless=False):
"""Return the GP model mean and variance at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
noiseless : bool
whether to include the noise variance or not to the returned variance
Returns
-------
tuple
GP (mean, var) at x where
mean : np.array
with shape (x.shape[0], 1)
var : np.array
with shape (x.shape[0], 1)
"""
# Ensure it's 2d for GPy
x = np.asanyarray(x).reshape((-1, self.input_dim))
if self._gp is None:
# TODO: return from GP mean function if given
return np.zeros((x.shape[0], 1)), \
np.ones((x.shape[0], 1))
# direct (=faster) implementation for RBF kernel
if self.is_sampling and self._kernel_is_default:
if not self._rbf_is_cached:
self._cache_RBF_kernel()
r2 = np.sum(x**2., 1)[:, None] + self._rbf_x2sum - 2. * x.dot(self._gp.X.T)
kx = self._rbf_var * np.exp(r2 * self._rbf_factor) + self._rbf_bias
mu = kx.dot(self._rbf_woodbury)
var = self._rbf_var + self._rbf_bias
var -= kx.dot(self._rbf_woodbury_inv.dot(kx.T))
var += self._rbf_noisevar # likelihood
return mu, var
else:
self._rbf_is_cached = False # in case one resumes fitting the GP after sampling
if noiseless:
return self._gp.predict_noiseless(x)
else:
return self._gp.predict(x)
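    # Added usage sketch (illustrative; not from the original module): before any
    # evidence is supplied (``gp`` is None) ``predict`` falls back to a zero-mean,
    # unit-variance prior, which the snippet below exercises:
    #
    #   target = GPyRegression(parameter_names=['a', 'b'],
    #                          bounds={'a': (0, 1), 'b': (0, 2)})
    #   mean, var = target.predict(np.zeros((5, 2)))   # both with shape (5, 1)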
# TODO: find a more general solution
# cache some RBF-kernel-specific values for faster sampling
def _cache_RBF_kernel(self):
self._rbf_var = float(self._gp.kern.rbf.variance)
self._rbf_factor = -0.5 / float(self._gp.kern.rbf.lengthscale)**2
self._rbf_bias = float(self._gp.kern.bias.K(self._gp.X)[0, 0])
self._rbf_noisevar = float(self._gp.likelihood.variance[0])
self._rbf_woodbury = self._gp.posterior.woodbury_vector
self._rbf_woodbury_inv = self._gp.posterior.woodbury_inv
self._rbf_woodbury_chol = self._gp.posterior.woodbury_chol
self._rbf_x2sum = np.sum(self._gp.X**2., 1)[None, :]
self._rbf_is_cached = True
def predict_mean(self, x):
"""Return the GP model mean function at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
np.array
with shape (x.shape[0], 1)
"""
return self.predict(x)[0]
def predictive_gradients(self, x):
"""Return the gradients of the GP model mean and variance at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
tuple
GP (grad_mean, grad_var) at x where
grad_mean : np.array
with shape (x.shape[0], input_dim)
grad_var : np.array
with shape (x.shape[0], input_dim)
"""
# Ensure it's 2d for GPy
x = np.asanyarray(x).reshape((-1, self.input_dim))
if self._gp is None:
# TODO: return from GP mean function if given
return np.zeros((x.shape[0], self.input_dim)), \
np.zeros((x.shape[0], self.input_dim))
# direct (=faster) implementation for RBF kernel
if self.is_sampling and self._kernel_is_default:
if not self._rbf_is_cached:
self._cache_RBF_kernel()
r2 = np.sum(x**2., 1)[:, None] + self._rbf_x2sum - 2. * x.dot(self._gp.X.T)
kx = self._rbf_var * np.exp(r2 * self._rbf_factor)
dkdx = 2. * self._rbf_factor * (x - self._gp.X) * kx.T
grad_mu = dkdx.T.dot(self._rbf_woodbury).T
v = np.linalg.solve(self._rbf_woodbury_chol, kx.T + self._rbf_bias)
dvdx = np.linalg.solve(self._rbf_woodbury_chol, dkdx)
grad_var = -2. * dvdx.T.dot(v).T
else:
grad_mu, grad_var = self._gp.predictive_gradients(x)
grad_mu = grad_mu[:, :, 0] # Assume 1D output (distance in ABC)
return grad_mu, grad_var
def predictive_gradient_mean(self, x):
"""Return the gradient of the GP model mean at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
np.array
with
|
Yam-cn/potato
|
testcases/btcharts_test.py
|
Python
|
apache-2.0
| 4,240
| 0.003302
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import common
from engine.bitcoincharts import barfeed
from engine.utils import dt
class TestCase(common.TestCase):
def testLoadNoFilter(self):
feed = barfeed.CSVTradeFeed()
feed.addBarsFromCSV(common.get_data_file_path("bitstampUSD.csv"))
loaded = [(dateTime, bars) for dateTime, bars in feed]
self.assertEquals(len(loaded), 9999)
self.assertEquals(loaded[0][0], dt.as_utc(datetime.datetime(2011, 9, 13, 13, 53, 36)))
self.assertEquals(loaded[0][1]["BTC"].getDateTime(), dt.as_utc(datetime.datetime(2011, 9, 13, 13, 53, 36)))
self.assertEquals(loaded[0][1]["BTC"].getClose(), 5.8)
self.assertEquals(loaded[0][1]["BTC"].getPrice(), 5.8)
self.assertEquals(loaded[0][1]["BTC"].getVolume(), 1.0)
self.assertEquals(loaded[-1][0], dt.as_utc(datetime.datetime(2012, 5, 31, 8, 41, 18, 5)))
self.assertEquals(loaded[-1][1]["BTC"].getDateTime(), dt.as_utc(datetime.datetime(2012, 5, 31, 8, 41, 18, 5)))
self.assertEquals(loaded[-1][1]["BTC"].getClose(), 5.1)
self.assertEquals(loaded[-1][1]["BTC"].getPrice(), 5.1)
self.assertEquals(loaded[-1][1]["BTC"].getVolume(), 0.39215686)
def testLoadFilterFrom(self):
feed = barfeed.CSVTradeFeed()
feed.addBarsFromCSV(common.get_data_file_path("bitstampUSD.csv"), "bitstampUSD", fromDateTime=dt.as_utc(datetime.datetime(2012, 5, 29)))
loaded = [(dateTime, bars) for dateTime, bars in feed]
        self.assertEquals(len(loaded), 646)
        self.assertEquals(loaded[0][0], dt.as_utc(datetime.datetime(2012, 5, 29, 1, 47, 52)))
self.assertEquals(loaded[0][1]["bitstampUSD"].getDateTime(), dt.as_utc(datetime.datetime(2012, 5, 29, 1, 47, 52)))
self.assertEquals(loaded[0][1]["bitstampUSD"].getClose(), 5.07)
self.assertEquals(loaded[0][1]["bitstampUSD"].getPrice(), 5.07)
self.assertEquals(loaded[0][1]["bitstampUSD"].getVolume(), 1.39081288)
self.assertEquals(loaded[-1][0], dt.as_utc(datetime.datetime(2012, 5, 31, 8, 41, 18, 5)))
self.assertEquals(loaded[-1][1]["bitstampUSD"].getDateTime(), dt.as_utc(datetime.datetime(2012, 5, 31, 8, 41, 18, 5)))
self.assertEquals(loaded[-1][1]["bitstampUSD"].getClose(), 5.1)
self.assertEquals(loaded[-1][1]["bitstampUSD"].getPrice(), 5.1)
self.assertEquals(loaded[-1][1]["bitstampUSD"].getVolume(), 0.39215686)
def testLoadFilterFromAndTo(self):
feed = barfeed.CSVTradeFeed()
feed.addBarsFromCSV(common.get_data_file_path("bitstampUSD.csv"), "bitstampUSD", fromDateTime=dt.as_utc(datetime.datetime(2012, 5, 29)), toDateTime=datetime.datetime(2012, 5, 31))
loaded = [(dateTime, bars) for dateTime, bars in feed]
self.assertEquals(len(loaded), 579)
self.assertEquals(loaded[0][0], dt.as_utc(datetime.datetime(2012, 5, 29, 1, 47, 52)))
self.assertEquals(loaded[0][1]["bitstampUSD"].getDateTime(), dt.as_utc(datetime.datetime(2012, 5, 29, 1, 47, 52)))
self.assertEquals(loaded[0][1]["bitstampUSD"].getClose(), 5.07)
self.assertEquals(loaded[0][1]["bitstampUSD"].getVolume(), 1.39081288)
self.assertEquals(loaded[-1][0], dt.as_utc(datetime.datetime(2012, 5, 30, 23, 49, 21)))
self.assertEquals(loaded[-1][1]["bitstampUSD"].getDateTime(), dt.as_utc(datetime.datetime(2012, 5, 30, 23, 49, 21)))
self.assertEquals(loaded[-1][1]["bitstampUSD"].getClose(), 5.14)
self.assertEquals(loaded[-1][1]["bitstampUSD"].getVolume(), 20)
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/SignatureFormResourcesResponse.py
|
Python
|
apache-2.0
| 1,182
| 0.007614
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureFormResourcesResponse:
"""
    NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'SignatureFormResourcesResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # SignatureFormResourcesResult
self.status = None # str
        self.error_message = None  # str
self.composedOn = None # int
|
cgstudiomap/cgstudiomap
|
main/local_modules/res_partner_phone_missing_details/__openerp__.py
|
Python
|
agpl-3.0
| 1,443
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap <cgstudiomap@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Res Partner phone: missing details',
'version': '0.2',
'author': 'cgstudiomap',
'maintainer': 'cgstudiomap',
'license': 'AGPL-3',
'category': 'Sales',
'summary': 'Set up for phone for missing details bot',
'depends': [
'res_partner_missing_details',
'base_phone_validation',
],
'external_dependencies': {},
'data': [
'missing_details.xml',
],
    'installable': True,
}
|
google-research/google-research
|
kws_streaming/models/ds_tc_resnet_test.py
|
Python
|
apache-2.0
| 3,076
| 0.002276
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for ds_tc_resnet model in session mode."""
import numpy as np
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.layers.modes import Modes
from kws_streaming.models import utils
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
from kws_streaming.train import inference
class DsTcResnetTest(tf.test.TestCase):
"""Test ds_tc_resnet model in non streaming and streaming modes."""
def setUp(self):
super(DsTcResnetTest, self).setUp()
config = tf1.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf1.Session(config=config)
tf1.keras.backend.set_session(self.sess)
tf.keras.backend.set_learning_phase(0)
test_utils.set_seed(123)
self.params = utils.ds_tc_resnet_model_params(True)
self.model = ds_tc_resnet.model(self.params)
self.model.summary()
self.input_data = np.random.rand(self.params.batch_size,
self.params.desired_samples)
# run non streaming inference
self.non_stream_out = self.model.predict(self.input_data)
def test_ds_tc_resnet_stream(self):
"""Test for tf streaming with internal state."""
# prepare tf streaming model
model_stream = utils.to_streaming_inference(
self.model, self.params, Modes.STREAM_INTERNAL_STATE_INFERENCE)
model_stream.summary()
# run streaming inference
stream_out = inference.run_stream_inference_classification(
self.params, model_stream, self.input_data)
self.assertAllClose(stream_out, self.non_stream_out, atol=1e-5)
def test_ds_tc_resnet_stream_tflite(self):
"""Test for tflite streaming with external state."""
tflite_streaming_model = utils.model_to_tflite(
self.sess, self.model, self.params,
Modes.STREAM_EXTERNAL_STATE_INFERENCE)
interpreter = tf.lite.Interpreter(model_content=tflite_streaming_model)
interpreter.allocate_tensors()
# before processing new test sequence we reset model state
inputs = []
for detail in interpreter.get_input_details():
      inputs.append(np.zeros(detail['shape'], dtype=np.float32))
stream_out = inference.run_stream_inference_classification_tflite(
self.params, interpreter, self.input_data, inputs)
self.assertAllClose(stream_out, self.non_stream_out, atol=1e-5)
if __name__ == '__main__':
  tf1.disable_eager_execution()
tf.test.main()
|
BitcoinUnlimited/BitcoinUnlimited
|
qa/rpc-tests/pruning.py
|
Python
|
mit
| 18,143
| 0.005677
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
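# Added illustration: calc_usage sums the sizes of the regular files directly
# inside `blockdir` and reports the total in MiB, e.g.
#   calc_usage(self.options.tmpdir + "/node2/regtest/blocks/")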
class PruneTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.address = ["",""]
self.txouts = gen_return_txouts()
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-blockchain.maxReorgDepth=-1"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-blockchain.maxReorgDepth=-1"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-prune=550", "-blockchain.maxReorgDepth=-1"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
self.address[0] = self.nodes[0].getnewaddress()
self.address[1] = self.nodes[1].getnewaddress()
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:3])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
counts = [ x.getblockcount() for x in self.nodes ]
print(counts)
self.mine_full_block(self.nodes[0],self.address[0])
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 10:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
    def test_height_after_sync(self):
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-blockchain.maxReorgDepth=-1"], timewait=900))
self.prunedir = self.options.tmpdir+"/node3/regtest/blocks/"
connect_nodes(self.nodes[3], 1)
# wait for the first blocks to arrive on node3 before mining the next
# blocks. We have to make sure the first block file has a starting height
# before doing any mining.
while self.nodes[3].getblockcount() <= 0:
time.sleep(0.1)
# Mine several new blocks while the chain on node 3 is syncing. This
# should not allow new blocks to get into the block files until we
# are within 144 blocks of the chain tip. If new blocks do get into the
# first block file then we won't be able to prune it and the test will fail.
for i in range(20):
print ("generate a block")
self.nodes[1].generate(1)
counts = [ x.getblockcount() for x in self.nodes ]
print(counts)
time.sleep(0.5)
sync_blocks(self.nodes)
#check that first block file was pruned.
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 10:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-blockchain.maxReorgDepth=-1"], timewait=900)
# Mine 24 blocks in node 1
self.utxo = self.nodes[1].listunspent()
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
self.utxo = self.nodes[0].listunspent()
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-rpcservertimeout=0", "-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode", "-blockchain.maxReorgDepth=-1"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
|
drawquest/drawquest-web
|
website/drawquest/apps/content_metadata/migrations/0004_auto__add_field_contentmetadata_iphone_gallery_id.py
|
Python
|
bsd-3-clause
| 1,633
| 0.007961
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ContentMetadata.iphone_gallery_id'
db.add_column(u'content_metadata_contentmetadata', 'iphone_gallery_id',
self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ContentMetadata.iphone_gallery_id'
db.delete_column(u'content_metadata_contentmetadata', 'iphone_gallery_id')
models = {
u'content_metadata.contentmetadata': {
'Meta': {'object_name': 'ContentMetadata'},
'activity_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'archive_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'content_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'gallery_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'homepage_featured_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iphone_gallery_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['content_metadata']
|
information-machine/information-machine-api-python
|
InformationMachineAPILib/Models/UserData.py
|
Python
|
mit
| 2,966
| 0.003034
|
# -*- coding: utf-8 -*-
"""
InformationMachineAPILib.Models.UserData
"""
from InformationMachineAPILib.APIHelper import APIHelper
class UserData(object):
"""Implementation of the 'UserData' model.
TODO: type model description here.
Attributes:
email (string): TODO: type description here.
zip (string): TODO: type description here.
user_id (string): TODO: type description here.
owner_app_id (string): TODO: type description here.
created_at (string): TODO: type description here.
"""
def __init__(self,
**kwargs):
"""Constructor for the UserData class
Args:
            **kwargs: Keyword Arguments in order to initialise the
object. Any of the attributes in this object are able to
be set through the **kwargs of the constructor. The values
that can be supplied and their types are as follows::
email -- string -- Sets the attribute email
zip -- string -- Sets the attribute zip
user_id -- string -- Sets the attribute user_id
|
owner_app_id -- string -- Sets the attribute owner_app_id
created_at -- string -- Sets the attribute created_at
"""
# Set all of the parameters to their default values
self.email = None
self.zip = None
self.user_id = None
self.owner_app_id = None
self.created_at = None
# Create a mapping from API property names to Model property names
replace_names = {
"email": "email",
"zip": "zip",
"user_id": "user_id",
"owner_app_id": "owner_app_id",
"created_at": "created_at",
}
# Parse all of the Key-Value arguments
if kwargs is not None:
for key in kwargs:
# Only add arguments that are actually part of this object
if key in replace_names:
setattr(self, replace_names[key], kwargs[key])
def resolve_names(self):
"""Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
"""
# Create a mapping from Model property names to API property names
replace_names = {
"email": "email",
"zip": "zip",
"user_id": "user_id",
"owner_app_id": "owner_app_id",
"created_at": "created_at",
}
retval = dict()
return APIHelper.resolve_names(self, replace_names, retval)
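# Added usage sketch: build a UserData from keyword arguments and convert it
# back into the API's wire format. The values below are illustrative only.
def _example_user_data():
    user = UserData(email='jane@example.com', zip='8000', user_id='42')
    # resolve_names() maps the model attributes back to API property names
    return user.resolve_names()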
|
imminent-tuba/thesis
|
server/chatterbot/chatterbot/adapters/logic/weather.py
|
Python
|
mit
| 2,362
| 0.000847
|
from .logic import LogicAdapter
from chatterbot.conversation import Statement
from chatterbot.utils.pos_tagger import POSTagger
import re
import forecastio
class WeatherLogicAdapter(LogicAdapter):
"""
A logic adapter that returns information regarding the weather and
the forecast for a specific location. Currently, only basic information
is returned, but additional features are planned in the future.
"""
def __init__(self, **kwargs):
super(WeatherLogicAdapter, self).__init__(**kwargs)
self.tagger = POSTagger()
self.forecastio_api_key = kwargs.get("forecastio_api_key")
def process(self, statement):
"""
Returns the forecast for a location (using latitude and longitude).
"""
user_input = statement.text.lower()
if "weather" not in user_input:
return 0, Statement("")
latitude = self.get_latitude(user_input)
longitude = self.get_longitude(user_input)
        if latitude != "" and longitude != "":
# @TODO: Add more options for getting weather. This could include
# the current temperature, the current cloud cover, etc. This
# might require removing the forecastio library (which is
            #        probably a good idea).
return 1, Statement("The forecast for tomorrow is: " + self.get_weather(latitude, longitude))
return 0, Statement("")
def get_latitude(self, user_input):
"""
        Returns the latitude extracted from the input.
"""
for token in self.tagger.tokenize(user_input):
if "latitude=" in token:
return re.sub("latitude=", "", token)
return ""
def get_longitude(self, user_input):
"""
Returns the longitude extracted from the input.
"""
for token in self.tagger.tokenize(user_input):
if "longitude=" in token:
return re.sub("longitude=", "", token)
return ""
def get_weather(self, latitude, longitude):
"""
Returns the weather for a given latitude and longitude.
"""
# @TODO: Find some way to suppress the warnings generated by this.
forecast = forecastio.load_forecast(self.forecastio_api_key, latitude, longitude)
return forecast.hourly().summary
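# Added usage sketch (needs a forecast.io API key and network access; the key
# below is a placeholder). The coordinates must appear in the statement text
# as latitude=/longitude= tokens for the adapter to pick them up:
#
#   adapter = WeatherLogicAdapter(forecastio_api_key='YOUR_KEY')
#   confidence, response = adapter.process(
#       Statement('What is the weather at latitude=46.5 longitude=6.6 ?'))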
|
abogushov/django-admin-json-editor
|
example/project/settings.py
|
Python
|
mit
| 2,826
| 0.001415
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w@r7o+u6xj*7p#bhvmrip$jx%^2cmjzb2yk%dgl7#ldsq3ulcc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
    'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_admin_json_editor',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'django_admin_json_editor',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
criloz/todo-app
|
todoapp/models.py
|
Python
|
mit
| 350
| 0.002857
|
from django.db import models
|
# Create your models here.
class Task(models.Model):
created_date = models.DateTimeField(auto_now_add=True)
due_date = models.DateField(null=True, blank=True)
description = models.CharField(max_length=100)
completed = models.BooleanField(default=False)
    archived = models.BooleanField(default=False)
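# Added helper sketch: a typical query against this model, listing open
# (not completed, not archived) tasks ordered by due date.
def open_tasks():
    return Task.objects.filter(completed=False, archived=False).order_by('due_date')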
|
antiface/audiolazy
|
examples/dft_pitch.py
|
Python
|
gpl-3.0
| 3,525
| 0.013333
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Wed May 01 2013
# danilo [dot] bellini [at] gmail [dot] com
"""
Pitch follower via DFT peak with Tkinter GUI
"""
# ------------------------
# AudioLazy pitch follower
# ------------------------
import sys
from audiolazy import (tostream, AudioIO, freq2str, sHz, chunks,
lowpass, envelope, pi, thub, Stream, maverage)
from numpy.fft import rfft
def limiter(sig, threshold=.1, size=256, env=envelope.rms, cutoff=pi/2048):
sig = thub(sig, 2)
return sig * Stream( 1. if el <= threshold else threshold / el
for el in maverage(size)(env(sig, cutoff=cutoff)) )
@tostream
def dft_pitch(sig, size=2048, hop=None):
for blk in Stream(sig).blocks(size=size, hop=hop):
dft_data = rfft(blk)
idx, vmax = max(enumerate(dft_data),
key=lambda el: abs(el[1]) / (2 * el[0] / size + 1)
)
yield 2 * pi * idx / size
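# Added sanity-check sketch: feeding `dft_pitch` a pure sine should recover a
# pitch within roughly one DFT bin (about rate/size Hz) of the input frequency.
# Only `Stream` and `sHz` from the imports above plus the standard library are
# used; the values are illustrative.
def _check_dft_pitch(freq_hz=440., rate=44100, size=2048):
    import math
    _s, Hz = sHz(rate)
    w = freq_hz * Hz                                   # angular frequency (rad/sample)
    sig = Stream(math.sin(w * n) for n in range(4 * size))
    estimate = next(iter(dft_pitch(sig, size=size, hop=size)))
    return estimate / Hz                               # close to freq_hz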
def pitch_from_mic(upd_time_in_ms):
rate = 44100
s, Hz = sHz(rate)
api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line
chunks.size = 1 if api == "jack" else 16
with AudioIO(api=api) as recorder:
snd = recorder.record(rate=rate)
sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
hop = int(upd_time_in_ms * 1e-3 * s)
for pitch in freq2str(dft_pitch(sndlow, size=2*hop, hop=hop) / Hz):
yield pitch
# ----------------
# GUI with tkinter
# ----------------
if __name__ == "__main__":
try:
import tkinter
except ImportError:
import Tkinter as tkinter
import threading
import re
# Window (Tk init), text label and button
tk = tkinter.Tk()
tk.title(__doc__.strip().splitlines()[0])
lbldata = tkinter.StringVar(tk)
lbltext = tkinter.Label(tk, textvariable=lbldata, font=("Purisa", 72),
width=10)
lbltext.pack(expand=True, fill=tkinter.BOTH)
btnclose = tkinter.Button(tk, text="Close", command=tk.destroy,
default="active")
btnclose.pack(fill=tkinter.X)
# Needed data
regex_note = re.compile(r"^([A-Gb#]*-?[0-9]*)([?+-]?)(.*?%?)$")
upd_time_in_ms = 200
# Update functions for each thread
def upd_value(): # Recording thread
pitches = iter(pitch_from_mic(upd_time_in_ms))
while not tk.should_finish:
tk.value = next(pitches)
def upd_timer(): # GUI mainloop thread
lbldata.set("\n".join(regex_note.findall(tk.value)[0]))
tk.after(upd_time_in_ms, upd_timer)
# Multi-thread management initialization
tk.should_finish = False
tk.value = freq2str(0) # Starting value
lbldata.set(tk.value)
tk.upd_thread = threading.Thread(target=upd_value)
# Go
tk.upd_thread.start()
tk.after_idle(upd_timer)
tk.mainloop()
tk.should_finish = True
tk.upd_thread.join()
|
azaghal/ansible
|
test/support/integration/plugins/modules/k8s_info.py
|
Python
|
gpl-3.0
| 5,086
| 0.002359
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Will Thames <@willthames>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_info
short_description: Describe Kubernetes (K8s) objects
version_added: "2.7"
author:
- "Will Thames (@willthames)"
description:
- Use the OpenShift Python client to perform read operations on K8s objects.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
- This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
options:
api_version:
description:
    - Use to specify the API version. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object.
required: yes
name:
description:
- Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object.
namespace:
description:
- Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
to identify a specific object.
label_selectors:
description: List of label selectors to use to filter results
field_selectors:
description: List of field selectors to use to filter results
extends_documentation_fragment:
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Get an existing Service object
k8s_info:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_info:
api_version: v1
kind: Service
namespace: testing
register: service_list
- name: Get a list of all pods from any namespace
k8s_info:
kind: Pod
register: pod_list
- name: Search for all Pods labelled app=web
k8s_info:
kind: Pod
label_selectors:
- app = web
- tier in (dev, test)
- name: Search for all running pods
k8s_info:
kind: Pod
field_selectors:
- status.phase=Running
'''
RETURN = '''
resources:
description:
- The object(s) that exists
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: dict
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: dict
'''
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
import copy
class KubernetesInfoModule(KubernetesAnsibleModule):
def __init__(self, *args, **kwargs):
KubernetesAnsibleModule.__init__(self, *args,
supports_check_mode=True,
**kwargs)
if self._name == 'k8s_facts':
self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'",
                           version='2.13', collection_name='ansible.builtin')
def execute_module(self):
self.client = self.get_api_client()
self.exit_json(changed=False,
                       **self.kubernetes_facts(self.params['kind'],
                                               self.params['api_version'],
self.params['name'],
self.params['namespace'],
self.params['label_selectors'],
self.params['field_selectors']))
@property
def argspec(self):
args = copy.deepcopy(AUTH_ARG_SPEC)
args.update(
dict(
kind=dict(required=True),
api_version=dict(default='v1', aliases=['api', 'version']),
name=dict(),
namespace=dict(),
label_selectors=dict(type='list', default=[]),
field_selectors=dict(type='list', default=[]),
)
)
return args
def main():
KubernetesInfoModule().execute_module()
if __name__ == '__main__':
main()
|
meteoswiss-mdr/precipattractor
|
pyscripts/maple_machine-learning_predict-maps.py
|
Python
|
gpl-3.0
| 13,253
| 0.009658
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
import time_tools_attractor as ti
ti.tic()
import getpass
usrName = getpass.getuser()
import argparse
import sys
import os
import numpy as np
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pylab
from pylab import get_cmap
from sklearn.externals import joblib # same purpose as pickle but more efficient with big data / can only pickle to disk
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# My modules
import gis_base as gis
import data_tools_attractor as dt
import time_tools_attractor as ti
ti.toc('to load python modules.')
fmt1 = "%.1f"
fmt2 = "%.2f"
fmt3 = "%.3f"
#########################################################################
############# PARAMETERS FOR SYNTHETIC CASES ############################
X_predictors = ['x_d', 'y_d', 'u', 'v', 'hzt_o'] #, 'daytime_sin', 'daytime_cos']
## Parameters for synthetic cases
sectorSizeDeg = 5
flowDirection_degN_array = np.arange(0,360, sectorSizeDeg)
#flowDirection_degN_array = [180+45]
flowSpeed_kmh_array = [30]
hztHeight_m_array = np.arange(500,4600,100)
hztHeight_m_array = [3000]
dayTime_array = np.arange(0,24)
dayTime_array = [12]
#########################################################################
################# INPUT ARGS ############################################
parser = argparse.ArgumentParser(description='Compute MAPLE archive statistics.')
parser.add_argument('-model', default='mlp', type=str, help='Which model to train [mlp, ols, dt] or the file containing the already trained model to use for predictions.')
parser.add_argument('-q', default=None, type=float, nargs='+', help='Which quantile to predict. Only available with qrf (quantile random forests). If two values are passed it predicts the difference.')
parser.add_argument('-fmt', default='png', type=str, help='Figure format.')
args = parser.parse_args()
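# Example invocation (added; the model name and figure format are illustrative):
#   python maple_machine-learning_predict-maps.py -model mlp -fmt png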
####
tmpDir = '/scratch/lforesti/ml_tmp/'
outBaseDir = '/users/lforesti/results/maple/ml/'
fig_fmt = args.fmt
if fig_fmt == 'png':
fig_dpi = 100
elif (fig_fmt == 'pdf') or (fig_fmt == 'eps'):
fig_dpi = None
else:
fig_dpi = 200
# Check input model
model_list = ['mlp', 'ols', 'dt', 'gp', 'knn', 'qrf']
if (args.model in model_list):
print('Train', args.model, 'model.')
elif os.path.isfile(args.model):
print('Load', args.model)
else:
    print('Invalid -model option. Either train a', model_list, 'model or load an already trained model file.')
sys.exit()
print('')
########################################################################################################
###### LOAD already trained model ######################################################################
if os.path.isfile(args.model):
print('No training.')
best_model = joblib.load(args.model)
print(args.model, 'read.')
print(best_model)
if hasattr(best_model, 'variables'):
X_predictors = best_model.variables
else:
print('File', args.model, 'not found.')
sys.exit()
############### SELECT PREDICTORS ######################################################################
# List of all variable names
X_varnames = ['x_d', 'y_d', 'u', 'v', 'hzt_o', 'daytime_sin', 'daytime_cos']
X_varnames_dict = dict(zip(X_varnames, np.arange(0,len(X_varnames))))
print('Predictors chosen:')
print(X_predictors)
X_ids = [X_varnames_dict[k] for k in X_predictors]
##### Load file to scale the data
fileName_scaler = tmpDir + 'scaler.pkl'
scaler = joblib.load(fileName_scaler)
print(fileName_scaler, 'read.')
########################################################################################################
####### PREDICT ON NEW SYNTHETIC DATA ##################################################################
# Animate PNGs
# convert mlp_predictions_flow_225SW_speed30kmh_hzt*m.png -set delay 40 -duplicate 1,-2-1 anim_SW.gif
import maple_dataload
cmaps = maple_dataload.generate_colormaps()
geo = maple_dataload.generate_geo()
# Generate grid of spatial coordinates (extent of predictions)
res_km = 8
x_min = 310
x_max = 910
y_min = -100
y_max = 440
extent_image = np.array([x_min-res_km/2, x_max+res_km/2, y_min-res_km/2, y_max+res_km/2])*1000
print(extent_image)
x_vec = np.arange(x_min, x_max + res_km, res_km)
y_vec = np.arange(y_min, y_max + res_km, res_km)
x_grid, y_grid = np.meshgrid(x_vec, y_vec)
y_grid = np.flipud(y_grid)
# Generate array of flow directions
flowDirection_compassShort_array = dt.deg2compass(flowDirection_degN_array, 'short')
flowDirection_compassLong_array = dt.deg2compass(flowDirection_degN_array, 'long')
print('Predicting with', best_model.name.upper(), 'on synthetic cases...')
for flowDirection_degN, flowDirection_compassShort, flowDirection_compassLong in zip(flowDirection_degN_array, flowDirection_compassShort_array, flowDirection_compassLong_array):
for flowSpeed_kmh in flowSpeed_kmh_array:
for hztHeight_m in hztHeight_m_array:
for dayTime in dayTime_array:
# Generate homogeneous grids of flow vectors and hzt
flowDirection_deg = dt.degN2deg(flowDirection_degN)
flowDirection_rad = np.deg2rad(flowDirection_deg)
u = -flowSpeed_kmh*np.cos(flowDirection_rad) # Remember to turn the vectors!
v = -flowSpeed_kmh*np.sin(flowDirection_rad)
#print(flowDirection_compassShort, flowDirection_degN, flowDirection_deg, u, v)
u_grid = u*np.ones(x_grid.shape)
v_grid = v*np.ones(x_grid.shape)
hzt_grid = hztHeight_m*np.ones(x_grid.shape)
# Generate homogeneous grids of daytime (sin,cos)
dayTimeSin, dayTimeCos = ti.daytime2circular(dayTime)
dayTimeSin_grid = dayTimeSin*np.ones(x_grid.shape)
dayTimeCos_grid = dayTimeCos*np.ones(x_grid.shape)
# Flatten
X_pred = np.column_stack((x_grid.flatten(), y_grid.flatten(), u_grid.flatten(), v_grid.flatten(), hzt_grid.flatten(), dayTimeSin_grid.flatten(), dayTimeCos_grid.flatten()))
X_pred_scaled = scaler.transform(X_pred)
# Select predictors
X_pred_scaled = X_pred_scaled[:, X_ids]
#################
# Predict growth and decay on grid
y_pred = best_model.predict(X_pred_scaled)
cmap = cmaps.cmapLog
norm = cmaps.normLog
clevs = cmaps.clevsLog
# Reshape results
y_pred_grid = np.reshape(y_pred, x_grid.shape)
########### PLOTTING ##########################
# Plot predictions
ratioFigWH = (x_max-x_min)/(y_max-y_min)
figWidth = 10
fig = plt.figure(figsize=(figWidth,figWidth/ratioFigWH))
ax = fig.add_subplot(111)
# Draw DEM
ax.imshow(geo.demImg, extent=geo.extent_CCS4, cmap=cmaps.cmapDEM, norm=cmaps.normDEM, alpha=cmaps.alphaDEM)
# Draw prediction map
im = plt.imshow(y_pred_grid, extent=extent_image, cmap=cmap, norm=norm, interpolation='nearest')#, alpha=cmaps.alpha)
# Draw contour 1000 m
ax.contour(np.flipud(geo.demImg_smooth), levels=[1000], colors='gray', extent=extent_image, alpha=0.5)
# Draw mask
ax.imshow(geo.radarMask, cmap=cmaps.cmapMask, extent=geo.radarExtent, alpha=0.5)
# Axes
plt.xticks([], [])
|
AnthonyBriggs/Python-101
|
hello_python_source_py3/chapter 06/3/item.py
|
Python
|
mit
| 357
| 0.011204
|
class Item(object):
def __init__(self, name, description, location):
self.name = name
self.description = description
        self.location = location
location.here.append(self)
actions = ['look']
def look(self, player, noun):
""" Looking at the object """
return [self.description]
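# Added usage sketch: Item registers itself with `location.here`, so any object
# exposing a `here` list can act as a location. The stub below is illustrative.
class _StubLocation(object):
    def __init__(self):
        self.here = []
def _example_item():
    room = _StubLocation()
    lamp = Item('lamp', 'A brass lamp.', room)
    assert room.here == [lamp]
    return lamp.look(player=None, noun='lamp')   # -> ['A brass lamp.']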
|
Hellowlol/plexapi
|
tests/test_navigation.py
|
Python
|
bsd-3-clause
| 3,622
| 0.004694
|
# -*- coding: utf-8 -*-
from utils import log, register
from plexapi import CONFIG
# TODO: test_navigation/test_navigate_to_movie
# FAIL: (500) internal_server_error
# @register()
def test_navigate_to_movie(account, plex):
result_library = plex.library.get(CONFIG.movie_title)
result_movies = plex.library.section(CONFIG.movie_section).get(CONFIG.movie_title)
log(2, 'Navigating to: %s' % CONFIG.movie_title)
log(2, 'Result Library: %s' % result_library)
log(2, 'Result Movies: %s' % result_movies)
assert result_movies, 'Movie navigation not working.'
    assert result_library == result_movies, 'Movie navigation not consistent.'
@register()
def test_navigate_to_show(account, plex):
result_shows = plex.library.section(CONFIG.show_section).get(CONFIG.show_title)
log(2, 'Navigating to: %s' % CONFIG.show_title)
log(2, 'Result Shows: %s' % result_shows)
|
assert result_shows, 'Show navigation not working.'
# TODO: Fix test_navigation/test_navigate_around_show
# FAIL: Unable to list season: Season 1
# @register()
def test_navigate_around_show(account, plex):
show = plex.library.section(CONFIG.show_section).get(CONFIG.show_title)
seasons = show.seasons()
season = show.season(CONFIG.show_season)
episodes = show.episodes()
episode = show.episode(CONFIG.show_episode)
log(2, 'Navigating around show: %s' % show)
log(2, 'Seasons: %s...' % seasons[:3])
log(2, 'Season: %s' % season)
log(2, 'Episodes: %s...' % episodes[:3])
log(2, 'Episode: %s' % episode)
assert CONFIG.show_season in [s.title for s in seasons], 'Unable to list season: %s' % CONFIG.show_season
assert CONFIG.show_episode in [e.title for e in episodes], 'Unable to list episode: %s' % CONFIG.show_episode
assert show.season(CONFIG.show_season) == season, 'Unable to get show season: %s' % CONFIG.show_season
assert show.episode(CONFIG.show_episode) == episode, 'Unable to get show episode: %s' % CONFIG.show_episode
assert season.episode(CONFIG.show_episode) == episode, 'Unable to get season episode: %s' % CONFIG.show_episode
assert season.show() == show, 'season.show() doesnt match expected show.'
assert episode.show() == show, 'episode.show() doesnt match expected show.'
assert episode.season() == season, 'episode.season() doesnt match expected season.'
@register()
def test_navigate_around_artist(account, plex):
artist = plex.library.section(CONFIG.audio_section).get(CONFIG.audio_artist)
albums = artist.albums()
album = artist.album(CONFIG.audio_album)
tracks = artist.tracks()
track = artist.track(CONFIG.audio_track)
log(2, 'Navigating around artist: %s' % artist)
log(2, 'Albums: %s...' % albums[:3])
log(2, 'Album: %s' % album)
log(2, 'Tracks: %s...' % tracks[:3])
log(2, 'Track: %s' % track)
assert CONFIG.audio_album in [a.title for a in albums], 'Unable to list album: %s' % CONFIG.audio_album
assert CONFIG.audio_track in [e.title for e in tracks], 'Unable to list track: %s' % CONFIG.audio_track
assert artist.album(CONFIG.audio_album) == album, 'Unable to get artist album: %s' % CONFIG.audio_album
assert artist.track(CONFIG.audio_track) == track, 'Unable to get artist track: %s' % CONFIG.audio_track
assert album.track(CONFIG.audio_track) == track, 'Unable to get album track: %s' % CONFIG.audio_track
assert album.artist() == artist, 'album.artist() doesnt match expected artist.'
assert track.artist() == artist, 'track.artist() doesnt match expected artist.'
assert track.album() == album, 'track.album() doesnt match expected album.'
|
OpenBCI/OpenBCI_Python
|
openbci/plugins/csv_collect.py
|
Python
|
mit
| 1,932
| 0.000518
|
from __future__ import print_function
import csv
import timeit
import datetime
import plugin_interface as plugintypes
class PluginCSVCollect(plugintypes.IPluginExtended):
def __init__(self, file_name="collect.csv", delim=",", verbose=False):
now = datetime.datetime.now()
self.time_stamp = '%d-%d-%d_%d-%d-%d' \
                          % (now.year, now.month, now.day, now.hour, now.minute, now.second)
self.file_name = self.time_stamp
self.start_time = timeit.default_timer()
self.delim = delim
self.verbose = verbose
def activate(self):
if len(self.args) > 0:
if 'no_time' in self.args:
self.file_name = self.args[0]
else:
                self.file_name = self.args[0] + '_' + self.file_name
if 'verbose' in self.args:
self.verbose = True
self.file_name = self.file_name + '.csv'
print("Will export CSV to:" + self.file_name)
# Open in append mode
with open(self.file_name, 'a') as f:
f.write('%' + self.time_stamp + '\n')
def deactivate(self):
print("Closing, CSV saved to:" + self.file_name)
return
def show_help(self):
print("Optional argument: [filename] (default: collect.csv)")
def __call__(self, sample):
t = timeit.default_timer() - self.start_time
# print(timeSinceStart|Sample Id)
if self.verbose:
print("CSV: %f | %d" % (t, sample.id))
row = ''
row += str(t)
row += self.delim
row += str(sample.id)
row += self.delim
for i in sample.channel_data:
row += str(i)
row += self.delim
for i in sample.aux_data:
row += str(i)
row += self.delim
# remove last comma
row += '\n'
with open(self.file_name, 'a') as f:
f.write(row)
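# Added usage sketch: the plugin only needs objects exposing `id`,
# `channel_data` and `aux_data`, so a tiny stand-in sample is enough to
# exercise __call__ outside of the OpenBCI board loop. Names are illustrative.
class _FakeSample(object):
    def __init__(self, sample_id, channel_data, aux_data):
        self.id = sample_id
        self.channel_data = channel_data
        self.aux_data = aux_data
def _example_collect():
    plugin = PluginCSVCollect(verbose=True)
    plugin.args = []          # normally provided by the plugin loader
    plugin.activate()         # output file is named after the start timestamp
    plugin(_FakeSample(0, [1.0, 2.0], [0.0]))
    plugin.deactivate()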
|
jdemel/gnuradio
|
gnuradio-runtime/python/gnuradio/gr/qa_hier_block2.py
|
Python
|
gpl-3.0
| 4,084
| 0.006856
|
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import time
from gnuradio import gr_unittest, blocks, gr, analog
from gnuradio.gr.hier_block2 import _multiple_endpoints, _optional_endpoints
import pmt
class test_hblk(gr.hier_block2):
def __init__(self, io_sig=1*[gr.sizeof_gr_complex], ndebug=2):
# parent constructor
gr.hier_block2.__init__(self,
"test_hblk",
gr.io_signature(len(io_sig), len(io_sig), io_sig[0]),
gr.io_signature(0,0,0))
self.message_port_register_hier_in("msg_in");
# Internal Stream Blocks
self.vsnk = blocks.vector_sink_c()
# Internal Msg Blocks
self.blks = [];
for i in range(0, ndebug):
self.blks.append( blocks.message_debug() )
# Set up internal connections
self.connect( self, self.vsnk )
for blk in self.blks:
            self.msg_connect( self, "msg_in", blk, "print" )
class test_hier_block2(gr_unittest.TestCase):
def setUp(self):
self.call_log = []
self.Block = type("Block", (), {"to_basic_block": lambda bl: bl})
def test_f(self, *args):
"""test doc"""
self.call_log.append(args)
multi = _multiple_endpoints(test_f)
opt = _optional_endpoints(test_f)
def test_000(self):
        self.assertEqual(self.multi.__doc__, "test doc")
self.assertEqual(self.multi.__name__, "test_f")
def test_001(self):
b = self.Block()
self.multi(b)
self.assertEqual((b,), self.call_log[0])
def test_002(self):
b1, b2 = self.Block(), self.Block()
self.multi(b1, b2)
self.assertEqual([(b1, 0, b2, 0)], self.call_log)
def test_003(self):
b1, b2 = self.Block(), self.Block()
self.multi((b1, 1), (b2, 2))
self.assertEqual([(b1, 1, b2, 2)], self.call_log)
def test_004(self):
b1, b2, b3, b4 = [self.Block()] * 4
self.multi(b1, (b2, 5), b3, (b4, 0))
expected = [
(b1, 0, b2, 5),
(b2, 5, b3, 0),
(b3, 0, b4, 0),
]
self.assertEqual(expected, self.call_log)
def test_005(self):
with self.assertRaises(ValueError):
self.multi((self.Block(), 5))
def test_006(self):
with self.assertRaises(ValueError):
self.multi(self.Block(), (5, 5))
def test_007(self):
b1, b2 = self.Block(), self.Block()
self.opt(b1, "in", b2, "out")
self.assertEqual([(b1, "in", b2, "out")], self.call_log)
def test_008(self):
f, b1, b2 = self.multi, self.Block(), self.Block()
self.opt((b1, "in"), (b2, "out"))
self.assertEqual([(b1, "in", b2, "out")], self.call_log)
def test_009(self):
with self.assertRaises(ValueError):
self.multi(self.Block(), 5)
def test_010(self):
s, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 0)
tb = gr.top_block()
tb.connect(s,h,k)
tb.run()
def test_011(self):
s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 1)
tb = gr.top_block()
tb.connect(s,h,k)
tb.msg_connect(st,"strobe",k,"msg_in")
tb.start()
time.sleep(1)
tb.stop()
tb.wait()
def test_012(self):
s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 16)
tb = gr.top_block()
tb.connect(s,h,k)
tb.msg_connect(st,"strobe",k,"msg_in")
tb.start()
time.sleep(1)
tb.stop()
tb.wait()
if __name__ == '__main__':
gr_unittest.run(test_hier_block2, "test_hier_block2.xml")
|
ver228/tierpsy-tracker
|
tierpsy/analysis/vid_subsample/createSampleVideo.py
|
Python
|
mit
| 4,145
| 0.015923
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 18 18:22:12 2016
@author: ajaver
"""
import os
import cv2
import tables
import numpy as np
from tierpsy.helper.params import read_fps
from tierpsy.helper.misc import TimeCounter, print_flush
def getSubSampleVidName(masked_image_file):
#used by AnalysisPoints.py and CheckFinished.py
return masked_image_file.replace('.hdf5', '_subsample.avi')
def _getCorrectedTimeVec(fid, tot_frames):
'''time vector used to account for missing frames'''
try:
timestamp_ind = fid.get_node('/timestamp/raw')[:]
#remove any nan, I notice that sometimes the last number is a nan
timestamp_ind = timestamp_ind[~np.isnan(timestamp_ind)]
tot_timestamps = int(timestamp_ind[-1])
if timestamp_ind.size < tot_frames-1 or tot_timestamps < tot_frames-1: #invalid timestamp
#if there is not valid frames skip
raise ValueError
except (tables.exceptions.NoSuchNodeError, ValueError, IndexError):
return np.arange(tot_frames)
#make sure to compensate for missing frames, so the video will have similar length.
tt_vec = np.full(tot_timestamps+1, np.nan)
current_frame = 0
for ii in range(tot_timestamps+1):
tt_vec[ii] = current_frame
current_timestamp = timestamp_ind[current_frame]
if current_timestamp <= ii:
current_frame += 1
return tt_vec
def createSampleVideo(masked_image_file,
sample_video_name = '',
time_factor = 8,
size_factor = 5,
skip_factor = 2,
dflt_fps=30,
codec='MPEG',
shift_bgnd = False):
#skip factor is to reduce the size of the movie by using less frames (so we use 15fps for example instead of 30fps)
#%%
if not sample_video_name:
sample_video_name = getSubSampleVidName(masked_image_file)
# initialize timers
base_name = masked_image_file.rpartition('.')[0].rpartition(os.sep)[-1]
progressTime = TimeCounter('{} Generating subsampled video.'.format(base_name))
with tables.File(masked_image_file, 'r') as fid:
masks = fid.get_node('/mask')
tot_frames, im_h, im_w = masks.shape
im_h, im_w = im_h//size_factor, im_w//size_factor
fps = read_fps(masked_image_file, dflt_fps)
tt_vec = _getCorrectedTimeVec(fid, tot_frames)
#%%
#codec values that work 'H264' #'MPEG' #XVID
vid_writer = cv2.VideoWriter(sample_video_name, \
cv2.VideoWriter_fourcc(*codec), fps/skip_factor, (im_w,im_h), isColor=False)
assert vid_writer.isOpened()
if shift_bgnd:
#lazy bgnd calculation, just take the last and first frame and get the top 95 pixel value
mm = masks[[0,-1], :, :]
_bgnd_val = np.percentile(mm[mm!=0], [97.5])[0]
for frame_number in range(0, tot_frames, int(time_factor*skip_factor)):
current_frame = int(tt_vec[frame_number])
img = masks[current_frame]
if shift_bgnd:
img[img==0] = _bgnd_val
im_new = cv2.resize(img, (im_w,im_h))
vid_writer.write(im_new)
if frame_number % (500*time_factor) == 0:
# calculate the progress and put it in a string
print_flush(progressTime.get_str(frame_number))
vid_writer.release()
print_flush(progressTime.get_str(frame_number) + ' DONE.')
#%%
if __name__ == '__main__':
    #mask_file_name = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/Agar_Test/MaskedVideos/Agar_Screening_101116/N2_N10_F1-3_Set1_Pos3_Ch6_12112016_002739.hdf5'
#masked_image_file = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/Agar_Test/MaskedVideos/Agar_Screening_101116/unc-9_N3_F1-3_Set1_Pos3_Ch4_12112016_002739.hdf5'
masked_image_file = r'C:\Users\wormrig\Documents\GitHub\Multiworm_Tracking\Tests\data\test_1\MaskedVideos\Capture_Ch1_18062015_140908.hdf5'
createSampleVideo(masked_image_file)
|
praekeltfoundation/mc2-freebasics
|
freebasics/views.py
|
Python
|
bsd-2-clause
| 2,964
| 0
|
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.conf import settings
from mc2.controllers.base.views import ControllerCreateView, ControllerEditView
from mc2.views import HomepageView
from mc2.organizations.models import Organization
from mc2.organizations.utils import active_organization
from mc2.controllers.base import tasks
from freebasics.models import FreeBasicsTemplateData, FreeBasicsController
from freebasics.serializers import FreeBasicsDataSerializer
from rest_framework import generics
class TemplateDataCreate(generics.ListCreateAPIView):
queryset = FreeBasicsTemplateData.objects.all()
serializer_class = FreeBasicsDataSerializer
def perform_create(self, serializer):
data = serializer.validated_data
controller = FreeBasicsController.objects.create(
owner=self.request.user,
organization=active_organization(self.request),
docker_image=settings.FREE_BASICS_DOCKER_IMAGE,
volume_path=settings.FREE_BASICS_VOLUME_PATH,
volume_needed=True,
port=settings.FREE_BASICS_DOCKER_PORT,
marathon_health_check_path=settings.FREE_BASICS_HEALTH_CHECK_PATH,
name=data.get('site_name'),
domain_urls='%s.%s' % (
data.get('site_name_url'),
settings.FREE_BASICS_MOLO_SITE_DOMAIN)
)
serializer.save(controller=controller)
tasks.start_new_controller.delay(controller.id)
class TemplateDataManage(generics.RetrieveUpdateDestroyAPIView):
queryset = FreeBasicsTemplateData.objects.all()
serializer_class = FreeBasicsDataSerializer
def perform_update(self, serializer):
instance = serializer.save()
instance.controller.name = instance.site_name
instance.controller.domain_urls = '%s.%s' % (
instance.site_name_url,
settings.FREE_BASICS_MOLO_SITE_DOMAIN)
instance.controller.save()
tasks.update_marathon_app.delay(instance.controller.id)
class FreeBasicsHomepageView(HomepageView):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
user_orgs = Organization.objects.for_user(request.user)
if not self.get_queryset().exists() and user_orgs.exists():
return redirect(reverse('freebasics_add'))
else:
if not self.get_queryset().exists():
return redirect(reverse('freebasics_add'))
return super(
FreeBasicsHomepageView, self).dispatch(request, *args, **kwargs)
class FreeBasicsControllerCreateView(ControllerCreateView):
template_name = 'freebasics_controller_edit.html'
permissions = ['controllers.docker.add_dockercontroller']
class FreeBasicsControllerEditView(ControllerEditView):
template_name = 'freebasics_controller_edit.html'
    permissions = ['controllers.docker.add_dockercontroller']
|
SVilgelm/CloudFerry
|
cloudferry/condensation/utils.py
|
Python
|
apache-2.0
| 1,549
| 0
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import json
import yaml
from cloudferry import cfglib
def read_file(path):
"""This function reads yaml / json files"""
# check if path has extension
if '.' not in path:
raise RuntimeError("File {path} has no extension".format(path=path))
extension = path.split(".")[-1]
# check if extension is json or yml
extension_map = {"json": json, "yml": yaml, "yaml": yaml}
if extension not in extension_map:
raise RuntimeError(
"File extension of {path} is not yaml/json".format(path=path))
# do actual job
with open(path) as descriptor:
return extension_map.get(extension).load(descriptor)
def store_condense_data(flavors, nodes, vms):
files = {
cfglib.CONF.condense.flavors_file: flavors,
cfglib.CONF.condense.vms_file: vms,
cfglib.CONF.condense.nodes_file: nodes
}
for f in files:
with open(f, 'w') as store:
store.write(json.dumps(files[f]))
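# Illustrative usage sketch for read_file(): the parser is picked purely from
# the file extension, so the same call handles JSON and YAML. The file names
# below are hypothetical and must exist on disk.
if __name__ == '__main__':
    flavors = read_file('flavors.yaml')  # parsed with yaml.load
    nodes = read_file('nodes.json')      # parsed with json.load
    print(flavors, nodes)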
|
deanishe/alfred-fakeum
|
src/libs/faker/providers/phone_number/pt_PT/__init__.py
|
Python
|
mit
| 1,053
| 0
|
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
'(351) 91# ### ###',
'(351) 92# ### ###',
'(351) 93# ### ###',
'(351) 96# ### ###',
'(351) 2## ### ###',
'(351) 91#######',
'(351) 92#######',
'(351) 93#######',
'(351) 96#######',
'(351) 2########',
'(351) 91# ### ###',
'(351) 92# ### ###',
'(351) 93# ### ###',
        '(351) 96# ### ###',
'(351) 2## ### ###',
'(351) 91#######',
'(351) 92#######',
'(351) 93#######',
'(351) 96#######',
'(351) 2########',
'+35191#######',
'+35192#######',
'+35193#######',
'+35196#######',
'+3512########',
'91# ### ###',
'92# ### ###',
'93# ### ###',
'96# ### ###',
'2## ### ###',
'91#######',
'92#######',
'93#######',
'96#######',
'2########',
)
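# Usage sketch (assumes the standard Faker entry point, not part of this
# module): each '#' in the formats above is later replaced with a random
# digit when phone_number() numerifies a randomly chosen format.
#
#     from faker import Faker
#     fake = Faker('pt_PT')
#     fake.phone_number()   # e.g. '(351) 912 345 678'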
|
Alexander-P/Isca
|
exp/python_gfdl/namelists.py
|
Python
|
gpl-3.0
| 6,032
| 0.008289
|
import f90nml
# Adapted from the original core.nml and phys.nml files included in 2006 codebase.
# where the value is the same as the default in the code base, it is commented out
# and therefore not included in the namelist.
# Where the value is different, the code default is shown in an inline comment
basic = f90nml.Namelist({})
basic['spectral_dynamics_nml'] = {
#'damping_option' : 'resolution_dependent',
'damping_order' : 4, # default: 2
#'do_mass_correction': True
#'do_energy_correction': True
#'do_water_correction': True
'water_correction_limit' : 200.e2, # default: 0
#'use_virtual_temperature': False
#'vert_advect_uv' : 'second_centered',
#'vert_advect_t' : 'second_centered',
#'longitude_origin' : 0.0,
#'robert_coeff' : .03, # default: 0.04
#'alpha_implicit' : .5,
'reference_sea_level_press':1.0e5, # default: 101325
#'lon_max' : 128,
#'lat_max' : 64,
'num_levels' : 25, # default: 18
#'num_fourier' : 42,
#'num_spherical' : 43,
#'fourier_inc' : 1,
#'triang_trunc' :True
'valid_range_t' : [100., 800.], # default: (100, 500)
#'initial_state_option' : 'quiescent'
'initial_sphum' : 2.e-6, # default: 0
'vert_coord_option' : 'uneven_sigma', # default: 'even_sigma'
'surf_res' : 0.2, # default: 0.1
'scale_heights' : 11.0, # default: 4
'exponent' : 7.0, # default: 2.5
}
basic['main_nml'] = {
'dt_atmos': 900,
'seconds': 86400.0*30,
'calendar': 'no_calendar'
}
basic['diag_manager_nml'] = {
'mix_snapshot_average_fields': False
}
basic['fms_nml'] = {
'domains_stack_size': 600000 # default: 0
}
basic['fms_io_nml'] = {
'threading_write': 'single', # default: multi
'fileset_write': 'single', # default: multi
}
# from phys.nml
basic['idealized_moist_phys_nml'] = {
#'two_stream_gray': True,
#'do_rrtm_radiation': False,
'convection_scheme': 'betts_miller',
'do_damping': True,
'turb': True,
#'lwet_convection': False,
#'do_bm': True,
'mixed_layer_bc': True,
'do_virtual': False,
'do_simple': True,
# Roughness Lengths for Monin-Obukhov theory:
# Baseline 3.21e-5 m
# Ref: Heng et al: Mon. Not. R. Astron. Soc [418] (2011)
# Frierson et al: J Atmos. Sci [63] (2006)
# roughness_mom:
# Open water: 1e-4m
# Urban terrain: 1m
'roughness_mom': 3.21e-05, # default: 0.05
'roughness_heat': 3.21e-05, # default: 0.05
'roughness_moist': 3.21e-05 # default: 0.05
}
basic['vert_turb_driver_nml'] = {
'do_mellor_yamada': False, # default: True
'do_diffusivity': True, # default: False
'do_simple': True, # default: False
#'do_shallow_conv': False,
#'gust_scheme': 'constant',
'constant_gust': 0.0, # default: 1.0
#'use_tau': False
}
basic['diffusivity_nml'] = {
'do_entrain': False, # default: True
'do_simple': True, # default: False
#'frac_inner': 0.1,
#'rich_crit_pbl': 1.0
}
# basic['monin_obukhov_nml'] = {
# 'neutral': False,
# 'rich_crit': 2.0,
# 'stable_option': 1
# }
basic['surface_flux_nml'] = {
'use_virtual_temp': False,
'do_simple': True,
'old_dtaudv': True
}
basic['atmosphere_nml'] = {
'idealized_moist_model': True
}
# basic['spectral_init_cond_nml'] = {
# 'initial_temperature': 264.0
# }
basic['two_stream_gray_rad_nml'] = {
'rad_scheme': 'frierson', # default: frierson
'do_seasonal': True, # default: False
#'linear_tau': 0.1,
#'solar_constant': 1360.,
#'solar_exponent': 4.0,
#'ir_tau_pole': 1.5,
#'ir_tau_eq': 6.0,
#'del_sol': 1.4,
'atm_abs': 0.2 # default: 0.0
}
basic['mixed_layer_nml'] = {
'albedo_value': 0.27, # default: 0.06
'depth': 100.0, # default: 40.0
'tconst': 285., # default: 305.0
#'delta_T': 40.,
'prescribe_initial_dist': True,
'evaporation': True,
# 'do_qflux': False
}
basic['qe_moist_convection_nml'] = {
# 'tau_bm': 7200.0,
# 'rhbm': 0.7, # default: 0.8
# 'val_inc': 0.01,
'Tmin': 160.,
'Tmax': 350.
}
basic['lscale_cond_nml'] = {
'do_simple':True,
'do_evap': True,
# 'hc': 1.0
}
basic['sat_vapor_pres_nml'] = {
'do_simple': True
}
basic['damping_driver_nml'] = {
'do_rayleigh': True,
'trayfric': -0.5, # neg. value: time in *days*
'sponge_pbottom': 50.,
'do_conserve_energy': True,
# 'do_cg_drag': False
}
# basic['rrtm_radiation_nml'] = {
# 'h2o_lower_limit': 2.e-07,
# 'co2ppmv': 300.,
# 'do_read_ozone': True,
# 'ozone_file': 'ozone_1990',
# 'dt_rad': 4500
# }
basic['astro_nml'] = {
'solr_cnst': 1360. # default: 1368.22
}
basic['betts_miller_nml'] = {
# 'tau_bm': 7200.,
'rhbm': .7 , # default: .8
'do_simp': False,
'do_shallower': True,
# 'do_changeqref': False,
# 'do_envsat': False,
# 'do_taucape': False,
# 'capetaubm': 900.,
# 'tau_min': 2400.
}
# basic['qflux_nml'] = {
# 'qflux_amp': 30.
# }
moist = basic.copy()
dry = basic.copy()
del dry['betts_miller_nml']
dry['idealized_moist_phys_nml']['convection_scheme'] = 'dry'
dry['dry_convection_nml'] = {
'tau': 14400.0, # from tapios/fms-idealized
'gamma': 1.0,
}
dry['lscale_cond_nml']['do_evap'] = False
dry['spectral_dynamics_nml']['initial_sphum'] = 0.0
dry['mixed_layer_nml'].update({
'evaporation': False,
'do_qflux': False
})
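# Illustrative sketch: `basic`, `moist` and `dry` above are f90nml namelist
# mappings, so they can be written straight out as Fortran namelist files for
# the model to read (file names here are hypothetical; force=True overwrites).
if __name__ == '__main__':
    f90nml.write(moist, 'moist_experiment.nml', force=True)
    f90nml.write(dry, 'dry_experiment.nml', force=True)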
|
255BITS/HyperGAN
|
hypergan/trainers/accumulate_gradient_trainer.py
|
Python
|
mit
| 3,338
| 0.003895
|
import numpy as np
import torch
import hyperchamber as hc
import inspect
from torch.autograd import Variable
from hypergan.trainers.alternating_trainer import AlternatingTrainer
class AccumulateGradientTrainer(AlternatingTrainer):
""" G gradients accumulate over many D steps """
def _create(self):
self.d_optimizer = self.create_optimizer("d_optimizer")
self.g_optimizer = self.create_optimizer("g_optimizer")
self.accumulated_g_grads = None
self.accumulation_steps = 0
self.max_grads = None
self.min_grads = None
self.max_variance = None
self.relu = torch.nn.ReLU()
def calculate_gradients(self):
accumulate = (self.config.accumulate or 3)
if self.accumulation_steps == accumulate:
if self.config.type == 'variance':
for i, g in enumerate(self.accumulated_g_grads):
range_stddev = (self.max_grads[i]-self.min_grads[i])/4.0
spread_ratio = (range_stddev / (g+1e-12)).abs()
doubt = torch.clamp(self.relu((self.config.allowed_variance or 1.0) - spread_ratio/(self.config.max_spread or 0.2)), max=1.0)
if self.config.verbose:
print("confidence >>", i, doubt.sum(), "/", np.prod(g.shape), "=", ("%d" % (doubt.sum()/np.prod(g.shape) * 100.0).item())+"%")
self.accumulated_g_grads[i] = g * doubt
g_grads = self.accumulated_g_grads
d_grads = []
#print("G_G", sum([g.abs().sum() for g in self.accumulated_g_grads[0]]), len(self.ac
|
cumulated_g_grads))
self.accumulated_g_grads = None
self.accumulation_steps = 0
else:
gs = self.g_grads()
if self.accumulated_g_grads is None:
                self.accumulated_g_grads = [g.clone()/accumulate for g in gs]
if self.config.type == 'variance':
self.max_grads = [g.clone() for g in gs]
self.min_grads = [g.clone() for g in gs]
else:
for i, g in enumerate(self.accumulated_g_grads):
if self.config.type == 'agree':
self.accumulated_g_grads[i] = (self.accumulated_g_grads[i] + gs[i].clone()/accumulate) * self.relu(torch.sign(self.accumulated_g_grads[i]*gs[i].clone()))
else:
if self.config.type == 'variance':
self.max_grads[i] = torch.max(self.max_grads[i], gs[i].clone())
self.min_grads[i] = torch.min(self.min_grads[i], gs[i].clone())
self.accumulated_g_grads[i] += gs[i].clone() / accumulate
#print("D_G", sum([g.abs().sum() for g in gs]), len(self.accumulated_g_grads))
d_grads = self.d_grads()
g_grads = []
self.accumulation_steps += 1
return d_grads, g_grads
def _step(self, feed_dict):
metrics = self.gan.metrics()
self.before_step(self.current_step, feed_dict)
d_grads, g_grads = self.calculate_gradients()
self.train_d(d_grads)
self.train_g(g_grads)
self.after_step(self.current_step, feed_dict)
if self.current_step % 20 == 0:
self.print_metrics(self.current_step)
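# Standalone sketch of the accumulation idea above (plain PyTorch, not the
# HyperGAN API): average one set of gradients over `accumulate` steps and
# only then apply them, while the other optimizer would step every iteration.
if __name__ == '__main__':
    param = torch.nn.Parameter(torch.zeros(3))
    opt = torch.optim.SGD([param], lr=0.1)
    accumulate, acc_grad = 3, None
    for step in range(9):
        loss = ((param - torch.ones(3)) ** 2).sum()
        grad, = torch.autograd.grad(loss, [param])
        grad = grad / accumulate
        acc_grad = grad if acc_grad is None else acc_grad + grad
        if (step + 1) % accumulate == 0:
            param.grad = acc_grad  # apply the averaged gradient
            opt.step()
            opt.zero_grad()
            acc_grad = None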
|
aronsky/home-assistant
|
homeassistant/components/fail2ban/__init__.py
|
Python
|
apache-2.0
| 30
| 0
|
"
|
""The fail2ban compon
|
ent."""
|
alepulver/changesets
|
patch_analyzer/patch_utils.py
|
Python
|
mit
| 562
| 0.007117
|
from subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
        return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
    classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
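# Illustrative sketch: path_to_class() drops inner-class suffixes and the
# .class extension, so nested classes collapse onto their outer class before
# modified_classes() deduplicates them. The class paths below are hypothetical.
if __name__ == '__main__':
    assert path_to_class("com/example/Foo$1.class") == "com/example/Foo"
    assert path_to_class("com/example/Bar.class") == "com/example/Bar"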
|
trcm/stew
|
stew/views.py
|
Python
|
mit
| 9,961
| 0.002711
|
from rest_framework import generics
from rest_framework.views import APIView
from django.views.generic.base import TemplateView
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication, TokenAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.authtoken.models import Token
from stew.authentication import *
from stew.models import Stewdent, Skill, Work
from stew.serializers import StewdentSerializer, SkillSerializer, WorkSerializer, UserSerializer
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.db.utils import IntegrityError
from django.db import IntegrityError
from django.core import serializers
from django.contrib.auth.models import User
import json
def home(request):
return render(request, 'base.djhtml')
def getStewdent(pk):
try:
return Stewdent.objects.get(pk=pk)
except Stewdent.DoesNotExist:
print "error yo"
raise Http404
def getSkill(pk):
print 'getting stewdent'
try:
stew = Stewdent.objects.get(id=pk)
print stew.first_name
return Skill.objects.get(stewdent_id=pk)
except Stewdent.DoesNotExist:
print "Error: Stewdent does not exist"
return None
except Skill.DoesNotExist:
print "Error"
return None
class LoginView(TemplateView):
template_name = 'login.html'
class AuthView(APIView):
authentication_classes = (TokenAuthentication,)
serializer_class = UserSerializer
# def post(self, request, *args, **kwargs):
# print request.META.get( 'HTTP_AUTHORIZATION' )
# return Response(self.serializer_class(request.user).data)
def get(self, request, format=None):
content = {
'user': unicode(request.user), # `django.contrib.auth.User` instance.
'auth': unicode(request.auth), # None
}
return Response(content)
class AdminList(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
pass
# @csrf_exempt
class StewdentCreate(APIView):
def post(self, request, format=None):
prob = {}
try:
u = User.objects.create(username=request.data['email'])
except Exception as e:
print e
prob = {'error': 'An account for this email as already been created.'}
return Response(prob, status=status.HTTP_400_BAD_REQUEST)
ret = request.data
ret['user'] = u.id
print u.id
serializer = StewdentSerializer(data=ret)
print "serial"
if serializer.is_valid():
print 'valid'
try:
ret = request.data
ret['user'] = u.id
print u.id
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception as e:
print 'generic'
print e
error = {}
if "Duplicate entry" in e[1]:
error['email'] = "This email is already in use"
print serializers.errors
return Response(error, status=status.HTTP_400_BAD_REQUEST)
except IntegrityError:
print 'e'
print e, type(e), repr(e)
# serializer.save()
print serializers.errors
return Response(serializer.data, status=status.HTTP_400_BAD_REQUEST)
print "ret"
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
print 'err'
print serializer.errors
prob = serializer.errors.copy()
u.delete()
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# print serializer.data
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class StewdentList(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
stewdents = Stewdent.objects.all()
serializer = StewdentSerializer(stewdents, many=True)
return Response(serializer.data)
# @csrf_exempt
class StewdentDetail(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, pk, format=None):
stewdent = getStewdent(pk)
skills = getSkill(pk)
serializer = StewdentSerializer(stewdent)
serialSkill = SkillSerializer(skills)
ret = serializer.data
ret['skills'] = serialSkill.data
print serializer.data
return Response(ret)
# return Response(serializer.data)
def put(self, request, pk, format=None):
print request.data
try:
stewdent = getStewdent(pk)
serializer = StewdentSerializer(stewdent, data=request.data)
print serializer.initial_data
if serializer.is_valid():
serializer.save()
return Response(status=200)
print serializer.errors
return Response(serializer.errors, status=400)
        except Exception as e:
print e
def delete(self, request, pk, format=None):
print "delete"
stewdent = getStewdent(pk)
skill = getSkill(pk)
user = stewdent.user
user.delete()
stewdent.delete()
skill.delete()
try:
dents = Stewdent.objects.all()
serial = StewdentSerializer(dents, many=True)
skills = Skill.objects.all()
skillSerial = SkillSerializer(skills, many=True)
ret = [serial.data, skillSerial.data]
return Response(ret)
except Exception as e:
print e
return Response(serial.data)
class SkillList(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, requet, format=None):
try:
skills = Skill.objects.all()
serializer = SkillSerializer(skills, many=True)
return Response(serializer.data)
except Exception as e:
print e
class SkillDetail(APIView):
def get(self, request, pk, format=None):
print "Skillget"
skill = getSkill(pk)
if not skill:
print "404"
raise Http404
serializer = SkillSerializer(skill)
return Response(serializer.data)
def post(self, request, pk, format=None):
"""
post creates a new instance of a skill object to be entered into the database
Parameters:
request (HTTP request): request data sent from the angular form to be processed
pk (Integer): integer of the stewdent object this skill object will be associated with
Returns:
returns
"""
print 'skill post'
try:
stewdent = Stewdent.objects.get(id=pk)
ret = request.data
print ret
ret['stewdent_id'] = stewdent.id
ret['stewdent'] = stewdent.id
serializer = SkillSerializer(data=ret)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
# print serializer.errors
# print "fdsaf"
return Response(serializer.errors, status=400)
except Stewdent.DoesNotExist:
print "stewdent error"
raise Http404
# return Response(status=400)
except IntegrityError as e:
print "other"
print e
return Response(status=400)
def put(self, request, pk, format=None):
print request.data
try:
skill = getSkill(pk)
|
foxmask/django-th
|
th_twitter/migrations/0003_fav.py
|
Python
|
bsd-3-clause
| 581
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""
this migration will be used to add 2 field to deal with
things that will go wrong that will then trigger
mail to admin and author of the triggers
"""
dependencies = [
('th_twitter', '0002_int_to_bigint'),
]
operations = [
migrations.AddField(
model_name='twitter',
name='fav',
field=models.BooleanField(default=0),
),
]
| |
joerick/django-timecode
|
setup.py
|
Python
|
bsd-3-clause
| 546
| 0.001832
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='django-timecode',
version='0.1.4',
    description='Provides classes for working with timecodes (as used in the video industry).',
long_description=open('README.md').read(),
author='Joe Rickerby',
author_email='joerick@mac.com',
url='http://github.com/joerick/django-timecode/',
packages=[
'timecode',
'timecode.test',
],
license='LICENSE.txt',
install_requires=[
"Django >= 1.5",
]
)
|
gannetson/django
|
django/db/models/sql/query.py
|
Python
|
bsd-3-clause
| 92,461
| 0.000908
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import warnings
from collections import Iterator, Mapping, OrderedDict
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.query_utils import Q, PathInfo, refs_expression
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, QUERY_TERMS, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None, context=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
self.context = context or {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params, context=self.context.copy())
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %s>" % self
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = dict((key, adapter(val)) for key, val in six.iteritems(self.params))
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
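# Orientation note (sketch, not exhaustive): RawQuery is the machinery behind
# the public Manager.raw() entry point. A call such as
#     Author.objects.raw('SELECT * FROM myapp_author WHERE id = %s', [1])
# (model and table names hypothetical) wraps the SQL and params in a RawQuery
# for the chosen database alias, and iterating the result executes it through
# the cursor handling above.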
class Query(object):
"""
A single SQL query.
"""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A list of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
# Arbitrary limit for select_related to prevents infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = []
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
self.context = {}
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
@property
def aggregates(self):
warnings.warn(
"The aggregates property is deprecated. Use annotations instead.",
RemovedInDjango20Warning, stacklevel=2)
        return self.annotations
|
Urinx/SomeCodes
|
Python/others/practice/finding_if_prime.py
|
Python
|
gpl-2.0
| 995
| 0.074372
|
#!/usr/bin/python3
import math
import random
def finding_prime(number):
num=abs(number)
if num<4: return True
for x in range(2,num):
if num%x == 0:
return False
return True
def finding_prime_sqrt(number):
num=abs(number)
if num<4: return True
for x in range(2,int(math.sqrt(num))+1):
if number%x == 0:
return False
return True
def finding_prime_fermat(number):
if number<=102:
for a in range(2,number):
if pow(a,number-1,number)!=1:
return False
return True
else:
for i in range(100):
a=random.randint(2,number-1)
if pow(a,number-1,number)!=1:
return False
return True
def test_finding_prime():
number1=17
number2=20
assert(finding_prime(number1)==True)
assert(finding_prime(number2)==False)
assert(finding_prime_sqrt(number1)==True)
assert(finding_prime_sqrt(number2)==False)
assert(finding_prime_fermat(number1)==True)
assert(finding_prime_fermat(number2)==False)
print('Tests passed!')
if __name__=='__main__':
test_finding_prime()
|
nisavid/bedframe
|
bedframe/auth/_spaces.py
|
Python
|
lgpl-3.0
| 4,671
| 0.000856
|
"""Spaces
An authentication space is a directory of resources with a defined set
of acceptable authentication realms, security provisions, and
algorithms.
"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import re as _re
from spruce.collections import odict as _odict, uset as _uset
from spruce.lang import instance_of as _instance_of
from .. import _collections as _coll
from . import _affordances
from . import _handlers
from . import _provisions
class Space(_handlers.AlgorithmHandler, _handlers.ProvisionSetHandler,
_handlers.RealmHandler):
def __init__(self, realms='*', provisionsets=None, algorithms='*',
scanners='*', clerks='*', supplicants='*'):
self.set_algorithms(algorithms)
self._clerks = _uset(clerks)
if provisionsets is None:
provisionsets = \
_provisions.FrozenProvisionSetSet('*')\
.union_product(_provisions.SECPROV_CLIENT_AUTH)\
.unfrozen()
self.set_provisionsets(provisionsets)
self.set_realms(realms)
        self._scanners = _uset(scanners)
self._supplicants = _uset(supplicants)
def __repr__(self):
return '{}({})'\
.format(self.__class__.__name__,
', '.join('{}={!r}'.format(property_, value)
for property_, value
                                  in self._init_args(ordered=True).items()))
def __str__(self):
properties_strs = []
max_affordances = \
_affordances.FrozenProcessProspectiveAffordanceSet.max()
for property_, value in self._init_args(ordered=True).items():
displayname = self._property_displayname(property_)
if value == getattr(max_affordances, property_):
str_ = 'any ' + displayname
else:
str_ = '{} {}'.format(displayname, value)
properties_strs.append(str_)
return '<authentication space with {}>'\
.format(', '.join(properties_strs))
@property
def clerks(self):
return self._clerks
@property
def scanners(self):
return self._scanners
def set_algorithms(self, value):
self._algorithms_ = _uset(value)
def set_provisionsets(self, value):
self._provisionsets_ = _provisions.ProvisionSetSet(value)
def set_realms(self, value):
self._realms_ = _uset(value)
@property
def supplicants(self):
return self._supplicants
def _algorithms(self, upstream_affordances, downstream_affordances):
return self._algorithms_
def _init_args(self, ordered=False):
class_ = _odict if ordered else dict
max_affordances = _affordances.FrozenAffordanceSet.max()
return class_((('realms',
self.realms(upstream_affordances=max_affordances)),
('provisionsets',
self.provisionsets(upstream_affordances=
max_affordances)),
('algorithms',
self.algorithms(upstream_affordances=max_affordances)),
('clerks', self.clerks),
('scanners', self.scanners),
('supplicants', self.supplicants),
))
@classmethod
def _property_displayname(cls, name):
displayname = name
displayname = displayname.replace('_', ' ')
displayname = _re.sub(r'(?<=\w)sets', ' sets', displayname)
return displayname
def _provisionsets(self, upstream_affordances, downstream_affordances):
return self._provisionsets_
def _realms(self, upstream_affordances, downstream_affordances):
return self._realms_
class SpaceMap(_coll.HereditaryWebResourcePathMapping):
"""An authentication space map
This is a mapping from authentication spaces' locations to
specifications of their affordances and behavior. In addition to the
basic mutable mapping functionality, it also
* accepts path patterns in the form of strings or regular
expressions and
* ensures that that its items have valid types and values.
:param mapping_or_items:
A mapping or item sequence of initial items.
:type mapping_or_items: ~{~:obj:`re`: :class:`Space`}
"""
def __init__(self, *args, **kwargs):
super(SpaceMap, self)\
.__init__(*args,
valuetype=_instance_of(Space, 'authentication space'),
value_converter=False, **kwargs)
|
sam-m888/gprime
|
gprime/plug/menu/_note.py
|
Python
|
gpl-2.0
| 1,763
| 0.002836
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a string.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import StringOption
#-------------------------------------------------------------------------
#
# NoteOption class
#
#-------------------------------------------------------------------------
class NoteOption(StringOption):
"""
This class describes an option that allows a note from the
database to be selected.
"""
def __init__(self, label):
"""
:param label: A friendly label to be applied to this option.
Example: "Title Note"
:type label: string
:param value: A GID of a note for this option.
Example: "n11"
:type value: string
:return: nothing
"""
StringOption.__init__(self, label, "")
|
OscarPDR/projects_morelab
|
manage.py
|
Python
|
gpl-3.0
| 259
| 0.003861
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MO
|
DULE", "projects_morelab.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
praekelt/jmbo-foundry
|
foundry/urls.py
|
Python
|
bsd-3-clause
| 12,605
| 0.001428
|
from django.conf.urls import patterns, include, url
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.base import RedirectView
from preferences import preferences
from jmbo.urls import v1_api
from foundry.models import Page
from foundry import views, forms
from foundry.api import ListingResource, LinkResource, NavbarResource, \
MenuResource, PageResource, BlogPostResource
admin.autodiscover()
try:
import object_tools
object_tools.autodiscover()
except ImportError:
pass
v1_api.register(ListingResource())
v1_api.register(LinkResource())
v1_api.register(NavbarResource())
v1_api.register(MenuResource())
v1_api.register(PageResource())
v1_api.register(BlogPostResource())
urlpatterns = patterns('',
# Pre-empt url call for comment post
url(
r'^comments/post/$',
'foundry.views.post_comment',
{},
name='comments-post-comment'
),
(r'^favicon\.ico$', RedirectView.as_view(url='/static/images/favicon.ico', permanent=False)),
    (r'^googlesearch/', include('googlesearch.urls')),
(r'^jmbo/', include('jmbo.urls')),
    (r'^comments/', include('django.contrib.comments.urls')),
(r'^likes/', include('likes.urls')),
(r'^object-tools/', include(object_tools.tools.urls)),
(r'^ckeditor/', include('ckeditor.urls')),
(r'^contact/', include('contact.urls')),
(r'^post/', include('post.urls')),
(r'^simple-autocomplete/', include('simple_autocomplete.urls')),
(r'^jmbo-analytics/', include('jmbo_analytics.urls')),
url(r'social-auth', include('social_auth.urls')),
(r'^admin/', include(admin.site.urls)),
url(
r'^$',
TemplateView.as_view(template_name='base.html'),
name='home'
),
url(
r'^logo/$',
TemplateView.as_view(template_name='foundry/logo.html'),
name='logo'
),
url(
r'^header/$',
TemplateView.as_view(template_name='foundry/inclusion_tags/header.html'),
name='header'
),
url(
r'^footer/$',
TemplateView.as_view(template_name='foundry/inclusion_tags/footer.html'),
name='footer'
),
# Join, login, password reset
url(
r'^join/$',
'foundry.views.join',
{},
name='join',
),
url(
r'^join-finish/$',
'foundry.views.join_finish',
{},
name='join-finish',
),
(r'^auth/', include('django.contrib.auth.urls')),
url(
r'^login/$',
'django.contrib.auth.views.login',
{'authentication_form': forms.LoginForm},
name='login',
),
url(
r'^logout/$',
'django.contrib.auth.views.logout',
{'next_page':'/'},
name='logout',
),
# Password reset with custom form
url(
r'^password_reset/$',
'django.contrib.auth.views.password_reset',
{
'password_reset_form': forms.PasswordResetForm,
},
name='password_reset',
),
# Pages defined in preferences
url(
r'^about-us/$',
views.StaticView.as_view(
content=lambda:preferences.GeneralPreferences.about_us,
title=_("About us")
),
name='about-us'
),
url(
r'^terms-and-conditions/$',
views.StaticView.as_view(
content=lambda:preferences.GeneralPreferences.terms_and_conditions,
title=_("Terms and conditions")
),
name='terms-and-conditions'
),
url(
r'^privacy-policy/$',
views.StaticView.as_view(
content=lambda:preferences.GeneralPreferences.privacy_policy,
title=_("Privacy policy")
),
name='privacy-policy'
),
# Age gateway
url(
r'^age-gateway/$',
'foundry.views.age_gateway',
{},
name='age-gateway',
),
# Listing
url(
r'^listing/(?P<slug>[\w-]+)/$',
'foundry.views.listing_detail',
{},
name='listing-detail'
),
# Listing feed
url(
r'^listing/(?P<slug>[\w-]+)/feed/$',
'foundry.feeds.listing_feed',
{},
name='listing-feed'
),
# Edit profile
url(r'^edit-profile/$',
login_required(
views.EditProfile.as_view(
form_class=forms.EditProfileForm,
template_name='foundry/edit_profile.html'
)
),
name='edit-profile'
),
# Complete profile
url(r'^complete-profile/$',
login_required(
views.EditProfile.as_view(
form_class=forms.EditProfileForm,
template_name='foundry/complete_profile.html'
)
),
name='complete-profile'
),
# Page detail
url(
r'^page/(?P<slug>[\w-]+)/$',
'foundry.views.page_detail',
{},
name='page-detail'
),
# Lorem ipsum
url(
r'^lorem-ipsum/$',
TemplateView.as_view(template_name='foundry/lorem_ipsum.html'),
name='lorem-ipsum'
),
# Search
url(
r'^search/$',
'foundry.views.search',
{},
name='search'
),
# Search results
url(
r'^search-results/$',
'foundry.views.search_results',
{},
name='search-results'
),
# Comment reply form in case of no javascript
url(
r'^comment-reply-form/$',
'foundry.views.comment_reply_form',
{},
name='comment-reply-form'
),
# Report comment
url(
r'^report-comment/(?P<comment_id>\d+)/$',
'foundry.views.report_comment',
{},
name='report-comment'
),
# Chatroom detail
url(
r'^chatroom/(?P<slug>[\w-]+)/$',
'foundry.views.chatroom_detail',
{},
name='chatroom-detail'
),
# Create blogpost
url(
r'^create-blogpost/$',
'foundry.views.create_blogpost',
{},
name='create-blogpost',
),
# Blogpost list
url(
r'^blogposts/$',
views.BlogPostObjectList.as_view(),
{'limit': 300},
name='blogpost_object_list'
),
# Blogpost detail
url(
r'^blogpost/(?P<slug>[\w-]+)/$',
views.BlogPostObjectDetail.as_view(),
{},
name='blogpost_object_detail'
),
# Member notifications
url(
r'^member-notifications/$',
login_required(views.member_notifications),
{},
name='member-notifications'
),
# User detail page
url(
r'^users/(?P<username>[=@\.\w-]+)/$',
'foundry.views.user_detail',
{},
name='user-detail'
),
# Coming soon
url(
r'^coming-soon/$',
TemplateView.as_view(template_name='foundry/coming_soon.html'),
name='coming-soon'
),
# Load new comments
url(
r'^fetch-new-comments-ajax/(?P<content_type_id>\d+)/(?P<oid>\d+)/(?P<last_comment_id>\d+)/$',
'foundry.views.fetch_new_comments_ajax',
{},
name='fetch-new-comments-ajax'
),
# Test views
url(
r'^test-plain-response/$',
'foundry.views.test_plain_response',
{},
name='test-plain-response'
),
url(
r'^test-redirect/$',
'foundry.views.test_redirect',
{},
name='test-redirect'
),
url(
r'^pages/$',
DetailView.as_view(),
{'queryset':Page.permitted.all().order_by('title')},
'page-list'
),
# Member detail page
url(
r'^members/(?P<username>[\w-]+)/$',
'foundry.views.member_detail',
{},
name='member-detail'
),
# Admin
url(
r'^admin-row-create-ajax/$',
'foundry.admin_views.row_create_ajax',
{},
name='admin-row-create-ajax',
),
url(
r'
|
traveloka/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py
|
Python
|
gpl-3.0
| 17,259
| 0.001159
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import ovirtsdk4.types as otypes
from ovirtsdk4.types import StorageDomainStatus as sdstate
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
wait,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_storage_domains
short_description: Module to manage storage domains in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage storage domains in oVirt"
options:
name:
description:
- "Name of the the storage domain to manage."
state:
description:
- "Should the storage domain be present/absent/maintenance/unattached"
choices: ['present', 'absent', 'maintenance', 'unattached']
default: present
description:
description:
- "Description of the storage domain."
comment:
description:
- "Comment of the storage domain."
data_center:
description:
- "Data center name where storage domain should be attached."
- "This parameter isn't idempotent, it's not possible to change data center of storage domain."
domain_function:
description:
- "Function of the storage domain."
- "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
choices: ['data', 'iso', 'export']
default: 'data'
aliases: ['type']
host:
description:
- "Host to be used to mount storage."
nfs:
description:
- "Dictionary with values for NFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(version) - NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
- "C(timeout) - The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
- "C(retrans) - The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
- "Note that these parameters are not idempotent."
iscsi:
description:
- "Dictionary with values for iSCSI storage type:"
- "C(address) - Address of the iSCSI storage server."
- "C(port) - Port of the iSCSI storage server."
- "C(target) - The target IQN for the storage device."
- "C(lun_id) - LUN id."
- "C(username) - A CHAP user name for logging into a target."
- "C(password) - A CHAP password for logging into a target."
- "Note that these parameters are not idempotent."
posixfs:
description:
- "Dictionary with values for PosixFS storage type:"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(vfs_type) - Virtual File System type."
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
glusterfs:
description:
- "Dictionary with values for GlusterFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(mount_options) - Option which will be passed when mounting stor
|
age."
- "Note that these parameters are not idempotent."
fcp:
description:
- "Dictionary with values for fibre channel storage type:"
- "C(address) - Address of the fibre channel storage server."
- "C(port) - Por
|
t of the fibre channel storage server."
- "C(lun_id) - LUN id."
- "Note that these parameters are not idempotent."
destroy:
description:
- "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
- "This parameter is relevant only when C(state) is I(absent)."
format:
description:
- "If I(True) storage domain will be formatted after removing it from oVirt."
- "This parameter is relevant only when C(state) is I(absent)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add data NFS storage domain
- ovirt_storage_domains:
name: data_nfs
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/data
# Add data iSCSI storage domain:
- ovirt_storage_domains:
name: data_iscsi
host: myhost
data_center: mydatacenter
iscsi:
target: iqn.2016-08-09.domain-01:nickname
lun_id: 1IET_000d0002
address: 10.34.63.204
# Import export NFS storage domain:
- ovirt_storage_domains:
domain_function: export
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/export
# Create ISO NFS storage domain
- ovirt_storage_domains:
name: myiso
domain_function: iso
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/iso
# Remove storage domain
- ovirt_storage_domains:
state: absent
name: mystorage_domain
format: true
'''
RETURN = '''
id:
description: ID of the storage domain which is managed
returned: On success if storage domain is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
storage domain:
description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
returned: On success if storage domain is found.
'''
class StorageDomainModule(BaseModule):
def _get_storage_type(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return sd_type
def _get_storage(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return self._module.params.get(sd_type)
def _login(self, storage_type, storage):
if storage_type == 'iscsi':
hosts_service = self._connection.system_service().hosts_service()
host = search_by_name(hosts_service, self._module.params['host'])
hosts_service.host_service(host.id).iscsi_login(
iscsi=otypes.IscsiDetails(
username=storage.get('username'),
password=storage.get('password'),
address=storage.get('address'),
target=storage.get('target'),
),
)
def build_entity(self):
storage_type = self._get_storage_type()
storage = self._get_storage()
self._login(storage_type, storage)
return otypes.StorageDomain(
name=s
|
tensorflow/datasets
|
tensorflow_datasets/text/openbookqa.py
|
Python
|
apache-2.0
| 4,817
| 0.002076
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""openbookQA dataset."""
import json
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{mihaylov2018can,
title={Can a suit of armor conduct electricity? a new dataset for open book question answering},
  author={Mihaylov, Todor and Clark, Peter and Khot, Tushar and Sabharwal, Ashish},
journal={arXiv preprint arXiv:1809.02789},
year={2018}
}
"""
_DESCRIPTION = """
The dataset contains 5,957 4-way multiple choice questions. Additionally, they
provide 5,167 crowd-sourced common knowledge facts, and an expanded version of
the train/dev/test questions where each question is associated with its
originating core fact, a human accuracy score, a clarity score, and an
anonymized crowd-worker ID.
"""
class Openbookqa(tfds.core.GeneratorBasedBuilder):
"""QA dataset with comm
|
on knowledge facts."""
VERSION = tfds.core.Version('0.1.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'question': {
'stem': tfds.features.Text(),
'choice_A': tfds.features.Text(),
'choice_B': tfds.features.Text(),
'choice_C': tfds.features.Text(),
'choice_D': tfds.features.Text(),
},
'fact1': tfds.features.Text(),
'humanScore': tfds.features.Tensor(shape=(), dtype=tf.float32),
'clarity': tfds.features.Tensor(shape=(), dtype=tf.float32),
'turkIdAnonymized': tfds.features.Text(),
'answerKey': tfds.features.ClassLabel(names=['A', 'B', 'C', 'D'])
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=('question', 'answerKey'),
# Homepage of the dataset for documentation
homepage='https://leaderboard.allenai.org/open_book_qa/submissions/get-started',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
download_url = 'https://s3-us-west-2.amazonaws.com/ai2-website/data/OpenBookQA-V1-Sep2018.zip'
dl_dir = dl_manager.download_and_extract(download_url)
data_dir = os.path.join(dl_dir, 'OpenBookQA-V1-Sep2018/Data/Additional')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'data_dir': data_dir,
'filepath': os.path.join(data_dir, 'train_complete.jsonl')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'data_dir': data_dir,
'filepath': os.path.join(data_dir, 'dev_complete.jsonl')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'data_dir': data_dir,
'filepath': os.path.join(data_dir, 'test_complete.jsonl')
},
),
]
def _generate_examples(self, data_dir, filepath):
"""Yields examples."""
with tf.io.gfile.GFile(filepath) as f:
for line in f:
row = json.loads(line)
question = {}
question['stem'] = row['question']['stem']
choices = row['question']['choices']
question['choice_A'] = choices[0]['text']
question['choice_B'] = choices[1]['text']
question['choice_C'] = choices[2]['text']
question['choice_D'] = choices[3]['text']
yield row['id'], {
'question': question,
'fact1': row['fact1'],
'humanScore': row['humanScore'],
'clarity': row['clarity'],
'turkIdAnonymized': row['turkIdAnonymized'],
'answerKey': row['answerKey']
}
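# Hedged usage sketch (added for illustration, not part of the original file).
# The dataset name 'openbookqa' is an assumption derived from the builder class
# name above; tfds.load and tfds.as_numpy are standard TFDS entry points.
if __name__ == '__main__':
  ds = tfds.load('openbookqa', split='train')
  for example in tfds.as_numpy(ds.take(1)):
    print(example['question']['stem'], example['answerKey'])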
|
DWilburn/automate-your-page
|
html_generator.py
|
Python
|
mit
| 2,544
| 0.00511
|
def generate_concept_HTML(concept_title, concept_description):
html_text_1 = '''
<div class="concept">
<div class="concept-title">
''' + concept_title
html_text_2 = '''
</div>
<div class="concept-description">
''' + concept_description
html_text_3 = '''
</div>
</div>'''
full_html_text = html_text_1 + html_text_2 + html_text_3
return full_html_text
def get_title(concept):
start_location = concept.find('TITLE:')
end_location = concept.find('DESCRIPTION:')
title = concept[start_location+7 : end_location-1]
return title
def get_description(concept):
start_location = concept.find('DESCRIPTION:')
description = concept[start_location+13 :]
return description
def get_concept_by_number(text, concept_number):
counter = 0
while counter < concept_number:
counter = counter + 1
next_concept_start = text.find('TITLE:')
next_concept_end = text.find('TITLE:', next_concept_start + 1)
concept = text[next_concept_start:next_concept_end]
text = text[next_concept_end:]
return concept
TEST_TEXT = """TITLE: Programming Language
DESCRIPTION: Programming languages
|
are used by programmers to tell a computer what to do. Python is one example of a programming language.
TITLE: Python
DESCRIPTION: When
|
you write Python code and "Run" the code, a Python Interpreter converts the written code into a set of instructions that the computer can understand and execute.
TITLE: Python Expressions
DESCRIPTION: In Python an "expression" is a legal Python statement. For example: print 2 + 2 is a valid expression, but print 2 + (without a number at the end) is not.
TITLE: What is a variable in Python?
DESCRIPTION: Variables give programmers a way to give names to values. If my_variable is a variable with a value of 2, then the following code would print out 0:
print my_variable - my_variable """
def generate_all_html(text):
current_concept_number = 1
concept = get_concept_by_number(text, current_concept_number)
all_html = ''
while concept != '':
title = get_title(concept)
description = get_description(concept)
concept_html = generate_concept_HTML(title, description)
all_html = all_html + concept_html
current_concept_number = current_concept_number + 1
concept = get_concept_by_number(text, current_concept_number)
return all_html
print generate_all_html(TEST_TEXT)
|
Topicos-3-2014/friendlyadvice
|
AfinidadeLikesEscolaELocalidades.py
|
Python
|
epl-1.0
| 12,463
| 0.012116
|
# coding: iso-8859-1 -*-
"""
Created on Wed Oct 22 21:49:24 2014
@author: fábioandrews
"""
import facebook
from DadosDeAmigoEmComum import DadosDeAmigoEmComum
class AfinidadeLikesEscolaELocalidades:
def __init__(self,ACCESS_TOKEN_FACEBOOK):
self.token_do_facebook = ACCESS_TOKEN_FACEBOOK
self.meusAmigos = []
self.amigosECoisasQueGostam = dict()
self.amigosELocalidades = dict()
self.pegarMeusAmigosECoisasQueElesGostam(ACCESS_TOKEN_FACEBOOK)
self.pegarAmigosELocalidades(ACCESS_TOKEN_FACEBOOK)
self.pegarAmigosEEscolas(ACCESS_TOKEN_FACEBOOK)
def pegarMeusAmigosECoisasQueElesGostam(self,ACCESS_TOKEN_FACEBOOK):
g = facebook.GraphAPI(ACCESS_TOKEN_FACEBOOK)
meusAmigosESeusIds = g.get_connections("me", "friends")['data'] #eh um hashmap com o nome do amigo sendo a chave e o id dele como valor
likesDeMeusAmigosComCategoriasDataECoisasInuteis = { friend['name'] : g.get_connections(friend['id'], "likes")['data'] for friend in meusAmigosESeusIds }
        #the call above returns my friends associated with the things they like, but each like also carries a date, category and other fields, not just the name
chaves_de_likes = likesDeMeusAmigosComCategoriasDataECoisasInuteis.keys() #a chaves_de_likes eh um arranjo com nomes de meus amigos
amigos_e_likes_simplificados = dict() #criarei um hashmap que simplifica meus amigos e seus likes. So preciso do nome do amigo associado a todos os likes dele
for nomeAmigo in chaves_de_likes:
likes_de_um_amigo = likesDeMeusAmigosComCategoriasDataECoisasInuteis[nomeAmigo]
for umLike in likes_de_um_amigo:
umLikeSimplificado = umLike['name']
nomeAmigoEmUTf8 = nomeAmigo.encode(encoding='utf_8',errors='ignore') #estava retornando u'stringqualquer' se eu nao fizesse isso. Eh um tipo diferente de string normal
umLikeSimplificadoEmUtf8 = umLikeSimplificado.encode(encoding='utf_8',errors='ignore')
if(nomeAmigoEmUTf8 not in amigos_e_likes_simplificados.keys()):
amigos_e_likes_simplificados[nomeAmigoEmUTf8] = [umLikeSimplificadoEmUtf8]
else:
amigos_e_likes_simplificados[nomeAmigoEmUTf8].append(umLikeSimplificadoEmUtf8);
self.amigosECoisasQueGostam = amigos_e_likes_simplificados
self.meusAmigos = self.amigosECoisasQueGostam.keys()
def pegarAmigosELocalidades(self,ACCESS_TOKEN_FACEBOOK):
g = facebook.GraphAPI(ACCESS_TOKEN_FACEBOOK)
amigosELocalizacoesComplexo = g.get_connections("me", "friends", fields="location, name")
amigos_e_localidades = dict() #eh um dictionary que relaciona o nome de um amigo com a localidade dele
for fr in amigosELocalizacoesComplexo['data']:
if 'location' in fr:
#print fr['name'] + ' ' + fr['location']["name"] #location eh um dictionary com chaves id e name, referentes a uma localidade
nomeAmigoUtf8 = fr['name'].encode(encoding='utf_8',errors='ignore')
localidadeUtf8 = fr['location']["name"].encode(encoding='utf_8',errors='ignore')
amigos_e_localidades[nomeAmigoUtf8] = localidadeUtf8 #location eh um dictionary com chaves id e name, referentes a uma localidade
self.amigosELocalidades = amigos_e_localidades
    #at the end of this function I have a dict like this: {'Felipe Dantas Moura': ['High School%Instituto Maria Auxiliadora', 'College%Spanish Courses Colegio Delibes', 'College%Federal University of Rio Grande do Norte'],...}
def pegarAmigosEEscolas(self,ACCESS_TOKEN_FACEBOOK):
g = facebook.GraphAPI(ACCESS_TOKEN_FACEBOOK)
amigosEEscolasComplexo = g.get_connections("me","friends",fields="education, name")
amigos_e_escolas = dict() #eh um dictionary que relaciona o nome de um amigo com as escolas dele, Pode ter duas: college ou high school, por isso o valor nesse dict serah um arranjo tipo ["High School%Maria Auxilidadora","college%Federal University of Rio Grande do Norte"]
for fr in amigosEEscolasComplexo['data']:
if 'education' in fr:
nomeAmigoUtf8 = fr['name'].encode(encoding='utf_8',errors='ignore')
arranjoEducation = fr['education'] #uma pessoa pode ter varios high school ou college e tb pode ter graduate school
arranjoEducacaoMeuAmigo = []
for elementoArranjoEducation in arranjoEducation:
nomeEscola = elementoArranjoEducation['school']['name'].encode(encoding='utf_8',errors='ignore')
tipoEscola = elementoArranjoEducation['type'].encode(encoding='utf_8',errors='ignore') #pode ser high school ou college ou Graduate school. College eh a faculdade
arranjoEducacaoMeuAmigo.append(tipoEscola + "%" + nomeEscola)
amigos_e_escolas[nomeAmigoUtf8] = arranjoEducacaoMeuAmigo
self.amigosEEscolas = amigos_e_escolas
#dado um amigo, eu irei receber tipo {giovanni:DadosDeAmigoEmComum}, onde giovanni eh amigo de meuAmigo
#e
|
DadosDeAmigoEmComum terah a nota associada e um arranjo com os likes que giovanni tem em comum com meuAmigo
def acharCompatibilidadeEntreLikesDePaginas(self,meuAmigo):
meuAmigo = meuAmigo.encode(encoding='utf_8',errors='ignore')
pessoasDeMesmoInteresseDeMeuAmigoEQuaisInteresses = dict()
for outroAmigo in self.amigosECoisasQueGostam.keys():
if(outroAmigo != meuAmigo):
#os amigos sao diferentes. Vamos ver se tem likes ig
|
uais
likesEmComumEntreOsDois = []
for umLikeMeuAmigo in self.amigosECoisasQueGostam[meuAmigo]:
for umLikeOutroAmigo in self.amigosECoisasQueGostam[outroAmigo]:
if(umLikeMeuAmigo == umLikeOutroAmigo):
#achamos um like em comum entre um Amigo e outro Amigo
likesEmComumEntreOsDois.append(umLikeMeuAmigo)
if(len(likesEmComumEntreOsDois) > 0):
# ha algo em comum entre os dois amigos e eles sao diferentes
pessoasDeMesmoInteresseDeMeuAmigoEQuaisInteresses[outroAmigo] = likesEmComumEntreOsDois
        #so far I have something like {giovanni:['games','musica']}; giovanni is compatible with meuAmigo
        #time to compute the scores
quantasCoisasMeuAmigoGosta = len(self.amigosECoisasQueGostam[meuAmigo])
pessoasCompativeisComMeuAmigoSegundoLikes = dict() #o retorno da funcao
for amigoParecidoComMeuAmigo in pessoasDeMesmoInteresseDeMeuAmigoEQuaisInteresses.keys():
quantasCoisasEmComumEntreMeuAmigoEAmigoParecidoComMeuAmigo = len(pessoasDeMesmoInteresseDeMeuAmigoEQuaisInteresses[amigoParecidoComMeuAmigo])
nota = (10.0 * quantasCoisasEmComumEntreMeuAmigoEAmigoParecidoComMeuAmigo) / quantasCoisasMeuAmigoGosta
dadosDeAmigoEmComumAmigoParecido = DadosDeAmigoEmComum(nota,pessoasDeMesmoInteresseDeMeuAmigoEQuaisInteresses[amigoParecidoComMeuAmigo])
pessoasCompativeisComMeuAmigoSegundoLikes[amigoParecidoComMeuAmigo] = dadosDeAmigoEmComumAmigoParecido
return pessoasCompativeisComMeuAmigoSegundoLikes
def acharCompatibilidadeEntreLocalidade(self,meuAmigo):
meuAmigo = meuAmigo.encode(encoding='utf_8',errors='ignore')
pessoasDeMesmaLocalidadeDeMeuAmigoEQualLocalidade = dict()
for outroAmigo in self.amigosELocalidades.keys():
if(outroAmigo != meuAmigo):
#os amigos sao diferentes. Vamos ver se tem mesma localidade
if(self.amigosELocalidades[outroAmigo] == self.amigosELocalidades[meuAmigo]):
# ha algo em comum entre os dois amigos e eles sao diferentes
pessoasDeMesmaLocalidadeDeMeuAmigoEQualLocalidade[outroAmigo] = self.amigosELocalidades[outroAmigo]
        #so far I have something like {giovanni:'natal'}; giovanni is compatible with meuAmigo
        #time to compute the scores
pes
|
nonemaw/pynet
|
pyth_ans_ecourse/class9/exercise6/mytest/world.py
|
Python
|
gpl-2.0
| 1,292
| 0.00387
|
'''
Python class on writing reusable code
'''
def func1():
'''Simple test function'''
print "Hello world"
class MyClass(object):
'''Simple test class'''
def __init__(self, var1, var2, var3):
self.var1 = var1
self.var2 = var2
self.var3 = var3
def hello(self):
'''Simple test method'''
print "Hello World: {} {} {}".format(self.var1, self.var2, self.var3)
def not_hello(self):
'''Simple test method'''
print "Goodbye: {} {} {}".format(self.var1, self.var2, self.var3)
class MyChildClass(MyClass):
'''
Test class augmenting __init__
Could use super() also
'''
def __init__(self, var1, var2, var3):
print "Do something more in __init__()"
MyClass.__init__(self, var1, var2, var3)
def hello(self):
'''Simp
|
le test method'''
print "Something else: {} {} {}".format(self.var1, self.var2, self.var3)
if __name__ == "__main__":
print "\nMain program - world"
print "\nTesting MyClass:"
my_obj = MyClass('SF', 'NYC', 'LA')
print m
|
y_obj.var1, my_obj.var2, my_obj.var3
my_obj.hello()
my_obj.not_hello()
print "\nTesting MyChildClass:"
new_obj = MyChildClass('X', 'Y', 'Z')
new_obj.hello()
new_obj.not_hello()
print
|
justyns/home-assistant
|
tests/components/automation/test_mqtt.py
|
Python
|
mit
| 2,684
| 0
|
"""The tests for the MQTT automation."""
import unittest
import homeassistant.components.automation as automation
from tests.common import (
mock_mqtt_component, fire_mqtt_message, get_test_home_assistant)
class TestAutomationMQTT(unittest.TestCase):
"""Test the event automation."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_if_fires_on_topic_match(self):
"""Test if message is fired on topic match."""
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'mqtt',
'topic': 'test-topic'
},
'action': {
'service': 'test.automation'
}
}
}))
fire_mqtt_message(self.hass, 'test-topic', '')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_topic_and_payload_match(self):
"""Test if message is fired on topic and payload match."""
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'mqtt',
'topic': 'test-topic',
|
'payload': 'hello'
},
'action': {
'service': 'test.automation'
}
}
}))
fire_mqtt_message(self.hass, 'test-topic', 'hello')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_topic_but_no_payload_match(self):
"""Test if messa
|
ge is not fired on topic but no payload."""
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'mqtt',
'topic': 'test-topic',
'payload': 'hello'
},
'action': {
'service': 'test.automation'
}
}
}))
fire_mqtt_message(self.hass, 'test-topic', 'no-hello')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
|
youdar/work
|
work/NCS/ncs_paper/submit_model_vs_data_to_queue.py
|
Python
|
mit
| 4,061
| 0.006156
|
from __future__ import division
import collect_ncs_files
from libtbx.command_line import easy_qsub
from glob import glob
import sys
import os
class run_queue_tests(object):
def __init__(self):
"""
Testing using: /work/NCS/ncs_paper/collect_ncs_files.py
"""
# set working environment
paths = "/net/chevy/raid1/youval/Work_chevy/build/setpaths.csh"
self.phenix_source = paths
sources = os.environ["workfolder"]
# command path
c = collect_ncs_files.ncs_paper_data_collection()
self.com_path = sources + '/NCS/ncs_paper/get_model_vs_data.py'
# where all queue output will be deposited
self.where_to_run_dir = sources + '/NCS/ncs_paper/ncs_queue_results'
self.collect_files_from = c.data_dir
self.pdb_code = []
self.pdb_file_with_path = []
# The commands list is a list that will be sent to the queue for processing
self.commands = []
# the number of command in each "chunk" sent to the queue
self.size_of_chunks = 200
def get_pdb_files(self):
"""() -> list
Get all pdb IDs from LBL PDB mirror index
"""
# Run on all PDB - Only on LBL machine
osType = sys.platform
msg = 'Please run this only on LBL computer'
assert not osType.startswith('win'),msg
# set environment
self.pdb_log_file_list = glob(self.collect_files_from + '/log_*')
self.pdb_log_file_list = [x[-4:] for x in self.pdb_log_file_list]
print 'Processing {} files'.format(len(self.pdb_log_file_list))
# for testing
self.pdb_log_file_list = ['2a3x']
self.pdb_log_file_list = [
'3ksb', '2p5t', '4h1l', '4gh4', '2ja7', '2hn2', '4nkz', '3km2', '4gx2',
'3ks2', '1iss', '4h1i', '1n3n', '4gk5', '3hxk', '3g05', '3ksa', '4hi2',
'1ott', '4kn4', '3nmj', '4hic', '2gw1', '4h37', '4gx5']
self.pdb_log_file_list = [
'4g1u', '4hj0', '4nry', '4a7p', '3vbs', '4qjh', '3b8c', '2h1j', '2pf4',
'3pi4', '3bbp', '4u7u', '4l6y', '3vwu', '3n97', '3u60', '1nov', '4od4',
'4od5', '4lrx', '3u61', '3p2d', '1wce', '4kr7', '2fjh', '2w29', '2ost',
'2o94', '1f8v', '3l4b', '4u4f', '3wcn', '3asn', '3be0', '3rjr', '4fn9',
'2fs3', '3fzj', '1tnv', '2r1a', '3oij', '3fm7', '4fqk', '4fnr', '3b61',
'2xpj', '3tu4', '4fqm', '4x4q', '3u5z', '3rfu', '3hqq', '2xyo', '3nou',
'4x4r', '4fnu', '4pdw', '2fsy', '3rh7', '3bna', '4u0h', '2vf9', '3v4p',
'4ryj', '2r0q', '3q4f', '3g76', '1fu2', '3frt', '3uo7', '4hl8', '1uf2',
'4qsm', '4f5x', '3kz4', '3l73', '3vs7', '3txx', '1ohg', '3t4a', '1gr5',
'1fub', '3l5j', '4pqp', '4u6g', '4idw', '1m0f', '4ld9', '3ug6', '4aed',
'4qt0', '2r6f', '4u6u', '4lsx', '4f2m', '3pt6', '3r4d', '4ffz', '2gqq',
'3l3o', '1lzh', '4puf', '1lp3', '4hkj', '4fb3',
|
'2vf1', '2wws', '2xxa',
'2w4b', '3gfq', '3gdu', '4fi3', '2frp', '3cz3', '3ze1', '3zjy', '4qvz',
'2ft1', '3v4v'
|
, '2vq0', '4nko', '4gi2', '4hg4', '3uaj', '3zfg']
def get_commands(self):
"""
Build the command list
get_commands process the command we want to run, with the options we want to use,
on all the files we want the command to run on
It produces a list of commands: list of strings,
containing the command, options and file.
in the same format that you would use to run is from the command prompt
"""
for file_name in self.pdb_log_file_list:
outString = '{0} {1}'.format(self.com_path,file_name)
self.commands.append("python {}".format(outString))
def send_to_queue(self):
# Send the job to the queue
easy_qsub.run(
phenix_source = self.phenix_source,
where = self.where_to_run_dir,
# Optional, when you want all jobs to run on machine_name
# list of newer hosts: beck, morse, gently, rebus
# qsub_cmd = 'qsub -q all.q@beck',
commands = self.commands,
# set the number of commands to send together to the queue.
size_of_chunks= self.size_of_chunks)
if __name__ == "__main__":
queue_job = run_queue_tests()
queue_job.get_pdb_files()
queue_job.get_commands()
queue_job.send_to_queue()
|
Bathlamos/Project-Euler-Solutions
|
solutions/p425.py
|
Python
|
mit
| 1,249
| 0.030424
|
#
# Solution to Project Euler problem 425
# Philippe Legault
#
# https://github.com/Bathlamos/Project-Euler-Solutions
from lib import primes_up_to
import heapq
# Use Dijkstra's algorithm to identify
# the path wit
|
h smallest maximum number,
# based on the undirected graph of connected
# primes
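# Illustrative note (added for clarity, paraphrasing the problem statement):
# two primes are "connected" when they have the same number of digits and differ
# in exactly one digit, or when one is obtained from the other by prepending a
# single digit (that is what the 'x' wildcard below encodes). A prime P is a
# relative of 2 if a chain of connected primes links 2 to P with no member of
# the chain exceeding P; the answer is the sum of the primes up to the limit
# that are NOT relatives of 2, i.e. sum(primes) - s below.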
def compute():
limit = 10 ** 7
primes = primes_up_to(limit + 1)
neighbors = [set() for n in range(0, len(primes))]
# Create an undirected graph which identifies all connected numbers
similar = {}
for p in range(0, len(primes)):
prime = str(primes[p])
to_check = ['x' + prime] + [prime[:i] + 'x' + prime[i + 1:] for i in range(0, len(prime))]
for t in to_check:
if t not in similar:
similar[t] = se
|
t()
for r in similar[t]:
neighbors[r].add(p)
neighbors[p].update(similar[t])
similar[t].add(p)
# Use Dijkstra's algorithm to find 2's relative
heap = []
# (cost, prime index)
heapq.heappush(heap, (2, 0))
visited = set()
s = 0
while heap:
cost, p = heapq.heappop(heap)
if p in visited:
continue
visited.add(p)
if cost <= primes[p]:
s += primes[p]
for i in neighbors[p]:
heapq.heappush(heap, (max(cost, primes[p]), i))
return sum(primes) - s
if __name__ == "__main__":
print(compute())
|
sparkslabs/kamaelia
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Europython09/BB/LineOrientedInputBuffer.py
|
Python
|
apache-2.0
| 2,466
| 0.005272
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Axon
from Kamaelia.Apps.Europython09.BB.Exceptions import GotShutdownMessage
from Axon.Ipc import producerFinished
class LineOrientedInputBuffer(Axon.Component.component):
def main(self):
linebuffer = []
gotline = False
line = ""
try:
while 1:
# Get a line
while (not gotline):
if self.dataReady("control"):
raise GotShutdownMessage()
if self.dataReady("inbox"):
msg = self.recv("inbox")
if "\r\n" in msg:
linebuffer.append( msg[:msg.find("\r\n")+2] )
line = "".join(linebuffer)
gotline = True
linebuffer = [ msg[msg.find("\r\n")+2:] ]
else:
linebuffer.append( msg )
yield 1
if self.dataReady("control"):
raise GotShutdownMessage()
# Wait for receiver to be ready to accept the line
while len(self.outboxes["outbox"]) > 0:
self.pause()
yield 1
|
if self.dataReady("control"):
raise GotShutdownMessage()
# Send them the line, then rinse and repeat.
self.send(line, "outbox")
|
yield 1
gotline = False
line = ""
except GotShutdownMessage:
self.send(self.recv("control"), "signal")
return
self.send(producerFinished(), "signal")
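# Hedged usage sketch (not from the original file): this component is normally
# wired into a Kamaelia Pipeline so that raw chunks arriving on "inbox" come out
# of "outbox" one complete "\r\n"-terminated line at a time, e.g.
#   from Kamaelia.Chassis.Pipeline import Pipeline
#   Pipeline(someByteSource, LineOrientedInputBuffer(), someLineConsumer).run()
# where someByteSource and someLineConsumer are placeholder component names.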
|
VitalPet/hr
|
hr_employee_reference/models/__init__.py
|
Python
|
agpl-3.0
| 152
| 0
|
# -*- coding: utf-8 -*-
# © 2
|
016 OpenSynergy In
|
donesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import hr_employee
|
internap/netman
|
netman/api/objects/vlan.py
|
Python
|
apache-2.0
| 2,413
| 0.000829
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netaddr import IPNetwork, IPAddress
from netman.api.objects import vrrp_group
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.vlan import Vlan
def to_api(vlan):
return dict(
number=vlan.number,
name=vlan.name,
ips=serialize_ip_network(vlan.ips),
vrrp_groups=sorted([vrrp_group.to_api(group) for group in vlan.vrrp_groups], key=lambda i: i['id']),
vrf_forwarding=vlan.vrf_forwarding,
|
access_groups={
"in": vlan.access_groups[IN],
"out": vlan.access_groups[OUT]
},
dhcp_relay_servers=[str(server) for server in vlan.dhcp_relay_servers],
arp_routing=vlan.arp_routing,
icmp_redirects=vlan.icmp_redirects,
unicast_rpf_mode=vlan.unicast_rpf_mode,
ntp=vlan.ntp,
varp_ips=serialize_ip_network(vlan.varp_ips),
load_interval=vlan.load_interval,
mpls_ip=vlan.mpls_ip
)
def
|
to_core(serialized):
access_groups = serialized.pop('access_groups')
ips = serialized.pop('ips')
vrrp_groups = serialized.pop('vrrp_groups')
dhcp_relay_servers = serialized.pop('dhcp_relay_servers')
varp_ips = serialized.pop('varp_ips')
return Vlan(
access_group_in=access_groups['in'],
access_group_out=access_groups['out'],
ips=deserialize_ip_network(ips),
vrrp_groups=[vrrp_group.to_core(group) for group in vrrp_groups],
dhcp_relay_servers=[IPAddress(i) for i in dhcp_relay_servers],
varp_ips=deserialize_ip_network(varp_ips),
**serialized
)
def deserialize_ip_network(ips):
return [IPNetwork('{address}/{mask}'.format(**ip)) for ip in ips]
def serialize_ip_network(ips):
return sorted([{'address': ipn.ip.format(), 'mask': ipn.prefixlen} for ipn in ips], key=lambda i: i['address'])
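# Illustrative note (added for clarity): serialize_ip_network and
# deserialize_ip_network are inverses over the wire format used above, e.g.
#   serialize_ip_network([IPNetwork('192.168.1.1/24')])
#   == [{'address': '192.168.1.1', 'mask': 24}]
# and deserializing that list yields [IPNetwork('192.168.1.1/24')] again.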
|
NPRA/EmissionCalculatorLib
|
emission/__init__.py
|
Python
|
bsd-2-clause
| 1,488
| 0.014113
|
"""Emission python module
This module is for everyone who wants to build tools / systems for calculating vehicle
emissions for various types of vehicles (Trucks, Buses, Cars, Vans, scooters, ..).
The calculation and factors is provided by the EU EEA guidebook: http://www.eea.europa.eu/publications/emep-eea-guidebook-2016
Main features:
1. Give you the current emission for one or multiple types of pollutants for a given vehicle (check "emission.")
2. Given two points (UTM 33N coordinates - Norway) and a vehicle type, the module will use a
routing-service to calculate the best route to reach your destinatio
|
n. The module will also calculate the
emission for all the pollutant types you have defined. You can then sort the various routes depending on
which criteria matter most to you (duration, pollution (NOx, CO, ..)).
Fuel consumption is directly related to emissions - therefore the lower the emissions, the lower the fuel consumption.
For the transportation industry this will be of great importance.
"""
import logging
log = logging.getLogger("emission")
from .Extrapola
|
te import Extrapolate
from .Interpolate import Interpolate
from .Pollutants import Pollutants
from .EmissionJSONReader import EmissionsJsonParser
from .planner import Planner
from .planner import PollutantTypes
from .__version__ import __version__
__author__ = "NPRA - Norwegian Public Roads Administration"
from .models import session
from . import models
from . import update_db as update
|
TurBoss/HostilPlanet
|
data/themes/itheme/build.py
|
Python
|
gpl-2.0
| 4,710
| 0.002335
|
import pygame
from pygame.locals import *
import glob
# specify the image names
|
and how many slices they have
# None for a icon
# ([1...3],[1...3]) for the number of slices an image is cut into.
IM
|
AGES = {
'box.png': (2, 2),
    'check.png': None,
'color.png':None,
'desktop.png': None,
'dot.png': (2, 2),
'down.png': None,
'radio.png': None,
'right.png': None,
'up.png': None,
'x.png': None,
}
HUE = 190 # default hue, used for hue rotations
# keywords set alterations of the source data
# hue -- (-360...360)
# saturation -- (0...100...more) a percent - lower values = grayscale, higher=colorful
# value -- (0...100...more) a percent - lower values = dark, higher = bright
KEYWORDS = {
'red': ('hue', -HUE + 0),
'yellow': ('hue', -HUE + 60),
'green': ('hue', -HUE + 120),
# normal:('hue',-HUE+HUE),
'hover': ('saturation', 75),
'normal': ('saturation', 0),
# 'down':('saturation',100),
    'decor': ('value', 150),
'vertical': ('rotate', -90),
}
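# Illustrative note (added for clarity): with HUE = 190 the 'red' keyword rotates
# the source hue by -190 degrees so that the theme's base hue (190) lands on 0
# (red), 'yellow' lands on 60 and 'green' on 120; 'normal' and 'hover' only
# adjust saturation, and 'vertical' rotates the image itself by -90 degrees.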
pygame.display.init()
pygame.display.set_mode((80, 80), 32)
def prep(name):
fname = name + ".png"
img = pygame.image.load(fname)
w, h = img.get_width() / 2, img.get_height() / 2
out = pygame.Surface((w * 3, h * 3), SWSURFACE | SRCALPHA, 32)
out.fill((0, 0, 0, 0))
out.blit(img.subsurface(0, 0, w, h), (0, 0))
out.blit(img.subsurface(w, 0, w, h), (w * 2, 0))
out.blit(img.subsurface(0, h, w, h), (0, h * 2))
out.blit(img.subsurface(w, h, w, h), (w * 2, h * 2))
for i in range(0, w):
img = out.subsurface((w - 1, 0, 1, h * 3)).convert_alpha()
out.blit(img, (w + i, 0))
for i in range(0, h):
img = out.subsurface((0, h - 1, w * 3, 1)).convert_alpha()
out.blit(img, (0, h + i))
return out, w, h
todo = [
('button.normal', 'dot.normal', None, 3, 3, '789456123'),
('button.hover', 'dot.hover', None, 3, 3, '789456123'),
('button.down', 'dot.down', None, 3, 3, '789456123'),
('checkbox.off.normal', 'box.normal', None, 2, 2, '7913'),
('checkbox.on.normal', 'box.down', 'check', 2, 2, '7913'),
('checkbox.off.hover', 'box.hover', None, 2, 2, '7913'),
('checkbox.on.hover', 'box.hover', 'check', 2, 2, '7913'),
('radio.off.normal', 'dot.normal', None, 2, 2, '7913'),
('radio.on.normal', 'dot.down', 'radio', 2, 2, '7913'),
('radio.off.hover', 'dot.hover', None, 2, 2, '7913'),
('radio.on.hover', 'dot.hover', 'radio', 2, 2, '7913'),
('tool.normal', 'box.normal', None, 3, 3, '789456123'),
('tool.hover', 'box.hover', None, 3, 3, '789456123'),
('tool.down', 'box.down', None, 3, 3, '789456123'),
('hslider', 'idot.normal', None, 3, 3, '789456123'),
('hslider.bar.normal', 'dot.normal', None, 3, 3, '789456123'),
('hslider.bar.hover', 'dot.hover', None, 3, 3, '789456123'),
('hslider.left', 'sbox.normal', 'left', 2, 2, '7913'),
('hslider.right', 'sbox.normal', 'right', 2, 2, '7913'),
('vslider', 'idot.normal', None, 3, 3, '789456123'),
('vslider.bar.normal', 'vdot.normal', None, 3, 3, '789456123'),
('vslider.bar.hover', 'vdot.hover', None, 3, 3, '789456123'),
('vslider.up', 'vsbox.normal', 'up', 2, 2, '7913'),
('vslider.down', 'vsbox.normal', 'down', 2, 2, '7913'),
('dialog.close.normal', 'rdot.hover', None, 2, 2, '7913'),
('dialog.close.hover', 'rdot.hover', 'x', 2, 2, '7913'),
('dialog.close.down', 'rdot.down', 'x', 2, 2, '7913'),
('menu.normal', 'desktop', None, 1, 1, '7'),
('menu.hover', 'box.normal', None, 3, 3, '789456123'),
('menu.down', 'box.down', None, 3, 3, '789456123'),
('select.selected.normal', 'box.normal', None, 3, 3, '788455122'),
('select.selected.hover', 'box.hover', None, 3, 3, '788455122'),
('select.selected.down', 'box.down', None, 3, 3, '788455122'),
('select.arrow.normal', 'box.hover', None, 3, 3, '889556223'),
('select.arrow.hover', 'box.hover', None, 3, 3, '889556223'),
('select.arrow.down', 'box.down', None, 3, 3, '889556223'),
('progressbar', 'sbox.normal', None, 3, 3, '789456123'),
('progressbar.bar', 'box.hover', None, 3, 3, '789456123'),
]
for fname, img, over, ww, hh, s in todo:
    print fname
img, w, h = prep(img)
out = pygame.Surface((ww * w, hh * h), SWSURFACE | SRCALPHA, 32)
out.fill((0, 0, 0, 0))
n = 0
for y in range(0, hh):
for x in range(0, ww):
c = int(s[n])
xx, yy = (c - 1) % 3, 2 - (c - 1) / 3
out.blit(img.subsurface((xx * w, yy * h, w, h)), (x * w, y * h))
n += 1
if over != None:
over = pygame.image.load(over + ".png")
out.blit(over, (0, 0))
pygame.image.save(out, fname + ".tga")
|
febert/DeepRL
|
q_learning_sarsa/dqn_learning.py
|
Python
|
gpl-3.0
| 22,540
| 0.00661
|
from __future__ import print_function
from __future__ import division
import numpy as np
np.set_printoptions(threshold=np.inf)
import matplotlib.pyplot as plt
from matplotlib import cm
# from mpl_toolkits.mplot3d import axes3d
from matplotlib.colors import LogNorm
# import time
import math
# import cPickle
import gym as gym
from PIL import Image
from PIL import ImageOps
from collections import deque
import copy
import qnn
class q_learning():
def __init__(self,
gamma=0.99,
init_epsilon=1.0,
end_epsilon=0.1,
update_epsilon=True,
exploration_decrease_length = 1e6,
policy_mode='deterministic',
environment='MountainCar-v0',
# environment = 'Acrobot-v0',
lambda_=0.5,
plot_resolution=30,
nn_size_hidden = [300,400,400],
nn_batch_size = 50,
nn_learning_rate = 1e-4,
qnn_target = 'q-learning', # 'sarsa'
replay_memory_size = 1e6,
descent_method = 'grad',
dropout_keep_prob = 1.0,
ema_decay_rate = 0.999,
init_weights = None,
num_steps_until_train_step = None,
train_frequency = 1.0,
from_pixels = False,
repeat_action_times = 2,
reg_weight = 0.0,
do_pretrain = False,
pretrain_steps = 5000
):
self.from_pixels = from_pixels
self.repeat_action_times = repeat_action_times
self.frame_downscaling = 6
if num_steps_until_train_step is None:
num_steps_until_train_step = nn_batch_size
self.env = gym.make(environment)
self.env_name = environment
self.num_actions = self.env.action_space.n
self.prob_distrib = np.zeros(self.num_actions)
self.statedim = self.env.observation_space.shape[0]
# lengths of all the played episodes
self.episode_lengths = []
self.total_train_episodes = 0
# lengths of all the tested episodes
if self.env_name=='MountainCar-v0':
self.max_test_length = 10000
elif self.env_name=='CartPole-v0':
self.max_test_length = 10000
else:
self.max_test_length = 10000
self.test_lengths = []
self.test_lengths_std = []
self.test_its = []
self.test_runs_to_average = 5
self.plot_resolution = plot_resolution
self.lambda_ = lambda_
## stochastic or deterministic softmax-based actions
self.policy_mode = policy_mode
normalization_mean = None
normalization_var = None
if not self.from_pixels:
# STATE NORMALIZATION
print('Calculating normalization by random action sampling...')
states = []
while len(states) < 1e5:
self.env.reset()
done = False
while not done:
state, _, done, _ = self.env.step(self.env.action_space.sample())
states.append(state)
normalization_mean = np.mean(states, axis=(0)).astype(np.float32)
normalization_var = np.var(states, axis=(0)).astype(np.float32)
# if self.env_name == 'CartPole-v0':
# normalization_mean = np.zeros_like(normalization_mean)
# normalization_var = np.ones_like(normalization_var)
## exploration parameters
# too much exploration is wrong!!!
self.epsilon = init_epsilon # explore probability
self.init_epsilon = init_epsilon
self.end_epsilon = end_epsilon
self.exploration_decrease_length = exploration_decrease_length
self.update_epsilon = update_epsilon
self.total_runs = 0.
# too long episodes give too much negative reward!!!!
# self.max_episode_length = 1000000
# ----> Use gamma!!!!! TODO: slower decrease?
self.gamma = gamma # similar to 0.9
# DEPRECATED
if qnn_target == 'q-learning':
self.is_a_prime_external = False
elif qnn_target == 'sarsa':
self.is_a_prime_external = True
else:
            raise ValueError("qnn_target must be 'q-learning' or 'sarsa'")
# set pixel state parameters
if self.from_pixels or True:
self.env.render()
self.img_height = self.env.viewer.height
self.img_width = self.env.viewer.width
self.reduced_height = 84#self.img_height//self.frame_downscaling
self.reduced_width = 84#self.img_width//self.frame_downscaling
self.qnn = qnn.qnn(self.statedim,
self.num_actions,
discount=self.gamma,
|
size_hidden=nn_size_hidden,
batch_size=nn_batch_size,
learning_rate=nn_learning_rate,
is_a_prime_external=self.is_a_prime_external,
replay_memory_size=replay_memory_size,
|
descent_method=descent_method,
keep_prob_val=dropout_keep_prob,
ema_decay_rate=ema_decay_rate,
normalization_mean=normalization_mean,
normalization_var=normalization_var,
env_name=environment,
init_weights=init_weights,
from_pixels=self.from_pixels,
input_width=self.reduced_width,
input_height=self.reduced_height,
input_channels=self.repeat_action_times,
reg_weight=reg_weight,
do_pretrain=do_pretrain,
pretrain_steps=pretrain_steps
)
self.learning_rate = nn_learning_rate
self.train_frequency = train_frequency
print('using environment', environment)
print('qnn target', qnn_target, self.is_a_prime_external, self.qnn.is_a_prime_external)
# epsilon-greedy but deterministic or stochastic is a choice
def policy(self, state, mode='deterministic', deepQ=False):
explore = bool(np.random.choice([1, 0], p=[self.epsilon, 1 - self.epsilon]))
# print(explore, features, end="")
if mode == 'deterministic' and not explore:
if deepQ:
q = self.qnn.evaluate_all_actions(state)
# print(state, q)
return np.argmax(q.squeeze())#np.random.choice(np.argwhere(q == np.amax(q)).flatten())
if not deepQ:
raise ValueError('Option not defined')
# q = features.dot(w)
# return np.random.choice(np.argwhere(q == np.amax(q)).flatten())
elif explore:
# print('explore')
return self.env.action_space.sample()
def get_render(self):
return np.asarray(\
ImageOps.flip(\
self.env.render('rgb_array')\
.convert('L')\
.resize((self.reduced_width, self.reduced_height), \
Image.BILINEAR)))
def get_cnn_input_tensor_from_deque(self, pixel_state_deque):
return np.swapaxes(\
np.swapaxes(\
np.array(pixel_state_deque, ndmin=4),1,2),2,3)
def deepq_learning(self, num_iter=1000, max_steps=5000, max_learning_steps=np.inf, learning_rate=None, reset_replay_memory=False):
if learning_rate is None:
learning_rate = self.learning_rate
if reset_replay_memory:
self.qnn.replay_memory.clear()
# Show initial state, since algorithm is highly biased by the initial conditions
if self.statedim == 2:
# print('last w', self.w)
self.plot_deepQ_policy(mode='deterministic')
self.plot_deepQ_function()
prev_writeou
|
tkerola/chainer
|
chainer/links/normalization/decorrelated_batch_normalization.py
|
Python
|
mit
| 5,209
| 0
|
import numpy
from chainer import configuration
from chainer import functions
from chainer import link
from chainer.utils import argument
class DecorrelatedBatchNormalization(link.Link):
"""Decorrelated batch normaliza
|
tion layer.
This link wraps the
:func:`~chainer.functions.decorrelated_batch_normaliz
|
ation` and
:func:`~chainer.functions.fixed_decorrelated_batch_normalization`
functions. It works on outputs of linear or convolution functions.
It runs in three modes: training mode, fine-tuning mode, and testing mode.
In training mode, it normalizes the input by *batch statistics*. It also
maintains approximated population statistics by moving averages, which can
be used for instant evaluation in testing mode.
In fine-tuning mode, it accumulates the input to compute *population
statistics*. In order to correctly compute the population statistics, a
    user must use this mode to feed mini-batches running through the whole
    training dataset.
In testing mode, it uses pre-computed population statistics to normalize
the input variable. The population statistics is approximated if it is
computed by training mode, or accurate if it is correctly computed by
fine-tuning mode.
Args:
size (int or tuple of ints): Size (or shape) of channel
dimensions.
groups (int): Number of groups to use for group whitening.
decay (float): Decay rate of moving average
which is used during training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
.. seealso::
:func:`~chainer.functions.decorrelated_batch_normalization`,
:func:`~chainer.functions.fixed_decorrelated_batch_normalization`
Attributes:
avg_mean (:ref:`ndarray`): Population mean.
avg_projection (:ref:`ndarray`): Population
projection.
groups (int): Number of groups to use for group whitening.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average
which is used during training.
~DecorrelatedBatchNormalization.eps (float): Epsilon value for
numerical stability. This value is added to the batch variances.
"""
def __init__(self, size, groups=16, decay=0.9, eps=2e-5,
dtype=numpy.float32):
super(DecorrelatedBatchNormalization, self).__init__()
self.avg_mean = numpy.zeros(size // groups, dtype=dtype)
self.register_persistent('avg_mean')
self.avg_projection = numpy.eye(size // groups, dtype=dtype)
self.register_persistent('avg_projection')
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
self.groups = groups
def forward(self, x, **kwargs):
"""forward(self, x, *, finetune=False)
Invokes the forward propagation of DecorrelatedBatchNormalization.
In training mode, the DecorrelatedBatchNormalization computes moving
averages of the mean and projection for evaluation during training,
and normalizes the input using batch statistics.
Args:
x (:class:`~chainer.Variable`): Input variable.
finetune (bool): If it is in the training mode and ``finetune`` is
``True``, DecorrelatedBatchNormalization runs in fine-tuning
mode; it accumulates the input array to compute population
statistics for normalization, and normalizes the input using
batch statistics.
"""
finetune, = argument.parse_kwargs(kwargs, ('finetune', False))
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
avg_mean = self.avg_mean
avg_projection = self.avg_projection
if configuration.config.in_recomputing:
# Do not update statistics when extra forward computation is
# called.
if finetune:
self.N -= 1
avg_mean = None
avg_projection = None
ret = functions.decorrelated_batch_normalization(
x, groups=self.groups, eps=self.eps,
running_mean=avg_mean, running_projection=avg_projection,
decay=decay)
else:
# Use running average statistics or fine-tuned statistics.
mean = self.avg_mean
projection = self.avg_projection
ret = functions.fixed_decorrelated_batch_normalization(
x, mean, projection, groups=self.groups)
return ret
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
|
fluxw42/youtube-dl
|
test/test_compat.py
|
Python
|
unlicense
| 5,492
| 0.004066
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_getenv,
compat_setenv,
compat_etree_fromstring,
compat_expanduser,
compat_shlex_split,
compat_str,
compat_struct_unpack,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
)
class TestCompat(unittest.TestCase):
def test_compat_getenv(self):
test_str = 'тест'
compat_setenv('YOUTUBE_DL_COMPAT_GETENV', test_str)
self.assertEqual(compat_getenv('YOUTUBE_DL_COMPAT_GETENV'), test_str)
def test_compat_setenv(self):
test_var = 'YOUTUBE_DL_COMPAT_SETENV'
test_str = 'тест'
compat_setenv(test_var, test_str)
compat_getenv(test_var)
self.assertEqual(compat_getenv(test_var), test_str)
def test_compat_expanduser(self):
old_home = os.environ.get('HOME')
test_str = 'C:\Documents and Settings\тест\Application Data'
compat_setenv('HOME', test_str)
self.assertEqual(compat_expanduser('~'), test_str)
compat_setenv('HOME', old_home or '')
def test_all_present(self):
import youtube_dl.compat
all_names = youtube_dl.compat.__all__
present_names = set(filter(
lambda c: '_' in c and not c.startswith('_'),
dir(youtube_dl.compat))) - set(['unicode_literals'])
self.assertEqual(all_names, sorted(present_names))
def test_compat_urllib_parse_unquote(self):
self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
self.assertEqual(compat_urllib_parse_unquote(''), '')
self.assertEqual(compat_urllib_parse_unquote('%'), '%')
self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
self.assertEqual(
compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
'''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
self.assertEqual(
compat_urllib_parse_unquote('''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
'''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
def test_compat_urllib_parse_unquote_plus(self):
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
def test_compat_urllib_parse_urlencode(self):
self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlenco
|
de({b'abc': 'def'})
|
, 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
def test_compat_shlex_split(self):
self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
self.assertEqual(compat_shlex_split('-option "one\ntwo" \n -flag'), ['-option', 'one\ntwo', '-flag'])
self.assertEqual(compat_shlex_split('-val 中文'), ['-val', '中文'])
def test_compat_etree_fromstring(self):
xml = '''
<root foo="bar" spam="中文">
<normal>foo</normal>
<chinese>中文</chinese>
<foo><bar>spam</bar></foo>
</root>
'''
doc = compat_etree_fromstring(xml.encode('utf-8'))
self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
self.assertTrue(isinstance(doc.find('normal').text, compat_str))
self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
def test_compat_etree_fromstring_doctype(self):
xml = '''<?xml version="1.0"?>
<!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd">
<smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
compat_etree_fromstring(xml)
def test_struct_unpack(self):
self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
if __name__ == '__main__':
unittest.main()
|
yuvipanda/edit-stats
|
dmz/generate_dbnames.py
|
Python
|
mit
| 821
| 0.004872
|
"""Generate JSON files with dbnames per project
Just generates for wikipedias for now"""
from urllib2 import urlopen
import unicodecsv as csv
import
|
json
def get_dbnames(wiki_class):
"""Return dbnames for all wikis in a particular wiki_class, such as 'wikipedias'"""
URL = "https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=csv&s=good" % wiki_class
data = csv.reader(urlopen(URL))
dbnames = []
is_first = True
for row in data:
i
|
f is_first:
is_first = False
continue # skip headers!
# dbnames is just langcode with - replaced by _ and a 'wiki' suffix
dbnames.append(u'%swiki' % (row[2].replace('-', '_'), ))
return dbnames
data = {
'wikipedias': get_dbnames("wikipedias")
}
json.dump(data, open('dbnames.json', 'w'))
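# Illustrative note (added for clarity): for language codes such as "en" and
# "pt-br" the loop above yields "enwiki" and "pt_brwiki", so dbnames.json ends up
# looking roughly like {"wikipedias": ["enwiki", "pt_brwiki", ...]}.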
|
ledtvavs/repository.ledtv
|
script.tvguide.Vader/stop.py
|
Python
|
gpl-3.0
| 386
| 0.010363
|
import sys
import xbmc,xbmcaddon
import sqlite3
ADDON = xbmcaddon.Addon(i
|
d='script.tvguide.Vader')
channel = sys.argv[1]
start = sys.argv[2]
if ADDON.getSetting('playing.channel') != channel:
quit()
elif ADDON.getSetting('playing.start') != start:
quit()
ADDON.setSetting('playing.channel','')
ADDON.setSetting('playing.start','')
xbmc.executebuiltin('PlayerCont
|
rol(Stop)')
|
miptliot/edx-platform
|
lms/djangoapps/instructor_task/tasks_helper/module_state.py
|
Python
|
agpl-3.0
| 15,346
| 0.00417
|
"""
Instructor Tasks related to module state.
"""
import json
import logging
from time import time
from django.contrib.auth.models import User
from opaque_keys.edx.keys import UsageKey
from xblock.runtime import KvsFieldData
import dogstats_wrapper as dog_stats_api
from capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.models import StudentModule
from courseware.module_render import get_module_for_descriptor_internal
from eventtracking import tracker
from lms.djangoapps.grades.scores import weighted_score
from track.contexts import course_context_from_course_id
from track.event_transaction_utils import create_new_event_transaction_id, set_event_transaction_type
from track.views import task_track
from util.db import outer_atomic
from xmodule.modulestore.django import modulestore
from ..exceptions import UpdateProblemModuleStateError
from .runner import TaskProgress
from .utils import UNKNOWN_TASK_ID, UPDATE_STATUS_FAILED, UPDATE_STATUS_SKIPPED, UPDATE_STATUS_SUCCEEDED
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to be used in grading events
GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified
problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed four arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, the xmodule_instance_args, and the task_input
being passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in ques
|
tion
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in
|
student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update, task_input)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if problem was successfully rescored for the given student, and False
if problem encountered some kind of error in rescoring.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_L
|
aborrero/pkg-rpmlint
|
ConfigCheck.py
|
Python
|
gpl-2.0
| 1,798
| 0.003893
|
# -*- coding: utf-8 -*-
#############################################################################
# File : ConfigCheck.py
# Package : rpmlint
# Author : Frederic Lepied
# Created on : Sun Oct 3 21:48:20 1999
# Purpose :
#############################################################################
import AbstractCheck
from Filter import addDetails, printError, printWarning
class ConfigCheck(AbstractCheck.AbstractCheck):
def __init__(self):
AbstractCheck.AbstractCheck.__init__(self, "ConfigCheck")
def check_binary(self, pkg):
config_files = pkg.configFiles()
noreplace_files = pkg.noreplaceFiles()
for c in config_files:
if c.startswith("/var/lib/games/"):
printError(pkg, "score-file-must-not-be-conffile", c)
elif not c.startswith("/etc/") and not c.startswith("/var/"):
printWarning(pkg, "non-etc-or-var-file-marked-as-conffile", c)
if c not in noreplace_files:
printWarning(pkg, "conffile-without-noreplace-flag", c)
# Create an object to enable the auto registration of the test
check = ConfigCheck()
# Add information about checks
addDetails(
'score-file-must-not-be-conffile',
"""A file in /var/lib/games/ is a configuration file. Store your conf
files in /etc instead.""",
'non-etc-or-var-file-mar
|
ked-as-conffile',
"""A file not in /etc or /var is marked as being a configuration file.
Please put your conf files in /etc or /var.""",
'conffile-without-norepla
|
ce-flag',
"""A configuration file is stored in your package without the noreplace flag.
A way to resolve this is to put the following in your SPEC file:
%config(noreplace) /etc/your_config_file_here
""",
)
# ConfigCheck.py ends here
# ex: ts=4 sw=4 et
|
chienlieu2017/it_management
|
odoo/addons/hw_scanner/controllers/main.py
|
Python
|
gpl-3.0
| 8,110
| 0.018989
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import time
from os import listdir
from os.path import join
from Queue import Queue, Empty
from select import select
from threading import Thread, Lock
from odoo import http
import odoo.addons.hw_proxy.controllers.main as hw_proxy
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('Odoo module hw_scanner depends on the evdev python module')
evdev = None
class ScannerDevice():
def __init__(self, path):
self.evdev = evdev.InputDevice(path)
self.evdev.grab()
self.barcode = []
self.shift = False
class Scanner(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/input/by-id/'
self.open_devices = []
self.barcodes = Queue()
self.keymap = {
2: ("1","!"),
3: ("2","@"),
4: ("3","#"),
5: ("4","$"),
6: ("5","%"),
7: ("6","^"),
8: ("7","&"),
9: ("8","*"),
10:("9","("),
11:("0",")"),
12:("-","_"),
13:("=","+"),
# 14 BACKSPACE
# 15 TAB
16:("q","Q"),
17:("w","W"),
18:("e","E"),
19:("r","R"),
20:("t","T"),
21:("y","Y"),
22:("u","U"),
23:("i","I"),
24:("o","O"),
25:("p","P"),
26:("[","{"),
27:("]","}"),
# 28 ENTER
# 29 LEFT_CTRL
30:("a","A"),
31:("s","S"),
32:("d","D"),
33:("f","F"),
34:("g","G"),
35:("h","H"),
36:("j","J"),
37:("k","K"),
38:("l","L"),
39:(";",":"),
40:("'","\""),
41:("`","~"),
# 42 LEFT SHIFT
43:("\\","|"),
44:("z","Z"),
45:("x","X"),
46:("c","C"),
47:("v","V"),
48:("b","B"),
49:("n","N"),
50:("m","M"),
51:(",","<"),
52:(".",">"),
53:("/","?"),
# 54 RIGHT SHIFT
57:(" "," "),
}
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message = None):
        if status == self.status['status']:
            if message != None and message != self.status['messages'][-1]:
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Barcode Scanner Error: '+message)
elif status == 'disconnected' and message:
_logger.info('Disconnected Barcode Scanner: %s', message)
def get_devices(self):
try:
if not evdev:
return None
new_devices = [device for device in listdir(self.input_dir)
if join(self.input_dir, device) not in [dev.evdev.fn for dev in self.open_devices]]
scanners = [device for device in new_devices
if (('kbd' in device) and ('keyboard' not in device.lower()))
or ('barcode' in device.lower()) or ('scanner' in device.lower())]
for device in scanners:
_logger.debug('opening device %s', join(self.input_dir,device))
self.open_devices.append(ScannerDevice(join(self.input_dir,device)))
if self.open_devices:
self.set_status('connected','Connected to '+ str([dev.evdev.name for dev in self.open_devices]))
else:
self.set_status('disconnected','Barcode Scanner Not Found')
return self.open_devices
except Exception as e:
self.set_status('error',str(e))
return []
def release_device(self, dev):
self.open_devices.remove(dev)
def get_barcode(self):
""" Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcodes scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode.
"""
self.lockedstart()
while True:
try:
timestamp, barcode = self.barcodes.get(True, 5)
if timestamp > time.time() - 5:
return barcode
except Empty:
return ''
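    # Hypothetical usage sketch (not part of the original driver): a caller
    # would poll get_barcode() in a loop; an empty string means nothing was
    # scanned within the ~5 second window. The names `polling` and `process`
    # are placeholders.
    #
    #   while polling:
    #       barcode = scanner_thread.get_barcode()
    #       if barcode:
    #           process(barcode)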
def get_status(self):
self.lockedstart()
return self.status
def _get_open_device_by_fd(self, fd):
for dev in self.open_devices:
if dev.evdev.fd == fd:
return dev
def run(self):
""" This will start a loop that catches all keyboard events, parse barcode
sequences and put them on a timestamped queue that can be consumed by
the point of sale's requests for barcode events
"""
self.barcodes = Queue()
barcode = []
shift = False
devices = None
while True: # barcodes loop
devices = self.get_devices()
try:
while True: # keycode loop
r,w,x = select({dev.fd: dev for dev in [d.evdev for d in devices]},[],[],5)
if len(r) == 0: # timeout
break
for fd in r:
device = self._get_open_device_by_fd(fd)
if not evdev.util.is_device(device.evdev.fn):
_logger.info('%s disconnected', str(device.evdev))
self.release_device(device)
break
events = device.evdev.read()
for event in events:
if event.type == evdev.ecodes.EV_KEY:
# _logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
if event.value == 1: # keydown events
if event.code in self.keymap:
if device.shift:
device.barcode.append(self.keymap[event.code][1])
else:
device.barcode.append(self.keymap[event.code][0])
elif event.code == 42 or event.code == 54: # SHIFT
device.shift = True
elif event.code == 28: # ENTER, end of barcode
_logger.debug('pushing barcode %s from %s', ''.join(device.barcode), str(device.evdev))
self.barcodes.put( (time.time(),''.join(device.barcode)) )
device.barcode = []
elif event.value == 0: #keyup events
if event.code == 42 or event.code == 54: # LEFT SHIFT
device.shift = False
except Exception as e:
self.set_status('error',str(e))
scanner_thread = None
if evdev:
scanner_thread = Scanner()
hw_proxy.drivers['scanner'] = scanner_thread
class ScannerDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self):
return scanner_thread.get_barcode() if scanner_thread else None
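# Hypothetical client-side sketch (not part of this module): polling the JSON
# route above from the POS host. The URL, port and JSON-RPC envelope are
# assumptions about a typical Odoo hw_proxy deployment, not something this
# file defines.
#
#   import requests
#   payload = {"jsonrpc": "2.0", "method": "call", "params": {}}
#   reply = requests.post("http://proxy-host:8069/hw_proxy/scanner", json=payload)
#   barcode = reply.json().get("result")  # '' when nothing was scanned in ~5s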
|
robwebset/script.ebooks
|
resources/lib/mobi/utils.py
|
Python
|
gpl-2.0
| 340
| 0.014706
|
#!/usr/bin/env python
# encoding: utf-8
"""
utils.py
Created by Elliot Kroo on 2009-12-25.
Copyright (c) 2009 Elliot Kroo. All rights reserved.
"""
import sys
import os
def toDict(tuples):
  resultsDict = {}
  for field, value in tuples:
if len(field) > 0 and field[0] != "-":
resultsDict[field] = value
  return resultsDict
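# Illustrative check (not in the original file): fields whose name starts
# with "-" are dropped from the result.
assert toDict([("title", "Example Book"), ("-binary", "raw")]) == {"title": "Example Book"}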
|
JamesNickerson/py-junos-eznc
|
tests/unit/test_decorators.py
|
Python
|
apache-2.0
| 1,937
| 0.001549
|
__author__ = "Rick Sherman"
import unittest2 as unittest
from nose.plugins.attrib import attr
from jnpr.junos.device import Device
from jnpr.junos.decorators import timeoutDecorator
from mock import patch, PropertyMock, call
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr('unit')
class Test_Decorators(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
def test_timeout(self):
with patch('jnpr.junos.Device.timeout', new_callable=PropertyMock) as mock_timeout:
mock_timeout.return_value = 30
function = lambda x: x
decorator = timeoutDecorator(function)
decorator(self.dev, dev_timeout=10)
calls = [call(), call(10), call(30)]
mock_timeout.assert_has_calls(calls)
def test_timeout_except(self):
with patch('jnpr.junos.Device.timeout', new_callable=PropertyMock) as mock_timeout:
mock_timeout.return_value = 30
def function(*args, **kwargs):
raise Exception()
decorator = timeoutDecorator(function)
# test to ensure the exception is raised
with self.assertRaises(Exception):
                decorator(self.dev, dev_timeout=10)
calls = [call(), call(10), call(30)]
# verify timeout was set/reset
mock_timeout.assert_has_calls(calls)
def _mock_manager(self, *args, **kwargs):
if kwargs:
            device_params = kwargs['device_params']
            device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
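# Hedged sketch (not the real jnpr.junos.decorators implementation): the
# pattern these tests exercise is a wrapper that reads the device timeout,
# applies dev_timeout for the duration of the call, and always restores the
# previous value, matching the call(), call(10), call(30) assertions above.
def timeout_decorator_sketch(function):
    def wrapper(dev, *args, **kwargs):
        timeout = kwargs.pop('dev_timeout', None)
        if timeout is None:
            return function(dev, *args, **kwargs)
        previous = dev.timeout        # call(): read the current timeout
        dev.timeout = timeout         # call(10): apply the requested timeout
        try:
            return function(dev, *args, **kwargs)
        finally:
            dev.timeout = previous    # call(30): restore, even on exceptions
    return wrapper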
|