| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|

christianurich/VIBe2UrbanSim | 3rdparty/opus/src/psrc/opus_package_info.py | Python | gpl-2.0 | 292 | 0.006849

# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.opus_package import OpusPackage
class package(OpusPackage):
name = 'psrc'
required_opus_packages = ["opus_core", "opus_emme2", "urbansim"]

zhenzhai/edx-platform | common/lib/sandbox-packages/hint/hint_class/Week3/Prob2_Part1.py | Python | agpl-3.0 | 1,401 | 0.013562

# Make sure you name your file with className.py
from hint.hint_class_helpers.find_matches import find_matches
class Prob2_Part1:
"""
Author: Shen Ting Ang
Date: 10/11/2016
""
|
"
def check_attempt(self, params):
self.attempt = params['attempt'] #student's attempt
self.answer = params['answer'] #solution
self.att_tree = params['att_tree'] #attempt tree
self.ans_tree = params['ans_tree'] #solution tree
matches = find_matches(params)
matching_node = [m[0] for m in matches]
try:
if '^' not in self.attempt:
hint='Missing ^ in the answer. '
return hint + 'What is the probability of a specific combination of 3 coin flips? ', '1/2^3'
#check if the form of the parse tree has the right
#shape: an operator and two leafs that correspond to
#the operands
elif 'C(' not in self.attempt and '!' not in self.attempt:
hint='Missing choose function in the answer. '
return hint + 'How many possible ways are there to get 2 questions correct out of 5 questions? C(5,_)', '2'
else:
return "",""
except Exception:
return '',''
def get_problems(self):
self.problem_list = ["Combinatorics/GrinsteadSnell3.2.18/part1"]
return self.problem_list
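The checker is driven entirely by a `params` dict with `attempt`, `answer`, `att_tree` and `ans_tree` keys. A minimal driver sketch (values hypothetical; it assumes the `hint.hint_class_helpers` package is importable so `find_matches` resolves and tolerates the stub trees):

```python
# Hypothetical driver; attempt/answer values are made up for illustration.
params = {
    'attempt': '3/2^3',        # student's attempt, missing the choose function
    'answer': 'C(3,2)/2^3',    # solution
    'att_tree': None,          # attempt parse tree (unused by the string checks)
    'ans_tree': None,          # solution parse tree (unused by the string checks)
}
hint, fragment = Prob2_Part1().check_attempt(params)
print(hint)   # 'Missing choose function in the answer. ...'
```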

spencerlyon2/distcan | distcan/scipy_wrap.py | Python | mit | 10,929 | 0

"""
Common distributions with standard parameterizations in Python
@author : Spencer Lyon <spencer.lyon@stern.nyu.edu>
@date : 2014-12-31 15:59:31
"""
from math import sqrt
import numpy as np
__all__ = ["CanDistFromScipy"]
pdf_docstr = r"""
Evaluate the probability density function, which is defined as
.. math::
{pdf_tex}
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the pdf
Returns
-------
out : {ret1_type}
The pdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `d__dist_name(x,
*args, log=0)` from R
"""
logpdf_docstr = r"""
Evaluate the log of the pdf, where the pdf is defined as
.. math::
{pdf_tex}
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the log of the pdf
Returns
-------
out : {ret1_type}
The log of pdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `d__dist_name(x,
*args, log=1)` from R
"""
cdf_docstr = r"""
Evaluate the cumulative density function
.. math::
{cdf_tex}
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the cdf
Returns
-------
out : {ret1_type}
The cdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `p__dist_name(x,
*args, lower.tail=1, log.p=0)` from R
"""
logcdf_docstr = r"""
Evaluate the log of the cdf, where the cdf is defined as
.. math::
{cdf_tex}
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the log of the cdf
Returns
-------
out : {ret1_type}
The log of cdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `p__dist_name(x,
*args, lower.tail=1, log.p=1)` from R
"""
rvs_docstr = r"""
Draw random samples from the distribution
Parameters
----------
size : tuple
A tuple specifying the dimensions of an array to be filled with
random samples
Returns
-------
out : {ret1_type}
The random sample(s) requested
"""
sf_docstr = r"""
Compute the survival function (or complementary cumulative density
function) of the distribution at given points. This is defined as
.. math::
sf(x) = ccdf(x) = 1 - cdf(x)
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the sf (ccdf)
Returns
-------
out : {ret1_type}
One minus the cdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `p__dist_name(x,
*args, lower.tail=0, log.p=0)` from R
"""
logsf_docstr = r"""
Compute the log of the survival function (or complementary cumulative
density function) of the distribution at given points. This is defined
as
.. math::
\log(sf(x)) = \log(ccdf(x)) = \log(1 - cdf(x))
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the log of the sf (ccdf)
Returns
-------
out : {ret1_type}
Log of one minus the cdf of the distribution evaluated at x
Notes
-----
For applicable distributions, equivalent to calling `p__dist_name(x,
*args, lower.tail=0, log.p=1)` from R
"""
isf_docstr = r"""
Compute the inverse of the survival function (or complementary
cumulative density function) of the distribution at given points. This
is commonly used to find critical values of a distribution
Parameters
----------
x : {arg1_type}
The probability level(s) at which to evaluate the inverse of the sf
Returns
-------
out : {ret1_type}
The point(s) x such that sf(x) equals the given probability level
Examples
--------
>>> d.isf([0.1, 0.05, 0.01]) # upper tail critical values
"""
ppf_docstr = r"""
Compute the percent point function (or quantile), which is the inverse
of the cdf. This is commonly used to compute critical values.
Parameters
----------
x : {arg1_type}
The probability level(s) at which to evaluate the quantile (inverse cdf)
Returns
-------
out : {ret1_type}
The quantile(s) of the distribution evaluated at x
Examples
--------
>>> d.ppf([0.9, 0.95, 0.99]) # upper tail critical values
Notes
-----
The ppf(x) = isf(1 - x), for x in (0, 1)
For applicable distributions, equivalent to calling `q__dist_name(x,
*args, lower.tail=1, log.p=0)` from R
"""
rand_docstr = r"""
Draw random samples from the distribution
Parameters
----------
*args : int
Integer arguments are taken to be the dimensions of an array that
should be filled with random samples
Returns
-------
out : {ret1_type}
The random sample(s) requested
Examples
--------
>>> samples = d.rand(2, 3, 3); samples.shape # 2, 3, 3 array of samples
(2, 3, 3)
>>> type(d.rand())
numpy.float64
"""
ll_docstr = r"""
The loglikelihood of the distribution with respect to all the samples
in x. Equivalent to sum(d.logpdf(x))
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the log likelihood
Returns
-------
out : scalar
The log-likelihood of the observations in x
"""
invlogcdf_docstr = r"""
Evaluate inverse function of the logcdf of the distribution at x
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the inverse of the log of the cdf
Returns
-------
out : {ret1_type}
The random variable(s) such that the log of the cdf is equal to x
Notes
-----
For applicable distributions, equivalent to calling `q__dist_name(x,
*args, lower.tail=1, log.p=1)` from R
"""
cquantile_docstr = r"""
Evaluate the complementary quantile function. Equal to `d.ppf(1-x)` for
x in (0, 1). Could be used to compute the lower critical values of a
distribution
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate 1 minus the quantile
Returns
-------
out : {ret1_type}
The lower-tail critical values of the distribution
Notes
-----
For applicable distributions, equivalent to calling `q__dist_name(x,
*args, lower.tail=0, log.p=0)` from R
"""
invlccdf_docstr = r"""
Evaluate inverse function of the logccdf of the distribution at x
Parameters
----------
x : {arg1_type}
The point(s) at which to evaluate the inverse of the log of the cdf
Returns
-------
out : {ret1_type}
The random variable(s) such that the log of 1 minus the cdf is equal
to x
Notes
-----
For applicable distributions, equivalent to calling `q__dist_name(x,
*args, lower.tail=0, log.p=1)` from R
"""
default_docstr_args = {"pdf_tex": r"\text{not given}",
"cdf_tex": r"\text{not given}",
"arg1_type": "array_like or scalar",
"ret1_type": "array_like or scalar"}
def _default_fit(self, x):
msg = "If you would like to see this open an issue or submit a pull"
msg += " request at https://github.com/spencerlyon2/distcan/issues"
raise NotImplementedError(msg)
def _default_expect(self, x):
msg = "If you would like to see this open an issue or submit a pull"
msg += " request at https://github.com/spencerlyon2/distcan/issues"
raise NotImplementedError(msg)
class CanDistFromScipy(object):
def __init__(self):
# assign scipy.stats.distributions.method_name to names I like
# standard names
self.pdf = self.dist.pdf
self.logpdf = self.dist.logpdf
self.cdf = self.dist.cdf
self.logcdf = self.dist.logcdf
self.rvs = self.dist.rvs
self.moment = self.dist.moment
self.stats = self.dist.stats
# not all distributions have the following: fit, expect
if hasattr(self.dist, "fit"):
self.fit = self.dist.fit
else:
self.fit = _default_fit
if hasattr(self.dist, "expect"):
self.expect = self.dist.expect
else:
            self.expect = _default_expect
# survival function. Called the complementary cumulative
# function (ccdf) in .jl
self.sf = self.ccdf = self.dist.sf
self.logsf = self.logccdf = self.dist.logsf
self.isf = self.dist.isf
# Distributions.jl calls scipy's ppf function quantile. I like that
self.ppf = self.quantile = self.dist.ppf
# set docstrings
self._set_docstrings()
self.__doc__ = "foobar"
def _set_docstrings(self):
fmt_arg
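The docstring templates above encode standard identities between the cdf, sf, ppf and isf. A quick sanity check of those identities directly against `scipy.stats`, independent of distcan:

```python
import numpy as np
from scipy import stats

d = stats.norm(0, 1)
x = np.array([-1.0, 0.0, 2.0])
p = np.array([0.1, 0.05, 0.01])

assert np.allclose(d.sf(x), 1 - d.cdf(x))        # sf = ccdf = 1 - cdf
assert np.allclose(d.logsf(x), np.log(d.sf(x)))  # logsf = log(1 - cdf)
assert np.allclose(d.isf(p), d.ppf(1 - p))       # isf(p) = ppf(1 - p)
```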

ronen25/nautilus-copypath | nautilus-copypath.py | Python | gpl-3.0 | 2,595 | 0.004624

#----------------------------------------------------------------------------------------
# nautilus-copypath - Quickly copy file paths to the clipboard from Nautilus.
# Copyright (C) Ronen Lapushner 2017-2018.
# Distributed under the GPL-v3+ license. See LICENSE for more information
#----------------------------------------------------------------------------------------
import gi
gi.require_version('Nautilus', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Nautilus, GObject, Gtk, Gdk
class CopyPathExtension(GObject.GObject, Nautilus.MenuProvider):
def __init__(self):
# Initialize clipboard
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
def __sanitize_path(self, path):
# Replace spaces and parenthesis with their Linux-compatible equivalents.
return path.replace(' ', '\\ ').replace('(', '\\(').replace(')', '\\)')
def __copy_files_path(self, menu, files):
pathstr = None
# Get the paths for all the files.
# Also, strip any protocol headers, if required.
paths = [self.__sanitize_path(fileinfo.get_location().get_path())
for fileinfo in files]
# Append to the path string
if len(files) > 1:
pathstr = '\n'.join(paths)
elif len(files) == 1:
pathstr = paths[0]
# Set clipboard text
if pathstr is not None:
self.clipboard.set_text(pathstr, -1)
def __copy_dir_path(self, menu, path):
if path is not None:
            pathstr = self.__sanitize_path(path.get_location().get_path())
self.clipboard.set_text(pathstr, -1)
def get_file_items(self, window, files):
# If there are many items to copy, change the label
# to reflect that.
if len(files) > 1:
item_label = 'Copy Paths'
else:
item_label = 'Copy Path'
item_copy_path = Nautilus.MenuItem(
name='PathUtils::CopyPath',
label=item_label,
tip='Copy the full path to the clipboard'
)
item_copy_path.connect('activate', self.__copy_files_path, files)
return item_copy_path,
def get_background_items(self, window, file):
item_copy_dir_path = Nautilus.MenuItem(
name='PathUtils::CopyCurrentDirPath',
label='Copy Directory Path',
tip='''Copy the current directory's path to the clipboard'''
)
item_copy_dir_path.connect('activate', self.__copy_dir_path, file)
return item_copy_dir_path,
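The escaping rule in `__sanitize_path` touches only spaces and parentheses. A standalone sketch of the same rule with a quick self-check:

```python
def sanitize_path(path):
    """Same rule as __sanitize_path above: escape spaces and parentheses."""
    return path.replace(' ', '\\ ').replace('(', '\\(').replace(')', '\\)')

assert sanitize_path('/tmp/my file (copy).txt') == '/tmp/my\\ file\\ \\(copy\\).txt'
```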

karllessard/tensorflow | tensorflow/lite/testing/op_tests/greater.py | Python | apache-2.0 | 2,735 | 0.001828

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for conv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_greater_tests(options):
"""Make a set of tests to do greater."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
"fully_quantize": [False],
}, {
"input_dtype": [tf.float32],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]), ([2, 3, 3], [2, 3])],
"fully_quantize": [True],
}]
def build_graph(parameters):
"""Build the greater op testing graph."""
input_value1 = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.compat.v1.placeholder(
dtype=parameters["
|
input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater(input_value1, input_value2)
return [input_value1, input_value2], [out]
  def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
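The shape pairs in `test_parameters` exercise broadcasting. NumPy follows the same broadcasting rules as `tf.greater`, so the broadcastable pairs can be checked without TensorFlow; pairs like `([2, 3, 3], [2, 3])` are not broadcastable, which is presumably what `expected_tf_failures=4` accounts for. A sketch:

```python
import numpy as np

pairs = [([1, 1, 1, 3], [1, 1, 1, 3]), ([5, 5], [1]), ([10], [2, 4, 10])]
for shape_a, shape_b in pairs:
    out = np.greater(np.zeros(shape_a), np.zeros(shape_b))
    print(shape_a, shape_b, '->', out.shape)
# [1, 1, 1, 3] [1, 1, 1, 3] -> (1, 1, 1, 3)
# [5, 5] [1] -> (5, 5)
# [10] [2, 4, 10] -> (2, 4, 10)
```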

Hearen/OnceServer | pool_management/bn-xend-core/util/path.py | Python | mit | 330 | 0.036364

SBINDIR="/usr/sbin"
BINDIR="/usr/bin"
LIBEXEC="/usr/lib/xen/bin"
LIBDIR="/usr/lib64"
SHAREDIR="/usr/share"
PRIVATE_BINDIR="/usr/lib64/xen/bin"
XENFIRMWAREDIR="/usr/lib/xen/boot"
XEN_CONFIG_DIR="/etc/xen"
XEN_SCRIPT_DIR="/etc/xen/scripts"
XEN_LOCK_DIR="/var/lock"
XEN_RUN_DIR="/var/run/xen"
XEN_PAGING_DIR="/var/lib/xen/xenpaging"

lehmannro/translate | storage/csvl10n.py | Python | gpl-2.0 | 7,279 | 0.001786

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of comma-separated values (.csv) files (csvunit)
or entire files (csvfile) for use with localisation
"""
import csv
from translate.misc import sparse
from translate.storage import base
class SimpleDictReader:
def __init__(self, fileobj, fieldnames):
self.fieldnames = fieldnames
self.contents = fileobj.read()
self.parser = sparse.SimpleParser(defaulttokenlist=[",", "\n"], whitespacechars="\r")
self.parser.stringescaping = 0
self.parser.quotechars = '"'
self.tokens = self.parser.tokenize(self.contents)
self.tokenpos = 0
def __iter__(self):
return self
def getvalue(self, value):
"""returns a value, evaluating strings as neccessary"""
if (value.startswith("'") and value.endswith("'")) or (value.startswith('"') and value.endswith('"')):
return sparse.stringeval(value)
else:
return value
def next(self):
lentokens = len(self.tokens)
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
if self.tokenpos >= lentokens:
raise StopIteration()
thistokens = []
while self.tokenpos < lentokens and self.tokens[self.tokenpos] != "\n":
thistokens.append(self.tokens[self.tokenpos])
self.tokenpos += 1
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
fields = []
# patch together fields since we can have quotes inside a field
currentfield = ''
fieldparts = 0
for token in thistokens:
if token == ',':
# a field is only quoted if the whole thing is quoted
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
currentfield = ''
fieldparts = 0
else:
currentfield += token
fieldparts += 1
# things after the last comma...
if fieldparts:
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
values = {}
for fieldnum in range(len(self.fieldnames)):
if fieldnum >= len(fields):
values[self.fieldnames[fieldnum]] = ""
else:
values[self.fieldnames[fieldnum]] = fields[fieldnum]
return values
class csvunit(base.TranslationUnit):
spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
def __init__(self, source=None):
super(csvunit, self).__init__(source)
self.comment = ""
self.source = source
self.target = ""
def add_spreadsheet_escapes(self, source, target):
"""add common spreadsheet escapes to two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(unescaped):
source = source.replace(unescaped, escaped, 1)
if target.startswith(unescaped):
target = target.replace(unescaped, escaped, 1)
return source, target
def remove_spreadsheet_escapes(self, source, target):
"""remove common spreadsheet escapes from two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(escaped):
source = source.replace(escaped, unescaped, 1)
if target.startswith(escaped):
target = target.replace(escaped, unescaped, 1)
return source, target
def fromdict(self, cedict):
self.comment = cedict.get('location', '').decode('utf-8')
self.source = cedict.get('source', '').decode('utf-8')
self.target = cedict.get('target', '').decode('utf-8')
if self.comment is None:
self.comment = ''
if self.source is None:
self.source = ''
if self.target is None:
self.target = ''
self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
def todict(self, encoding='utf-8'):
comment, source, target = self.comment, self.source, self.target
source, target = self.add_spreadsheet_escapes(source, target)
if isinstance(comment, unicode):
comment = comment.encode(encoding)
if isinstance(source, unicode):
source = source.encode(encoding)
if isinstance(target, unicode):
target = target.encode(encoding)
return {'location': comment, 'source': source, 'target': target}
class csvfile(base.TranslationStore):
"""This class represents a .csv file with various lines.
The default format contains three columns: location, source, target"""
UnitClass = csvunit
Name = _("Comma Separated Value")
Mimetypes = ['text/comma-separated-values', 'text/csv']
Extensions = ["csv"]
def __init__(self, inputfile=None, fieldnames=None):
base.TranslationStore.__init__(self, unitclass=self.UnitClass)
self.units = []
if fieldnames is None:
self.fieldnames = ['location', 'source', 'target']
else:
if isinstance(fieldnames, basestring):
fieldnames = [fieldname.strip() for fieldname in fieldnames.split(",")]
self.fieldnames = fieldnames
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
csvsrc = inputfile.read()
inputfile.close()
self.parse(csvsrc)
def parse(self, csvsrc):
csvfile = csv.StringIO(csvsrc)
reader = SimpleDictReader(csvfile, self.fieldnames)
for row in reader:
newce = self.UnitClass()
newce.fromdict(row)
self.addunit(newce)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
csvfile = csv.StringIO()
writer = csv.DictWriter(csvfile, self.fieldnames)
for ce in self.units:
cedict = ce.todict()
writer.writerow(cedict)
csvfile.reset()
return "".join(csvfile.readlines())

ESOedX/edx-platform | openedx/core/djangoapps/credit/api/provider.py | Python | agpl-3.0 | 16,234 | 0.002649

"""
API for initiating and tracking requests for credit from a provider.
"""
from __future__ import absolute_import
import datetime
import logging
import uuid
import pytz
import six
from django.db import transaction
from edx_proctoring.api import get_last_exam_completion_date
from openedx.core.djangoapps.credit.exceptions import (
CreditProviderNotConfigured,
CreditRequestNotFound,
InvalidCreditStatus,
RequestAlreadyCompleted,
UserIsNotEligible
)
from openedx.core.djangoapps.credit.models import (
CreditEligibility,
CreditProvider,
CreditRequest,
CreditRequirementStatus
)
from openedx.core.djangoapps.credit.signature import get_shared_secret_key, signature
from student.models import CourseEnrollment, User
from util.date_utils import to_timestamp
from util.json_request import JsonResponse
# TODO: Cleanup this mess! ECOM-2908
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
"""Retrieve all available credit providers or filter on given providers_list.
Arguments:
providers_list (list of strings or None): contains list of ids of credit providers
or None.
Returns:
list of credit providers represented as dictionaries
Response Values:
>>> get_credit_providers(['hogwarts'])
[
{
"id": "hogwarts",
"name": "Hogwarts School of Witchcraft and Wizardry",
"url": "https://credit.example.com/",
"status_url": "https://credit.example.com/status/",
"description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": false,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
},
...
]
"""
return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id): # pylint: disable=unused-argument
"""
Retrieve the 'CreditProvider' model data against provided
credit provider.
Args:
provider_id (str): The identifier for the credit provider
Returns: 'CreditProvider' data dictionary
Example Usage:
>>> get_credit_provider_info("hogwarts")
{
"provider_id": "hogwarts",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
"provider_url": "https://credit.example.com/",
"provider_status_url": "https://credit.example.com/status/",
"provider_description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": False,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
"thumbnail_url": "https://credit.example.com/logo.png"
}
"""
credit_provider = CreditProvider.get_credit_provider(provider_id=provider_id)
credit_provider_data = {}
if credit_provider:
credit_provider_data = {
"provider_id": credit_provider.provider_id,
"display_name": credit_provider.display_name,
"provider_url": credit_provider.provider_url,
"provider_status_url": credit_provider.provider_status_url,
"provider_description": credit_provider.provider_description,
"enable_integration": credit_provider.enable_integration,
"fulfillment_instructions": credit_provider.fulfillment_instructions,
"thumbnail_url": credit_provider.thumbnail_url
}
return JsonResponse(credit_provider_data)
@transaction.atomic
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
This will return the parameters that the user's browser will need to POST
to the credit provider. It does NOT calculate the signature.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
These parameters will be digitally signed using a secret key shared with the credit provider.
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
(Other parameters, such as the user's full name may be different than the original request).
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
username (str): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": "0.95",
"user_username": "ron",
"user_email": "ron@example.com",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = CreditProvider.objects.get(provider_id=provider_id)
except CreditEligibility.DoesNotExist:
log.warning(
u'User "%s" tried to initiate a request for credit in course "%s", '
u'but the user is not eligible for credit',
username, course_key
)
raise UserIsNotEligible
except CreditProvider.DoesNotExist:
log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
raise CreditProviderNotConfigured
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": cred

frastlin/PyAudioGame | pyaudiogame/accessible_output2/outputs/say.py | Python | mit | 535 | 0.050467

import os
from .base import Output
class AppleSay(Output):
"""Speech output supporting the Apple Say subsystem."""
name = 'Apple Say'
def __init__(self, voice = 'Alex', rate = '300'):
self.voice = voice
self.rate = rate
super(AppleSay, self).__init__()
    def is_active(self):
return not os.system('which say')
    def speak(self, text, interrupt = 0):
if interrupt:
self.silence()
os.system('say -v %s -r %s "%s" &' % (self.voice, self.rate, text))
def silence(self):
os.system('killall say')
output_class = AppleSay

samrushing/cys2n | cys2n/__init__.py | Python | bsd-2-clause | 2,452 | 0.01509

# -*- Mode: Python -*-
import socket
import unittest
__version__ = '0.1.1'
from .cys2n import *
protocol_version_map = {
'SSLv2' : 20,
'SSLv3' : 30,
'TLS10' : 31,
'TLS11' : 32,
'TLS12' : 33,
}
class PROTOCOL:
reverse_map = {}
for name, val in protocol_version_map.items():
setattr (PROTOCOL, name, val)
PROTOCOL.reverse_map[val] = name
class s2n_socket:
def __init__ (self, cfg, pysock, conn=None):
self.cfg = cfg
self.sock = pysock
self.fd = pysock.fileno()
self.conn = conn
self.negotiated = False
def __repr__ (self):
return '<s2n sock=%r conn=%r @%x>' % (self.sock, self.conn, id (self))
def bind (self, *args, **kwargs):
return self.sock.bind (*args, **kwargs)
def listen (self, *args, **kwargs):
return self.sock.listen (*args, **kwargs)
def accept (self):
sock, addr = self.sock.accept()
        conn = Connection (MODE.SERVER)
conn.set_config (self.cfg)
conn.set_fd (sock.fileno())
# XXX verify
new = self.__class__ (self.cfg, sock, conn)
return new, addr
# XXX client mode as yet untested.
def connect (self, addr):
self.sock.connect (addr)
        self.conn = Connection (MODE.CLIENT)
self.conn.set_config (self.cfg)
self.conn.set_fd (self.fd)
def _check_negotiated (self):
if not self.negotiated:
self.negotiate()
def negotiate (self):
if not self.negotiated:
self.conn.negotiate()
self.negotiated = True
def recv (self, block_size):
self._check_negotiated()
r = []
left = block_size
while left:
b, more = self.conn.recv (left)
r.append (b)
if not more:
break
else:
left -= len(b)
return b''.join (r)
def send (self, data):
self._check_negotiated()
pos = 0
left = len(data)
while left:
n, more = self.conn.send (data, pos)
pos += n
if not more:
break
else:
pass
left -= n
return pos
def shutdown (self, how=None):
more = 1
while more:
more = self.conn.shutdown()
def close (self):
try:
self.shutdown()
finally:
self.sock.close()
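`recv` above accumulates chunks until the connection reports no more data. The same exact-read pattern on a plain blocking socket, as a standalone sketch for comparison (not part of cys2n):

```python
def recv_exactly(sock, nbytes):
    """Read exactly nbytes from a blocking socket, or fewer on early close."""
    chunks = []
    remaining = nbytes
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:              # peer closed the connection early
            break
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
```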

thunderoy/dgplug_training | assignments/assign4.py | Python | mit | 121 | 0.008264

#!/usr/bin/env python3
import pwd
for p in pwd.getpwall():
if p.pw_shell.endswith('/bin/bash'):
print(p[0])

girvan/pili | test/base_real.py | Python | lgpl-3.0 | 501 | 0.003992

print " Content-Type: text/html; charset=utf-8"
print ""
print """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<script type="text/javascript" src="http://yui.yahooapis.com/combo?3.3.0/build/yui/yui-min.js&3.3.0/build/loader/loader-min.js"></script>
</head>
<body class="yui3-skin-sam yui-skin-sam">
<h1>Test pili.lite in real case</h1>
<div id="testLogger"></div>
<script type='text/javascript'>
%s
</script>
</body>
</html>
""" % open("base_real.js").read()

mumuwoyou/vnpy-master | sonnet/python/modules/spatial_transformer.py | Python | mit | 23,304 | 0.006222

# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Implementation of Spatial Transformer networks core components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from itertools import chain
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
import tensorflow as tf
class GridWarper(base.AbstractModule):
"""Grid warper interface class.
An object implementing the `GridWarper` interface generates a reference grid
of feature points at construction time, and warps it via a parametric
transformation model, specified at run time by an input parameter Tensor.
Grid warpers must then implement a `create_features` function used to generate
the reference grid to be warped in the forward pass (according to a determined
warping model).
"""
def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
"""Constructs a GridWarper module and initializes the source grid params.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
num_coeff: Number of coefficients parametrizing the grid warp.
For example, a 2D affine transformation will be defined by the 6
parameters populating the corresponding 2x3 affine matrix.
name: Name of Module.
**kwargs: Extra kwargs to be forwarded to the `create_features` function,
instantiating the source grid parameters.
Raises:
Error: If `len(output_shape) > len(source_shape)`.
TypeError: If `output_shape` and `source_shape` are not both iterable.
"""
super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
if len(self._output_shape) > len(self._source_shape):
raise base.Error('Output domain dimensionality ({}) must be equal or '
'smaller than source domain dimensionality ({})'
.format(len(self._output_shape),
len(self._source_shape)))
self._num_coeff = num_coeff
self._psi = self._create_features(**kwargs)
@abc.abstractmethod
def _create_features(self, **kwargs):
"""Generates matrix of features, of size `[num_coeff, num_points]`."""
pass
@property
def n_coeff(self):
"""Returns number of coefficients of warping function."""
    return self._num_coeff
@property
def psi(self):
"""Returns a list of features used to compute the grid warp."""
return self._psi
@property
def source_shape(self):
"""Returns a tuple containing the shape of the source signal."""
return self._source_shape
@property
def output_shape(self):
"""Returns a tuple containing the shape of the output grid."""
return self._output_shape
def _create_affine_features(output_shape, source_shape):
"""Generates n-dimensional homogenous coordinates for a given grid definition.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
output_shape: Iterable of integers determining the shape of the grid to be
warped.
source_shape: Iterable of integers determining the domain of the signal to be
resampled.
Returns:
List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for
example:
```
[[x_0_0, .... , x_0_{n-1}],
....
[x_{M-1}_0, .... , x_{M-1}_{n-1}],
[x_{M}_0=0, .... , x_{M}_{n-1}=0],
...
[x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0],
[1, ..., 1]]
```
where N is the dimensionality of the sampled space, M is the
dimensionality of the output space, i.e. 2 for images
and 3 for volumes, and n is the number of points in the output grid.
When the dimensionality of `output_shape` is smaller than that of
`source_shape` the last rows before [1, ..., 1] will be filled with 0.
"""
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')]
dim_gap = len(source_shape) - len(output_shape)
for _ in xrange(dim_gap):
psi.append(np.zeros_like(psi[0], dtype=np.float32))
psi.append(np.ones_like(psi[0], dtype=np.float32))
return psi
class AffineGridWarper(GridWarper):
"""Affine Grid Warper class.
The affine grid warper generates a reference grid of n-dimensional points
and warps it via an affine transformation model determined by an input
parameter Tensor. Some of the transformation parameters can be fixed at
construction time via an `AffineWarpConstraints` object.
"""
def __init__(self,
source_shape,
output_shape,
constraints=None,
name='affine_grid_warper'):
"""Constructs an AffineGridWarper.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
constraints: Either a double list of shape `[N, N+1]` defining constraints
on the entries of a matrix defining an affine transformation in N
dimensions, or an `AffineWarpConstraints` object. If the double list is
passed, a numeric value bakes in a constraint on the corresponding
entry in the transformation matrix, whereas `None` implies that the
corresponding entry will be specified at run time.
name: Name of module.
Raises:
Error: If constraints fully define the affine transformation; or if
input grid shape and constraints have different dimensionality.
TypeError: If output_shape and source_shape are not both iterable.
"""
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
self._constraints = constraints
elif constraints is None:
self._constraints = AffineWarpConstraints.no_constraints(num_dim)
else:
self._constraints = AffineWarpConstraints(constraints=constraints)
if self._constraints.num_free_params == 0:
raise base.Error('Transformation is fully constrained.')
if self._co
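The excerpt ends mid-constructor, but `_create_affine_features` is shown in full, so its output can be checked directly. For a 2x3 output grid over a 2-D source it returns three flattened arrays: x coordinates, y coordinates, and the homogeneous row of ones (a sketch, run in the module's context):

```python
import numpy as np

psi = _create_affine_features(output_shape=[2, 3], source_shape=[2, 3])
print(len(psi))   # 3 -> x coords, y coords, homogeneous ones
print(psi[0])     # [-1.  0.  1. -1.  0.  1.]   (x varies fastest)
print(psi[1])     # [-1. -1. -1.  1.  1.  1.]
print(psi[2])     # [ 1.  1.  1.  1.  1.  1.]
```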

qvazzler/Flexget | flexget/plugins/operate/rerun.py | Python | mit | 643 | 0

from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('rerun')
class MaxReRuns(object):
"""Force a task to rerun for debugging purposes."""
    schema = {'type': ['boolean', 'integer']}
def on_task_start(self, task, config):
task.max_reruns = int(config)
def on_task_input(self, task, config):
task.rerun()
@event('plugin.register')
def register_plugin():
plugin.register(MaxReRuns, 'rerun', api_ver=2, debug=True)

kianmeng/codekata | rosalind/001_dna_counting_nucleotides/counting_nucleotides.py | Python | gpl-3.0 | 1,370 | 0.00073

# -*- coding: utf-8 -*-
# author : kian-meng, ang
#
# input : a dna string at most 100 nt (NucleoTides)
# output : 20 12 17 21
#
# $ python counting_nucleotides.py
# input : a dna string at most 100 nt (NucleoTides)
# output : 20 12 17 21
f = open("rosalind_dna.txt", "r")
dna_string = f.read()
# method 1: using count()
print "%d %d %d %d" % (
dna_string.count('A'), dna_string.count('C'),
dna_string.count('G'), dna_string.count('T')
)
# method 2: using list
for char in ['A', 'C', 'G', 'T']:
print dna_string.count(char),
print ""
# method 3: generate the unique characters
# we need to strip the newline \n and sort the result.
# @see http://stackoverflow.com/a/13902829
nucleotides = ''.join(sorted(set(dna_string.strip())))
for char in nucleotides:
print dna_string.count(char),
print ""
# method 4: using collections
# @see http://codereview.stackexchange.com/a/27784
# @see https://docs.python.org/2/library/collections.html#counter-objects
# @see http://stackoverflow.com/a/17930886
from collections import Counter
nucleotides_count = sorted(Counter(dna_string.strip()).items())
for _, count in nucleotides_count:
print count,
print ""
# method 5: using collections but different approach
counter = Counter()
for char in ''.join(dna_string.strip()):
counter[char] += 1
for _, count in sorted(counter.items()):
print count,
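The five methods above are Python 2. A Python 3 rendering of methods 4/5, assuming the same rosalind_dna.txt input:

```python
from collections import Counter

with open("rosalind_dna.txt") as f:
    counts = Counter(f.read().strip())

print(" ".join(str(counts[base]) for base in "ACGT"))
```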

icloudrnd/automation_tools | openstack_dashboard/dashboards/tasks/history/panel.py | Python | apache-2.0 | 243 | 0.004115

from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.tasks import dashboard
class History(horizon.Panel):
name = _("History")
slug = "history"
dashboard.Tasks.register(History)

gregoriorobles/drPencilcode | app/migrations/0010_file_time.py | Python | agpl-3.0 | 417 | 0

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0009_file_method'),
]
operations = [
migrations.AddField(
model_name='file',
            name='time',
field=models.TextField(default=0),
preserve_default=False,
),
]

Toshakins/wagtail | wagtail/contrib/wagtailroutablepage/models.py | Python | bsd-3-clause | 3,477 | 0.000575

from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from django.core.urlresolvers import RegexURLResolver
from django.http import Http404
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.url_routing import RouteResult
_creation_counter = 0
def route(pattern, name=None):
def decorator(view_func):
global _creation_counter
_creation_counter += 1
# Make sure page has _routablepage_routes attribute
        if not hasattr(view_func, '_routablepage_routes'):
view_func._routablepage_routes = []
# Add new route to view
view_func._routablepage_routes.append((
url(pattern, view_func, name=(name or view_func.__name__)),
_creation_counter,
))
return view_func
return decorator
class RoutablePageMixin(object):
"""
This class can be mixed in to a Page model, allowing extra routes to be
added to it.
"""
@classmethod
def get_subpage_urls(cls):
routes = []
for attr in dir(cls):
val = getattr(cls, attr, None)
if hasattr(val, '_routablepage_routes'):
routes.extend(val._routablepage_routes)
return tuple([
route[0]
for route in sorted(routes, key=lambda route: route[1])
])
@classmethod
def get_resolver(cls):
if '_routablepage_urlresolver' not in cls.__dict__:
subpage_urls = cls.get_subpage_urls()
cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)
return cls._routablepage_urlresolver
def reverse_subpage(self, name, args=None, kwargs=None):
"""
This method takes a route name/arguments and returns a URL path.
"""
args = args or []
kwargs = kwargs or {}
return self.get_resolver().reverse(name, *args, **kwargs)
def resolve_subpage(self, path):
"""
This method takes a URL path and finds the view to call.
"""
view, args, kwargs = self.get_resolver().resolve(path)
# Bind the method
view = view.__get__(self, type(self))
return view, args, kwargs
def route(self, request, path_components):
"""
This hooks the subpage URLs into Wagtail's routing.
"""
if self.live:
try:
path = '/'
if path_components:
path += '/'.join(path_components) + '/'
view, args, kwargs = self.resolve_subpage(path)
return RouteResult(self, args=(view, args, kwargs))
except Http404:
pass
return super(RoutablePageMixin, self).route(request, path_components)
def serve(self, request, view=None, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
if view is None:
return super(RoutablePageMixin, self).serve(request, *args, **kwargs)
return view(request, *args, **kwargs)
def serve_preview(self, request, mode_name):
view, args, kwargs = self.resolve_subpage('/')
request.is_preview = True
return view(request, *args, **kwargs)
class RoutablePage(RoutablePageMixin, Page):
"""
This class extends Page by adding methods which allows extra routes to be
added to it.
"""
class Meta:
abstract = True
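A sketch of how the mixin and decorator above are meant to be combined on a concrete page model (model fields elided; the route patterns are illustrative):

```python
class EventPage(RoutablePageMixin, Page):
    # model fields elided

    @route(r'^$')
    def current_events(self, request):
        # served at the page's own URL
        return super(EventPage, self).serve(request)

    @route(r'^year/(\d{4})/$', name='events_for_year')
    def events_for_year(self, request, year):
        # page.reverse_subpage('events_for_year', args=['2016'])
        # yields 'year/2016/' relative to the page URL
        return super(EventPage, self).serve(request)
```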

EdDev/vdsm | lib/vdsm/virt/metadata.py | Python | gpl-2.0 | 17,301 | 0

#
# Copyright 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
"""
This module allows to store and retrieve key/value pairs into the etree
representation of a libvirt domain XML. Each set of key/value pairs will be
stored under one first-level child of the metadata. Example:
<metadata>
<group1>
<a>1</a>
<b>2</b>
</group1>
<group2>
<c>3</c>
<d>4</d>
</group2>
</metadata>
The key/value pairs must comply with those requirements:
- keys must be python basestrings
- values must be one of: basestring, int, float
- containers are not supported values; the metadata
namespace is flat, and you cannot nest objects.
- partial updates are forbidden. You must overwrite all the key/value
pairs in a given set (hereafter referred as 'group') at the same time.
The flow is:
1. read the metadata using this module
2. update the data you need to work with
3. send back the metadata using this module
"""
from contextlib import contextmanager
import xml.etree.ElementTree as ET
import libvirt
import six
from vdsm.common import errors
from vdsm.virt import vmxml
from vdsm.virt import xmlconstants
_DEVICE = 'device'
class Error(errors.Base):
"""
Generic metadata error
"""
class UnsupportedType(Error):
"""
Unsupported python type. Supported python types are:
* ints
* floats
* string
"""
class MissingDevice(Error):
"""
Failed to uniquely identify one device using the given attributes.
"""
class Metadata(object):
"""
Use this class to load or dump a group (see the module docstring) from
or to a metadata element.
Optionally handles the XML namespaces. You will need the namespace
handling when building XML for the VM startup; when updating the
metadata, libvirt will take care of that.
See also the docstring of the `create` function.
"""
def __init__(self, namespace=None, namespace_uri=None):
"""
:param namespace: namespace to use
:type namespace: text string
:param namespace_uri: URI of the namespace to use
:type namespace_uri: text string
"""
        self._namespace = namespace
self._namespace_uri = namespace_uri
self._prefix = None
if namespace is not None:
ET.register_namespace(namespace, namespace_uri)
self._prefix = '{%s}' % self._namespace_uri
def load(self, elem):
"""
Load the content of the given metadata element `elem`
into a python object, trying to recover the correct types.
To recover the types, this function relies on the element attributes
added by the `dump` method. Without them, the function will
still load the content, but everything will be a string.
Example:
<example>
<a>some value</a>
<b type="int">1</b>
</example>
elem = vmxml.parse_xml(...)
md = Metadata()
md.load(elem) -> {'a': 'some value', 'b': 1}
:param elem: root of the ElementTree to load
:type elem: ElementTree.Element
:returns: content of the group
:rtype: dict of key/value pairs. See the module docstring for types
"""
values = {}
for child in elem:
key, val = _elem_to_keyvalue(child)
values[self._strip_ns(key)] = val
return values
def dump(self, name, **kwargs):
"""
Dump the given arguments into the `name` metadata element.
This function transparently adds the type hints as element attributes,
so `load` can restore them.
Example:
md = Metadata()
md.dump('test', bar=42) -> elem
vmxml.format_xml(elem) ->
<test>
<bar type="int">42</bar>
</test>
:param name: group to put in the metadata
:type name: text string
:param namespace: namespace to use
:type namespace: text string
:param namespace_uri: URI of the namespace to use
:type namespace_uri: text string
:return: the corresponding element
:rtype: ElementTree.Element
kwargs: stored as subelements
"""
elem = ET.Element(self._add_ns(name))
for key, value in kwargs.items():
_keyvalue_to_elem(self._add_ns(key), value, elem)
return elem
def _add_ns(self, tag):
"""
Decorate the given tag with the namespace, if used
"""
return (self._prefix or '') + tag
def _strip_ns(self, tag):
"""
Remove the namespace from the given tag
"""
return tag.replace(self._prefix, '') if self._prefix else tag
def create(name, namespace, namespace_uri, **kwargs):
"""
Create one `name` element.
Use this function to initialize one empty metadata element,
at XML creation time.
Example:
metadata.create('vm', 'ovirt-vm', 'http://ovirt.org/vm/1.0',
version=4.2) -> elem
vmxml.format_xml(elem) ->
<ovirt-vm:vm xmlns:ovirt-vm="http://ovirt.org/vm/1.0">
<ovirt-vm:version type="float">4.2</ovirt-vm:version>
</ovirt-vm:vm>
:param name: group to put in the metadata
:type name: text string
:param namespace: namespace to use
:type namespace: text string
:param namespace_uri: URI of the namespace to use
:type namespace_uri: text string
:return: the corresponding element
:rtype: ElementTree.Element
kwargs: stored as subelements
"""
# here we must add the namespaces ourselves
metadata_obj = Metadata(namespace, namespace_uri)
return metadata_obj.dump(name, **kwargs)
def from_xml(xml_str):
"""
Helper function to parse the libvirt domain metadata used by oVirt
form one domain XML. Useful in the VM creation flow, when the
libvirt Domain is not yet started.
Example:
given this XML:
test_xml ->
<?xml version="1.0" encoding="utf-8"?>
<domain type="kvm" xmlns:ovirt-vm="http://ovirt.org/vm/1.0">
<metadata>
<ovirt-vm:vm>
<ovirt-vm:version type="float">4.2</ovirt-vm:version>
<ovirt-vm:custom>
<ovirt-vm:foo>bar</ovirt-vm:foo>
</ovirt-vm:custom>
</ovirt-vm:vm>
</metadata>
</domain>
metadata.from_xml(test_xml) ->
{
'version': 4.2,
'custom':
{
'foo': 'bar'
},
}
:param xml_str: domain XML to parse
:type name: text string
:return: the parsed metadata
:rtype: Python dict, whose keys are always strings.
No nested objects are allowed, with the only exception of
the special 'custom' key, whose value will be another
Python dictionary whose keys are strings, with no
further nesting allowed.
"""
metadata_obj = Metadata(
xmlconstants.METADATA_VM_VDSM_PREFIX,
xmlconstants.METADATA_VM_VDSM_URI
)
root = vmxml.parse_xml(xml_str)
md_elem = root.find(
'./metadata/{%s}%s' % (
xmlconstants.METADATA_VM_VDSM_URI,
xmlconstants.METADATA_VM_VDSM_ELEMENT
)
)
if md_elem is None:
return {}
md_data = metadata_obj.load(md_elem)
custom_elem = root.find(
'./metadata/{%s}%s/{%s}custom' % (
xmlconstants.METADATA_VM_VDSM_URI,
xmlconstants.METAD
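The excerpt cuts off inside `from_xml`, but the `dump`/`load` pair above is complete enough to sketch a round trip. Note it relies on the module's `_keyvalue_to_elem` and `_elem_to_keyvalue` helpers, defined past the end of this excerpt:

```python
# Sketch only: assumes the full metadata module is importable.
md = Metadata()
elem = md.dump('test', bar=42)
# vmxml.format_xml(elem) -> <test><bar type="int">42</bar></test>
assert md.load(elem) == {'bar': 42}
```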

felipenaselva/repo.felipe | plugin.video.salts/scrapers/xmovies8v2_scraper.py | Python | gpl-2.0 | 7,473 | 0.007895

"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import time
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
from salts_lib.constants import Q_ORDER
from salts_lib.constants import XHR
import scraper
BASE_URL = 'http://xmovies8.tv'
PLAYER_URL = '/ajax/movie/load_player_v2'
EPISODES_URL = '/ajax/movie/load_episodes'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'xmovies8.v2'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
sources = {}
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.5)
html = self.__get_players(html, page_url)
players = list(set(re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", html)))
player_url = urlparse.urljoin(self.base_url, PLAYER_URL)
for link_id, height in players:
params = {'id': link_id, 'quality': height, '_': int(time.time() * 1000)}
player_url2 = player_url + '?' + urllib.urlencode(params)
headers = {'Referer': page_url, 'Accept-Encoding': 'gzip, deflate', 'Server': 'cloudflare-nginx', 'Accept-Formating': 'application/json, text/javascript'}
headers.update(XHR)
html = self._http_get(player_url2, headers=headers, cache_limit=0)
js_data = scraper_utils.parse_json(html, player_url)
if 'link' in js_data and js_data['link']:
link_url = js_data['link']
if 'player_v2.php' in link_url:
headers = {'Referer': page_url}
html = self._http_get(link_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=.25)
if html.startswith('http'):
if self._get_direct_hostname(html) == 'gvideo':
quality = scraper_utils.gv_get_quality(html)
sources[html] = {'quality': quality, 'direct': True}
else:
if height != '0':
quality = scraper_utils.height_get_quality(height)
else:
quality = QUALITIES.HIGH
sources[html] = {'quality': quality, 'direct': False}
if not kodi.get_setting('scraper_url') and Q_ORDER[quality] >= Q_ORDER[QUALITIES.HD720]: break
for source in sources:
direct = sources[source]['direct']
quality = sources[source]['quality']
if direct:
host = self._get_direct_hostname(source)
else:
host = urlparse.urlparse(source).hostname
stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
hosters.append(hoster)
return hosters
def __get_players(self, html, page_url):
url = urlparse.urljoin(self.base_url, EPISODES_URL)
match = re.search("data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+),\s*fr
|
om:\s*'([^']+)", html)
if match:
show_id, ep_id, link_id, from_id = match.groups()
data = {'id': show_id, 'episode_id': ep_id, 'link_id': link_id, 'from': from_id}
headers = {'Referer': page_url, 'Accept-Formating': 'application/json, text/javascript', 'Server': 'cloudflare-nginx'}
headers.update(XHR)
            html = self._http_get(url, data=data, headers=headers, cache_limit=1)
return html
def _get_episode_url(self, season_url, video):
season_url = urlparse.urljoin(self.base_url, season_url)
html = self._http_get(season_url, cache_limit=.5)
html = self.__get_players(html, season_url)
episode_pattern = 'href="([^"]+)[^>]+class="[^"]*btn-episode[^>]*>(?:Episode)?\s*0*%s<' % (video.episode)
match = re.search(episode_pattern, html)
if match:
return scraper_utils.pathify_url(match.group(1))
def search(self, video_type, title, year, season=''):
results = []
search_url = urlparse.urljoin(self.base_url, '/movies/search?s=%s' % urllib.quote_plus(title))
html = self._http_get(search_url, cache_limit=8)
for item in dom_parser.parse_dom(html, 'div', {'class': '[^"]*c-content-product-2[^"]*'}):
match_title_year = dom_parser.parse_dom(item, 'h2', {'class': '[^"]*c-title[^"]*'})
match_url = dom_parser.parse_dom(item, 'a', ret='href')
if match_title_year and match_url:
match_title_year = match_title_year[0]
match_url = match_url[0]
is_season = re.search('Season\s+\d+', match_title_year, re.I)
if (video_type == VIDEO_TYPES.MOVIE and not is_season) or (video_type == VIDEO_TYPES.SEASON and is_season):
match_year = ''
if video_type == VIDEO_TYPES.SEASON:
match_title = match_title_year
if season and not re.search('Season\s+(%s)\s+' % (season), match_title_year, re.I):
continue
else:
match = re.search('(.*?)\s+\((\d{4})\)', match_title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = match_title_year
match_year = ''
match_url = urlparse.urljoin(match_url, 'watching.html')
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
|
cfelton/myhdl
|
example/manual/GrayInc.py
|
Python
|
lgpl-2.1
| 1,048
| 0.016221
|
import myhdl
from myhdl import *
from bin2gray2 import bin2gray
from inc import Inc
def GrayInc(graycnt, enable, clock, reset, width):
bincnt = Signal(modbv(0)[width:])
inc_1 = Inc(bincnt, enable, clock, reset)
bin2gray_1 = bin2gray(B=bincnt, G=graycnt, width=width)
return inc_1, bin2gray_1
def GrayIncReg(graycnt, enable, clock, reset, width):
graycnt_comb = Signal(modbv(0)[width:])
gray_inc_1 = GrayInc(graycnt_comb, enable, clock, reset, width)
@always(clock.posedge)
def reg_1():
graycnt.next = graycnt_comb
return gray_inc_1, reg_1
def main():
width = 8
graycnt = Signal(modbv(0)[width:])
enable = Signal(bool())
clock = Signal(bool())
reset = ResetSignal(0, active=0, async=True)
toVerilog(GrayIncReg, graycnt, enable, clock, reset, width)
toVHDL(GrayIncReg, graycnt, enable, clock, reset, width)
if __name__ == '__main__':
main()
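# Aside (not part of the original example): GrayInc realizes the standard
# binary-to-Gray mapping g = b ^ (b >> 1); a quick plain-Python sanity check
# that successive Gray codes differ in exactly one bit:
if __name__ == '__main__':
    _gray = lambda b: b ^ (b >> 1)
    assert all(bin(_gray(i) ^ _gray(i + 1)).count('1') == 1 for i in range(255))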
|
ixc/glamkit-eventtools
|
eventtools/utils/diff.py
|
Python
|
bsd-3-clause
| 2,524
| 0.004754
|
# -*- coding: utf-8 -*-
# pinched from django-moderation.
# modified to include rather than exclude, fields
import re
import difflib
def get_changes_between_models(model1, model2, include=[]):
from django.db.models import fields
changes = {}
for field_name in include:
field = type(model1)._meta.get_field(field_name)
value2 = unicode(getattr(model2, field_name))
value1 = unicode(getattr(model1, field_name))
if value1 != value2:
changes[field.verbose_name] = (value1, value2)
return changes
def get_diff(a, b):
out = []
sequence_matcher = difflib.SequenceMatcher(None, a, b)
for opcode in sequence_matcher.get_opcodes():
operation, start_a, end_a, start_b, end_b = opcode
deleted = ''.join(a[start_a:end_a])
inserted = ''.join(b[start_b:end_b])
if operation == "replace":
out.append('<del class="diff modified">%s</del>'\
'<ins class="diff modified">%s</ins>' % (deleted,
inserted))
elif operation == "delete":
out.append('<del class="diff">%s</del>' % deleted)
elif operation == "insert":
out.append('<ins class="diff">%s</ins>' % inserted)
elif operation == "equal":
out.append(inserted)
return out
def html_diff(a, b):
"""Takes in strings a and b and ret
|
urns a human-readable HTML diff."""
a, b = html_to_list(a), html_to_list(b)
diff = get_diff(a, b)
return u"".join(diff)
def html_to_list(html):
pattern = re.compile(r'&.*?;|(?:<[^<]*?>)|'\
'(?:\w[\w-]*[ ]*)|(?:<[^<]*?>)|'\
'(?:\s*[,\.\?]*)', re.UNICODE)
return [''.join(element) for element in filter(None,
pattern.findall(html))]
def generate_diff(instance1, instance2, include=[]):
from django.db.models import fields
changes = get_changes_between_models(instance1, instance2, include)
fields_diff = []
for field_name in include:
field = type(instance1)._meta.get_field(field_name)
field_changes = changes.get(field.verbose_name, None)
if field_changes:
change1, change2 = field_changes
if change1 != change2:
diff = {'verbose_name': field.verbose_name, 'diff': html_diff(change1, change2)}
fields_diff.append(diff)
return fields_diff
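# Usage sketch (hypothetical inputs, not part of the original module):
# html_diff wraps removed tokens in <del> and inserted tokens in <ins>, e.g.
#     html_diff(u'<p>red apple</p>', u'<p>green apple</p>')
# returns roughly
#     u'<p><del class="diff modified">red </del><ins class="diff modified">green </ins>apple</p>'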
|
Adai0808/SummyChou
|
BlogforSummyChou/manage.py
|
Python
|
gpl-2.0
| 259
| 0.003861
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BlogforSummyChou.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
ecotg/Flash-Card-App
|
app/views.py
|
Python
|
mit
| 1,100
| 0.048182
|
# views are handlers that respond to requests from web browsers or other clients
# Each view function maps to one or more request URLs
from flask import render_template, flash, redirect
from app import app
from .forms import Deck
#./run.py
@app.route('/submit', methods=('GET', 'POST'))
def submit():
form = Deck()
if form.validate_on_submit():
return redirect('/index')
return render_template('submit.html',
title='Create Card',
form=form)
@app.route('/')
@app.route('/index')
def index():
# This is displayed on client's web browser
user = {'nickname': 'Enrique Iglesias'} #fake user
decks = [
{
'title': 'GRE Words',
'cards': [
{
'word': 'combust',
'definition': 'to catch on fire'
},
{
'word': 'phaze',
'definition': 'to be affected'
}
]
},
{
'title': 'Food words',
'cards': [
{
'word': 'amuse bouche',
'definition': 'little serving'
},
{
'word': 'kimchii',
'definition': 'fermented cabbage'
}
]
}
]
return render_template('index.html',
title ='Home',
user=user,
posts=decks)
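# The Deck form imported from .forms is not shown in this excerpt; a minimal
# Flask-WTF definition consistent with how the view uses it might look like
# this (the field names are assumptions):
#
#     from flask_wtf import FlaskForm
#     from wtforms import StringField
#     from wtforms.validators import DataRequired
#
#     class Deck(FlaskForm):
#         title = StringField('title', validators=[DataRequired()])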
|
rht/zulip
|
zerver/lib/dev_ldap_directory.py
|
Python
|
apache-2.0
| 3,149
| 0.00127
|
import glob
import logging
import os
from typing import Any, Dict, List, Optional
from django.conf import settings
from zerver.lib.storage import static_path
# See https://jackstromberg.com/2013/01/useraccountcontrol-attributeflag-values/
# for docs on what these values mean.
LDAP_USER_ACCOUNT_CONTROL_NORMAL = "512"
LDAP_USER_ACCOUNT_CONTROL_DISABLED = "514"
def generate_dev_ldap_dir(mode: str, num_users: int = 8) -> Dict[str, Dict[str, Any]]:
mode = mode.lower()
ldap_data = []
for i in range(1, num_users + 1):
name = f"LDAP User {i}"
email = f"ldapuser{i}@zulip.com"
phone_number = f"999999999{i}"
birthdate = f"19{i:02}-{i:02}-{i:02}"
ldap_data.append((name, email, phone_number, birthdate))
profile_images = []
for path in glob.glob(os.path.join(static_path("images/team"), "*")):
with open(path, "rb") as f:
profile_images.append(f.read())
ldap_dir = {}
for i, user_data in enumerate(ldap_data):
email = user_data[1].lower()
email_username = email.split("@")[0]
common_data = {
"cn": [user_data[0]],
"userPassword": [email_username],
"phoneNumber": [user_data[2]],
"birthDate": [user_data[3]],
}
if mode == "a":
ldap_dir["uid=" + email + ",ou=users,dc=zulip,dc=com"] = dict(
uid=[email],
thumbnailPhoto=[profile_images[i % len(profile_images)]],
userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
**common_data,
)
elif mode == "b":
ldap_dir["uid=" + email_username + ",ou
|
=users,dc=zulip,dc=com"] = dict(
uid=[email_username],
jpegPhoto=[profile_images[i % len(profile_images)]],
**common_data,
)
elif mode == "c":
ldap_dir["uid=" + email_username + ",ou=users,dc=zulip,dc=com"] = dict(
uid=[email_username], email=[email], **common_data
)
return ldap_dir
def init_fakeldap(
directory: Optional[Dict[str, Dict[str, List[str]]]] = None
) -> None: # nocoverage
# We only use this in development. Importing mock inside
# this function is an import time optimization, which
# avoids the expensive import of the mock module (slow
# because its dependency pbr uses pkg_resources, which is
# really slow to import.)
from unittest import mock
from fakeldap import MockLDAP
# Silent `django_auth_ldap` logger in dev mode to avoid
# spammy user not found log messages.
ldap_auth_logger = logging.getLogger("django_auth_ldap")
ldap_auth_logger.setLevel(logging.CRITICAL)
fakeldap_logger = logging.getLogger("fakeldap")
fakeldap_logger.setLevel(logging.CRITICAL)
ldap_patcher = mock.patch("django_auth_ldap.config.ldap.initialize")
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = directory or generate_dev_ldap_dir(
settings.FAKE_LDAP_MODE, settings.FAKE_LDAP_NUM_USERS
)
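# For orientation, the DN shapes produced per mode (illustrative values,
# derived from the branches in generate_dev_ldap_dir above):
#   mode "a": uid=ldapuser1@zulip.com,ou=users,dc=zulip,dc=com  (thumbnailPhoto + userAccountControl)
#   mode "b": uid=ldapuser1,ou=users,dc=zulip,dc=com            (jpegPhoto)
#   mode "c": uid=ldapuser1,ou=users,dc=zulip,dc=com            (email attribute, no photo)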
|
ixc/django-fluent-contents
|
makemessages.py
|
Python
|
apache-2.0
| 634
| 0.012618
|
#!/usr/bin/env python
import os
import django
from os import path
from django.conf import settings
from django.core.management import call_command
def main():
if not settings.configured:
module_root = path.dirname(path.realpath(__file__))
settings.configure(
DEBUG = False,
INSTALLED_APPS = (
'fluent_contents',
),
)
if django.VERSION >= (1,7):
django.setup()
makemessages()
def makemessages():
os.chdir('fluent_contents')
call_command('makemessages', locale=('en', 'nl'), verbosity=1)
if __name__ == '__main__':
main()
|
pashango2/sphinx-explorer
|
sphinx_explorer/property_widget/property_model.py
|
Python
|
mit
| 24,672
| 0.000851
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
from .value_types import find_value_type, TypeBase
from six import string_types
import re
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from .default_value_dict import DefaultValues
from collections import OrderedDict
import markdown
if False:
from typing import Iterator, Dict, List, Any
__all__ = [
"PropertyModel",
"BaseItem",
"CategoryItem",
"PropertyItem",
"ValueItem",
"FlatTableModel",
]
CategoryItemType = QStandardItem.UserType + 1
PropertyItemType = CategoryItemType + 1
class PropertyModel(QStandardItemModel):
PrefixRe = re.compile(r"(^[#*-]*)\s*(.*)")
def __init__(self, parent=None):
"""
:param QWidget parent: parent widgets
"""
super(PropertyModel, self).__init__(parent)
self.setHorizontalHeaderLabels([
self.tr("Property"),
self.tr("Value")
])
self._default_dict = DefaultValues()
self._use_default = False
self.required_flag = True
def __getattr__(self, key):
return self.get(key)
def create_table_model(self, root_index, parent):
"""
Create table type model.
:param QModelIndex root_index: root index
:param QWidget parent: parent widget
:return: table type model
:rtype: FlatTableModel
"""
return FlatTableModel(self, root_index, parent)
def _load_settings(self, settings, parent_item, params_dict, default_values):
last_item = None
for setting in settings:
if isinstance(setting, dict) and setting:
key = list(setting.keys())[0]
setting_param = setting.get(key, [{}])
elif isinstance(setting, (list, tuple)):
assert last_item is not None
self._load_settings(setting, last_item, params_dict, default_values)
continue
elif isinstance(setting, string_types):
key = setting.strip()
setting_param = {}
else:
continue
if not key:
continue
g = self.PrefixRe.match(key)
category_flag = False
header_flag = False
vbox_flag = False
if g:
prefix, key = g.group(1), g.group(2)
category_flag = "#" in prefix
header_flag = "*" in prefix
vbox_flag = "-" in prefix
if category_flag:
label = setting_param.get("label", key)
last_item = self.add_category(
parent_item, key, label,
header_flag, setting_param
)
last_item.vbox_flag = vbox_flag
else:
_params_dict = params_dict.get(key, {}).copy()
_params_dict.update(setting_param)
value = setting_param.get("value")
default = setting_param.get("default")
if default is None:
default = self._get_default_value(parent_item, key, default_values)
if default is None:
default = params_dict.get(key, {}).get("default")
if header_flag:
_params_dict["required"] = True
_params_dict["require_input"] = True
last_item = self.add_property(parent_item, key, value, default, _params_dict)
@staticmethod
def _get_default_value(parent_item, key, default_values):
# hierarchy access
if parent_item and parent_item.index().isValid():
try:
d = default_values
for pkey in parent_item.tree_key():
d = d[pkey]
return d[key]
except (KeyError, TypeError):
pass
# root access
try:
return default_values[key]
except KeyError:
pass
return None
def load_settings(self, settings, params_dict=None, default_values=None):
root_item = self.invisibleRootItem()
default_values = default_values or {}
params_dict = params_dict or {}
self._load_settings(settings, root_item, params_dict, default_values)
# setup link
prop_map = self.property_map()
for key, item in prop_map.items():
if "link" not in item.params:
continue
item = prop_map[key]
item.setup_link(prop_map)
@staticmethod
def create_category(key, label=None, header_flag=False, params=None):
# type: (string_types, string_types) -> CategoryItem
return CategoryItem(key, label or key, header_flag, params)
@staticmethod
def create_property(key, value_item, value_type, params=None, label_name=None):
params = params or {}
return PropertyItem(
key,
label_name or params.get("label", key),
value_item,
value_type,
params
)
def add_category(self, parent_item, *args, **kwargs):
value_item = QStandardItem()
left_item = self.create_category(*args, **kwargs)
parent_item.appendRow([left_item, value_item])
return left_item
def add_property(self, parent_item, key, value=None, default=None, params=None, label_name=None):
parent_item = parent_item or self.invisibleRootItem()
params = params or {}
value = value if value is not None else params.get("value")
default = default if default is not None else params.get("default")
label_name = label_name or params.get("label") or key
# value type
value_type = params.get("value_type")
if isinstance(value_type, string_types):
value_type = find_value_type(value_type, params)
value_item = ValueItem(value, default, value_type)
left_item = self.create_property(key, value_item, value_type, params, label_name)
if params.get("description"):
html = self._html(params.get("description").strip(), label_name, "###")
left_item.setToolTip(html)
parent_item.appendRow([left_item, value_item])
left_item.check_enable()
return left_item
def rowItem(self, index):
# type: (QModelIndex) -> PropertyItem
index = self.index(index.row(), 0, index.parent()) if index.column() != 0 else index
item = self.itemFromIndex(index) # type: PropertyItem
return item
def _property_item(self, index):
# type: (QModelIndex) -> PropertyItem or None
if not index.isValid():
return None
item = self.itemFromIndex(self.index(index.row(), 0, index.parent()))
if item.type() == PropertyItemType:
return item
return None
def get(self, keys, root_index=QModelIndex()):
if isinstance(keys, string_types):
keys = keys.split(".")
parent = self.itemFromIndex(root_index) if root_index.isValid() else self.invisibleRootItem()
for key in keys:
for row in range(parent.rowCount()):
item = parent.child(row) # type: PropertyItem
if item.key == key:
parent = item
break
else:
return None
return parent
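# Note: dotted strings and key lists are interchangeable here, e.g. (with
# hypothetical keys) model.get('general.project') == model.get(['general', 'project']).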
def set_values(self, values, root=None):
root = root or self.invisibleRootItem()
values = values or {}
for property_item in self.properties(root.index()):
value = self._get_default_value(property_item.parent(), property_item.key, values)
property_item.set_value(value)
def properties(self, root_index=None):
# type: () -> Iterator[PropertyItem]
root_index = root_index or QModelIndex()
for index in self.model_iter(root_index, False):
item = self.itemFromIndex(index)
if item and item.type() == PropertyItemType:
|
tobi2006/nomosdb
|
main/tests/test_views.py
|
Python
|
gpl-3.0
| 145,259
| 0.000296
|
from django.test import TestCase, RequestFactory
from main.models import *
from main.views import *
from bs4 import BeautifulSoup
from .base import *
import datetime
from feedback.models import IndividualFeedback
class StatusCheckTest(TestCase):
"""Testing the decorator test functions"""
def test_user_teacher_test_works(self):
elmar = create_teacher()
self.assertTrue(is_staff(elmar.user))
self.assertTrue(is_teacher(elmar.user))
self.assertFalse(is_admin(elmar.user))
self.assertFalse(is_student(elmar.user))
def test_staff_admin_status_is_properly_undertood_at_login(self):
admin = create_admin()
self.assertTrue(is_staff(admin.user))
self.assertFalse(is_teacher(admin.user))
self.assertTrue(is_admin(admin.user))
self.assertFalse(is_student(admin.user))
def test_student_is_student_and_neither_admin_nor_teacher(self):
bugs_user = User.objects.create_user(
username='bb42', password='ilovecarrots')
bugs = Student.objects.create(
student_id='bb42',
last_name='Bunny',
first_name='Bugs',
user=bugs_user
)
self.assertTrue(is_student(bugs_user))
self.assertFalse(is_staff(bugs_user))
self.assertFalse(is_admin(bugs_user))
self.assertFalse(is_teacher(bugs_user))
class HomePageTest(TeacherUnitTest):
"""Simple tests for the home page"""
def test_home_page_renders_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def test_home_page_title_contains_uni_name(self):
response = self.client.get('/')
self.assertContains(response, 'Acme University')
class HomePageForStudentTest(StudentUnitTest):
"""Student homepage is shown"""
def test_student_home_shows_student_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'student_home.html')
def test_student_sees_links_to_all_marksheets(self):
student = self.user.student
module1 = create_module()
performance1 = Performance.objects.create(
student=student, module=module1)
assessment1 = Assessment.objects.create(
module=module1,
value=50,
title='Essay',
available=True,
resit_available=True
)
assessment2 = Assessment.objects.create(
module=module1,
value=50,
title='Exam',
available=True
)
assessment_result_1 = AssessmentResult.objects.create(
assessment=assessment1,
mark=30,
resit_mark=40,
)
feedback_1_1 = IndividualFeedback.objects.create(
assessment_result=assessment_result_1,
attempt='first',
completed=True
)
feedback_1_2 = IndividualFeedback.objects.create(
assessment_result=assessment_result_1,
attempt='resit',
completed=True
)
performance1.assessment_results.add(assessment_result_1)
link1 = (
'<a href="/export_feedback/' +
module1.code +
'/' +
str(module1.year) +
'/' +
assessment1.slug +
'/' +
student.student_id +
'/'
)
link1_1 = link1 + 'first/'
link1_2 = link1 + 'resit/'
assessment_result_2 = AssessmentResult.objects.create(
assessment=assessment2,
mark=30,
resit_mark=40,
)
feedback_2_1 = IndividualFeedback.objects.create(
assessment_result=assessment_result_2,
attempt='first',
completed=True
)
performance1.assessment_results.add(assessment_result_2)
link2_1 = (
'<a href="/export_feedback/' +
module1.code +
'/' +
str(module1.year) +
'/' +
assessment2.slug +
'/' +
student.student_id +
'/first/'
)
module2 = Module.objects.create(
title="Introduction to Squaredance",
year=1901,
code='i2sq42'
)
student.modules.add(module2)
performance2 = Performance.objects.create(
student=student, module=module2)
assessment3 = Assessment.objects.create(
module=module2,
value=50,
title='Essay',
available=False,
resit_available=False
)
assessment_result_3 = AssessmentResult.objects.create(
assessment=assessment3,
mark=30,
resit_mark=40,
)
feedback_3_1 = IndividualFeedback.objects.create(
assessment_result=assessment_result_3,
attempt='first',
completed=True
)
feedback_3_2 = IndividualFeedback.objects.create(
assessment_result=assessment_result_3,
attempt='resit',
completed=True
)
performance2.assessment_results.add(assessment_result_3)
link3 = (
'<a href="/export_feedback/' +
module2.code +
'/' +
str(module2.year) +
'/' +
assessment3.slug +
'/' +
student.student_id
)
link3_1 = link3 + '/first/'
link3_2 = link3 + '/resit/'
assessment4 = Assessment.objects.create(
module=module2,
value=50,
title='Exam',
available=False
)
assessment_result_4 = AssessmentResult.objects.create(
assessment=assessment4,
mark=30,
resit_mark=40,
)
feedback_4_1 = IndividualFeedback.objects.create(
assessment_result=assessment_result_4,
attempt='first',
completed=True
)
performance2.assessment_results.add(assessment_result_4)
link4_1 = (
'<a href="/export_feedback/' +
module2.code +
'/' +
str(module2.year) +
'/' +
assessment2.slug +
'/' +
student.student_id +
'/first/'
)
response = self.client.get('/')
self.assertContains(response, link1_1)
self.assertContains(response, link1_2)
self.assertContains(response, link2_1)
self.assertNotContains(response, link3_1)
self.assertNotContains(response, link3_2)
self.assertNotContains(response, link4_1)
class AdminDashboardTest(AdminUnitTest):
"""Checks the Admin Dashboard"""
def test_admin_page_uses_right_template(self):
response = self.client.get('/admin_dashboard/')
self.assertNotContains(response, 'Main Settings')
self.user.staff.main_admin = True
self.user.staff.save()
response = self.client.get('/admin_dashboard/')
self.assertContains(response, 'Main Settings')
def test_admin_page_shows_all_subjects_and_years_for_main_admin(self):
self.user.staff.main_admin = True
self.user.staff.save()
subject_area_1 = SubjectArea.objects.create(name='Cartoon Studies')
subject_area_2 = SubjectArea.objects.create(name='Evil Plotting')
course_1 = Course.objects.create(
title='BA in Cartoon Studies',
short_title='Cartoon Studies',
)
course_1.subject_areas.add(subject_area_1)
course_2 = Course.objects.create(
title='BA in Evil Plotting',
short_title='Evil Plotting',
)
course_2.subject_areas.add(subject_area_2)
course_3 = Course.objects.create(
title='BA in Cartoon Studies with Evil Plotting',
short_title='Cartoon Studies / Evil Plotting',
)
course_3.subject_areas.add(subject_area_1)
course_3.subject_areas.add(subject_area_2)
stuff = set_up_stuff()
student_1 = stuff[1]
student_1.course = course_1
|
kernevil/samba
|
wintest/test-s3.py
|
Python
|
gpl-3.0
| 10,296
| 0.003011
|
#!/usr/bin/env python3
'''automated testing of Samba3 against windows'''
import wintest
def set_libpath(t):
t.putenv("LD_LIBRARY_PATH", "${PREFIX}/lib")
def set_krb5_conf(t):
t.run_cmd("mkdir -p ${PREFIX}/etc")
t.write_file("${PREFIX}/etc/krb5.conf",
'''[libdefaults]
dns_lookup_realm = false
dns_lookup_kdc = true''')
t.putenv("KRB5_CONFIG", '${PREFIX}/etc/krb5.conf')
def build_s3(t):
'''build samba3'''
t.info('Building s3')
t.chdir('${SOURCETREE}/source3')
t.putenv('CC', 'ccache gcc')
t.run_cmd("./autogen.sh")
t.run_cmd("./configure -C --prefix=${PREFIX} --enable-developer")
t.run_cmd('make basics')
t.run_cmd('make -j4')
t.run_cmd('rm -rf ${PREFIX}')
t.run_cmd('make install')
def start_s3(t):
t.info('Starting Samba3')
t.chdir("${PREFIX}")
t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
t.run_cmd("rm -f var/locks/*.pid")
t.run_cmd(['sbin/nmbd', "-D"])
t.run_cmd(['sbin/winbindd', "-D"])
t.run_cmd(['sbin/smbd', "-D"])
t.port_wait("${INTERFACE_IP}", 139)
def test_wbinfo(t):
t.info('Testing wbinfo')
t.chdir('${PREFIX}')
t.cmd_contains("bin/wbinfo --version", ["Version 4."])
t.cmd_contains("bin/wbinfo -p", ["Ping to winbindd succeeded"])
t.retry_cmd("bin/wbinfo --online-status",
["BUILTIN : online",
"${HOSTNAME} : online",
"${WIN_DOMAIN} : online"],
casefold=True)
t.cmd_contains("bin/wbinfo -u",
["${WIN_DOMAIN}/administrator",
"${WIN_DOMAIN}/krbtgt"],
casefold=True)
t.cmd_contains("bin/wbinfo -g",
["${WIN_DOMAIN}/domain users",
"${WIN_DOMAIN}/domain guests",
"${WIN_DOMAIN}/domain admins"],
casefold=True)
t.cmd_contains("bin/wbinfo --name-to-sid administrator",
"S-1-5-.*-500 SID_USER .1",
regex=True)
t.cmd_contains("bin/wbinfo --name-to-sid 'domain users'",
"S-1-5-.*-513 SID_DOM_GROUP .2",
regex=True)
t.retry_cmd("bin/wbinfo --authenticate=${WIN_DOMAIN}/administrator%${WIN_PASS}",
["plaintext password authentication succeeded",
"challenge/response password authentication succeeded"])
t.retry_cmd("bin/wbinfo --krb5auth=${WIN_DOMAIN}/administrator%${WIN_PASS}",
["succeeded"])
def test_smbclient(t):
t.info('Testing smbclient')
smbclient = t.getvar("smbclient")
t.chdir('${PREFIX}')
t.cmd_contains("%s --version" % (smbclient), ["Version 4."])
t.cmd_contains('%s -L ${INTERFACE_IP} -U%%' % (smbclient), ["Domain=[${WIN_DOMAIN}]", "test", "IPC$", "Samba 4."],
casefold=True)
child = t.pexpect_spawn('%s //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%%${PASSWORD2}' % (smbclient))
child.expect("smb:")
child.sendline("dir")
child.expect("blocks available")
child.sendline("mkdir testdir")
child.expect("smb:")
child.sendline("cd testdir")
child.expect('testdir')
child.sendline("cd ..")
child.sendline("rmdir testdir")
child = t.pexpect_spawn('%s //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%%${PASSWORD2} -k' % (smbclient))
child.expect("smb:")
child.sendline("dir")
child.expect("blocks available")
child.sendline("mkdir testdir")
child.expect("smb:")
child.sendline("cd testdir")
child.expect('testdir')
child.sendline("cd ..")
child.sendline("rmdir testdir")
def create_shares(t):
t.info("Adding test shares")
t.chdir('${PREFIX}')
t.write_file("etc/smb.conf", '''
[test]
path = ${PREFIX}/test
read only = no
''',
mode='a')
t.run_cmd("mkdir -p test")
def prep_join_as_member(t, vm):
'''prepare to join a windows domain as a member server'''
t.setwinvars(vm)
t.info("Starting VMs for joining ${WIN_VM} as a member using net ads join")
t.chdir('${PREFIX}')
t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
t.vm_poweroff("${WIN_VM}", checkfail=False)
t.vm_restore("${WIN_VM}", "${WIN_SNAPSHOT}")
child = t.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}", set_time=True)
t.get_ipconfig(child)
t.del_files(["var", "private"])
t.write_file("etc/smb.conf", '''
[global]
netbios name = ${HOSTNAME}
log level = ${DEBUGLEVEL}
realm = ${WIN_REALM}
workgroup = ${WIN_DOMAIN}
security = ADS
bind interfaces only = yes
interfaces = ${INTERFACE}
winbind separator = /
idmap uid = 1000000-2000000
idmap gid = 1000000-2000000
winbind enum users = yes
winbind enum groups = yes
max protocol = SMB2
map hidden = no
map system = no
ea support = yes
panic action = xterm -e gdb --pid %d
''')
def join_as_member(t, vm):
'''join a windows domain as a member server'''
t.setwinvars(vm)
t.info("Joining ${WIN_VM} as a member using net ads join")
t.port_wait("${WIN_IP}", 389)
t.retry_cmd("host -t SRV _ldap._tcp.${WIN_REALM} ${WIN_IP}", ['has SRV record'])
t.cmd_contains("bin/net ads join -Uadministrator%${WIN_PASS}", ["Joined"])
t.cmd_contains("bin/net ads testjoin", ["Join is OK"])
t.cmd_contains("bin/net ads dns register ${HOSTNAME}.${WIN_REALM} -P", ["Successfully registered hostname with DNS"])
t.cmd_contains("host -t A ${HOSTNAME}.${WIN_REALM}",
['${HOSTNAME}.${WIN_REALM} has address'])
def create_root_account(t, vm):
t.setwinvars(vm)
t.info("Creating 'root' account for testing Samba3 member server")
t.chdir('${PREFIX}')
t.run_cmd('bin/net ads user add root -Uadministrator%${WIN_PASS}')
child = t.pexpect_spawn('bin/net ads password root -Uadministrator%${WIN_PASS}')
child.expect("Enter new password for root")
child.sendline("${PASSWORD2}")
child.expect("Password change for ")
child.expect(" completed")
child = t.pexpect_spawn('bin/net rpc shell -S ${WIN_HOSTNAME}.${WIN_REALM} -Uadministrator%${WIN_PASS}')
child.expect("net rpc>")
child.sendline("user edit disabled root no")
child.expect("Set root's disabled flag")
def test_join_as_member(t, vm):
'''test the domain join'''
t.setwinvars(vm)
t.info('Testing join as member')
t.chdir('${PREFIX}')
test_wbinfo(t)
test_smbclient(t)
def test_s3(t):
'''basic s3 testing'''
t.setvar("SAMBA_VERSION", "Version 4")
t.setvar("smbclient", "bin/smbclient")
t.check_prerequesites()
set_libpath(t)
if not t.skip("configure_bind"):
t.configure_bind()
if not t.skip("stop_bind"):
t.stop_bind()
if not t.skip("stop_vms"):
t.stop_vms()
if not t.skip("build"):
build_s3(t)
set_krb5_conf(t)
if not t.skip("configure_bind2"):
t.configure_bind()
if not t.skip("start_bind"):
t.start_bind()
dc_started = False
if t.have_var('W2K8R2A_VM') and not t.skip("join_w2k8r2"):
t.start_winvm('W2K8R2A')
dc_started = True
prep_join_as_member(t, "W2K8R2A")
t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
join_as_member(t, "W2K8R2A")
create_shares(t)
start_s3(t)
create_root_account(t, "W2K8R2A")
test_join_as_member(t, "W2K8R2A")
if t.have_var('WINDOWS7_VM') and t.have_var('W2K8R2A_VM') and not t.skip("join_windows7_2008r2"):
if not dc_started:
t.start_winvm('W2K8R2A')
t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
dc_started = True
else:
t.setwinvars('W2K8R2A')
realm = t.getvar("WIN_REALM")
dom_username = t.getvar("WIN_USER")
dom_password = t.getvar("WIN_PASS")
dom_realm = t.getvar("WIN_REALM")
t.start_winvm('WINDOWS7')
t.test_remote_smbclient("WINDOWS7")
t.run_winjoin('WINDOWS7', realm, username=dom_username, password=dom_password)
t.test_re
|
nedaszilinskas/Odoo-CMS-Variant-Pictures
|
website_sale_variant_pictures/__openerp__.py
|
Python
|
mit
| 783
| 0
|
# -*- coding: utf-8 -*-
# © 2015 Nedas Žilinskas <nedas.zilinskas@gmail.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Variant Pictures",
"category": "Website",
"summary":
|
"Shows picture of the product variant instead of plain color",
"version": "8.0.1.0",
"description": """
Variant Pictures
======================================
Shows picture of the product variant instead of plain color
""",
"author": "Nedas Žilinskas <nedas.zilinskas@gmail.com>",
"website": "http://nedaszilinskas.com",
"depends": ["website_sale"],
"data": [
"views/assets.xml",
"views/templates.xml"
],
"images": [
"static/description/main_screenshot.png",
],
"installable": True
}
|
kevinkahn/softconsole
|
hubs/ha/domains/__oldthermostat.py
|
Python
|
apache-2.0
| 3,386
| 0.025399
|
from hubs.ha import haremote as ha
from hubs.ha.hasshub import HAnode, RegisterDomain
from controlevents import CEvent, PostEvent, ConsoleEvent, PostIfInterested
from utils import timers
import functools
# noinspection PyTypeChecker
class Thermostat(HAnode):  # deprecated version
def __init__(self, HAitem, d):
super(Thermostat, self).__init__(HAitem, **d)
self.Hub.RegisterEntity('climate', self.entity_id, self)
self.timerseq = 0
# noinspection PyBroadException
try:
self.temperature = self.attributes['temperature']
self.curtemp = self.attributes['current_temperature']
self.target_low = self.attributes['target_temp_low']
self.target_high = self.attributes['target_temp_high']
self.mode = self.attributes['operation_mode']
self.fan = self.attributes['fan_mode']
self.fanstates = self.attributes['fan_list']
self.modelist = self.attributes['operation_list']
except:
pass
# noinspection PyUnusedLocal
def ErrorFakeChange(self, param=None):
PostEvent(ConsoleEvent(CEvent.HubNodeChange, hub=self.Hub.name, node=self.entity_id, value=self.internalstate))
def Update(self, **ns):
if 'attributes' in ns: self.attributes = ns['attributes']
self.temperature = self.attributes['temperature']
self.curtemp = self.attributes['current_temperature']
self.target_low = self.attributes['target_temp_low']
self.target_high = self.attributes['target_temp_high']
self.mode = self.attributes['operation_mode']
self.fan = self.attributes['fan_mode']
PostIfInterested(self.Hub, self.entity_id, self.internalstate)
# noinspection DuplicatedCode
def PushSetpoints(self, t_low, t_high):
ha.call_service_async(self.Hub.api, 'climate', 'set_temperature',
{'entity_id': '{}'.format(self.entity_id), 'target_temp_high': str(t_high),
'target_temp_low': str(t_low)})
self.timerseq += 1
_ = timers.OnceTimer(5, start=True, name='fakepushsetpoint-{}'.format(self.timerseq),
proc=self.ErrorFakeChange)
def GetThermInfo(self):
if self.target_low is not None:
return self.curtemp, self.target_low, self.target_high, self.HVAC_state, self.mode, self.fan
else:
return self.curtemp, self.temperature, self.temperature, self.HVAC_state, self.mode, self.fan
# noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
def _HVACstatechange(self, storeitem, old, new, param, chgsource):
self.HVAC_state = new
PostIfInterested(self.Hub, self.entity_id, new)
def _connectsensors(self, HVACsensor):
self.HVAC_state = HVACsensor.state
# noinspection PyProtectedMember
HVACsensor.SetSensorAlert(functools.partial(self._HVACstatechange))
def GetModeInfo(self):
return self.modelist, self.fanstates
def PushFanState(self, mode):
ha.call_service_async(self.Hub.api, 'climate', 'set_fan_mode',
{'entity_id': '{}'.format(self.entity_id), 'fan_mode': mode})
self.timerseq += 1
_ = timers.OnceTimer(5, start=True, name='fakepushfanstate-{}'.format(self.timerseq),
proc=self.ErrorFakeChange)
def PushMode(self, mode):
# noinspection PyBroadException
ha.call_service_async(self.Hub.api, 'climate', 'set_operation_mode',
{'entity_id': '{}'.format(self.entity_id), 'operation_mode': mode})
self.timerseq += 1
_ = timers.OnceTimer(5, start=True, name='fakepushmode -{}'.format(self.timerseq),
proc=self.ErrorFakeChange)
RegisterDomain('climate', Thermostat)
|
mpuig/faker
|
faker/providers/company.py
|
Python
|
isc
| 624
| 0
|
# -*- coding: utf-8 -*-
from baseprovider import BaseProvider
from utils import clean
class Company(BaseProvider):
"""Basic definition of a Company"""
def __init__(self, locales):
super(Company, self).__init__(locales)
def new(self):
self.name = self.parse('company.name')
self.suffix = self.fetch('company.suffix')
self.website = "http://www.%s.%s" % (
clean(self.name),
self.fetch('internet.domain_suffix')
)
def __str__(self):
return "%s %s\n%s" % (
self.name,
self.suffix,
self.website)
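# Usage sketch (assumes locale data for the 'company.*' and 'internet.*'
# keys is available; values are illustrative):
#     company = Company(['en'])
#     company.new()
#     print(company)  # e.g. "Acme Inc\nhttp://www.acme.com"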
|
races1986/SafeLanguage
|
CEM/tests/test_textlib.py
|
Python
|
epl-1.0
| 4,110
| 0.004623
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for pywikibot/textlib.py"""
__version__ = '$Id: cadd8620e9dba64e3c2f3082e6f03f4551fd7641 $'
import unittest
from tests.test_pywiki import PyWikiTestCase
import wikipedia as pywikibot
import pywikibot.textlib as textlib
import catlib
class PyWikiTextLibTestCase(PyWikiTestCase):
end_of_line = '\r\n'
blank_line = '\r\n\r\n'
foo_page_start = 'Foo' + blank_line
iwresult1 = '[[de:German]]\r\n[[fr:French]]\r\n'
catresult1 = '[[Category:Cat1]]\r\n[[Category:Cat2]]\r\n'
result1 = foo_page_start + \
catresult1.strip() + \
blank_line + \
iwresult1.strip()
def setUp(self):
self.site = pywikibot.getSite('en', 'wikipedia')
self.data = [catlib.Category(self.site, 'Category:Cat1'),
catlib.Category(self.site, 'Category:Cat2')]
self.site_de = pywikibot.getSite('de', 'wikipedia')
self.site_fr = pywikibot.getSite('fr', 'wikipedia')
def test_interwikiFormat(self):
interwikis = {
'de':pywikibot.Page(self.site_de, 'German'),
'fr':pywikibot.Page(self.site_fr, 'French')
}
self.assertEqual(self.iwresult1,
textlib.interwikiFormat(interwikis, self.site))
def assertRoundtripInterwiki(self, text, count):
old_interwikis = textlib.getLanguageLinks(text, self.site)
new_text = textlib.replaceLanguageLinks(text, old_interwikis, site = self.site)
self.assertEqual(len(old_interwikis), count)
self.assertEqual(text, new_text)
def assertFailedRoundtripInterwiki(self, text):
old_interwikis = textlib.getLanguageLinks(text, self.site)
new_text = textlib.replaceLanguageLinks(text, old_interwikis, site = self.site)
self.assertNotEqual(text, new_text)
def test_replaceLanguageLinks(self):
# This case demonstrates that eol isn't stripped
self.assertFailedRoundtripInterwiki(self.result1)
self.assertRoundtripInterwiki(self.result1 + self.end_of_line, 2)
def test_replaceLanguageLinks1(self):
# This case demonstrates that eol isn't stripped
result = self.foo_page_start + self.iwresult1
self.assertFailedRoundtripInterwiki(self.iwresult1)
self.assertRoundtripInterwiki(result, 2)
def test_categoryFormat_raw(self):
self.assertEqual(self.catresult1,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_categoryFormat_bare(self):
self.assertEqual(self.catresult1,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_categoryFormat_Category(self):
self.assertEqual(self.catresult1,
textlib.categoryFormat(self.data, self.site))
def test_categoryFormat_Page(self):
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult1, textlib.categoryFormat(data,
self.site))
def assertRoundtripCategory(self, text, catcount):
cats = textlib.getCategoryLinks(text)
self.assertEqual(len(cats), catcount)
self.assertEqual(text, textlib.replaceCategoryLinks(text,
cats,
site = self.site))
def test_replaceCategoryLinks(self):
self.assertRoundtripCategory(self.result1,2)
def test_replaceCategoryLinks1(self):
result = 'Blah\r\n\r\n[[Category:Cat1]]\r\n[[Category:Cat2]]'
self.assertRoundtripCategory(result,2)
def test_replaceCategoryLinks2(self):
result = 'Blah\r\n\r\n[[Category:Cat1]]\r\n[[Category:Cat2]]\r\n\r\n[[fr:Test]]'
self.assertRoundtripCategory(result,2)
if __name__ == "__main__":
unittest.main()
|
JoostHuizinga/ea-plotting-scripts
|
configure_plots.py
|
Python
|
mit
| 25,413
| 0.002873
|
import os
import numpy as np
from typing import List, Dict, Union, Optional
import subprocess as sp
import matplotlib
import matplotlib.transforms
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import matplotlib.transforms as tf
import matplotlib.cm as cm
from matplotlib.axes import Axes
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from createPlotUtils import debug_print, get_renderer
from dataclasses import dataclass
import global_options as go
import parse_file as pf
@dataclass
class PlotConfiguration:
plot_id: int
fig: Figure
gridspec_dict: Dict[str, Union[gs.GridSpec, gs.GridSpecFromSubplotSpec]]
subplot_dict: Dict[int, Axes]
extra_artists: List[Artist]
legend_handles: List[Artist]
def latex_available():
with open(os.devnull, "w") as f:
try:
status = sp.call(["latex", "--version"], stdout=f, stderr=f)
except OSError:
status = 1
if status:
return False
else:
return True
def init_params():
# Setup the matplotlib params
preamble = [r'\usepackage[T1]{fontenc}',
r'\usepackage{amsmath}',
r'\usepackage{txfonts}',
r'\usepackage{textcomp}']
matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
matplotlib.rc('text.latex', preamble="\n".join(preamble))
params = {'backend': 'pdf',
'axes.labelsize': go.get_int("font_size"),
'font.size': go.get_int("font_size"),
'legend.fontsize': go.get_int("legend_font_size"),
'xtick.labelsize': go.get_int("tick_font_size"),
'ytick.labelsize': go.get_int("tick_font_size"),
'text.usetex': latex_available(),
'figure.dpi': 100,
'savefig.dpi': 100}
matplotlib.rcParams.update(params)
def init_subplot(plot_config: PlotConfiguration, subplot_id, subplot_spec):
fig = plt.figure(plot_config.plot_id)
ax = fig.add_subplot(subplot_spec, label=str(subplot_id))
ax.set_ylim(go.get_float("y_axis_min", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None),
go.get_float("y_axis_max", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None))
ax.set_xlim(go.get_float("x_axis_min", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None),
go.get_float("x_axis_max", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None))
ax.set_ylabel(go.get_str("y_labels", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
ax.set_xlabel(go.get_str("x_labels", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
if go.get_bool("title"):
ax.set_title(go.get_str_list(
"titles",
plot_config.plot_id,
when_not_exist=go.RETURN_FIRST
)[subplot_id], fontsize=go.get_int("title_size"))
if go.get_exists("x_ticks"):
ax.set_xticks(go.get_float_list("x_ticks", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
if go.get_exists("y_ticks"):
ax.set_yticks(go.get_float_list("y_ticks", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
# ax.set_aspect(1.0)
# ax.apply_aspect()
plot_config.subplot_dict[subplot_id] = ax
return ax
def setup_figure(plot_id: int, gridspec: gs.GridSpec = gs.GridSpec(1, 1)) -> PlotConfiguration:
"""
Sets up a figure based on plot id.
By default, we assume there will only be one sub-figure, which is the main plot.
:param plot_id: The plot id.
:param gridspec: Gridspec layout for if the plot should contain multiple sub-figures.
:return: Returns the plot configuration for this figure.
"""
fig = plt.figure(plot_id, figsize=go.get_float_list("fig_size"))
plot_config = PlotConfiguration(
plot_id=plot_id,
fig=fig,
gridspec_dict={"main": gridspec},
subplot_dict={},
extra_artists=[],
legend_handles=[],
)
return plot_config
def get_plot_ids() -> List[int]:
"""
Currently we assume that the list of file-names holds the ground-truth on the
number of plots we want to create.
:return: A list of plot-ids.
"""
return list(range(len(go.get_indices("file_names"))))
def setup_plot(plot_config: PlotConfiguration, gridspec: Optional[gs.GridSpec] = None):
if gridspec is None:
gridspec = plot_config.gridspec_dict["main"]
init_subplot(plot_config, 0, gridspec[0])
def setup_plots(plot_ids: List[int] = None, gridspec=gs.GridSpec(1, 1)):
"""
A setup for the different plots
(both the main plot and the small bar at the bottom).
"""
init_params()
if plot_ids is None:
plot_ids = [0]
plot_configs = []
for plot_id in plot_ids:
plot_configs.append(setup_figure(plot_id, gridspec))
# We assume that the first entry in the gridspec will contain the "main" plot,
# so we initialize it with the parameters we read from the global options.
init_subplot(plot_configs[-1], 0, gridspec[0])
# axis = [init_subplot(plot_id, grid_spec[0]) for i, plot_id in enumerate(plot_ids)]
return plot_configs
class ParseColumns:
def __init__(self, columns: List[int]):
self.data = {col: [] for col in columns}
self.generations: List[int] = []
def __call__(self, split_line: List[str], generation: int):
self.generations.append(generation)
for col in self.data:
self.data[col].append(float(split_line[col]))
def plot_annotations(ax):
for index in go.get_indices("line_from_file"):
line_file = go.get_str("line_from_file", index)
x_column = go.get_int("line_from_file_x_column", index, when_not_exist=go.RETURN_FIRST)
y_column = go.get_int("line_from_file_y_column", index, when_not_exist=go.RETURN_FIRST)
color = go.get_str("line_from_file_color", index, when_not_exist=go.RETURN_FIRST)
linestyle = go.get_str("line_from_file_linestyle", index, when_not_exist=go.RETURN_FIRST)
linewidth = go.get_float("line_from_file_linewidth", index, when_not_exist=go.RETURN_FIRST)
column_parser = ParseColumns([x_column, y_column])
pf.read_file(line_file, column_parser)
ax.plot(column_parser.data[x_column],
column_parser.data[y_column],
color=color,
linestyle=linestyle,
linewidth=linewidth)
def plot_background(ax):
"""
Draw a gradient image based on a provided function.
:param ax: Axes The axes to draw on.
"""
y_min = go.get_float("y_axis_min")
y_max = go.get_float("y_axis_max")
x_max = go.get_float("x_axis_max")
x_min = go.get_float("x_axis_min")
background_func = go.get_str("background")
cmap = go.get_str("background_colormap")
cmap_min = go.get_float("background_colormap_min")
cmap_max = go.get_float("background_colormap_max")
x_res = round(ax.bbox.width)
y_res = round(ax.bbox.height)
image = np.zeros((y_res, x_res), dtype=np.float64)
for x in range(x_res):
for y in range(y_res):
x_val = (x * (x_max - x_min) / (x_res - 1))
y_val = (y * (y_max - y_min) / (y_res - 1))
val = eval(background_func, {}, {"x_val": x_val, "y_val": y_val})
image[y, x] = cmap_min + (cmap_max - cmap_min) * val
interpolation = 'nearest'
im = ax.imshow(image, extent=(x_min, x_max, y_min, y_max),
interpolation=interpolation,
vmin=0, vmax=1, aspect="equal", origin="lower",
cmap=plt.get_cmap(cmap))
return im
def create_color_bar(plot_config):
cmap = go.get_str("color_bar_colormap")
current_box = tf.Bbox.union([ax.get_position() for ax in plot_config.fig.axes])
cax = plot_config.fig.add_axes([
current_box.xmax + go.get_float("color_bar_margin"),
current_box.ymin,
go.get_float("color_bar_width"),
current_box.height
])
cbar = plot_config.fig.colorbar(cm.ScalarMappable(norm=None, cmap=plt.get_cmap(cmap)), cax=cax)
cbar.set_label(
go.get_
|
joshua-cogliati-inl/raven
|
tests/framework/unit_tests/utils/testTreeStructure.py
|
Python
|
apache-2.0
| 8,503
| 0.029401
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the TreeStructure classes
It cannot be considered part of the active code but of the regression test system
"""
#For future compatibility with Python 3
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import os,sys
import numpy as np
frameworkDir = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,os.pardir,'framework'))
print('framework:',frameworkDir)
sys.path.append(frameworkDir)
from utils import TreeStructure as TS
results = {"pass":0,"fail":0}
#type comparison
def checkAnswer(comment,value,expected,tol=1e-10,updateResults=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if abs(value - expected) > tol:
print("checking answer",comment,value,"!=",expected)
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
def checkSame(comment,value,expected,updateResults=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if value != expected:
print("checking answer",comment,value,"!=",expected)
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
def checkArray(comment,check,expected,tol=1e-10):
"""
This method is aimed to compare two arrays of floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, check, list, the value to compare
@ In, expected, list, the expected value
@ In, tol, float, optional, the tolerance
@ Out, None
"""
same=True
if len(check) != len(expected):
same=False
else:
for i in range(len(check)):
same = same*checkAnswer(comment+'[%i]'%i,check[i],expected[i],tol,False)
if not same:
print("checking array",comment,"did not match!")
results['fail']+=1
return False
else:
results['pass']+=1
return True
def checkType(comment,value,expected,updateResults=True):
"""
This method compares the data type of two values
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if type(value) != type(expected):
print("checking type",comment,value,'|',type(value),"!=",expected,'|',type(expected))
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
##############
# Node Tests #
##############
# TODO not complete!
#test equivalency (eq, neq, hash)
## test all same are same
a = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
b = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
checkSame('Equivalency of nodes ==:',a==b,True)
checkSame('Equivalency of nodes !=:',a!=b,False)
## test different tag
b = TS.HierarchicalNode('diffTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent tag ==:',a==b,False)
checkSame('Inequivalent tag !=:',a!=b,True)
## test different attribute name
b = TS.HierarchicalNode('rightTag',valuesIn={'attrib3':1,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test different attribute value
b = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':3,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test different text value
b = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':3,'attrib2':'2'},text='diffText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test equivalent, only tags
a = TS.HierarchicalNode('rightTag')
b = TS.HierarchicalNode('rightTag')
checkSame('Equivalency only tag ==:',a==b,True)
checkSame('Equivalency only tag !=:',a!=b,False)
## test equivalent, only values
a = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':1,'attrib2':'2'})
b = TS.HierarchicalNode('rightTag',valuesIn={'attrib1':1,'attrib2':'2'})
checkSame('Equivalency only values ==:',a==b,True)
checkSame('Equivalency only values !=:',a!=b,False)
## test equivalent, only text
a = TS.HierarchicalNode('rightTag',text='sampleText')
b = TS.HierarchicalNode('rightTag',text='sampleText')
checkSame('Equivalency only text ==:',a==b,True)
checkSame('Equivalency only text !=:',a!=b,False)
##############
# Tree Tests #
##############
# TODO
##################
# Metadata Tests #
##################
# construction
static = TS.StaticMetadataTree('myStaticData')
dynamic = TS.DynamicMetadataTree('myDynamicData','timeParam')
# test "dynamic" attribute set correctly
checkSame('Static "dynamic" property correctly set:',static.getrootnode().get('dynamic'),'False')
checkSame('Dynamic "dynamic" property correctly set:',dynamic.getrootnode().get('dynamic'),'True')
# test message handler works (implicit test, no error means success)
static.raiseADebug('Debug message in Static successful!')
dynamic.raiseADebug('Debug message in Dynamic successful!')
results['pass']+=2
#test adding scalar entries (implicit test, no error means success)
static.addScalar('myTarget','myMetric',3.14159)
results['pass']+=1
dynamic.addScalar('myTarget','myMetric',3.14159,pivotVal=0.1) #pivot value as float
results['pass']+=1
dynamic.addScalar('myTarget','myMetric',299792358,pivotVal='0.2') #pivot value as string
results['pass']+=1
#test finding pivotNode (dynamic only)
a = TS.HierarchicalNode('timeParam',valuesIn={'value':0.2})
b = dynamic._findPivot(dynamic.getrootnode(),0.2)
checkSame('Finding pivot node:',b,a)
#test finding targetNode
## static
a = TS.HierarchicalNode('myTarget')
b = static._findTarget(static.getrootnode(),'myTarget')
checkSame('Finding target (static):',b,a)
## dynamic
a = TS.HierarchicalNode('myTarget')
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.2)
checkSame('Finding target (dynamic):',c,a)
#test values recorded
checkAnswer('Recorded data (static):',b.findBranch('myMetric').text,3.14159)
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.1)
checkAnswer('Recorded data (dynamic 1):',c.findBranch('myMetric').text,3.14159)
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.2)
checkAnswer('Recorded data (dynamic 2):',c.findBranch('myMetric').text,299792358)
print('{0}ed: {2}, {1}ed: {3}'.format(*(list(str(r) for r in results.keys())+list(results.values()))))
sys.exit(results["fail"])
"""
<TestInfo>
<name>framework.treeStructure</name>
<author>talbpaul</author>
<created>2016-11-01</created>
<classesTested>utils.TreeStructure</classesTested>
<description>
This test performs Unit Tests for the TreeStructure classes
It c
|
Panos512/invenio
|
modules/webjournal/lib/widgets/bfe_webjournal_widget_latestPhoto.py
|
Python
|
gpl-2.0
| 4,058
| 0.005914
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - display photos from given collections
"""
from invenio.bibformat_engine import BibFormatObject
from invenio.search_engine import perform_request_search
from invenio.config import CFG_CERN_SITE, CFG_SITE_URL, CFG_SITE_RECORD
def format_element(bfo, collections, max_photos="3", separator="<br/>"):
"""
Display the latest pictures from the given collection(s)
@param collections: comma-separated list of collections from which photos have to be fetched
@param max_photos: maximum number of photos to display
@param separator: separator between photos
"""
try:
int_max_photos = int(max_photos)
except:
int_max_photos = 0
try:
collections_list = [coll.strip() for coll in collections.split(',')]
except:
collections_list = []
out = get_widget_html(bfo.lang, int_max_photos,
collections_list, separator, bfo.lang)
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def get_widget_html(language, max_photos, collections, separator, ln):
"""
Returns the content of the widget
"""
latest_photo_ids = perform_request_search(c=collections,
rg=max_photos,
of='id')
images_urls = []
for recid in latest_photo_ids[:max_photos]:
try:
photo_record = BibFormatObject(recid)
except:
# todo: Exception, no photo in this selection
continue
if language == "fr":
try:
title = photo_record.fields('246_1a', escape=1)[0]
except KeyError:
try:
title = photo_record.fields('245__a', escape=1)[0]
except:
title = ""
else:
try:
title = photo_record.fields('245__a', escape=1)[0]
except KeyError:
# todo: exception, picture with no title
title = ""
if CFG_CERN_SITE and photo_record.fields('8567_'):
# Get from 8567_
dfs_images = photo_record.fields('8567_')
for image_block in dfs_images:
if image_block.get("y", '') == "Icon":
if image_block.get("u", '').startswith("http://"):
images_urls.append((recid, image_block["u"], title))
break # Just one image per record
else:
# Get from 8564_
images = photo_record.fields('8564_')
for image_block in images:
if image_block.get("x", '').lower() == "icon":
if image_block.get("q", '').startswith("http://"):
images_urls.append((recid, image_block["q"], title))
break # Just one image per record
# Build output
html_out = separator.join(['<a href="%s/%s/%i?ln=%s"><img class="phr" width="100" height="67" src="%s"/>%s</a>' % (CFG_SITE_URL, CFG_SITE_RECORD, recid, ln, photo_url, title) for (recid, photo_url, title) in images_urls])
return html_out
|
KorolevskyMax/TestFrameworkTemplate
|
pages/base_page.py
|
Python
|
mit
| 1,954
| 0.002559
|
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from webium import BasePage as WebiumBasePage, Find
class BasePage(WebiumBasePage):
url_path = None
a_tag = "//a[contains(.,'{link_text}')]"
login_link = Find(by=By.XPATH, value=a_tag.format(link_text='Sign in'))
logout_btn = Find(by=By.XPATH, value="//button[contains(.,'Sign out')]")
account_options_btn = Find(by=By.XPATH, value=a_tag.replace('.', '@aria-label').format(link_text='View profile and more'))
loader_xpath = "//div[@id='prestatus']"
def clear_send_keys(self, element_name, kwargs):
value = kwargs.get(element_name)
element = getattr(self, element_name)
element.clear()
element.send_keys(value)
def hover(self, element):
hov = ActionChains(self._driver).move_to_element(element)
hov.perform()
self.wait_for_loading()
self.wait_for_loader_disappear()
def get_login_status(self):
try:
self.account_options_btn.click()
return 'logged in' if self.logout_btn.is_displayed() else 'logged out'
except NoSuchElementException:
return 'logged out'
def wait_for_loading(self, seconds=180):
wait = WebDriverWait(self._driver, seconds)
wait.until(lambda x: self._driver.execute_script('return jQuery.active == 0') is True)
def replace_bad_elements(self, css_locator):
self._driver.execute_script("$('{}').remove()".format(css_locator))
def is_loader_displayed(self, *args):
return self._driver.find_element_by_xpath(self.loader_xpath).is_displayed()
def wait_for_loader_disappear(self):
WebDriverWait(self._driver, timeout=500).until_not(
self.is_loader_displayed, "Timeout waiting for loader disappear")
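# A hypothetical page object built on this base class, following the same
# webium Find pattern used above:
#     class LoginPage(BasePage):
#         url_path = '/login'
#         submit_btn = Find(by=By.XPATH, value="//button[@type='submit']")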
|
arpruss/plucker
|
parser/python/InvokePluckerBuildFromJava.py
|
Python
|
gpl-2.0
| 1,742
| 0.004592
|
#
# This file is only used in the Java (Jython) version
# It serves as an entry point.
#
import sys
import PyPlucker.Spider
from java.lang import Runtime
from java.util import Hashtable
import org.plkr.distiller.API
class InvokePluckerBuildFromJava (org.plkr.distiller.API.Invocation):
def __init__(self):
"@sig publi
|
c InvokePluckerBuildFromJava()"
pass
def create_dict_from_hashtable (self, ht):
dict = {}
e = ht.keys()
while e.hasMoreElements():
key = e.nextElement()
value = ht.get(key)
dict[str(key)] = str(value)
return dict
def invoke(self, args, os, inputstring, config, callback):
"@sig public int invoke(java.lang.String[] args, java.io.OutputStream os, java.lang.String inputString, java.util.Hashtable config, org.plkr.distiller.API.Callback status)"
varargs = ['plucker-build']
if args:
for arg in args:
varargs.append(str(arg))
if os:
outputstream = org.python.core.PyFile(os, "<stream>", "wb")
else:
outputstream = None
if config:
configdict = self.create_dict_from_hashtable(config)
else:
configdict = None
val = PyPlucker.Spider.realmain(varargs, outputstream, inputstring, configdict, callback)
return val
if __name__ == '__main__':
theRuntime = Runtime.getRuntime()
try:
val = InvokePluckerBuildFromJava().invoke(sys.argv[1:], None, None, None, None)
theRuntime.exit(val)
except:
import traceback
traceback.print_exc(None, sys.stderr)
theRuntime.exit(1)
|
nevermoreluo/privateoverseas
|
overseas/migrations/0014_auto_20160918_0010.py
|
Python
|
gpl-3.0
| 1,111
| 0.0027
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-17 16:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('overseas', '0013_auto_20160914_1706'),
]
operations = [
migrations.CreateModel(
name='CDN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cdn_name', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
],
),
        migrations.AlterField(
model_name='networkidentifiers',
name='service',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='overseas.Service'),
),
migrations.AddField(
model_name='networkidentifiers',
name='cdn',
field=models.ManyToManyField(blank=True, null=True, to='overseas.CDN'),
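            # (added note) null=True has no effect on ManyToManyField (the
            # relation is stored in a join table); blank=True alone controls
            # whether the field may be left empty in forms.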
),
]
|
thalesians/tsa
|
src/main/python/thalesians/tsa/simulation.py
|
Python
|
apache-2.0
| 4,012
| 0.008724
|
import datetime as dt
import numpy as np
import pandas as pd
import thalesians.tsa.checks as checks
import thalesians.tsa.numpyutils as npu
import thalesians.tsa.processes as proc
import thalesians.tsa.randomness as rnd
def xtimes(start, stop=None, step=None):
checks.check_not_none(start)
if step is None:
if isinstance(start, (dt.date, dt.time, dt.datetime)) or isinstance(stop, (dt.date, dt.time, dt.datetime)):
step = dt.timedelta(days=1)
elif isinstance(start, float) or isinstance(stop, float):
step = 1.
else:
step = 1
resultwrap = lambda x: x
if isinstance(start, dt.time):
start = dt.datetime.combine(dt.datetime(1,1,1,0,0,0), start)
resultwrap = lambda x: x.time()
if isinstance(stop, dt.time):
stop = dt.datetime.combine(dt.datetime(1,1,1,0,0,0), stop) if stop is not None else None
resultwrap = lambda x: x.time()
stepfunc = step if checks.is_callable(step) else lambda x: step
s = stepfunc(start)
checks.check(npu.sign(s) != 0, 'Step must be positive or negative, not zero')
if stop is None:
while True:
yield resultwrap(start)
start += s
s = stepfunc(start)
else:
while npu.sign(start - stop) == -npu.sign(s):
yield resultwrap(start)
start += s
s = stepfunc(start)
def times(start, stop=None, step=None):
return list(xtimes(start, stop, step))
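# Hypothetical usage sketch (added, not part of the original module): xtimes
# yields a lazy, possibly infinite sequence, while times materialises a
# finite one.
# >>> times(0, 10, 2)
# [0, 2, 4, 6, 8]
# >>> times(0., 2.)  # a float start defaults to step 1.
# [0.0, 1.0]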
class EulerMaruyama(object):
def __init__(self, process, initial_value=None, times=None, variates=None, time_unit=dt.timedelta(days=1), flatten=False):
checks.check_instance(process, proc.ItoProcess)
self.__process = process
self.__value = npu.to_ndim_2(initial_value, ndim_1_to_col=True, copy=True) if initial_value is not None else npu.col_of(process.process_dim, 0.)
self.__times = iter(times) if times is not None else xtimes(0., None, 1.)
        self.__variates = variates if variates is not None else rnd.multivariate_normals(ndim=process.noise_dim)
self._time = None
self._time_unit = time_unit
self.__flatten = flatten
def __next__(self):
if self._time is None:
self._time = next(self.__times)
else:
newtime = next(self.__times)
time_delta = newtime - self._time
if isinstance(time_delta, dt.timedelta):
time_delta = time_delta.total_seconds() / self._time_unit.total_seconds()
npu.col_of(self.__process.noise_dim, 0.)
variate_delta = np.sqrt(time_delta) * npu.to_ndim_2(next(self.__variates), ndim_1_to_col=True, copy=False)
drift = npu.to_ndim_2(self.__process.drift(self._time, self.__value), ndim_1_to_col=True, copy=False)
diffusion = npu.to_ndim_2(self.__process.diffusion(self._time, self.__value), ndim_1_to_col=True, copy=False)
self.__value += drift * time_delta + diffusion.dot(variate_delta)
self._time = newtime
v = np.copy(self.__value)
if self.__flatten: v = v.flatten()
return self._time, v
def __iter__(self):
return self
def run(sim, nstep=None, last_time=None):
checks.check_at_most_one_not_none(nstep, last_time)
ts, vs = [], []
if nstep is not None:
for _ in range(nstep):
try:
t, v = next(sim)
except StopIteration: break
ts.append(t)
vs.append(v.flatten())
elif last_time is not None:
while True:
try:
t, v = next(sim)
except StopIteration: break
ts.append(t)
vs.append(v.flatten())
if t >= last_time: break
else:
for t, v in sim:
ts.append(t)
vs.append(v.flatten())
return pd.DataFrame(data=vs, index=ts)
|
ChrisCuts/fnode
|
src/ezClasses/ezClasses.py
|
Python
|
gpl-2.0
| 755
| 0.025166
|
# -*- coding: utf-8 -*-
'''
Created on 05.09.2015
@author: derChris
'''
class ezDict(dict):
def __missing__(self, key):
self.update({key: ezDict()})
return self[key]
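    # (added note) __missing__ gives ezDict autovivification: reading a
    # missing key inserts and returns a fresh nested ezDict, so chained
    # writes like d['a']['b'] = 1 work without creating d['a'] first.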
def reduce(self):
        items = list(self.items())
for key, value in items:
if isinstance(value, ezDict):
value.reduce()
if not value:
self.pop(key)
return self
if __name__ == '__main__':
    x = ezDict()
x['heinz']['klaus'] = 'wolfgang'
x['heinz']['juergen'] = 'stefan'
x['stefanie']['ursula'] = {}
print(x)
print(x.reduce())
|
ryanvade/nanpy
|
setup.py
|
Python
|
mit
| 1,020
| 0.017647
|
#!/usr/bin/env python
from setuptools import setup, find_packages
classifiers = [
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: MIT License",
"
|
Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python"
|
,
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
]
setup(name="nanpy",
version="0.9.4",
description="Use your Arduino board with Python",
license="MIT",
author="Andrea Stagi",
author_email="stagi.andrea@gmail.com",
url="http://github.com/nanpy/nanpy",
packages = find_packages(),
keywords= "arduino library prototype",
install_requires=[
"pyserial",
],
classifiers=classifiers,
zip_safe = True)
|
tcheneau/linux-zigbee
|
test-serial/test_recv.py
|
Python
|
gpl-2.0
| 2,105
| 0.023278
|
#!/usr/bin/env python
# Linux IEEE 802.15.4 userspace tools
#
# Copyright (C) 2008, 2009 Siemens AG
#
# Written-by: Dmitry Eremin-Solenikov
# Written-by: Sergey Lapin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys,os,time
from termios import *
from test_DQ import *
if len(sys.argv) < 3:
print "Bad arguments."
print "Usage: %s tty channel" %(sys.argv[0])
sys.exit(2)
cn = DQ(sys.argv[1])
print 'Result of close ' + hex(cn.close())
print 'Result of open ' + hex(cn.open())
print 'Result of set_channel' +hex(cn.set_channel(int(sys.argv[2])))
print 'Result of set_state' +hex(cn.set_state(RX_MODE))
try:
while 1:
        print 'Result of recv_block' +hex(cn.recv_block())
except KeyboardInterrupt:
cn.close()
sys.exit(2)
for i in range(1, 12):
print 'Result of set_channel ' + hex(cn.set_channel(i))
time.sleep(1)
m = 0
res = 5
while res != 0 or m > 60:
res = cn.set_state(RX_MODE)
print "Got res %d" %(res)
m = m + 1
time.sleep(1)
if res == 5 or res == 8:
print "Unable to set RX mode :("
cn.close()
sys.exit(2)
print 'Result of ed ' + hex(cn.ed()) + ' ' + hex(ord(cn.data))
print 'Result of close ' + hex(cn.close())
sys.exit(2)
#state = 0
#try:
# f.write(cmd_open)
#except IOError:
# print "Error on write"
# sys.exit(2)
#
#resp = get_response(f)
#print "got response %d" % (resp);
#sys.exit(2)
#
#try:
# state = 0
# while 1:
# if state == 0:
# f.write(cmd_open)
# state = 1
# val = f.read(1)
#except KeyboardInterrupt:
# f.close()
#
|
soerendip42/rdkit
|
Contrib/mmpa/search_mmp_db.py
|
Python
|
bsd-3-clause
| 16,511
| 0.012234
|
# Copyright (c) 2013, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, July 2013
from __future__ import print_function
import sys
import os
import re
import sqlite3
import subprocess
from optparse import OptionParser
from indexing import cansmirk,heavy_atom_count
from rfrag import fragment_mol
def cmpd_not_in_db_mmp_query(in_smi,cmpd_id):
query_contexts = set()
cmpd_frags = fragment_mol(in_smi,cmpd_id)
for row in cmpd_frags:
row = row.rstrip()
row_fields = re.split(',',row)
if(row_fields[3].count(".") == 1):
a,b = row_fields[3].split(".")
query_contexts.add(a)
query_contexts.add(b)
else:
query_contexts.add(row_fields[3])
q_string = "','".join(query_contexts)
q_string = "'%s'" % (q_string)
query_sql = """
select c.cmpd_id,
c.core_smi,
con.context_smi,
con.context_size
from core_table c, context_table con
where c.context_id in (select context_id from context_table where context_smi in (%s))
and c.context_id = con.context_id""" % (q_string)
cursor.execute(query_sql)
results = cursor.fetchall()
cmpd_size = heavy_atom_count(in_smi)
print_smallest_change_mmp(results,cmpd_id,cmpd_size)
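# (added note) The query above pulls every database core that shares at least
# one context fragment with the query compound; print_smallest_change_mmp then
# keeps, per matched compound, the row with the largest shared context, i.e.
# the matched molecular pair with the smallest structural change.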
def run_mmp_query(cmpd_id,cmpd_size):
query_sql = """
select c.cmpd_id,
c.core_smi,
con.context_smi,
con.context_size
from core_table c, context_table con
where c.context_id in (select context_id from core_table where cmpd_id = '%s')
and c.context_id = con.context_id""" % (cmpd_id)
cursor.execute(query_sql)
results = cursor.fetchall()
print_smallest_change_mmp(results,cmpd_id,cmpd_size)
def print_smallest_change_mmp(db_results,cmpd_id,query_size):
uniq_list={}
for r in db_results:
if(r[0] != cmpd_id):
#print r
#for each unique compound keep the largest one in common
if(r[0] not in uniq_list):
uniq_list[r[0]] = r
elif(r[3] > uniq_list[r[0]][3] ):
uniq_list[r[0]] = r
for key, value in uniq_list.items():
size_of_change = query_size-value[3]
#print "q_size: %s, Size od change: %s, Ratio: %s" % (query_size,size_of_change,float(size_of_change)/query_size)
if(use_ratio):
if(float(size_of_change)/query_size <= ratio):
cursor.execute("SELECT smiles FROM cmpd_smisp WHERE cmpd_id = ?", (key, ))
rsmi = cursor.fetchone()[0]
print("%s,%s,%s,%s,%s,%s" % (smi,rsmi,id,value[0],value[1],value[2]))
elif(size_of_change <= max_size):
cursor.execute("SELECT smiles FROM cmpd_smisp WHERE cmpd_id = ?", (key, ))
rsmi = cursor.fetchone()[0]
print("%s,%s,%s,%s,%s,%s" % (search_string,rsmi,id,value[0],value[1],value[2]))
def run_subs_query(subs):
query_sql = """
select lhs_smi.smiles,
lhs.cmpd_id,
lhs.core_smi,
rhs_smi.smiles,
rhs.cmpd_id,
rhs.core_smi,
context_table.context_smi,
rhs_smi.cmpd_size-context_table.context_size
from (select cmpd_id,core_smi,context_id from core_table where core_smi_ni = '%s') lhs,
core_table rhs,
cmpd_smisp lhs_smi,
cmpd_smisp rhs_smi,
context_table
where lhs.context_id = rhs.context_id
and context_table.context_id = rhs.context_id
and lhs_smi.cmpd_id = lhs.cmpd_id
and rhs_smi.cmpd_id = rhs.cmpd_id
and lhs.cmpd_id != rhs.cmpd_id
and rhs_smi.cmpd_size-context_table.context_size <= %s""" % (subs,max_size)
cursor.execute(query_sql)
results = cursor.fetchall()
for r in results:
#make sure it is not the same core on both sides
if(r[2] != r[5]):
#cansmirk
smirks,context = cansmirk(str(r[2]),str(r[5]),str(r[6]))
if(have_id):
print("%s,%s,%s,%s,%s,%s,%s,%s" % (subs,id,r[0],r[3],r[1],r[4],smirks,context))
else:
print("%s,%s,%s,%s,%s,%s,%s" % (subs,r[0],r[3],r[1],r[4],smirks,context))
def run_subs_smarts_query(subs_smarts):
    # set OS environment variable so RDKit uses sqlite
os.environ['RD_USESQLLITE'] = '1'
temp_core_ni_file = 'temp_core_ni_file_%s' % (os.getpid())
cmd = "python $RDBASE/Projects/DbCLI/SearchDb.py --dbDir=%s_smarts --smarts='%s' --silent >%s" % (pre,subs_smarts,temp_core_ni_file)
subprocess.Popen(cmd, shell=True).wait()
infile=open(temp_core_ni_file, 'r')
for row in infile:
row = row.rstrip()
query_sql = """
select lhs_smi.smiles,
lhs.cmpd_id,
lhs.core_smi,
rhs_smi.smiles,
rhs.cmpd_id,
rhs.core_smi,
context_table.context_smi,
rhs_smi.cmpd_size-context_table.context_size
from (select cmpd_id,core_smi,context_id from core_table where core_smi_ni = '%s') lhs,
core_table rhs,
cmpd_smisp lhs_smi,
cmpd_smisp rhs_smi,
context_table
where lhs.context_id = rhs.context_id
and context_table.context_id = rhs.context_id
and lhs_smi.cmpd_id = lhs.cmpd_id
and rhs_smi.cmpd_id = rhs.cmpd_id
and lhs.cmpd_id != rhs.cmpd_id
and rhs_smi.cmpd_size-context_table.context_size <= %s
and lhs_smi.cmpd_size-context_table.context_size <= %s""" % (row,max_size,max_size)
cursor.execute(query_sql)
results = cursor.fetchall()
for r in results:
#cansmirk
smirks,context = cansmirk(str(r[2]),str(r[5]),str(r[6]))
if(have_id):
print("%s,%s,%s,%s,%s,%s,%s" % (id,r[0],r[3],r[1],r[4],smirks,context))
else:
print("%s,%s,%s,%s,%s,%s" % (r[0],r[3],r[1],r[4],smirks,context))
infile.close()
#remove temporary files
os.unlink(temp_core_ni_file)
def run_trans_smarts_query(transform):
lhs,rhs = transform.split(">>")
matching_lhs = []
matching_rhs = []
    # set OS environment variable so RDKit uses sqlite
os.environ['RD_USESQLLITE'] = '1'
cmd = "python $RDBASE/Projects/DbCLI/SearchDb.py --dbDir=%s_smarts --smarts='%s' --silent" % (pre,lhs)
p1 = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out
|
buddly27/champollion
|
source/champollion/parser/helper.py
|
Python
|
apache-2.0
| 5,395
| 0.001297
|
# :coding: utf-8
import re
#: Regular Expression pattern for single line comments
_ONE_LINE_COMMENT_PATTERN = re.compile(r"(\n|^| )//.*?\n")
#: Regular Expression pattern for multi-line comments
_MULTI_LINES_COMMENT_PATTERN = re.compile(r"/\*.*?\*/", re.DOTALL)
#: Regular Expression pattern for nested element symbols
_NESTED_ELEMENT_PATTERN = re.compile(r"{[^{}]*}")
def filter_comments(
    content, filter_multiline_comment=True, keep_content_size=False
):
"""Return *content* without the comments.
If *filter_multiline_comment* is set to False, only the one line comment
will be filtered out.
If *keep_content_size* is set to True, the size of the content is preserved.
.. note::
The filtered content keep the same number of lines as the
original content.
.. seealso:: https://www.w3schools.com/js/js_comments.asp
"""
def _replace_comment(element):
"""Replace matched *element* in content."""
replacement = ""
matched = element.group()
# Ensure that only the comment part is being replaced
if not matched.startswith("/"):
replacement += matched[0]
matched = matched[1:]
count = matched.count("\n")
# Add empty spaces with the size of the content if the size
# must be kept.
if keep_content_size:
_buffer = len(matched) - count
replacement += " " * _buffer + "\n" * count
# Otherwise simply keep the number of lines
else:
replacement += "\n" * count
return replacement
content = _ONE_LINE_COMMENT_PATTERN.sub(_replace_comment, content)
if filter_multiline_comment:
content = _MULTI_LINES_COMMENT_PATTERN.sub(_replace_comment, content)
return content
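# Hypothetical example (added, not part of the original module). Comments are
# blanked out while the line count (and here the separating space) survives:
# >>> filter_comments("var a = 1; // trailing comment\nvar b = 2;\n")
# 'var a = 1; \nvar b = 2;\n'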
def collapse_all(content, filter_comment=False):
"""Return tuple of *content* with the top level elements only and dictionary
containing the collapsed content associated with the *line number*.
If *filter_comment* is set to True, all comment are removed from the content
before collapsing the elements. The collapsed content dictionary preserve
the comments.
.. note::
The content with collapsed elements keep the same number of
lines as the original content.
"""
_initial_content = content
collapsed_content = {}
if filter_comment:
# Filter comment before collapsing elements to prevent comment analysis
content = filter_comments(content, keep_content_size=True)
def _replace_element(element):
"""Replace matched *element* in content."""
# Guess line number
count = element.group().count("\n")
# Ensure that the replacement string keep the same length that
# the original content to be able to use the match positions
_buffer = len(element.group()) - count - 2
if len(element.group()) > 2:
line_number = content[:element.start()].count("\n")+1
collapsed_content[line_number] = (
_initial_content[element.start():element.end()]
)
return "<>{buffer}{lines}".format(
buffer=" " * _buffer,
lines="\n" * count
)
_content = None
while _content != content:
_content = content
content = _NESTED_ELEMENT_PATTERN.sub(_replace_element, content)
# Remove the space buffer before returning the content
content = re.sub(r"<> *", lambda x: "{}", content)
return content, collapsed_content
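# Hypothetical example (added, not part of the original module). Braced bodies
# collapse to '{}' while line numbers and the original bodies are kept:
# >>> collapse_all("class A {\n    a = 1;\n}\n")
# ('class A {}\n\n\n', {1: '{\n    a = 1;\n}'})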
def get_docstring(line_number, lines):
"""Return docstrings for an element at a specific *line_number*.
Loop into the file *lines* in reverse, starting from the element's
*line_number* in order to parse the docstring if available.
The docstring must be in the form of::
/**
* Class doc.
*
* Detailed description.
*/
class AwesomeClass {
...
}
Which will return the following result::
"Class doc.\\n\\nDetailed description."
The docstring can also fit on one line, in the form of::
/** Class doc. */
class AwesomeClass {
...
}
"""
docstring = None
for index in reversed(range(line_number-1)):
line = lines[index].strip()
if len(line) == 0 or line.startswith("//"):
# Do not look for docstring when more than two blank lines precede
# the element.
if index < line_number - 1:
return
continue
# Start of the docstring (from the end)
if docstring is None:
# If the entire docstring fit in one line
            match = re.search(r"(?<=/\*\* ).*(?= \*/)", line)
if match is not None:
return match.group()
# No docstring
if not line.startswith("*/"):
return
docstring = []
# Valid docstring line starts with a '*'
        elif re.search(r"^\*( *| +.+)$", line) is not None:
indentation = 2 if len(line) > 1 else 1
docstring.append(line[indentation:].rstrip())
# Beginning of valid docstrings starts with '/**'
elif line.startswith("/**"):
return "\n".join(docstring[::-1])
# Error in the docstring
else:
return
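# Hypothetical example (added, not part of the original module):
# >>> get_docstring(2, ["/** Class doc. */", "class AwesomeClass {"])
# 'Class doc.'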
|
Hybrid-Cloud/cinder
|
cinder/tests/unit/volume/drivers/test_quobyte.py
|
Python
|
apache-2.0
| 38,830
| 0.000052
|
# Copyright (c) 2014 Quobyte Inc.
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Quobyte driver module."""
import errno
import os
import six
import traceback
import mock
from oslo_concurrency import processutils as putils
from oslo_utils import imageutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers import quobyte
class FakeDb(object):
msg = "Tests are broken: mock this out."
def volume_get(self, *a, **kw):
raise Exception(self.msg)
def snapshot_get_all_for_volume(self, *a, **kw):
"""Mock this if you want results from it."""
return []
class QuobyteDriverTestCase(test.TestCase):
"""Test case for Quobyte driver."""
TEST_QUOBYTE_VOLUME = 'quobyte://quobyte-host/openstack-volumes'
TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL = 'quobyte-host/openstack-volumes'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/quobyte'
TEST_MNT_POINT_BASE = '/mnt'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_TMP_FILE = '/tmp/tempfile'
VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca'
SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede'
def setUp(self):
super(QuobyteDriverTestCase, self).setUp()
self._configuration = mock.Mock(conf.Configuration)
self._configuration.append_config_values(mock.ANY)
self._configuration.quobyte_volume_url = \
self.TEST_QUOBYTE_VOLUME
self._configuration.quobyte_client_cfg = None
self._configuration.quobyte_sparsed_volumes = True
self._configuration.quobyte_qcow2_volumes = False
self._configuration.quobyte_mount_point_base = \
self.TEST_MNT_POINT_BASE
self._configuration.nas_secure_file_operations = "auto"
self._configuration.nas_secure_file_permissions = "auto"
self._driver =\
quobyte.QuobyteDriver(configuration=self._configuration,
db=FakeDb())
self._driver.shares = {}
self._driver.set_nas_security_options(is_new_cinder_install=False)
self.context = context.get_admin_context()
def assertRaisesAndMessageMatches(
self, excClass, msg, callableObj, *args, **kwargs):
"""Ensure that the specified exception was raised. """
caught = False
try:
callableObj(*args, **kwargs)
except Exception as exc:
caught = True
self.assertIsInstance(exc, excClass,
'Wrong exception caught: %s Stacktrace: %s' %
(exc, traceback.format_exc()))
self.assertIn(msg, six.text_type(exc))
if not caught:
self.fail('Expected raised exception but nothing caught.')
def test_local_path(self):
"""local_path common use case."""
drv = self._driver
vol_id = self.VOLUME_UUID
volume = self._simple_volume(_name_id=vol_id)
self.assertEqual(
'/mnt/1331538734b757ed52d0e18c0a7210cd/volume-%s' % vol_id,
drv.local_path(volume))
def test_mount_quobyte_should_mount_correctly(self):
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open:
# Content of /proc/mount (not mounted yet).
mock_open.return_value = six.StringIO(
"/dev/sda5 / ext4 rw,relatime,data=ordered 0 0")
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT)
mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT)
mount_call = mock.call(
'mount.quobyte', self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT, run_as_root=False)
getfattr_call = mock.call(
'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT,
run_as_root=False)
mock_execute.assert_has_calls(
[mkdir_call, mount_call, getfattr_call], any_order=False)
def test_mount_quobyte_already_mounted_detected_seen_in_proc_mount(self):
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open:
# Content of /proc/mount (already mounted).
mock_open.return_value = six.StringIO(
"quobyte@%s %s fuse rw,nosuid,nodev,noatime,user_id=1000"
",group_id=100,default_permissions,allow_other 0 0"
% (self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT))
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT)
mock_execute.assert_called_once_with(
'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT,
run_as_root=False)
def test_mount_quobyte_should_suppress_and_log_already_mounted_error(self):
"""test_mount_quobyte_should_suppress_and_log_already_mounted_error
Based on /proc/mount, the file system is not mounted yet. However,
mount.quobyte returns with an 'already mounted' error. This is
a last-resort safe-guard in case /proc/mount parsing was not
successful.
Because _mount_quobyte gets called with ensure=True, the error will
be suppressed and logged instead.
"""
with mock.patch.object(self._driver, '_execute') as mock_execute, \
                mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open, \
mock.patch('cinder.volume.drivers.quobyte.LOG') as mock_LOG:
# Content of /proc/mount (empty).
mock_open.return_value = six.StringIO()
            mock_execute.side_effect = [None, putils.ProcessExecutionError(
stderr='is busy or already mounted')]
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT,
ensure=True)
mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT)
mount_call = mock.call(
'mount.quobyte', self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT, run_as_root=False)
mock_execute.assert_has_calls([mkdir_call, mount_call],
any_order=False)
mock_LOG.warning.assert_called_once_with('%s is already mounted',
self.TEST_QUOBYTE_VOLUME)
def test_mount_quobyte_should_reraise_already_mounted_error(self):
"""test_mount_quobyte_should_reraise_already_mounted_error
Like test_mount_quobyte_should_suppress_and_log_already_mounted_error
but with ensure=False.
"""
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_op
|
lgmerek/ScalaProjectGeneratorFacade
|
scalaProjectGeneratorFacade.py
|
Python
|
mit
| 6,824
| 0.002638
|
import sublime
import sublime_plugin
import re
import subprocess
from array import *
from .giterCommandThread import CommandThread
from .commandBuilders import buildCommand
from .jsonDecoderBuilder import JsonDecoderBuilder
from .sbtBuildFileEditor import SbtBuildFileEditor
from .logger import LoggerFacade
from .utils import EXECUTABLES
from .settings import SettingsManager
from .generatorFacadeExceptions import GeneratorFacadeInitializationError
from functools import *
class ScalaProjectGeneratorFacadeCommand(sublime_plugin.TextCommand):
def __init__(self, k):
sublime_plugin.TextCommand.__init__(self, k)
self.ProjectNamePrefix = "SBT Template: "
self.templateDefaultProperties = []
self.templateUserProps = []
self.selectedTemplateName = ''
self.projectPath = ''
self.ProjectBaseDir = ''
self.propertyIndex = 0
def __initProjectGeneratorFacade(self):
self.logger.info("Generator initialization started")
self.settingsManager = SettingsManager(EXECUTABLES)
self.settingsManager.create_executable_paths()
self.jsonDataDecoder = JsonDecoderBuilder(
self.settingsManager).createJsonDecoder()
self.sbtTemplates = [
self.ProjectNamePrefix + t for t in
self.jsonDataDecoder.getProjectTemplatesNames()]
def run(self, edit):
LoggerFacade.clear_log_file()
self.logger = LoggerFacade.getLogger()
self.logger.debug(
'\n\n----- Scala Project Generator Facade has started -----\n\n')
try:
self.__initProjectGeneratorFacade()
self.view.window().show_quick_panel(
self.sbtTemplates, self.on_projectTemplateSelected)
except GeneratorFacadeInitializationError as e:
self.logger.error(e.message + e.causedBy)
def on_projectTemplateSelected(self, user_input):
        # This "if" is only a temporary workaround for a Sublime 3 Beta API problem
        # with the on_done event of show_quick_panel. The current bug is that the
        # event method is called twice: the first invocation returns -1, the other
        # one is correct.
if user_input == -1:
return 0
self.selectedTemplateName = (
self.sbtTemplates[user_input])[len(self.ProjectNamePrefix):]
self.logger.debug("Selected template: %s", self.selectedTemplateName)
self.templateDefaultProperties = self.jsonDataDecoder.getTemplateDefaultProperties(
self.selectedTemplateName)
self.view.window().show_input_panel(
"Project Path", '', self.on_projectPathEntered, None, None)
def on_projectPathEntered(self, user_input):
self.projectPath = user_input
item = self.templateDefaultProperties[self.propertyIndex]
self.view.window().show_input_panel(
item[0], item[1], self.on_propetySelected, None, None)
def on_propetySelected(self, user_input):
prop = self.templateDefaultProperties[self.propertyIndex]
if prop[0] == 'name':
self._buildProjectBaseDir(user_input)
self.templateUserProps.append((prop[0], user_input))
self.propertyIndex += 1
if self.propertyIndex < len(self.templateDefaultProperties):
item = self.templateDefaultProperties[self.propertyIndex]
self.view.window().show_input_panel(
item[0], item[1], self.on_propetySelected, None, None)
else:
self.propertyIndex = 0
self.gitterThread()
def _buildProjectBaseDir(self, user_input):
        g8ProjectDirName = re.sub(r"\s+", '-', user_input).lower()
self.ProjectBaseDir = self.projectPath + '/' + g8ProjectDirName
self.logger.debug("Project Base Dir Path: %s", self.ProjectBaseDir)
def handleThread(self, thread, timeout, key, message, handleLiveThread, nextStep, i=0, dir=1):
if thread.is_alive():
handleLiveThread(key, message, partial(self.handleThread,
thread, 100, key, message, handleLiveThread, nextStep), i, dir)
else:
self.view.set_status(key, '')
nextStep()
    def handleLiveThread(self, key, message, currentThread, i=0, dir=1):
def animate(i, dir):
before = i % 8
            after = 7 - before
if not after:
dir = -1
if not before:
dir = 1
i += 1
self.view.set_status(
key, message + ' [%s=%s]' % (' ' * before, ' ' * after))
return (i, dir)
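        # (added note) animate() renders a bouncing status-bar marker,
        # '[=       ]' ... '[       =]': i counts frames and dir flips
        # direction at either edge of the 8-position track.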
a = animate(i, dir)
sublime.set_timeout(lambda: currentThread(a[0], a[1]))
def _prepareAndRunThread(self, commandName, path, isShellUsed, statusMessage, nextStep, additionalData=[]):
command = buildCommand(commandName,
self.settingsManager.get_executables(), additionalData)
thread = CommandThread(command, path, isShellUsed)
thread.start()
self.handleThread(
thread, 100, commandName, statusMessage, self.handleLiveThread, nextStep)
def gitterThread(self):
self._prepareAndRunThread(
'gitter', self.projectPath, False, 'Giter Template generation',
self.ensimeThread, additionalData=[self.selectedTemplateName, self.templateUserProps])
def ensimeThread(self):
self._prepareAndRunThread(
            'ensime', self.ProjectBaseDir, True, "Ensime configuration", self.genSublimeThread)
def genSublimeThread(self):
self.modifySbtBuildFile()
self._prepareAndRunThread(
'gen-sublime', self.ProjectBaseDir, True, "Gen Sublime", self.openProject)
def openProject(self):
self._execute_on_sublime_command_line(
['-a', self.ProjectBaseDir], self.settingsManager.get_executables())
def _execute_on_sublime_command_line(self, args, execs):
args.insert(0, execs.SUBLIME[2]['executable_path'])
return subprocess.Popen(args)
def modifySbtBuildFile(self):
sbtFile = open(self.ProjectBaseDir + "/build.sbt", "a")
sbtFileEditor = SbtBuildFileEditor(sbtFile)
sbtFileEditor.simpleTransformationBatch(
[('sublimeExternalSourceDirectoryName',
'"' + self._getSettingByKey('sublime_gen_external_source_dir') + '"'),
('sublimeTransitive',
self._getSettingByKey('sublime_gen_transitiv'))])
sbtFileEditor.transformUsingOtherKey(
('sublimeExternalSourceDirectoryParent',
self._getSettingByKey('sublime_gen_extenal_source_dir_parent')))
sbtFile.close()
def _getSettingByKey(self, key):
return self.settingsManager.get_setting(key)
|
astrorigin/pyswisseph
|
tests/test_swe_lun_eclipse.py
|
Python
|
gpl-2.0
| 2,097
| 0.005246
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import swisseph as swe
import unittest
class TestSweLunEclipse(unittest.TestCase):
@classmethod
def setUpClass(cls):
swe.set_ephe_path()
def test_01(self):
jd = 2454466.5
flags = swe.FLG_SWIEPH
geopos = (12.1, 49.0, 330)
rflags, tret = swe.lun_eclipse_when(jd, flags, 0)
self.assertEqual(rflags, 4)
self.assertEqual(len(tret), 10)
t1 = (2454517.6430690456, 0.0, 2454517.57172334, 2454517.7144189165,
2454517.6258038115, 2454517.6603509136, 2454517.525389122,
2454517.7608554545, 0.0, 0.0)
for i in range(10):
self.assertAlmostEqual(tret[i], t1[i])
tjdut = tret[0]
rflags, tret, attr = swe.lun_eclipse_when_loc(tjdut, geopos, flags)
self.assertEqual(rflags, 29584)
self.assertEqual(len(tret), 10)
t1 = (2454695.3820517384, 0.0, 2454695.316710297, 2454695.447390333,
0.0, 0.0, 2454695.2672055247, 2454695.496797575, 0.0, 0.0)
for i in range(10):
self.assertAlmostEqual(tret[i], t1[i])
self.assertEqual(len(attr), 20)
t1 = (0.8076127691060245, 1.8366497324296667, 0.0, 0.0,
326.9885866287668, 21.362590458352507, 21.402251051495636,
0.5301609960196174, 0.8076127691060245, 138.0, 28.0, 28.0,
28.0, 28.0, 28.0, 28.0, 28.0, 28.0, 28.0, 28.0)
for i in range(20):
self.assertAlmostEqual(attr[i], t1[i])
rflags, attr = swe.lun_eclipse_how(tjdut, geopos, flags)
self.assertEqual(rflags, 4)
self.assertEqual(len(attr), 20)
t1 = (1.1061093373639495, 2.145134309769692, 0.0, 0.0,
73.8203145568749, 26.299290272560974, 26.330700027276947,
0.3801625589840114, 1.1061093373639495, 133.0, 26.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
for i in range(20):
self.assertAlmostEqual(attr[i], t1[i])
if __name__ == '__main__':
unittest.main()
# vi: sw=4 ts=4 et
|
sander76/home-assistant
|
tests/components/mqtt/test_common.py
|
Python
|
apache-2.0
| 41,174
| 0.00085
|
"""Common test objects."""
import copy
from datetime import datetime
import json
from unittest.mock import ANY, patch
from homeassistant.components import mqtt
from homeassistant.components.mqtt import debug_info
from homeassistant.components.mqtt.const import MQTT_DISCONNECTED
from homeassistant.components.mqtt.mixins import MQTT_ATTRIBUTES_BLOCKED
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from tests.common import async_fire_mqtt_message, mock_registry
DEFAULT_CONFIG_DEVICE_INFO_ID = {
"identifiers": ["helloworld"],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
}
DEFAULT_CONFIG_DEVICE_INFO_MAC = {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
}
async def help_test_availability_when_connection_lost(hass, mqtt_mock, domain, config):
"""Test availability after MQTT disconnection."""
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
mqtt_mock.connected = False
async_dispatcher_send(hass, MQTT_DISCONNECTED)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
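# Hypothetical caller sketch (added; switch.DOMAIN and DEFAULT_CONFIG are
# assumed names, modeled on the per-platform test modules that use this
# helper):
#     async def test_availability_when_connection_lost(hass, mqtt_mock):
#         await help_test_availability_when_connection_lost(
#             hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG)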
async def help_test_availability_without_topic(hass, mqtt_mock, domain, config):
"""Te
|
st availability without defined availability topic."""
assert "availability_topic" not in config[domain]
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_topic"] = "availability-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_all(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "all"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_any(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "any"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.t
|
wangtuanjie/airflow
|
airflow/executors/sequential_executor.py
|
Python
|
apache-2.0
| 1,230
| 0
|
from builtins import str
import logging
import subprocess
from airflow.executors.base_executor import BaseExecutor
from airflow.utils import State
class SequentialExecutor(BaseExecutor):
"""
This executor will only run one task instance at a time, can be used
for debugging. It is also the only executor that can be used with sqlite
since sqlite doesn't support multiple connections.
Since we want airflow to work out of the box, it defaults to this
SequentialExecutor alongside sqlite as you first install it.
"""
def __init__(self):
        super(SequentialExecutor, self).__init__()
self.commands_to_run = []
def execute_async(self, key, command, queue=None):
self.commands_to_run.append((key, command,))
def sync(self):
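        # (added note) Commands run strictly one at a time, blocking on each
        # subprocess; a failure marks that task FAILED and re-raises, so any
        # commands still queued in this batch are not attempted.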
        for key, command in self.commands_to_run:
logging.info("command" + str(command))
try:
sp = subprocess.Popen(command, shell=True)
sp.wait()
except Exception as e:
self.change_state(key, State.FAILED)
raise e
self.change_state(key, State.SUCCESS)
self.commands_to_run = []
def end(self):
self.heartbeat()
|
seanli9jan/tensorflow
|
tensorflow/contrib/rnn/python/ops/lstm_ops.py
|
Python
|
apache-2.0
| 25,114
| 0.005017
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2).dims[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
      use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
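# Hypothetical shape sketch (added, not part of the original module). For
# batch_size B, num_inputs I and cell_size C, the fused cell above expects
#   x: [B, I], cs_prev/h_prev: [B, C], w: [I + C, 4 * C], b: [4 * C],
# and returns seven [B, C] tensors (i, cs, f, o, ci, co, h).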
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
dtype = x[0].dtype
batch_size = x[0].get_shape().with_rank(2).dims[0].value
cell_size4 = b.get_shape().with_rank(1).dims[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtype, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtype, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylin
|
nandub/yammer
|
lib/TicketExceptionHandler.py
|
Python
|
gpl-2.0
| 9,659
| 0.013562
|
# Copyright 2002, 2004 John T. Reese.
# email: jtr at ofb.net
#
# This file is part of Yammer.
#
# Yammer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Yammer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Yammer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from WebKit.ExceptionHandler import ExceptionHandler, htTitle, singleton
from WebUtils.Funcs import urlEncode, htmlForDict
from WebUtils.HTMLForException import HTMLForException
import YammerUtils, traceback, string, os.path, sys
from types import DictType
from InstallSettings import settings
class TicketExceptionHandler(ExceptionHandler):
def __init__(self, application, transaction, excInfo):
ExceptionHandler.__init__(self, application, transaction, excInfo)
def getGaleId(self):
trans= self._tra
if trans.hasSession():
session= trans.session()
if session.hasValue('username'):
username= session.value('username')
return username
return None
def publicErrorPage(self):
html= '''<html>
<head>
<title>Error</title>
</head>
<body fgcolor=black bgcolor=white>
%s
<p> %s
''' % (htTitle('Error'), self.setting('UserErrorMessage'))
debugInfo= self.generateDebugInfo()
html += debugInfo[0]
html += '</body></html>'
return html
def privateErrorPage(self):
''' Returns an HTML page intended for the developer with useful information such as the traceback. '''
html = ['''
<html>
<head>
<title>Error</title>
</head>
<body fgcolor=black bgcolor=white>
%s
<p> %s''' % (htTitle('Error'), self.setting('UserErrorMessage'))]
html.append(self.htmlDebugInfo())
html.append('</body></html>')
return string.join(html, '')
def htmlDebugInfo(self):
return string.join(self.generateDebugInfo(), '<hr>')
def generateDebugInfo(self):
''' Return HTML-formatted debugging information about the current exception. '''
self.html= []
self.bugdesc= "(please click *Edit* and enter a brief description of " + \
"what you were doing, here)\n\n====\n"
self.reporttitle= 'unexpected error'
self.writeHTML()
html= ''.join(self.html)
self.html= None
contact= self.getGaleId()
if contact:
contact= 'gale ' + contact
else:
contact= ''
version= YammerUtils.getVersionString()
        desc= self.bugdesc.replace('"', '&quot;')
title= self.reporttitle
return ("""<form method="post" action="http://cvstrac.ofb.net/tktnew">
<input type="hidden" name="t" value="%(title)s">
<input type="hidden" name="w" value="jtr">
<input type="hidden" name="c" value="%(contact)s">
<input type="hidden" name="s" value="yammer.net">
<input type="hidden" name="v" value="%(version)s">
<input type="hidden" name="y" value="event">
<input type="hidden" name="r" value="3">
<input type="hidden" name="p" value="3">
<input type="hidden" name="d" value="%(desc)s">
You can file an incident report about this error. If you file
an incident report, relevant information about the problem will
be saved in the bug database and you will be given a chance to
type in extra information, such as a description of what you
were doing. Filling out an incident report is very helpful and
makes it much more likely that the developer will be able to fix
the problem. If you would like to file an incident report,
please click here:<p>
<input type="submit" name="submit" value="submit incident report">
""" % locals(), html)
def htmlWrite(self, s):
ExceptionHandler.write(self, s)
def descWrite(self, s):
self.bugdesc += str(s)
def write(self, s):
self.htmlWrite(s)
self.descWrite(s)
def htmlWriteln(self, s):
ExceptionHandler.writeln(self, s)
def descWriteln(self, s):
self.bugdesc += str(s) + '\n\n'
def writeln(self, s):
self.htmlWriteln(s)
self.descWriteln(s)
def writeDict(self, d):
self.htmlWriteln(htmlForDict(d, filterValueCallBack=self.filterDictValue,
maxValueLength=self._maxValueLength))
keys= d.keys()
keys.sort()
for key in keys:
self.descWrite(self.descRepr(key) + ':')
values= string.split(str(d[key]), '\n')
self.descWriteln(values[0])
for value in values[1:]:
self.descWriteln(' ' + self.descRepr(value))
def htmlWriteTable(self, listOfDicts, keys=None):
"""
Writes a table whose contents are given by listOfDicts. The
keys of each dictionary are expected to be the same. If the
keys arg is None, the headings are taken in alphabetical order
from the first dictionary. If listOfDicts is "false", nothing
happens.
The keys and values are already considered to be HTML.
Caveat: There's no way to influence the formatting or to use
        column titles that are different from the keys.
Note: Used by writeAttrs().
"""
if not listOfDicts:
return
if keys is None:
keys = listOfDicts[0].keys()
keys.sort()
wr = self.htmlWriteln
wr('<table>\n<tr>')
for key in keys:
wr('<td bgcolor=#F0F0F0><b>%s</b></td>' % key)
wr('</tr>\n')
for row in listOfDicts:
wr('<tr>')
for key in keys:
wr('<td bgcolor=#F0F0F0>%s</td>' % self.filterTableValue(row[key], key, row, listOfDicts))
wr('</tr>\n')
wr('</table>')
def descWriteTable(self, listOfDicts, keys=None):
if not listOfDicts: return
if keys is None:
keys= listOfDicts[0].keys()
keys.sort()
wrp= self.descWrite
wr= self.descWriteln
wr('keys: ' + string.join(keys, ' '))
for row in listOfDicts:
for key in keys:
wrp('{%s} ' % self.filterTableValue(row[key], key, row,
listOfDicts))
wr('')
def writeTable(self, listOfDicts, keys=None):
self.htmlWriteTable(listOfDicts, keys)
self.descWriteTable(listOfDicts, keys)
def htmlWriteTraceback(self):
self.htmlWriteTitle('Traceback')
self.htmlWrite('<p> <i>%s</i>' % self.servletPathname())
self.htmlWrite(HTMLForException(self._exc))
def htmlWriteTitle(self, s):
self.htmlWriteln(htTitle(s))
def writeTitle(self, s):
self.htmlWriteTitle(s)
self.descWriteln('\n\n====\n\n' + s)
def writeAttrs2(self, obj, attrNames, reprfunc, wrTableFunc):
"""
Writes the attributes of the object as given by attrNames.
Tries obj._name first, followed by obj.name(). Is resilient
regarding exceptions so as not to spoil the exception report.
"""
rows = []
for name in attrNames:
value = getattr(obj, '_'+name, singleton) # go for data attribute
try:
if value is singleton:
value = getattr(obj, name, singleton) # go for method
if value is singleton:
value = '(could not find attribute or method)'
else:
try:
if callable(value):
value = value()
except Exception, e:
value = '(exception during method call: %s: %s)' % (e.__class__.__name__, e)
value = reprfunc(value)
else:
value = reprfunc(value)
except Exception, e:
value = '(exception during value processing: %s: %s)' % (e.__class__.__name__, e)
rows.append({'attr': name, 'value': value})
wrTableFunc(rows, ('attr', 'value'))
def writeAttrs(self, obj, attrNames):
self.writeAttrs2(obj, attrNames, self.repr, self.htmlWriteTable)
self.writeAttrs2(obj, attrNames, self.descRepr, self.descWriteTable)
def descRepr(self, x):
if type(x) is DictType:
reps= []
for k in x.keys():
reps.append(self.descRepr(k)
|
ntt-sic/taskflow
|
taskflow/flow.py
|
Python
|
apache-2.0
| 2,637
| 0
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from taskflow.utils import reflection
class Flow(six.with_metaclass(abc.ABCMeta)):
"""The base abstract class of all flow implementations.
A flow is a structure that defines relationships between tasks. You can
add tasks and other flows (as subflows) to the flow, and the flow provides
a way to implicitly or explicitly define how they are interdependent.
Exact structure of the relationships is defined by concrete
implementation, while this class defines common interface and adds
human-readable (not necessarily unique) name.
NOTE(harlowja): if a flow is placed in another flow as a subflow (a desired
way to compose flows together), then it is valid and permissible that during
execution the subflow & parent flow may be flattened into a new flow. Since
a flow is just a 'structuring' concept this is typically a behavior that
should not be worried about (as it is not visible to the user), but it is
worth mentioning here.
Flows are expected to provide the following methods/properties:
- add
- __len__
- requires
- provides
"""
def __init__(self, name):
self._name = str(name)
@property
def name(self):
"""A non-unique name for this flow
|
(human readable)"""
return self._name
@abc.abstractmethod
def __len__(self):
"""Returns how many items are in this flow."""
def __str__(self):
lines = ["%s: %s" % (reflection.get_class_name(self), self.name)]
lines.append("%s" % (len(self)))
return "; ".join(lines)
@abc.abstractmethod
def add(self, *items):
"""Adds a given item/items to this flow."""
@abc.abstractproperty
def requires(self):
"""Browse argument requirement names this flow requires to run."""
@abc.abstractproperty
def provides(self):
"""Browse argument names provided by the flow."""
|
tommai78101/IRCBot
|
UserInput.py
|
Python
|
mit
| 2,759
| 0.029358
|
import os
import atexit
import string
import importlib
import threading
import socket
from time import sleep
def BYTE(message):
return bytes("%s\r\n" % message, "UTF-8")
class UserInput(threading.Thread):
    isRunning = False
parent = None
def __init__(self, bot):
super().__init__()
self.parent = bot
self.setDaemon(True)
self.isRunning = False
self.start()
def createMessage(self, message):
temp = ""
for i in range(len(message)):
if (i != len(message) - 1):
                temp += message[i] + " "
else:
temp += message[i]
return temp
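    # Note: createMessage is equivalent to the one-liner " ".join(message);
    # the explicit loop above is preserved from the original source.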
def run(self):
self.isRunning = True
while (self.isRunning):
try:
message = input()
message = message.split(" ")
if (message[0] != ""):
if (message[0] == "/r" or message[0] == "/reload"):
self.parent.reloadAll()
elif (message[0] == "/q" or message[0] == "/quit"):
print("Quitting.")
self.parent.quit()
self.isRunning = False
elif (message[0] == "/j" or message[0] == "/join"):
if (len(message) < 2 or len(message) > 2):
print("Incorrect usage.")
else:
self.parent.switch(message[1])
elif (message[0] == "/l" or message[0] == "/leave"):
if (len(message) >= 2):
if (len(message) > 2):
for i in range(1, len(message)):
self.parent.leave(message[i], False)
if (len(self.parent.channels) > 0):
self.parent.focusedChannel = self.parent.channels[0]
print("Left channels. Focusing on %s" % self.parent.focusedChannel)
else:
print("No channels left.")
else:
self.parent.leave(message[1], False)
if (len(self.parent.channels) > 0):
self.parent.focusedChannel = self.parent.channels[0]
print("Left %s. Focusing on %s" % (message[1], self.parent.focusedChannel))
else:
print("No channels left.")
else:
print("Incorrect usage.")
elif (message[0] == "/?" or message[0] == "/help"):
print("1. Type anything to chat with others in %s." % self.parent.focusedChannel)
print("2. /? or /help -- Bring up the bot commands.")
print("3. /j or /join -- Join a new channel. Channel focus will switch over.")
print("4. /l or /leave -- Leave channel. Channel focus will change.")
print("5. /r or /reload -- Reload all plugins. (Hotswapping is supported.)")
print("6. /q or /quit -- Quit the bot.")
else:
self.parent.s.send(BYTE("PRIVMSG %s :%s" % (self.parent.focusedChannel, self.createMessage(message))))
except WindowsError as winError:
print(winError)
if (self.parent.s != None):
                self.parent.s.shutdown(socket.SHUT_RDWR)
                self.parent.s.close()
self.parent.s = None
self.parent.connect()
except Exception as error:
print(error)
|
jawilson/home-assistant
|
homeassistant/helpers/selector.py
|
Python
|
apache-2.0
| 5,138
| 0.000779
|
"""Selectors for Home Assistant."""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, cast
import voluptuous as vol
from homeassistant.const import CONF_MODE, CONF_UNIT_OF_MEASUREMENT
from homeassistant.util import decorator
SELECTORS = decorator.Registry()
def validate_selector(config: Any) -> dict:
"""Validate a selector."""
if not isinstance(config, dict):
raise vol.Invalid("Expected a dictionary")
if len(config) != 1:
raise vol.Invalid(f"Only one type can be specified. Found {', '.join(config)}")
selector_type = list(config)[0]
if (selector_class := SELECTORS.get(selector_type)) is None:
raise vol.Invalid(f"Unknown selector type {selector_type} found")
# Selectors can be empty
if config[selector_type] is None:
return {selector_type: {}}
return {
selector_type: cast(dict, selector_class.CONFIG_SCHEMA(config[selector_type]))
}
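# A quick sketch of validate_selector in use (values are illustrative):
#
#   validate_selector({"number": {"min": 0, "max": 100}})
#   # -> {"number": {"min": 0.0, "max": 100.0, "step": 1.0, "mode": "slider"}}
#
#   validate_selector({"boolean": None})
#   # -> {"boolean": {}}   (empty selector configs are allowed)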
class Selector:
"""Base class for selectors."""
CONFIG_SCHEMA: Callable
@SELECTORS.register("entity")
class EntitySelector(Selector):
"""Selector of a single entity."""
CONFIG_SCHEMA = vol.Schema(
{
# Integration that provided the entity
vol.Optional("integration"): str,
# Domain the entity belongs to
vol.Optional("domain"): str,
# Device class of the entity
vol.Optional("device_class"): str,
}
)
@SELECTORS.register("device")
class DeviceSelector(Selector):
"""Selector of a single device."""
CONFIG_SCHEMA = vol.Schema(
{
# Integration linked to it with a config entry
vol.Optional("integration"): str,
# Manufacturer of device
vol.Optional("manufacturer"): str,
# Model of device
vol.Optional("model"): str,
# Device has to contain entities matching this selector
vol.Optional("entity"): EntitySelector.CONFIG_SCHEMA,
}
)
@SELECTORS.register("area")
class AreaSelector(Selector):
"""Selector of a single area."""
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional("entity"): vol.Schema(
{
vol.Optional("domain"): str,
vol.Optional("device_class"): str,
vol.Optional("integration"): str,
}
),
vol.Optional("device"): vol.Schema(
{
vol.Optional("integration"): str,
vol.Optional("manufacturer"): str,
vol.Optional("model"): str,
}
),
}
)
@SELECTORS.register("number")
class NumberSelector(Selector):
"""Selector of a numeric value."""
CONFIG_SCHEMA = vol.Schema(
{
vol.Required("min"): vol.Coerce(float),
vol.Required("max"): vol.Coerce(float),
vol.Optional("step", default=1): vol.All(
vol.Coerce(float), vol.Range(min=1e-3)
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): str,
vol.Optional(CONF_MODE, default="slider"): vol.In(["box", "slider"]),
}
)
@SELECTORS.register("addon")
class AddonSelector(Selector):
"""Selector of a add-on."""
CONFIG_SCHEMA = vol.Schema({})
@SELECTORS.register("boolean")
class BooleanSelector(Selector):
"""Selector of a boolean value."""
CONFIG_SCHEMA = vol.Schema({})
@SELECTORS.register("time")
class TimeSelector(Selector):
"""Selector
|
of a time value."""
CONFIG_SCHEMA = vol.Schema({})
@SELECTORS.register("target")
class TargetSelector(Selector):
"""Selector of a target value (area ID, device ID, entity ID etc).
Value should follow cv.ENTITY_SERVICE_FIELDS format.
"""
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional("entity"): vol.Schema(
{
vol.Optional("domain"): str,
vol.Optional("device_class"): str,
vol.Optional("integration"): str,
}
),
vol.Optional("device"): vol.Schema(
{
vol.Optional("integration"): str,
vol.Optional("manufacturer"): str,
vol.Optional("model"): str,
}
),
}
)
@SELECTORS.register("action")
class ActionSelector(Selector):
"""Selector of an action sequence (script syntax)."""
CONFIG_SCHEMA = vol.Schema({})
@SELECTORS.register("object")
class ObjectSelector(Selector):
"""Selector for an arbitrary object."""
CONFIG_SCHEMA = vol.Schema({})
@SELECTORS.register("text")
class StringSelector(Selector):
"""Selector for a multi-line text string."""
CONFIG_SCHEMA = vol.Schema({vol.Optional("multiline", default=False): bool})
@SELECTORS.register("select")
class SelectSelector(Selector):
"""Selector for an single-choice input select."""
CONFIG_SCHEMA = vol.Schema(
{vol.Required("options"): vol.All([str], vol.Length(min=1))}
)
|
MSA-Argentina/relojito_project
|
relojito/app/tasks.py
|
Python
|
mit
| 6,517
| 0.001536
|
# -*- coding: utf-8 -*-
from datetime import date, timedelta
from subprocess import check_output
from celery import task
from django.template.loader import render_to_string
from django.db.models import Sum
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.utils.translation import ugettext as _
from .models import Holiday, Project, Task
def get_fortune():
"""Gets a random fortune from the system."""
fortune = check_output(['/usr/games/fortune'])
return fortune
def verify_yesterday_tasks(user):
"""Returns True if a user created at least one task
    yesterday. Also returns True if 'yesterday' fell on a weekend or
    was a holiday."""
yesterday = date.today() - timedelta(days=1)
if Holiday.objects.filter(date=yesterday).exists() or \
yesterday.weekday() in [5, 6]:
return True
return Task.objects.filter(date=yesterday, owner=user).exists()
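# Illustrative behaviour: if today is Monday, "yesterday" is Sunday
# (weekday() == 6), so verify_yesterday_tasks() returns True without ever
# querying the user's tasks.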
@task()
def disable_overdue_projects():
"""Disable all projects with an overdue date"""
today = date.today()
overdue_projects = Project.objects.filter(is_active=True,
due_date__lt=today).all()
for op in overdue_projects:
op.is_active = False
op.save()
@task()
def weekly_irregular_users():
"""Sends a weekly hall of shame email to admin users."""
subject = "Weekly hall of shame"
# active users, not in blacklist
active_users = User.objects.filter(is_active=True).all()
users = list(filter(lambda x: x.username not in
settings.ALERT_USERS_BLACKLIST,
active_users))
users_few_days = list(filter(lambda x: x.total_days_last_week() < 5,
users))
users_few_hours = list(filter(lambda x: x.avg_hours_last_week() < 7,
users))
data = {
"users_few_days": users_few_days,
"users_few_hours": users_few_hours
}
text_body = render_to_string(
'mails/weekly_shame_mail.txt', data)
to_mail = []
to_mail.append(settings.ADMIN_USERS_EMAIL)
print(text_body)
send_mail(
subject, text_body, settings.DEFAULT_FROM_EMAIL, to_mail)
@task()
def weekly_summary_user(user):
"""Sends a weekly summary."""
subject = "Resumen semanal de tareas"
wt = user.last_week_tasks()
if wt:
last_task = user.get_last_task()
week_days = user.total_days_last_week()
total_hours = user.total_hours_last_week()
avg_hours = user.avg_hours_last_week()
data = {
"username": user.username,
"week_days": week_days,
"total_hours": total_hours,
"avg_hours": avg_hours,
"last_task": last_task,
"weekly_tasks": wt
}
text_body = render_to_string(
'mails/weekly_tasks.txt', data)
to_mail = []
to_mail.append(user.email)
print(text_body)
send_mail(
subject, text_body, settings.DEFAULT_FROM_EMAIL, to_mail)
@task()
def send_alert_to_user(user):
subject = "No creaste tareas en Relojito ayer"
project_url = settings.SITE_URL
last_task = user.get_last_task()
fortune = get_fortune()
data = {
"username": user.username,
"project_url": project_url,
"last_task": last_task,
"fortune": fortune
}
text_body = render_to_string(
'mails/no_tasks_yesterday.txt', data)
to_mail = []
to_mail.append(user.email)
print(text_body)
send_mail(
subject, text_body, settings.DEFAULT_FROM_EMAIL, to_mail)
@task()
def mail_alert_no_created_task():
"""Sends an alert if a user didn't create any tasks the
day before."""
users = User.objects.filter(is_active=True).all()
for user in users:
if user.email and user.username not in settings.ALERT_USERS_BLACKLIST:
if not verify_yesterday_tasks(user):
send_alert_to_user(user)
@task()
def mail_weekly_summary():
"""Sends a weekly summary to all users."""
users = User.objects.filter(is_active=True).all()
for user in users:
if user.email and user.username not in settings.ALERT_USERS_BLACKLIST:
weekly_summary_user(user)
@task()
def mail_new_year_greeting():
"""Sends a happy new year greeting."""
users = User.objects.filter(is_active=True).all()
for user in users:
if user.email and user.username not in settings.ALERT_USERS_BLACKLIST:
if not verify_yesterday_tasks(user):
taskset = user.get_tasks()
projects = user.get_projects()
tx = taskset.aggregate(Sum('total_hours'))
total_hours = tx['total_hours__sum']
subject = _(u"Feliz año nuevo de parte de Relojito")
body = _(u"""Hola %(username)s, Relojito te cuenta que hasta ahora completaste %(total_tareas)s tareas,
para un total de %(total_proyectos)s proyectos. En total, cargaste %(total_horas)s horas.\n
Más allá de las estadísticas, Relojito te desea un excelente comienzo de año!""") % {'total_tareas': len(taskset),
'username': user.first_name,
'total_proyectos': len(projects),
'total_horas': total_hours
}
to_mail = []
to_mail.append(user.email)
print(user.username, subject, body)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, to_mail)
@task()
def mail_alert_new_collaborator(instance):
project_name = instance.project.name
project_url = settings.SITE_URL + instance.project.get_absolute_url
subject = _(u'You are now a collaborator in %(project_name)s') % {
'project_name': project_name}
body = _(u"""Hi, you've been added as a colaborator in %(project_name)s.\n\n
Check the details at %(project_url)s.\n\n Bye!""") % {'project_name': project_name,
'project_url': project_url}
to_mail = []
to_mail.append(instance.user.email)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, to_mail)
|
thijsdezoete/BrBaFinals
|
poolgame/poolgame/urls.py
|
Python
|
gpl-2.0
| 816
| 0.003676
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'brbappl.views.index', name='index'),
url(r'^done$', 'brbappl.views.done', name='done'),
    url(r'^participate$', 'brbappl.views.participate', name='participate'),
url(r'^admin', include(admin.site.urls)),
url(r'^(?P<contestant>\w+)$', 'brbappl.views.questionnaire', name='questions'),
# url(r'^poolgame/', include('poolgame.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
pyta-uoft/pyta
|
examples/ending_locations/del_name.py
|
Python
|
gpl-3.0
| 6
| 0
|
del x
| |
jeremiedecock/snippets
|
python/pyqt/pyqt5/widget_QTableView_edit_print_signal_when_data_changed.py
|
Python
|
mit
| 2,784
| 0.002514
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref: http://doc.qt.io/qt-5/modelview.html#2-1-a-read-only-table
import sys
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView
class MyData:
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [["hello" for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
return value
def set_data(self, row_index, column_index, value):
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data  # DON'T CALL THIS ATTRIBUTE "data", A QAbstractItemModel METHOD ALREADY HAS THIS NAME (model.data(index, role)) !!!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole:
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
                # The following line is necessary e.g. to dynamically update the QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
def changedCallback():
print("changed")
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
my_model.dataChanged.connect(changedCallback)
my_model.rowsInserted.connect(changedCallback)
my_model.rowsRemoved.connect(changedCallback)
    table_view.setModel(my_model)
table_view.show()
    # The main loop of the application; event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a Python keyword.
    exit_code = app.exec_()
    # The sys.exit() method ensures a clean exit; the environment is informed
    # how the application ended.
    sys.exit(exit_code)
|
ayepezv/GAD_ERP
|
openerp/report/render/rml2pdf/utils.py
|
Python
|
gpl-3.0
| 6,163
| 0.008924
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
return (s or '').replace('&', '&').replace('<', '<').replace('>', '>')
def xml2str(s):
return (s or '').replace('&','&').replace('<','<').replace('>','>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in safe_eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
safe_eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.info('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = safe_eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.info('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
safe_eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.info('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = safe_eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.info('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
def _process_text(self, txt):
"""Translate ``txt`` according to the language in the local context,
replace dynamic ``[[expr]]`` with their real value, then escape
the result for XML.
:param str txt: original text to translate (must NOT be XML-escaped)
:return: translated text, with dynamic expressions evaluated and
with special XML characters escaped (``&,<,>``).
"""
if not self.localcontext:
return str2xml(txt)
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
to_translate = tools.ustr(sps.pop(0))
result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
if sps:
txt = None
try:
expr = sps.pop(0)
txt = safe_eval(expr, self.localcontext)
if txt and isinstance(txt, basestring):
txt = tools.ustr(txt)
except Exception:
_logger.info("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
if isinstance(txt, basestring):
result += txt
elif txt and (txt is not None) and (txt is not False):
result += ustr(txt)
return str2xml(result)
def text_get(node):
return ''.join([ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
if size.find('.') == -1:
decimal_point = '.'
try:
decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
except Exception:
decimal_point = locale.localeconv()['decimal_point']
size = size.replace(decimal_point, '.')
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
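# Illustrative behaviour of unit_get (constants come from reportlab.lib.units):
#   unit_get('2cm') == 2 * reportlab.lib.units.cm
#   unit_get('10')  == 10.0
# Unrecognised or empty strings fall through and return False.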
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = tools.ustr(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
elif dict[key] == 'float' :
res[key] = float(node.get(key))
return res
|
shwetams/arm-samples-py
|
arm_basic_samples/arm_settings/migrations/0002_authsettings_user_id.py
|
Python
|
mit
| 600
| 0.001667
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('arm_settings', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='authsettings',
name='user_id',
field=models.ForeignKey(related_name='user_id', default=1, to=settings.AUTH_USER_MODEL, unique=True),
preserve_default=False,
),
]
|
daaugusto/ppi
|
script/domination-many.py
|
Python
|
gpl-3.0
| 2,043
| 0.026921
|
#!/usr/bin/env python
import sys
import argparse
def less(a, b):
return a < b
def greater(a, b):
    return a > b
better = less
def dominated(x, y):
if len(x) != len(y):
print "Error: size mismatch!"
return None
dominated = False
for i,j in zip(x,y):
if better(i, j):
return False
if better(j, i):
dominated = True
return dominated
def dominates(x, y):
return dominated(y, x)
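# Worked example under the default minimisation ordering (better = less):
#   dominated([1, 1], [0, 1]) -> True   (y is no worse anywhere, better in x[0])
#   dominated([1, 0], [0, 1]) -> False  (each point is better in one coordinate)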
# Usage:
# echo '1,0,1[;0,0,0...]' | ./domination.py [-h] -t {less,greater} -a {dominated,dominates} '0,1,0[;1,1,1...]'
# Reading the input (accepts either '1,0,0;0,0,0;1,1,1' or '1,0,0;0,0,0\n1,1,1', for instance)
tmp = [i.split(';') for i in sys.stdin.read().splitlines()]
points_dataset = []
for i in tmp:
for j in i:
if len(j) == 0: continue
points_dataset.append([float(k) for k in j.split(',')])
#print points_dataset
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", required=True, choices=['less','greater'], help="Comparison type: less or greater")
parser.add_argument("-a", "--action", required=True, choices=['dominated','dominates'], help="Action type: dominated or dominates")
parser.add_argument("point", help="the point to compare against the dataset of points; format: 'x1,...,xN'")
args = parser.parse_args()
if args.type=='less':
better = less
elif args.type=='greater':
better = greater
if len(args.point.split(';')) > 1:
raise Exception("Only one point is accepted! For instance: domination.py '0,1,0'")
point = [float(i) for i in args.point.split(',')]
result = None
exit = 1 # Either the point does not dominate a single one or it isn't dominated by one of them
if args.action=='dominated':
for y in points_dataset:
result = dominated(point,y)
if result: exit = 0
print "Is", point, "dominated by", y, "? ->", result
elif args.action=='dominates':
for y in points_dataset:
result = dominates(point,y)
if result: exit = 0
print "Does", point, "dominate", y, "? ->", result
sys.exit(exit)
|
chrisantonellis/pymongo_basemodel
|
test/test_model.py
|
Python
|
mit
| 49,483
| 0.000364
|
import sys; sys.path.append("../") # noqa
import unittest
import copy
import pymongo
import datetime
import bson
from baemo.connection import Connections
from baemo.delimited import DelimitedDict
from baemo.references import References
from baemo.projection import Projection
from baemo.entity import Entity
from baemo.exceptions import ModelTargetNotSet
from baemo.exceptions import ModelNotUpdated
from baemo.exceptions import ModelNotFound
from baemo.exceptions import ModelNotDeleted
from baemo.exceptions import ProjectionTypeMismatch
from baemo.exceptions import DereferenceError
class TestModel(unittest.TestCase):
def setUp(self):
global connection_name, collection_name, TestModel
connection_name = "baemo"
collection_name = "{}_{}".format(self.__class__.__name__, self._testMethodName)
connection = pymongo.MongoClient(connect=False)[connection_name]
Connections.set(connection_name, connection)
TestModel, TestCollection = Entity("TestModel", {
"connection": connection_name,
"collection": collection_name
})
def tearDown(self):
global connection_name, collection_name
Connections.get(connection_name).drop_collection(collection_name)
# __init__
def test___init____no_params(self):
m = TestModel()
self.assertEqual(m.id_attribute, "_id")
self.assertEqual(type(m.collection), str)
self.assertEqual(type(m.target), DelimitedDict)
self.assertEqual(type(m.attributes), DelimitedDict)
self.assertEqual(type(m.references), References)
self.assertEqual(type(m.find_projection), Projection)
self.assertEqual(type(m.get_projection), Projection)
self.assertEqual(m._delete, False)
self.assertEqual(type(m.original), DelimitedDict)
self.assertEqual(type(m.updates), DelimitedDict)
def test___init____dict_target_param(self):
m = TestModel({"k": "v"})
self.assertEqual(m.target.get(), {"k": "v"})
def test___init____target_param(self):
m = TestModel("value")
self.assertEqual(m.target.get(), {"_id": "value"})
# __copy__
def test___copy__(self):
m1 = TestModel({"k": "v"})
m2 = copy.copy(m1)
self.assertIsNot(m1, m2)
self.assertEqual(m1.attributes, m2.attributes)
m1.attributes["k"] = "bar"
self.assertEqual(m1.attributes, m2.attributes)
# __deepcopy__
def test___deepcopy__(self):
m1 = TestModel({"k": "v"})
m2 = copy.deepcopy(m1)
self.assertIsNot(m1, m2)
self.assertEqual(m1.attributes, m2.attributes)
m1.attributes["k"] = "bar"
self.assertNotEqual(m1.attributes, m2.attributes)
# __eq__
def test___eq____same_attributes__returns_True(self):
m1 = TestModel()
m1.attributes({"k": "v"})
m2 = TestModel()
m2.attributes({"k": "v"})
self.assertTrue(m1 == m2)
def test___eq____different_attributes__returns_False(self):
m1 = TestModel()
m1.attributes({"foo": "bar"})
m2 = TestModel()
m2.attributes({"baz": "qux"})
self.assertFalse(m1 == m2)
def test___eq____different_types__returns_False(self):
m1 = TestModel()
m1.attributes({"k": "v"})
m2 = object()
self.assertFalse(m1 == m2)
# __ne__
def test___ne____same_attributes__returns_False(self):
m1 = TestModel()
m1.attributes({"k": "v"})
m2 = TestModel()
m2.attributes({"k": "v"})
self.assertFalse(m1 != m2)
def test___ne____different_attributes__returns_True(self):
m1 = TestModel()
m1.attributes({"foo": "bar"})
m2 = TestModel()
m2.attributes({"baz": "qux"})
self.assertTrue(m1 != m2)
def test___ne____different_types__returns_True(self):
m1 = TestModel({"foo": "bar"})
m2 = object()
self.assertTrue(m1 != m2)
# set_target
def test_set_target__dict_param(self):
m = TestModel()
m.set_target({"k": "v"})
self.assertEqual(m.target.get(), {"k": "v"})
def test_set_target__string_param(self):
m = TestModel()
m.set_target("foo")
self.assertEqual(m.target.get(), {"_id": "foo"})
# get_target
def test_get_target__target_not_set__returns_None(self):
m = TestModel()
self.assertEqual(m.get_target(), None)
def test_get_target__target_set__returns_dict(self):
m = TestModel()
m.target = DelimitedDict({"k": "v"})
self.assertEqual(m.get_target(), {"k": "v"})
def test_get_id__id_not_set__returns_None(self):
m = TestModel()
self.assertEqual(m.get_id(), None)
def test_get_id__id_set__returns_id_type(self):
m = TestModel()
m.target = DelimitedDict({"_id": "foo"})
self.assertEqual(m.get_id(), "foo")
# find
def test_find(self):
original = TestModel()
original.attributes({"k": "v"})
original_id = original.save().get(original.id_attribute)
m = TestModel()
m.target({original.id_attribute: original_id})
m.find()
self.assertIn("k", m.attributes)
self.assertEqual(m.attributes["k"], "v")
def test_find__raises_ModelTargetNotSet(self):
m = TestModel()
with self.assertRaises(ModelTargetNotSet):
m.find()
def test_find__default_find_projection(self):
global connection_name, collection_name
TestModel, TestCollection = Entity("Test", {
"connection": connection_name,
"collection": collection_name,
"find_projection": {
"k1": 0
}
})
original = TestModel()
original.attributes({"k1": "v", "k2": "v", "k3": "v"})
original_id = original.save().attributes[TestModel().id_attribute]
m = TestModel()
m.target({original.id_attribute: original_id})
m.find()
self.assertEqual(m.attributes.get(), {
TestModel.id_attribute: original_id,
"k2": "v",
"k3": "v"
})
def test_find__projection_param(self):
original = TestModel()
original.attributes({"k1": "v", "k2": "v", "k3": "v"})
original_id = original.save().attributes[TestModel.id_attribute]
m = TestModel()
m.target({original.id_attribute: original_id})
m.find(projection={"k1": 0})
self.assertEqual(m.attributes.get(), {
original.id_attribute: original_id,
"k2": "v",
"k3": "v"
})
def test_find__default_find_projection__projection_param(self):
TestModel, TestCollection = Entity("Test", {
"connection": connection_name,
"collection": collection_nam
|
e,
"find_projection": {
"k1": 0
}
})
original = TestModel()
original.attributes({"k1
|
": "v", "k2": "v", "k3": "v"})
original_id = original.save().attributes[TestModel.id_attribute]
m = TestModel()
m.target({original.id_attribute: original_id})
m.find(projection={"k3": 0}, default=True)
self.assertEqual(m.attributes.get(), {
original.id_attribute: original_id, "k2": "v"
})
self.tearDown()
def test_find__pre_find_hook(self):
class ModelAbstract(object):
def pre_find_hook(self):
self.target({"k": "v"})
TestModel, TestCollection = Entity("Test", {
"connection": connection_name,
"collection": collection_name,
"find_projection": {
"k1": 0
},
"bases": ModelAbstract
})
m = TestModel()
m.target({"foo": "baz"})
try:
m.find()
except:
pass
self.assertEqual(m.target.get(), {"k": "v"})
def test_find__post_find_hook(self):
class ModelAbstract(object):
def post_find_hook(self):
self.target({"k": "v"})
TestModel, TestCollection
|
eunchong/build
|
scripts/master/master_config.py
|
Python
|
bsd-3-clause
| 7,620
| 0.005906
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import uuid
from buildbot.changes.filter import ChangeFilter
from buildbot.scheduler import Dependent
from buildbot.scheduler import Nightly
from buildbot.scheduler import Periodic
from buildbot.schedulers.basic import SingleBranchScheduler as Scheduler
from buildbot.scheduler import Triggerable
from master import slaves_list
from master.url_poller import URLPoller
def GetGSUtilUrl(gs_bucket, root_folder):
return 'gs://%s/%s' % (gs_bucket, root_folder)
class Helper(object):
def __init__(self, defaults):
self._defaults = defaults
self._builders = []
self._factories = {}
self._schedulers = {}
def Builder(self, name, factory, gatekeeper=None, scheduler=None,
builddir=None, auto_reboot=True, notify_on_missing=False,
slavebuilddir=None, category=None):
category = category or self._defaults.get('category')
self._builders.append({'name': name,
'factory': factory,
'gatekeeper': gatekeeper,
'schedulers': scheduler.split('|'),
'builddir': builddir,
'category': category,
'auto_reboot': auto_reboot,
'notify_on_missing': notify_on_missing,
'slavebuilddir': slavebuilddir})
def Hourly(self, name, branch, hour='*'):
"""Helper method for the Nightly scheduler."""
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Nightly',
'builders': [],
'branch': branch,
'hour': hour}
def Periodic(self, name, periodicBuildTimer):
"""Helper method for the Periodic scheduler."""
    if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Periodic',
'builders': [],
'periodicBuildTimer': periodicBuildTimer}
def Dependent(self, name, parent):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Dependent',
'parent': parent,
'builders': []}
def Triggerable(self, name):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Triggerable',
'builders': []}
def Factory(self, name, factory):
if name in self._factories:
raise ValueError('Factory %s already exists' % name)
self._factories[name] = factory
def Scheduler(self, name, branch, treeStableTimer=60, categories=None):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Scheduler',
'branch': branch,
'treeStableTimer': treeStableTimer,
'builders': [],
'categories': categories}
def URLScheduler(self, name, url, pollInterval=300, include_revision=False):
self._schedulers[name] = {'type': 'URLScheduler',
'url': url,
'builders': [],
'include_revision': include_revision,
'pollInterval': pollInterval}
def Update(self, c):
global_schedulers = dict((s.name, s) for s in c['schedulers']
if s.name.startswith('global_'))
assert not set(global_schedulers) & set(self._schedulers)
for builder in self._builders:
# Update the schedulers with the builder.
schedulers = builder['schedulers']
if schedulers:
for scheduler in schedulers:
if scheduler in global_schedulers:
global_schedulers[scheduler].builderNames.append(builder['name'])
else:
self._schedulers[scheduler]['builders'].append(builder['name'])
# Construct the category.
categories = []
if builder.get('category', None):
categories.append(builder['category'])
if builder.get('gatekeeper', None):
categories.extend(builder['gatekeeper'].split('|'))
category = '|'.join(categories)
# Append the builder to the list.
new_builder = {'name': builder['name'],
'factory': self._factories[builder['factory']],
'category': category,
'auto_reboot': builder['auto_reboot']}
if builder['builddir']:
new_builder['builddir'] = builder['builddir']
if builder['slavebuilddir']:
new_builder['slavebuilddir'] = builder['slavebuilddir']
c['builders'].append(new_builder)
# Process the main schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Scheduler':
instance = Scheduler(name=s_name,
branch=scheduler['branch'],
treeStableTimer=scheduler['treeStableTimer'],
builderNames=scheduler['builders'],
categories=scheduler['categories'])
scheduler['instance'] = instance
c['schedulers'].append(instance)
elif scheduler['type'] == 'URLScheduler':
ident = str(uuid.uuid4())
c['change_source'].append(
URLPoller(changeurl=scheduler['url'],
pollInterval=scheduler['pollInterval'],
category=ident,
include_revision=scheduler['include_revision']))
instance = Scheduler(name=s_name,
change_filter=ChangeFilter(category=ident),
builderNames=scheduler['builders'])
scheduler['instance'] = instance
c['schedulers'].append(instance)
# Process the dependent schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Dependent':
c['schedulers'].append(
Dependent(s_name,
self._schedulers[scheduler['parent']]['instance'],
scheduler['builders']))
# Process the triggerable schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Triggerable':
c['schedulers'].append(Triggerable(s_name,
scheduler['builders']))
# Process the periodic schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Periodic':
c['schedulers'].append(
Periodic(s_name,
periodicBuildTimer=scheduler['periodicBuildTimer'],
builderNames=scheduler['builders']))
# Process the nightly schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Nightly':
c['schedulers'].append(Nightly(s_name,
branch=scheduler['branch'],
hour=scheduler['hour'],
builderNames=scheduler['builders']))
|
Jonekee/chromium.src
|
tools/telemetry/telemetry/core/command_line.py
|
Python
|
bsd-3-clause
| 3,100
| 0.010645
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import optparse
from telemetry.core import camel_case
class ArgumentHandlerMixIn(object):
"""A structured way to handle command-line arguments.
In AddCommandLineArgs, add command-line arguments.
In ProcessCommandLineArgs, validate them and store them in a private class
variable. This way, each class encapsulates its own arguments, without needing
to pass an arguments object around everywhere.
"""
@classmethod
def AddCommandLineArgs(cls, parser):
"""Override to accept custom command-line arguments."""
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
"""Override to process command-line arguments.
We pass in parser so we can call parser.error()."""
class Command(ArgumentHandlerMixIn):
"""An abstraction for things that run from the command-line."""
@classmethod
def Name(cls):
return camel_case.ToUnderscore(cls.__name__)
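  # e.g. a subclass named RunBenchmark (hypothetical) reports
  # Name() == 'run_benchmark' via camel_case.ToUnderscore.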
@classmethod
def Description(cls):
if cls.__doc__:
return cls.__doc__.splitlines()[0]
else:
return ''
def Run(self, args):
raise NotImplementedError()
@classmethod
def main(cls, args=None):
"""Main method to run this command as a standalone script."""
parser = argparse.ArgumentParser()
cls.AddCommandLineArgs(parser)
args = parser.parse_args(args=args)
cls.ProcessCommandLineArgs(parser, args)
return min(cls().Run(args), 255)
# TODO: Convert everything to argparse.
class OptparseCommand(Command):
  usage = ''
@classmethod
def CreateParser(cls):
return optparse.OptionParser('%%prog %s %s' % (cls.Name(), cls.usage),
description=cls.Description())
def Run(self, args):
raise NotImplementedError()
@classmethod
def main(cls, args=None):
"""Main method to run this command as a standalone script."""
parser = cls.CreateParser()
cls.AddCommandLineArgs(parser)
options, args = parser.parse_args(args=args)
options.positional_args = args
cls.ProcessCommandLineArgs(parser, options)
return min(cls().Run(options), 255)
class SubcommandCommand(Command):
"""Combines Commands into one big command with sub-commands.
E.g. "svn checkout", "svn update", and "svn commit" are separate sub-commands.
Example usage:
class MyCommand(command_line.SubcommandCommand):
commands = (Help, List, Run)
if __name__ == '__main__':
sys.exit(MyCommand.main())
"""
commands = ()
@classmethod
def AddCommandLineArgs(cls, parser):
subparsers = parser.add_subparsers()
for command in cls.commands:
subparser = subparsers.add_parser(
command.Name(), help=command.Description())
subparser.set_defaults(command=command)
command.AddCommandLineArgs(subparser)
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
args.command.ProcessCommandLineArgs(parser, args)
def Run(self, args):
return args.command().Run(args)
|
rsk-mind/rsk-mind-framework
|
tests/dataset/test_dataset_pandas.py
|
Python
|
mit
| 2,486
| 0.000402
|
import os
from nose.tools import assert_equals, assert_items_equal
from rsk_mind.dataset import PandasDataset
from rsk_mind.transformer import *
import pandas as pd
class CustomTransformer(Transformer):
class Feats():
a1 = Feat()
a2 = Feat()
f1 = CompositeFeat(['a1', 'a2'])
def get_a1(self, feat):
return [-float(feat), 'fa']
def get_a2(self, feat):
return [-float(feat)]
def get_f1(self, a1, a2):
return [float(a1) + float(a2)]
class TestPandasDataset:
def __init__(self):
self.path = os.path.join(os.getcwd(), 'tests/files/in.csv')
self.reader = pd.read_table(self.path, sep=',', chunksize=1000)
def tearDown(self):
# delete variables to release memory
del self.path
del self.reader
def test_init(self):
_dataset = PandasDataset(self.reader)
assert_equals(_dataset.reader, self.reader)
assert_equals(_dataset.header, None)
assert_equals(_dataset.rows, None)
assert_items_equal(_dataset.transformed_rows, [])
assert_equals(_dataset.transformer, None)
assert_equals(_dataset.transformed_header, None)
def test_setTransformer(self):
_dataset = PandasDataset(self.reader)
_transformer = CustomTransformer()
_dataset.setTransformer(_transformer)
assert_equals(_dataset.transformer, _transformer)
def test_applyTransformations(self):
_dataset = PandasDataset(self.reader)
_transformer = CustomTransformer()
_dataset.setTransformer(_transformer)
        _header = ['a1_0', 'a1_1', 'a2', 'f1', 'a3', 'a4', 'target']
_rows = [[-0.0, 'fa', -0.0, 0.0, 0, 0, 1], [-1.0, 'fa', -1.0, 2.0, 0, 1, 0], [-1.0, 'fa', -0.0, 1.0, 0, 1, 0]]
_dataset.applyTransformations()
assert_equals(_dataset.transformed_header, _header)
assert_items_equal(_dataset.transformed_rows, _rows)
assert_equals(_dataset.transformer, _transformer)
    def test_applyTransformations_Without_Transformer(self):
_dataset = PandasDataset(self.reader)
_expected_header = ['a1', 'a2', 'a3', 'a4', 'target']
_expected_rows = [[0, 0, 0, 0, 1], [1, 1, 0, 1, 0], [1, 0, 0, 1, 0]]
_dataset.applyTransformations()
assert_equals(_dataset.transformed_header, _expected_header)
assert_items_equal(_dataset.transformed_rows, _expected_rows)
assert_equals(_dataset.transformer, None)
|
foxdog-studios/pyddp
|
tests/messages/client/test_method_message_parser.py
|
Python
|
apache-2.0
| 1,258
| 0.000795
|
# -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
def setUp(self):
self.parser = MethodMessageParser()
def test_parse(self):
id = 'id'
method = 'method'
params = [True, 1.0]
message = self.parser.parse({'msg': 'method', 'id': id,
'method': method, 'params': params})
self.assertEqual(message, MethodMessage(id, method, params))
|
GPflow/GPflow
|
gpflow/models/vgp.py
|
Python
|
apache-2.0
| 14,665
| 0.002524
|
# Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import tensorflow as tf
import gpflow
from .. import posteriors
from ..base import InputData, MeanAndVariance, Parameter, RegressionData
from ..conditionals import conditional
from ..config import default_float, default_jitter
from ..kernels import Kernel
from ..kullback_leiblers import gauss_kl
from ..likelihoods import Likelihood
from ..mean_functions import MeanFunction
from ..utilities import is_variable, triangular, triangular_size
from .model import GPModel
from .training_mixins import InternalDataTrainingLossMixin
from .util import data_input_to_tensor
class VGP_deprecated(GPModel, InternalDataTrainingLossMixin):
r"""
This method approximates the Gaussian process posterior using a multivariate Gaussian.
The idea is that the posterior over the function-value vector F is
approximated by a Gaussian, and the KL divergence is minimised between
the approximation and the posterior.
This implementation is equivalent to SVGP with X=Z, but is more efficient.
The whitened representation is used to aid optimization.
The posterior approximation is
.. math::
q(\mathbf f) = N(\mathbf f \,|\, \boldsymbol \mu, \boldsymbol \Sigma)
"""
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
):
"""
data = (X, Y) contains the input points [N, D] and the observations [N, P]
kernel, likelihood, mean_function are appropriate GPflow objects
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.data = data_input_to_tensor(data)
X_data, _Y_data = self.data
static_num_data = X_data.shape[0]
if static_num_data is None:
q_sqrt_unconstrained_shape = (self.num_latent_gps, None)
else:
q_sqrt_unconstrained_shape = (self.num_latent_gps, triangular_size(static_num_data))
self.num_data = Parameter(tf.shape(X_data)[0], shape=[], dtype=tf.int32, trainable=False)
# Many functions below don't like `Parameter`s:
dynamic_num_data = tf.convert_to_tensor(self.num_data)
self.q_mu = Parameter(
tf.zeros((dynamic_num_data, self.num_latent_gps)),
shape=(static_num_data, num_latent_gps),
)
q_sqrt = tf.eye(dynamic_num_data, batch_shape=[self.num_latent_gps])
self.q_sqrt = Parameter(
q_sqrt,
transform=triangular(),
unconstrained_shape=q_sqrt_unconstrained_shape,
constrained_shape=(num_latent_gps, static_num_data, static_num_data),
)
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.elbo()
def elbo(self) -> tf.Tensor:
r"""
This method computes the variational lower bound on the likelihood,
which is:
E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
with
q(\mathbf f) = N(\mathbf f \,|\, \boldsymbol \mu, \boldsymbol \Sigma)
"""
X_data, Y_data = self.data
num_data = tf.convert_to_tensor(self.num_data)
# Get prior KL.
KL = gauss_kl(self.q_mu, self.q_sqrt)
# Get conditionals
K = self.kernel(X_data) + tf.eye(num_data, dtype=default_float()) * default_jitter()
L = tf.linalg.cholesky(K)
fmean = tf.linalg.matmul(L, self.q_mu) + self.mean_function(X_data) # [NN, ND] -> ND
q_sqrt_dnn = tf.linalg.band_part(self.q_sqrt, -1, 0) # [D, N, N]
L_tiled = tf.tile(tf.expand_dims(L, 0), tf.stack([self.num_latent_gps, 1, 1]))
LTA = tf.linalg.matmul(L_tiled, q_sqrt_dnn) # [D, N, N]
fvar = tf.reduce_sum(tf.square(LTA), 2)
fvar = tf.transpose(fvar)
# Get variational expectations.
var_exp = self.likelihood.variational_expectations(fmean, fvar, Y_data)
return tf.reduce_sum(var_exp) - KL
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
X_data, _Y_data = self.data
mu, var = conditional(
Xnew,
X_data,
self.kernel,
self.q_mu,
q_sqrt=self.q_sqrt,
full_cov=full_cov,
white=True,
)
return mu + self.mean_function(Xnew), var
class VGP_with_posterior(VGP_deprecated):
"""
This is an implementation of VGP that provides a posterior() method that
enables caching for faster subsequent predictions.
"""
def posterior(
self,
precompute_cache: posteriors.PrecomputeCacheType = posteriors.PrecomputeCacheType.TENSOR,
) -> posteriors.VGPPosterior:
"""
Create the Posterior object which contains precomputed matrices for
faster prediction.
precompute_cache has three settings:
- `PrecomputeCacheType.TENSOR` (or `"tensor"`): Precomputes the cached
quantities and stores them as tensors (which allows differentiating
through the prediction). This is the default.
- `PrecomputeCacheType.VARIABLE` (or `"variable"`): Precomputes the cached
quantities and stores them as variables, which allows for updating
their values without changing the compute graph (relevant for AOT
compilation).
- `PrecomputeCacheType.NOCACHE` (or `"nocache"` or `None`): Avoids
          immediate cache computation. This is useful for avoiding extraneous
computations when you only want to call the posterior's
`fused_predict_f` method.
"""
X_data, _Y_data = self.data
return posteriors.VGPPosterior(
self.kernel,
X_data,
self.q_mu,
self.q_sqrt,
mean_function=self.mean_function,
precompute_cache=precompute_cache,
)
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
For backwards compatibility, VGP's predict_f uses the fused (no-cache)
computation, which is more efficient during training.
For faster (cached) prediction, predict directly from the posterior object, i.e.,:
model.posterior().predict_f(Xnew, ...)
"""
return self.posterior(posteriors.PrecomputeCacheType.NOCACHE).fused_predict_f(
Xnew, full_cov=full_cov, full_output_cov=full_output_cov
)
class VGP(VGP_with_posterior):
# subclassed to ensure __class__ == "VGP"
pass
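# A minimal usage sketch (data, kernel and likelihood choices are illustrative,
# not prescribed by this module):
#
#   import numpy as np
#   X = np.random.rand(20, 1)
#   Y = np.sin(10 * X) + 0.1 * np.random.randn(20, 1)
#   model = VGP((X, Y), kernel=gpflow.kernels.SquaredExponential(),
#               likelihood=gpflow.likelihoods.Gaussian())
#   # ... optimise model.training_loss over model.trainable_variables, then:
#   mean, var = model.predict_f(np.linspace(0, 1, 50)[:, None])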
def update_vgp_data(vgp: VGP_deprecated, new_data: RegressionData) -> None:
"""
Set the data on the given VGP model, and update its variational parameters.
As opposed to many of the other models the VGP has internal parameters whose shape depends on
    the shape of the data. This function updates the internal data of the given vgp, and updates
    the variational parameters to fit.
    This function requires that the input :param:`vgp` was created with :class:`tf.Variable`s for
:param:`data`.
"""
old_X_data, old_Y_data = vgp.data
assert is_variable(old_X_data) and is_variable(
old_Y_data
), "update_vgp_data requi
|
bveina/TempSensorNode
|
main.py
|
Python
|
mit
| 26
| 0
|
from brv1 import *
main()
| ||
chembl/the-S3-amongos
|
the-S3-amongos.py
|
Python
|
apache-2.0
| 21,343
| 0.014431
|
#!/usr/bin/env python
# Copyright 2015 The ChEMBL group.
# Author: Nathan Dedman <ndedman@ebi.ac.uk>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server, using Pymongo, MongoDB and Tornado.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
Use s3cmd command line tool:
s3cmd mb s3://wibble
s3cmd put mytestfile.txt s3://wibble
s3cmd rb s3://wibble --force --recursive
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
import logging
import glob
import getpass
import re
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
from pymongo import MongoClient
from pymongo import ASCENDING
import bson
from bson.binary import Binary
from tornado.log import enable_pretty_logging
def start(port,debug=False):
"""Starts the pymongo S3 server"""
application = mongoS3(debug)
http_server = httpserver.HTTPServer(application)
# Utilize all CPUs
if not debug:
http_server.bind(port)
http_server.start(0)
else:
enable_pretty_logging()
http_server.listen(port)
ioloop.IOLoop.current().start()
class mongoS3(web.Application):
"""Implementation of an S3-like storage server based on MongoDB using PyMongo
* Added compatibility with the s3cmd command line utility
* File names of arbitrary length are supported (stored as meta data)
    * Multipart upload supported
"""
def __init__(self, debug=False):
web.Application.__init__(self, [
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
(r"/ping",StatusHandler),
(r'/(favicon.ico)', web.StaticFileHandler, {"path": ""}),
# s3cmd
('http://s3.amazonaws.com/', s3cmdlHandler),
(r"(http://.+.s3.amazonaws.com.*)", s3cmdlHandler),
],debug=debug)
# Lazy connect the client
self.client = MongoClient(connect=False)
self.S3 = self.client.S3
self.metadata = self.client.metadata
class StatusHandler(web.RequestHandler):
SUPPORTED_METHODS = ("GET")
# Send a simple 'PONG' to show we're alive!
def get(self):
self.set_header('Content-Type', 'application/json')
self.finish({'response':'pong','UTC':datetime.datetime.now().isoformat()})
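# Example exchange (timestamp illustrative):
#   GET /ping  ->  {"response": "pong", "UTC": "2015-06-01T12:00:00.123456"}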
class BaseRequestHandler(web.RequestHandler):
SUPPORTED_METHODS = ("PUT", "GET", "DELETE", "HEAD","POST","OPTIONS")
def _get_bucket_names(self):
return self.application.S3.collection_names(include_system_collections=False)
def render_xml(self, value,**kwargs):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = value.keys()[0]
parts = []
parts.append('<' + escape.utf8(name) +' xmlns="http://s3.amazonaws.com/doc/2006-03-01/">')
parts.append('<Owner><ID>'+getpass.getuser()+'</ID><DisplayName>'+getpass.getuser()+'</DisplayName></Owner>')
self._render_parts(value.values()[0], parts)
parts.append('</' + escape.utf8(name) + '>')
if 'code' in kwargs.keys():
self.set_status(kwargs['code'])
self.finish('<?xml version="1.0" encoding="UTF-8"?>' +
''.join(parts))
def _render_parts(self, value, parts=[]):
if isinstance(value, (unicode, bytes)):
parts.append(escape.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
        elif isinstance(value, dict):
for name, subvalue in value.iteritems():
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + escape.utf8(name) + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + escape.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
def _error(self,**kwargs):
bucket_name = object_name = None
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if hasattr(self,'object_name'):
object_name = self.object_name
s3errorcodes_bucket = {'NSK':'NoSuchKey','NSB':'NoSuchBucket','BNE':'BucketNotEmpty',"BAE":"BucketAlreadyExists"}
s3errorcodes_object = {'NSB':'NoSuchBucket','NSK':'NoSuchKey'}
errormessage_object = {404:'The specified key does not exist.'}
errormessage_bucket = {404:{'NSB':'The specified bucket does not exist.'},409:{'BNE':'The bucket you tried to delete is not empty.','BAE':'The requested bucket name is not available. Please select a different name and try again.'}}
if self.__class__.__name__== 'BucketHandler':
s3errorcodes = s3errorcodes_bucket
errormessage = errormessage_bucket
bucket_name = self.bucket_name
object_name = None
if self.__class__.__name__== 'ObjectHandler':
s3errorcodes = s3errorcodes_object
errormessage = errormessage_object
if hasattr(self,'s3cmd'):
returnDict = {'Error':{}}
errorDict = returnDict['Error']
errorDict['Code'] = s3errorcodes[kwargs['s3code']]
if self.__class__.__name__ == 'BucketHandler':
errorDict['Message'] = errormessage[kwargs['code']][kwargs['s3code']]
else:
errorDict['Message'] = errormessage[kwargs['code']]
errorDict['Resource'] = '/%s/%s' % (bucket_name,object_name)
self.render_xml(returnDict,code=kwargs['code'])
else:
raise web.HTTPError(kwargs['code'])
class s3cmdlHandler(web.RequestHandler):
def prepare(self):
# Handle s3 urls here
self.s3cmd = True
if self.application.settings['debug']:
print "%s %s" % (self.__class__.__name__, self.request.method)
s3match = re.match('(?:http://)(.+)(?:.s3.amazonaws.com\/)(.*)',self.request.uri)
self.prefix = self.get_argument("prefix", u"")
self
|
tomvansteijn/openradar
|
openradar/periods.py
|
Python
|
gpl-3.0
| 1,780
| 0
|
# -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
"""
Period looper for atomic scripts.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import datetime
import math
import re
from raster_store import utils
# period parsing
PERIOD = re.compile('([0-9]{1,2})([mhdw])')
UNITS = {'m': 'minutes', 'h': 'hours', 'd': 'days', 'w': 'weeks'}
def parse(text):
"""
Return start, stop tuple.
text can be:
start/stop: 2003/2004
start: 2003 - now
period: 2d - now
"""
if '/' in text:
return map(utils.parse_datetime, text.split('/'))
now = datetime.datetime.utcnow()
match = PERIOD.match(text)
if match:
value, unit = match.groups()
delta = datetime.timedelta(**{UNITS[unit]: int(value)})
return now - delta, now
return utils.parse_datetime(text), now
class Period(object):
""" Period looper. """
def __init__(self, text):
period = parse(text)
# init
self.step = datetime.timedelta(minutes=5)
# snap
ref = datetime.datetime(2000, 1, 1)
step = self.step.total_seconds()
start = step * math.ceil((period[0] - ref).total_seconds() / step)
stop = step * math.floor((period[1] - ref).total_seconds() / step)
self.start = ref + datetime.timedelta(seconds=start)
self.stop = ref + datetime.timedelta(seconds=stop)
def __iter__(self):
""" Return generator of datetimes. """
now = self.start
while now <= self.stop:
yield now
now += self.step
def __repr__(self):
return '{} - {}'.format(self.start, self.stop)
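# Hedged usage sketch (not part of the original module): iterate every
# 5-minute timestamp of the last two days, snapped to the 5-minute grid by
# Period.__init__ above. Assumes the raster_store dependency of parse()
# is importable.
if __name__ == '__main__':
    period = Period('2d')
    print(period)  # e.g. "2015-01-01 12:00:00 - 2015-01-03 11:55:00"
    for timestamp in period:
        print(timestamp)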
|
mikalstill/nova
|
nova/tests/unit/objects/test_instance_action.py
|
Python
|
apache-2.0
| 16,824
| 0.000178
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import six
from nova.db import api as db
from nova.objects import instance_action
from nova import test
from nova.tests.unit.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
fake_action = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'action': 'fake-action',
'instance_uuid': uuids.instance,
'request_id': 'fake-request',
'user_id': 'fake-user',
'project_id': 'fake-project',
'start_time': NOW,
'finish_time': None,
'message': 'foo',
}
fake_event = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'event': 'fake-event',
'action_id': 123,
'start_time': NOW,
'finish_time': None,
'result': 'fake-result',
'traceback': 'fake-tb',
'host': 'fake-host',
}
class _TestInstanceActionObject(object):
@mock.patch.object(db, 'action_get_by_request_id')
def test_get_by_request_id(self, mock_get):
context = self.context
mock_get.return_value = fake_action
action = instance_action.InstanceAction.get_by_request_id(
context, 'fake-uuid', 'fake-request')
self.compare_obj(action, fake_action)
mock_get.assert_called_once_with(context,
'fake-uuid', 'fake-request')
def test_pack_action_start(self):
values = instance_action.InstanceAction.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
self.assertEqual(values['request_id'], self.context.request_id)
self.assertEqual(values['user_id'], self.context.user_id)
self.assertEqual(values['project_id'], self.context.project_id)
self.assertEqual(values['instance_uuid'], 'fake-uuid')
self.assertEqual(values['action'], 'fake-action')
self.assertEqual(values['start_time'].replace(tzinfo=None),
self.context.timestamp)
def test_pack_action_finish(self):
self.useFixture(utils_fixture.TimeFixture(NOW))
values = instance_action.InstanceAction.pack_action_finish(
self.context, 'fake-uuid')
self.assertEqual(values['request_id'], self.context.request_id)
self.assertEqual(values['instance_uuid'], 'fake-uuid')
self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW)
@mock.patch.object(db, 'action_start')
def test_action_start(self, mock_start):
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
mock_start.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action', want_result=True)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'action_start')
def test_action_start_no_result(self, mock_start):
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
mock_start.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action', want_result=False)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(action)
@mock.patch.object(db, 'action_finish')
def test_action_finish(self, mock_finish):
self.useFixture(utils_fixture.TimeFixture(NOW))
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_finish(
self.context, 'fake-uuid')
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'action_finish')
def test_action_finish_no_result(self, mock_finish):
self.useFixture(utils_fixture.TimeFixture(NOW))
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_finish(
self.context, 'fake-uuid')
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=False)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(action)
@mock.patch.object(db, 'action_finish')
@mock.patch.object(db, 'action_start')
def test_finish(self, mock_start, mock_finish):
self.useFixture(utils_fixture.TimeFixture(NOW))
expected_packed_action_start = {
'request_id': self.context.request_id,
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'instance_uuid': uuids.instance,
'action': 'fake-action',
'start_time': self.context.timestamp,
'updated_at': self.context.timestamp,
}
expected_packed_action_finish = {
'request_id': self.context.request_id,
'instance_uuid': uuids.instance,
'finish_time': NOW,
'updated_at': NOW,
}
mock_start.return_value = fake_action
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, uuids.instance, 'fake-action')
action.finish()
mock_start.assert_called_once_with(self.context,
expected_packed_action_start)
mock_finish.assert_called_once_with(self.context,
expected_packed_action_finish)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'actions_get')
def test_get_list(self, mock_get):
fake_actions = [dict(fake_action, id=1234),
dict(fake_action, id=5678)]
mock_get.return_value = fake_actions
obj_list = instance_action.InstanceActionList.get_by_instance_uuid(
self.context, 'fake-uuid')
for index, action in enumerate(obj_list):
self.compare_obj(action, fake_actions[index])
mock_get.assert_called_once_with(self.context, 'fake-uuid', None,
None, None)
class TestInstanceActionObject(test_objects._LocalTest,
_TestInstanceActionObject):
pass
class TestRemoteInstanceActionObject(test_objects._RemoteTest,
_TestInstanceActionObject):
pass
class _TestInstanceActionEventObject(object):
@mock.patch.object(db, 'action_event_get_by_id')
def test_get_by_id(self, mock_get):
mock_get.return_value = fake_event
event = instance_action.InstanceActionEvent.get_by_id(
self.context, 'fake-action-id', 'fake-event-id')
self.compare_obj(event, fake_event)
mock_get.assert
|
jpablobr/emacs.d
|
vendor/misc/emacs-skype/build/Skype4Py/Skype4Py/api/posix_x11.py
|
Python
|
gpl-3.0
| 17,077
| 0.001581
|
"""
Low level *Skype for Linux* interface implemented using *XWindows messaging*.
Uses direct *Xlib* calls through *ctypes* module.
This module handles the options that you can pass to `Skype.__init__`
for Linux machines when the transport is set to *X11*.
No further options are currently supported.
Warning PyGTK framework users
=============================
The multithreaded architecture of Skype4Py requires a special treatment
if the Xlib transport is combined with PyGTK GUI framework.
The following code has to be called at the top of your script, before
PyGTK is even imported.
.. python::
from Skype4Py.api.posix_x11 import threads_init
threads_init()
This function enables multithreading support in Xlib and GDK. If not done
here, this is enabled for Xlib library when the `Skype` object is instantiated.
If your script imports the PyGTK module, doing this so late may lead to a
segmentation fault when the GUI is shown on the screen.
A remedy is to enable the multithreading support before PyGTK is imported
by calling the ``threads_init`` function.
"""
__docformat__ = 'restructuredtext en'
import sys
import threading
import os
from ctypes import *
from ctypes.util import find_library
import time
import logging
from Skype4Py.api import Command, SkypeAPIBase, \
timeout2float, finalize_opts
from Skype4Py.enums import *
from Skype4Py.errors import SkypeAPIError
__all__ = ['SkypeAPI', 'threads_init']
# The Xlib Programming Manual:
# ============================
# http://tronche.com/gui/x/xlib/
# some Xlib constants
PropertyChangeMask = 0x400000
PropertyNotify = 28
ClientMessage = 33
PropertyNewValue = 0
PropertyDelete = 1
# some Xlib types
c_ulong_p = POINTER(c_ulong)
DisplayP = c_void_p
Atom = c_ulong
AtomP = c_ulong_p
XID = c_ulong
Window = XID
Bool = c_int
Status = c_int
Time = c_ulong
c_int_p = POINTER(c_int)
# should the structures be aligned to 8 bytes?
align = (sizeof(c_long) == 8 and sizeof(c_int) == 4)
# some Xlib structures
class XClientMessageEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('pad2', c_int),
('data', c_char * 20)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('data', c_char * 20)]
class XPropertyEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int),
('pad2', c_int)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int)]
class XErrorEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
else:
_fields_ = [('type', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
class XEvent(Union):
if align:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
else:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
XEventP = POINTER(XEvent)
if getattr(sys, 'skype4py_setup', False):
# we get here if we're building docs; to let the module import without
# exceptions, we emulate the X11 library using a class:
class X(object):
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
pass
x11 = X()
else:
# load X11 library (Xlib)
libpath = find_library('X11')
if not libpath:
raise ImportError('Could not find X11 library')
x11 = cdll.LoadLibrary(libpath)
del libpath
# setup Xlib function prototypes
x11.XCloseDisplay.argtypes = (DisplayP,)
x11.XCloseDisplay.restype = None
x11.XCreateSimpleWindow.argtypes = (DisplayP, Window, c_int, c_int, c_uint,
c_uint, c_uint, c_ulong, c_ulong)
x11.XCreateSimpleWindow.restype = Window
x11.XDefaultRootWindow.argtypes = (DisplayP,)
x11.XDefaultRootWindow.restype = Window
x11.XDeleteProperty.argtypes = (DisplayP, Window, Atom)
x11.XDeleteProperty.restype = None
x11.XDestroyWindow.argtypes = (DisplayP, Window)
x11.XDestroyWindow.restype = None
x11.XFree.argtypes = (c_void_p,)
x11.XFree.restype = None
x11.XGetAtomName.argtypes = (DisplayP, Atom)
x11.XGetAtomName.restype = c_void_p
x11.XGetErrorText.argtypes = (DisplayP, c_int, c_char_p, c_int)
x11.XGetErrorText.restype = None
x11.XGetWindowProperty.argtypes = (DisplayP, Window, Atom, c_long, c_long, Bool,
Atom, AtomP, c_int_p, c_ulong_p, c_ulong_p, POINTER(POINTER(Window)))
x11.XGetWindowProperty.restype = c_int
x11.XInitThreads.argtypes = ()
x11.XInitThreads.restype = Status
x11.XInternAtom.argtypes = (DisplayP, c_char_p, Bool)
x11.XInternAtom.restype = Atom
x11.XNextEvent.argtypes = (DisplayP, XEventP)
x11.XNextEvent.restype = None
x11.XOpenDisplay.argtypes = (c_char_p,)
x11.XOpenDisplay.restype = DisplayP
x11.XPending.argtypes = (DisplayP,)
x11.XPending.restype = c_int
x11.XSelectInput.argtypes = (DisplayP, Window, c_long)
x11.XSelectInput.restype = None
x11.XSendEvent.argtypes = (DisplayP, Window, Bool, c_long, XEventP)
x11.XSendEvent.restype = Status
x11.XLockDisplay.argtypes = (DisplayP,)
x11.XLockDisplay.restype = None
x11.XUnlockDisplay.argtypes = (DisplayP,)
x11.XUnlockDisplay.restype = None
def threads_init(gtk=True):
"""Enables multithreading support in Xlib and PyGTK.
See the module docstring for more info.
:Parameters:
gtk : bool
May be set to False to skip the PyGTK module.
"""
# enable X11 multithreading
x11.XInitThreads()
if gtk:
from gtk.gdk import threads_init
threads_init()
class SkypeAPI(SkypeAPIBase):
def __init__(self, opts):
self.logger = logging.getLogger('Skype4Py.api.posix_x11.SkypeAPI')
SkypeAPIBase.__init__(self)
finalize_opts(opts)
# initialize threads if not done already by the user
threads_init(gtk=False)
# init Xlib display
self.disp = x11.XOpenDisplay(None)
if not self.disp:
raise SkypeAPIError('Could not open XDisplay')
self.win_root = x11.XDefaultRootWindow(self.disp)
self.win_self = x11.XCreateSimpleWindow(self.disp, self.win_root,
100, 100, 100, 100, 1, 0, 0)
x11.XSelectInpu
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/data/kinetics/common.py
|
Python
|
mit
| 16,818
| 0.003687
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and functions that are used by multiple modules
in this subpackage.
"""
import itertools
import logging
import warnings
from rmgpy.data.base import LogicNode
from rmgpy.reaction import Reaction
from rmgpy.molecule import Group, Molecule
from rmgpy.species import Species
from rmgpy.exceptions import DatabaseError, KineticsError
################################################################################
def saveEntry(f, entry):
"""
Save an `entry` in the kinetics database by writing a string to
the given file object `f`.
"""
from rmgpy.cantherm.output import prettify
def sortEfficiencies(efficiencies0):
efficiencies = {}
for mol, eff in efficiencies0.iteritems():
if isinstance(mol, str):
# already in SMILES string format
smiles = mol
else:
smiles = mol.toSMILES()
efficiencies[smiles] = eff
keys = efficiencies.keys()
keys.sort()
return [(key, efficiencies[key]) for key in keys]
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
if entry.label != '':
f.write(' label = "{0}",\n'.format(entry.label))
#Entries for kinetic rules, libraries, training reactions
#and depositories will have a Reaction object for its item
if isinstance(entry.item, Reaction):
#Write out additional data if depository or library
#kinetic rules would have a Group object for its reactants instead of Species
if isinstance(entry.item.reactants[0], Species):
# Add degeneracy if the reaction is coming from a depository or kinetics library
f.write(' degeneracy = {0:.1f},\n'.format(entry.item.degeneracy))
if entry.item.duplicate:
f.write(' duplicate = {0!r},\n'.format(entry.item.duplicate))
if not entry.item.reversible:
f.write(' reversible = {0!r},\n'.format(entry.item.reversible))
#Entries for groups will have a group or logicNode for its item
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
elif isinstance(entry.item, LogicNode):
f.write(' group = "{0}",\n'.format(entry.item))
else:
raise DatabaseError("Encountered unexpected item of type {0} while saving database.".format(entry.item.__class__))
# Write kinetics
if isinstance(entry.data, str):
f.write(' kinetics = "{0}",\n'.format(entry.data))
elif entry.data is not None:
efficiencies = None
if hasattr(entry.data, 'efficiencies'):
efficiencies = entry.data.efficiencies
entry.data.efficiencies = dict(sortEfficiencies(entry.data.efficiencies))
kinetics = prettify(repr(entry.data))
kinetics = ' kinetics = {0},\n'.format(kinetics.replace('\n', '\n '))
f.write(kinetics)
if hasattr(entry.data, 'efficiencies'):
entry.data.efficiencies = efficiencies
else:
f.write(' kinetics = None,\n')
# Write reference
if entry.reference is not None:
reference = entry.reference.toPrettyRepr()
lines = reference.splitlines()
f.write(' reference = {0}\n'.format(lines[0]))
for line in lines[1:-1]:
f.write(' {0}\n'.format(line))
f.write(' ),\n'.format(lines[0]))
if entry.referenceType != "":
f.write(' referenceType = "{0}",\n'.format(entry.referenceType))
if entry.rank is not None:
f.write(' rank = {0},\n'.format(entry.rank))
if entry.shortDesc.strip() !='':
f.write(' shortDesc = u"""')
try:
f.write(entry.shortDesc.encode('utf-8'))
except:
f.write(entry.shortDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
if entry.longDesc.strip() !='':
f.write(' longDesc = \n')
f.write('u"""\n')
try:
f.write(entry.longDesc.strip().encode('utf-8') + "\n")
except:
f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
f.write(')\n\n')
def filter_reactions(reactants, products, reactionList):
"""
Remove any reactions from the given `reactionList` whose reactants do
not involve all the given `reactants` or whose products do not involve
all the given `products`. This method checks both forward and reverse
directions, and only filters out reactions that don't match either.
reactants and products can be either molecule or species objects
"""
warnings.warn("The filter_reactions method is no longer used and may be removed in a future version.", DeprecationWarning)
# Convert from molecules to species and generate resonance isomers.
reactants = ensure_species(reactants, resonance=True)
products = ensure_species(products, resonance=True)
reactions = reactionList[:]
for reaction in reactionList:
# Forward direction
reactants0 = [r for r in reaction.reactants]
for reactant in reactants:
for reactant0 in reactants0:
if reactant.isIsomorphic(reactant0):
reactants0.remove(reactant0)
break
products0 = [p for p in reaction.products]
for product in products:
for product0 in products0:
if product.isIsomorphic(product0):
products0.remove(product0)
break
forward = not (len(reactants0) != 0 or len(products0) != 0)
# Reverse direction
reactants0 = [r for r in reaction.products]
for reactant in reactants:
for reactant0 in reactants0:
if reactant.isIsomorphic(reactant0):
reactants0.remove(reactant0)
break
products0 = [p for p in reaction.reactants]
for product in products:
for product0 in products0:
if product.isIsomorphic(product0):
products0.remove(product0)
break
reverse = not (len(reactants0) != 0 or len(products0) != 0)
if not forward and not reverse:
reactions.remove(reaction)
return reactions
def ensure_species(input_list, resonance=False, keepIsomorphic=False):
"""
Given an input list of molecules or species, return a list with only
species objects.
"""
output_list = []
for item in
|
exercism/python
|
exercises/practice/clock/.meta/example.py
|
Python
|
mit
| 752
| 0
|
class Clock:
"""Clock that displays 24 hour clock that rollsover properly"""
def __init__(self, hour, minute):
self.hour = hour
self.minute = minute
self.cleanup()
def __repr__(self):
return f'Clock({self.hour}, {self.minute})'
def __str__(self):
return '{:02d}:{:02d}'.format(self.hour, self.minute)
def __eq__(self, other):
return repr(self) == repr(other)
def __add__(self, minutes):
self.minute += minutes
return self.cleanup()
def __sub__(self, minutes):
self.minute -= minutes
return self.cleanup()
def cleanup(self):
self.hour += self.minute // 60
self.hour %= 24
self.minute %= 60
return self
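# Hedged usage sketch (not part of the original solution): exercising the
# rollover behaviour described in the class docstring.
if __name__ == '__main__':
    print(Clock(23, 30) + 60)   # 00:30 -- the hour wraps past midnight
    print(Clock(1, 0) - 90)     # 23:30 -- negative minutes roll back a day
    print(Clock(25, 0))         # 01:00 -- out-of-range hours are normalised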
|
collingreen/startuppong
|
techpong/apps/techpong/migrations/0005_auto__del_field_company_image_url__add_field_company_banner_url__add_f.py
|
Python
|
mit
| 8,058
| 0.007446
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Company.image_url'
db.delete_column(u'techpong_company', 'image_url')
# Adding field 'Company.banner_url'
db.add_column(u'techpong_company', 'banner_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'Company.logo_url'
db.add_column(u'techpong_company', 'logo_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Company.image_url'
db.add_column(u'techpong_company', 'image_url',
self.gf('django.db.models.fields.URLField')(default=1, max_length=255),
keep_default=False)
# Deleting field 'Company.banner_url'
db.delete_column(u'techpong_company', 'banner_url')
# Deleting field 'Company.logo_url'
db.delete_column(u'techpong_company', 'logo_url')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'techpong.company': {
'Meta': {'object_name': 'Company'},
'banner_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joined_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'techpong.match': {
'Meta': {'ordering': "['-played_time']", 'object_name': 'Match'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['techpong.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loser': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'match_loser'", 'to': u"orm['techpong.Player']"}),
'loser_rating_after': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'loser_rating_before': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'match_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'played_time': ('django.db.models.fields.DateTimeField', [], {}),
'winner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'match_winner'", 'to': u"orm['techpong.Player']"}),
'winner_rating_after': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'winner_rating_before': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'techpong.player': {
'Meta': {'object_name': 'Player'},
'cached_rating_changes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cached_results': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['techpong.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rating': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'techpong.round': {
'Meta': {'object_name': 'Round'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['techpong.Match']"}),
'player1_score': ('django.db.models.fields.IntegerField', [], {}),
'player2_score': ('django.db.models.fields.IntegerField', [], {}),
'round_number': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'techpong.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['techpong.Company']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['techpong']
|
vileopratama/vitech
|
src/addons/hw_posbox_homepage/__openerp__.py
|
Python
|
mit
| 714
| 0.002801
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'PosBox Homepage',
'version': '1.0',
'category': 'Point of Sale',
'sequence': 6,
'website': 'https://www.odoo.com/page/point-of-sale',
'summary': 'A homepage for the PosBox',
'description': """
PosBox Homepage
===============
This module overrides the openerp web interface to display a simple
homepage that explains what the PosBox is, shows its status, and
points to where the documentation can be found.
If you activate this module, you won't be able to access the
regular openerp interface anymore.
""",
'depends': ['hw_proxy'],
'installable': False,
'auto_install': False,
}
|
JCBarahona/edX
|
lms/djangoapps/lti_provider/models.py
|
Python
|
agpl-3.0
| 5,760
| 0.00191
|
"""
Database models for the LTI provider feature.
This app uses migrations. If you make changes to this model, be sure to create
an appropriate migration file and check it in at the same time as your model
changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration lti_provider --auto "description" --settings=devstack
"""
from django.contrib.auth.models import User
from django.db import models
import logging
from xmodule_django.models import CourseKeyField, UsageKeyField
from provider.utils import short_token, long_token
log = logging.getLogger("edx.lti_provider")
class LtiConsumer(models.Model):
"""
Database model representing an LTI consumer. This model stores the consumer
specific settings, such as the OAuth key/secret pair and any LTI fields
that must be persisted.
"""
consumer_name = models.CharField(max_length=255, unique=True)
consumer_key = models.CharField(max_length=32, unique=True, db_index=True, default=short_token)
consumer_secret = models.CharField(max_length=32, unique=True, default=short_token)
instance_guid = models.CharField(max_length=255, blank=True, null=True, unique=True)
@staticmethod
def get_or_supplement(instance_guid, consumer_key):
"""
The instance_guid is the best way to uniquely identify an LTI consumer.
However according to the LTI spec, the instance_guid field is optional
and so cannot be relied upon to be present.
This method first attempts to find an LtiConsumer by instance_guid.
Failing that, it tries to find a record with a matching consumer_key.
This can be the case if the LtiConsumer record was created as the result
of an LTI launch with no instance_guid.
If the instance_guid is now present, the LtiConsumer model will be
supplemented with the instance_guid, to more concretely identify the
consumer.
In practice, nearly all major LTI consumers provide an instance_guid, so
the fallback mechanism of matching by consumer key should be rarely
required.
"""
consumer = None
if instance_guid:
try:
consumer = LtiConsumer.objects.get(instance_guid=instance_guid)
except LtiConsumer.DoesNotExist:
# The consumer may not exist, or its record may not have a guid
pass
# Search by consumer key instead of instance_guid. If there is no
# consumer with a matching key, the LTI launch does not have permission
# to access the content.
if not consumer:
consumer = LtiConsumer.objects.get(
consumer_key=consumer_key,
)
# Add the instance_guid field to the model if it's not there already.
if instance_guid and not consumer.instance_guid:
consumer.instance_guid = instance_guid
consumer.save()
return consumer
class OutcomeService(models.Model):
"""
Model for a single outcome service associated with an LTI consumer. Note
that a given consumer may have more than one outcome service URL over its
lifetime, so we need to store the outcome service separately from the
LtiConsumer model.
An outcome service can be identified in two ways, depending on the
information provided by an LTI launch. The ideal way to identify the service
is by instance_guid, which should uniquely identify a consumer. However that
field is optional in the LTI launch, and so if it is missing we can fall
back on the consumer key (which should be created uniquely for each consumer
although we don't have a technical way to guarantee that).
Some LTI-specified fields use the prefix lis_; this refers to the IMS
Learning Information Services standard from which LTI inherits some
properties
"""
lis_outcome_service_url = models.CharField(max_length=255, unique=True)
lti_consumer = models.ForeignKey(LtiConsumer)
class GradedAssignment(models.Model):
"""
Model representing a single launch of a graded assignment by an individual
user. There will be a row created here only if the LTI consumer may require
a result to be returned from the LTI launch (determined by the presence of
the lis_result_sourcedid parameter in the launch POST). There will be only
one row created for a given usage/consumer combination; repeated launches of
the same content by the same user from the same LTI consumer will not add
new rows to the table.
Some LTI-specified fields use the prefix lis_; this refers to the IMS
Learning Information Services standard from which LTI inherits some
properties
"""
user = models.ForeignKey(User, db_index=True)
course_key = CourseKeyField(max_length=255, db_index=True)
usage_key = UsageKeyField(max_length=255, db_index=True)
outcome_service = models.ForeignKey(OutcomeService)
lis_result_sourcedid = models.CharField(max_length=255, db_index=True)
version_number = models.IntegerField(default=0)
class Meta(object):
unique_together = ('outcome_service', 'lis_result_sourcedid')
class LtiUser(models.Model):
"""
Model mapping the identity of an LTI user to an account on the edX platform.
The LTI user_id field is guaranteed to be unique per LTI consumer (per
the LTI spec), so we guarantee a unique mapping from LTI to edX account
by using the lti_consumer/lti_user_id tuple.
"""
lti_consumer = models.ForeignKey(LtiConsumer)
lti_user_id = models.CharField(max_length=255)
edx_user = models.ForeignKey(User, unique=True)
class Meta(object):
unique_together = ('lti_consumer', 'lti_user_id')
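# Hedged usage sketch (illustrative only; _example_resolve_consumer and
# post_params are not part of this module): how an LTI launch handler might
# resolve its consumer record, preferring the optional instance_guid and
# falling back to the OAuth consumer key, as described in
# LtiConsumer.get_or_supplement above.
def _example_resolve_consumer(post_params):
    return LtiConsumer.get_or_supplement(
        post_params.get('tool_consumer_instance_guid'),  # optional per LTI spec
        post_params['oauth_consumer_key'],
    )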
|
googleapis/python-game-servers
|
google/cloud/gaming_v1/types/common.py
|
Python
|
apache-2.0
| 13,962
| 0.001361
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.gaming.v1",
manifest={
"OperationMetadata",
"OperationStatus",
"LabelSelector",
"RealmSelector",
"Schedule",
"SpecSource",
"TargetDetails",
"TargetState",
"DeployedFleetDetails",
},
)
class OperationMetadata(proto.Message):
r"""Represents the metadata of the long-running operation.
Attributes:
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the operation was
created.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the operation finished
running.
target (str):
Output only. Server-defined resource path for
the target of the operation.
verb (str):
Output only. Name of the verb executed by the
operation.
status_message (str):
Output only. Human-readable status of the
operation, if any.
requested_cancellation (bool):
Output only. Identifies whether the user has requested
cancellation of the operation. Operations that have
successfully been cancelled have [Operation.error][] value
with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``.
api_version (str):
Output only. API version used to start the
operation.
unreachable (Sequence[str]):
Output only. List of Locations that could not
be reached.
operation_status (Sequence[google.cloud.gaming_v1.types.OperationMetadata.OperationStatusEntry]):
Output only. Operation status for Game
Services API operations. Operation status is in
the form of key-value pairs where keys are
resource IDs and the values show the status of
the operation. In case of failures, the value
includes an error code and error message.
"""
create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
target = proto.Field(proto.STRING, number=3,)
verb = proto.Field(proto.STRING, number=4,)
status_message = proto.Field(proto.STRING, number=5,)
requested_cancellation = proto.Field(proto.BOOL, number=6,)
api_version = proto.Field(proto.STRING, number=7,)
unreachable = proto.RepeatedField(proto.STRING, number=8,)
operation_status = proto.MapField(
proto.STRING, proto.MESSAGE, number=9, message="OperationStatus",
)
class OperationStatus(proto.Message):
r"""
Attributes:
done (bool):
Output only. Whether the operation is done or
still in progress.
error_code (google.cloud.gaming_v1.types.OperationStatus.ErrorCode):
The error code in case of failures.
error_message (str):
The human-readable error message.
"""
class ErrorCode(proto.Enum):
r""""""
ERROR_CODE_UNSPECIFIED = 0
INTERNAL_ERROR = 1
PERMISSION_DENIED = 2
CLUSTER_CONNECTION = 3
done = proto.Field(proto.BOOL, number=1,)
error_code = proto.Field(proto.ENUM, number=2, enum=ErrorCode,)
error_message = proto.Field(proto.STRING, number=3,)
class LabelSelector(proto.Message):
r"""The label selector, used to group labels on the resources.
Attributes:
labels (Sequence[google.cloud.gaming_v1.types.LabelSelector.LabelsEntry]):
Resource labels for this selector.
"""
labels = proto.MapField(proto.STRING, proto.STRING, number=1,)
class RealmSelector(proto.Message):
r"""The realm selector, used to match realm resources.
Attributes:
realms (Sequence[str]):
List of realms to match.
"""
realms = proto.RepeatedField(proto.STRING, number=1,)
class Schedule(proto.Message):
r"""The schedule of a recurring or one time event. The event's time span
is specified by start_time and end_time. If the scheduled event's
timespan is larger than the cron_spec + cron_job_duration, the event
will be recurring. If only cron_spec + cron_job_duration are
specified, the event is effective starting at the local time
specified by cron_spec, and is recurring.
::
start_time|-------[cron job]-------[cron job]-------[cron job]---|end_time
cron job: cron spec start time + duration
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
The start time of the event.
end_time (google.protobuf.timestamp_pb2.Timestamp):
The end time of the event.
cron_job_duration (google.protobuf.duration_pb2.Duration):
The duration for the cron job event. The
duration of the event is effective after the
cron job's start time.
cron_spec (str):
The cron definition of the scheduled event.
See https://en.wikipedia.org/wiki/Cron. Cron
spec specifies the local time as defined by the
realm.
"""
start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
cron_job_duration = proto.Field(
proto.MESSAGE, number=3, message=duration_pb2.Duration,
)
cron_spec = proto.Field(proto.STRING, number=4,)
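# Hedged usage sketch (illustrative only): a one-hour event recurring daily
# at 04:00 realm-local time, expressed with the fields documented above.
# Field names come from this class; the cron string itself is an assumption.
#
#     maintenance = Schedule(
#         cron_spec='0 4 * * *',
#         cron_job_duration=duration_pb2.Duration(seconds=3600),
#     )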
class SpecSource(proto.Message):
r"""Encapsulates Agones fleet spec and Agones autoscaler spec
sources.
Attributes:
game_server_config_name (str):
The game server config resource. Uses the form:
``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}/configs/{config_id}``.
name (str):
The name of the Agones fleet config or Agones
scaling config used to derive the Agones fleet
or Agones autoscaler spec.
"""
game_server_config_name = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
class TargetDetails(proto.Message):
r"""Details about the Agones resources.
Attributes:
game_server_cluster_name (str):
The game server cluster name. Uses the form:
``projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}``.
game_server_deployment_name (str):
The game server deployment name. Uses the form:
``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}``.
fleet_details (Sequence[google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails]):
Agones fleet details for game server clusters
and game server deployments.
"""
class TargetFleetDetails(proto.Message):
r"""Details of the target Agones fleet.
Attributes:
fleet (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleet):
Reference to target Agones fleet.
autoscaler (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleetAutoscaler):
Reference to target Agones fleet autoscaling
policy.
"""
class TargetFleet(proto
|
jvahala/brew-thing
|
app/views.py
|
Python
|
apache-2.0
| 274
| 0
|
from flask import render_template
from app import app
@app.route('/')
@app.route('/mainpage')
def slpage():
return render_template('mainpage.html')
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
|
dekomote/mezzanine-modeltranslation-backport
|
mezzanine/core/admin.py
|
Python
|
bsd-2-clause
| 10,434
| 0.000479
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.db.models import AutoField
from django.forms import ValidationError, ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User as AuthUser
from mezzanine.conf import settings
from mezzanine.core.forms import DynamicInlineAdminForm
from mezzanine.core.models import (Orderable, SitePermission,
CONTENT_STATUS_PUBLISHED)
from mezzanine.utils.urls import admin_url
from mezzanine.utils.models import get_user_model
if settings.USE_MODELTRANSLATION:
from django.utils.datastructures import SortedDict
from django.utils.translation import activate, get_language
from modeltranslation.admin import (TranslationAdmin,
TranslationInlineModelAdmin)
class BaseTranslationModelAdmin(TranslationAdmin):
"""
Mimic modeltranslation's TabbedTranslationAdmin but uses a
custom tabbed_translation_fields.js
"""
class Media:
js = (
"modeltranslation/js/force_jquery.js",
"mezzanine/js/%s" % settings.JQUERY_UI_FILENAME,
"mezzanine/js/admin/tabbed_translation_fields.js",
)
css = {
"all": ("mezzanine/css/admin/tabbed_translation_fields.css",),
}
else:
class BaseTranslationModelAdmin(admin.ModelAdmin):
"""
Abstract class used to handle the switch between translation
and no-translation class logic. We define the basic structure
for the Media class so we can extend it consistently regardless
of whether or not modeltranslation is used.
"""
class Media:
css = {"all": ()}
def getInlineBaseClass(cls):
if settings.USE_MODELTRANSLATION:
class InlineBase(TranslationInlineModelAdmin, cls):
"""
Abstract class that mimics django-modeltranslation's
Translation{Tabular,Stacked}Inline. Used as a placeholder
for future improvement.
"""
pass
return InlineBase
return cls
User = get_user_model()
class DisplayableAdminForm(ModelForm):
def clean_content(form):
status = form.cleaned_data.get("status")
content = form.cleaned_data.get("content")
if status == CONTENT_STATUS_PUBLISHED and not content:
raise ValidationError(_("This field is required if status "
"is set to published."))
return content
class DisplayableAdmin(BaseTranslationModelAdmin):
"""
Admin class for subclasses of the abstract ``Displayable`` model.
"""
list_display = ("title", "status", "admin_link")
list_display_links = ("title",)
list_editable = ("status",)
list_filter = ("status", "keywords__keyword")
date_hierarchy = "publish_date"
radio_fields = {"status": admin.HORIZONTAL}
fieldsets = (
(None, {
"fields": ["title", "status", ("publish_date", "expiry_date")],
}),
(_("Meta data"), {
"fields": ["_meta_title", "slug",
("description", "gen_description"),
"keywords", "in_sitemap"],
"classes": ("collapse-closed",)
}),
)
form = DisplayableAdminForm
def __init__(self, *args, **kwargs):
super(DisplayableAdmin, self).__init__(*args, **kwargs)
try:
self.search_fields = list(set(list(self.search_fields) + list(
self.model.objects.get_search_fields().keys())))
except AttributeError:
pass
class BaseDynamicInlineAdmin(object):
"""
Admin inline that uses JS to inject an "Add another" link which
when clicked, dynamically reveals another fieldset. Also handles
adding the ``_order`` field and its widget for models that
subclass ``Orderable``.
"""
form = DynamicInlineAdminForm
extra = 20
def __init__(self, *args, **kwargs):
super(BaseDynamicInlineAdmin, self).__init__(*args, **kwargs)
if issubclass(self.model, Orderable):
fields = self.fields
if not fields:
fields = self.model._meta.fields
exclude = self.exclude or []
fields = [f.name for f in fields if f.editable and
f.name not in exclude and not isinstance(f, AutoField)]
if "_order" in fields:
del fields[fields.index("_order")]
fields.append("_order")
self.fields = fields
class TabularDynamicInlineAdmin(BaseDynamicInlineAdmin, getInlineBaseClass(admin.TabularInline)):
template = "admin/includes/dynamic_inline_tabular.html"
class StackedDynamicInlineAdmin(BaseDynamicInlineAdmin, getInlineBaseClass(admin.StackedInline)):
template = "admin/includes/dynamic_inline_stacked.html"
def __init__(self, *args, **kwargs):
"""
Stacked dynamic inlines won't work without grappelli
installed, as the JavaScript in dynamic_inline.js isn't
able to target each of the inlines to set the value of
the order field.
"""
grappelli_name = getattr(settings, "PACKAGE_NAME_GRAPPELLI")
if grappelli_name not in settings.INSTALLED_APPS:
error = "StackedDynamicInlineAdmin requires Grappelli installed."
raise Exception(error)
super(StackedDynamicInlineAdmin, self).__init__(*args, **kwargs)
class OwnableAdmin(admin.ModelAdmin):
"""
Admin class for models that subclass the abstract ``Ownable``
model. Handles limiting the change list to objects owned by the
logged in user, as well as setting the owner of newly created
objects to the logged in user.
Remember that this will include the ``user`` field in the required
fields for the admin change form which may not be desirable. The
best approach to solve this is to define a ``fieldsets`` attribute
that excludes the ``user`` field or simple add ``user`` to your
admin excludes: ``exclude = ('user',)``
"""
def save_form(self, request, form, change):
"""
Set the object's owner as the logged in user.
"""
obj = form.save(commit=False)
if obj.user_id is None:
obj.user = request.user
return super(OwnableAdmin, self).save_form(request, form, change)
def queryset(self, request):
"""
Filter the change list by currently logged in user if not a
superuser. We also skip filtering if the model for this admin
class has been added to the sequence in the setting
``OWNABLE_MODELS_ALL_EDITABLE``, which contains models in the
format ``app_label.object_name``, and allows models subclassing
``Ownable`` to be excluded from filtering, eg: ownership should
not imply permission to edit.
"""
opts = self.model._meta
model_name = ("%s.%s" % (opts.app_label, opts.object_name)).lower()
models_all_editable = settings.OWNABLE_MODELS_ALL_EDITABLE
models_all_editable = [m.lower() for m in models_all_editable]
qs = super(OwnableAdmin, self).queryset(request)
if request.user.is_superuser or model_name in models_all_editable:
return qs
return qs.filter(user__id=request.user.id)
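# Hedged usage sketch (illustrative only; BlogPost stands in for any model
# subclassing Ownable): combining OwnableAdmin with the ``exclude`` approach
# recommended in its docstring.
#
#     class BlogPostAdmin(DisplayableAdmin, OwnableAdmin):
#         exclude = ('user',)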
class SingletonAdmin(admin.ModelAdmin):
"""
Admin class for models that should only contain a single instance
in the database. Redirect all views to the change view when the
instance exists, and to the add view when it doesn't.
"""
def handle_save(self, request, response):
"""
Handles redirect back to the dashboard when save is clicked
(eg not save and continue editing), by checking for a redirect
response, which only occurs if the form is valid.
"""
form_valid = isinstance(response, HttpRes
|
xingjian-f/Leetcode-solution
|
80. Remove Duplicates from Sorted Array II.py
|
Python
|
mit
| 605
| 0.07438
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)-1
if length <= 0:
# an empty or single-element list already satisfies the constraint
return len(nums)
pos = 1
cnt = 1
last = nums[0]
while length > 0:
if nums[pos] == last:
if cnt >= 2:
del nums[pos]
else:
pos += 1
cnt += 1
else:
last = nums[pos]
pos += 1
cnt = 1
length -= 1
return len(nums)
print Solution().removeDuplicates([1,1,1,1,1,2,3,3,4,4,5])  # prints 8; nums is trimmed to [1, 1, 2, 3, 3, 4, 4, 5]
|
bionikspoon/Codewars-Challenges
|
python/5kyu/palindrome_chain_length/solution.py
|
Python
|
mit
| 612
| 0.001634
|
from main import *
def palindrome_chain_length(n):
count = 0
while True:
reversed_n = reverse_order(n)
if is_palindrome(n, reversed_n):
return count
n += reversed_n
count += 1
def reverse_order(n):
return int(str(n)[::-1])
def is_palindrome(n, reversed_n):
return n == reversed_n
test.assert_equals(reverse_order(87), 78)
test.assert_equals(is_palindrome(87, reverse_order(87)), False)
test.assert_equals(is_palindrome(5, reverse_order(5)), True)
test.assert_equals(palindrome_chain_length(87), 4)
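# Worked example for the final assertion above (spelled out for clarity):
# 87+78=165, 165+561=726, 726+627=1353, 1353+3531=4884, and 4884 is a
# palindrome -- four additions in total.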
|
freak97/binutils
|
gdb/testsuite/gdb.python/py-finish-breakpoint2.py
|
Python
|
gpl-2.0
| 1,221
| 0.006552
|
# Copyright (C) 2011-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests python Finish
# Breakpoints.
class ExceptionFinishBreakpoint(gdb.FinishBreakpoint):
def __init__(self, frame):
gdb.FinishBreakpoint.__init__ (self, frame, internal=1)
self.silent = True
print ("init ExceptionFinishBreakpoint")
def stop(self):
print ("stopped at ExceptionFinishBreakpoint")
return True
def out_of_scope(self):
print ("exception did not finish ...")
print ("Python script imported")
|
seymour1/label-virusshare
|
test/test_hashes.py
|
Python
|
bsd-3-clause
| 1,444
| 0.004848
|
import json
import argparse
import logging
import glob
# Logging Information
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
fh = logging.FileHandler('test_hashes.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
parser = argparse.ArgumentParser()
parser.add_argument("hash_num",
help="file that we want to verify")
args = parser.parse_args()
hashes = set()
hash_num = args.hash_num
logger.info("Verifying consistency for VirusShare_00" + str(hash_num).zfill(3))
logger.debug("Generating hashes from ../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5")
with open(("../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5"),'r') as file:
for line in file.readlines()[6:]:
hashes.add(line.strip())
for filename in glob.glob("../analyses/VirusShare_00" + str(hash_num).zfill(3) + ".*"):
logger.debug("Removing hashes from " + filename)
with open(filename,'r') as file:
for line in file.readlines():
hashes.remove(json.loads(line.strip())["md5"])
if len(hashes) == 0:
logger.info("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is consistent with hashfile")
else:
logger.error("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is inconsistent with hashfile")
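# Hedged invocation sketch (an assumption based on the argparse setup above;
# verifies ../analyses/VirusShare_00000.* against ../hashes/VirusShare_00000.md5):
#
#     python test_hashes.py 0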
|
dscho/hg
|
mercurial/streamclone.py
|
Python
|
gpl-2.0
| 13,689
| 0.001315
|
# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import struct
import time
from .i18n import _
from . import (
branchmap,
error,
store,
util,
)
def canperformstreamclone(pullop, bailifbundle2supported=False):
"""Whether it is possible to perform a streaming clone as part of pull.
``bailifbundle2supported`` will cause the function to return False if
bundle2 stream clones are supported. It should only be called by the
legacy stream clone code path.
Returns a tuple of (supported, requirements). ``supported`` is True if
streaming clone is supported and False otherwise. ``requirements`` is
a set of repo requirements from the remote, or ``None`` if stream clone
isn't supported.
"""
repo = pullop.repo
remote = pullop.remote
bundle2supported = False
if pullop.canusebundle2:
if 'v1' in pullop.remotebundle2caps.get('stream', []):
bundle2supported = True
# else
# Server doesn't support bundle2 stream clone or doesn't support
# the versions we support. Fall back and possibly allow legacy.
# Ensures legacy code path uses available bundle2.
if bailifbundle2supported and bundle2supported:
return False, None
# Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
#elif not bailifbundle2supported and not bundle2supported:
    #    return False, None
# Streaming clone only works on empty repositories.
if len(repo):
return False, None
# Streaming clone only works if all data is being requested.
if pullop.heads:
return False, None
streamrequested = pullop.streamclonerequested
# If we don't have a preference, let the server decide for us. This
# likely only comes into play in LANs.
    if streamrequested is None:
# The server can advertise whether to prefer streaming clone.
streamrequested = remote.capable('stream-preferred')
if not streamrequested:
return False, None
# In order for stream clone to work, the client has to support all the
# requirements advertised by the server.
#
# The server advertises its requirements via the "stream" and "streamreqs"
# capability. "stream" (a value-less capability) is advertised if and only
# if the only requirement is "revlogv1." Else, the "streamreqs" capability
# is advertised and contains a comma-delimited list of requirements.
requirements = set()
if remote.capable('stream'):
requirements.add('revlogv1')
else:
streamreqs = remote.capable('streamreqs')
# This is weird and shouldn't happen with modern servers.
if not streamreqs:
return False, None
streamreqs = set(streamreqs.split(','))
# Server requires something we don't support. Bail.
if streamreqs - repo.supportedformats:
return False, None
requirements = streamreqs
return True, requirements
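# Illustrative outcomes of the negotiation above (annotation, not part of the
# original module):
# - remote advertises 'stream'                  -> requirements == {'revlogv1'}
# - remote advertises streamreqs='revlogv1,generaldelta' and both formats are
#   in repo.supportedformats                    -> stream clone with both
# - streamreqs names a format we lack locally   -> (False, None), fall back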
def maybeperformlegacystreamclone(pullop):
"""Possibly perform a legacy stream clone operation.
Legacy stream clones are performed as part of pull but before all other
operations.
A legacy stream clone will not be performed if a bundle2 stream clone is
supported.
"""
supported, requirements = canperformstreamclone(pullop)
if not supported:
return
repo = pullop.repo
remote = pullop.remote
# Save remote branchmap. We will use it later to speed up branchcache
# creation.
rbranchmap = None
if remote.capable('branchmap'):
rbranchmap = remote.branchmap()
repo.ui.status(_('streaming all changes\n'))
fp = remote.stream_out()
l = fp.readline()
try:
resp = int(l)
except ValueError:
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if resp == 1:
raise error.Abort(_('operation forbidden by server'))
elif resp == 2:
raise error.Abort(_('locking the remote repository failed'))
elif resp != 0:
raise error.Abort(_('the server sent an unknown error code'))
l = fp.readline()
try:
filecount, bytecount = map(int, l.split(' ', 1))
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
with repo.lock():
consumev1(repo, fp, filecount, bytecount)
# new requirements = old non-format requirements +
# new format-related remote requirements
# requirements from the streamed-in repository
repo.requirements = requirements | (
repo.requirements - repo.supportedformats)
repo._applyopenerreqs()
repo._writerequirements()
if rbranchmap:
branchmap.replacecache(repo, rbranchmap)
repo.invalidate()
def allowservergeneration(ui):
"""Whether streaming clones are allowed from the server."""
return ui.configbool('server', 'uncompressed', True, untrusted=True)
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
return repo.store.walk()
def generatev1(repo):
"""Emit content for version 1 of a streaming clone.
This returns a 3-tuple of (file count, byte size, data iterator).
The data iterator consists of N entries for each file being transferred.
Each file entry starts as a line with the file name and integer size
delimited by a null byte.
The raw file data follows. Following the raw file data is the next file
entry, or EOF.
When used on the wire protocol, an additional line indicating protocol
success will be prepended to the stream. This function is not responsible
for adding it.
This function will obtain a repository lock to ensure a consistent view of
the store is captured. It therefore may raise LockError.
"""
entries = []
total_bytes = 0
# Get consistent snapshot of repo, lock during scan.
with repo.lock():
repo.ui.debug('scanning\n')
for name, ename, size in _walkstreamfiles(repo):
if size:
entries.append((name, size))
total_bytes += size
repo.ui.debug('%d files, %d bytes to transfer\n' %
(len(entries), total_bytes))
svfs = repo.svfs
oldaudit = svfs.mustaudit
debugflag = repo.ui.debugflag
svfs.mustaudit = False
def emitrevlogdata():
try:
for name, size in entries:
if debugflag:
repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
# partially encode name over the wire for backwards compat
yield '%s\0%d\n' % (store.encodedir(name), size)
if size <= 65536:
with svfs(name, 'rb') as fp:
yield fp.read(size)
else:
for chunk in util.filechunkiter(svfs(name), limit=size):
yield chunk
finally:
svfs.mustaudit = oldaudit
return len(entries), total_bytes, emitrevlogdata()
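# A minimal sketch (not part of the original module) of consuming the entry
# format produced by emitrevlogdata() above. Assumes ``fp`` is a binary stream
# positioned at the first entry and ``filecount`` came from the header line;
# error handling is omitted.
def _sketch_readv1entries(fp, filecount):
    for _ in xrange(filecount):
        header = fp.readline()  # '<encoded name>\0<size>\n'
        name, size = header[:-1].split('\0', 1)
        # the raw file data follows the header line immediately
        yield store.decodedir(name), fp.read(int(size))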
def generatev1wireproto(repo):
"""Emit content for version 1 of streaming clone suitable for the wire.
This is the data output from ``generatev1()`` with a header line
indicating file count and byte size.
"""
filecount, bytecount, it = generatev1(repo)
yield '%d %d\n' % (filecount, bytecount)
for chunk in it:
yield chunk
def generatebundlev1(repo, compression='UN'):
"""Emit content for version 1 of a stream clone bundle.
The first 4 bytes of the output ("HGS1") denote this as stream clone
bundle version 1.
The next 2 bytes indicate the compression type. Only "UN" is currently
supported.
The next 16 bytes are two 64-bit big endian unsigned integers indicating
file count and byte count, respectively.
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/types/keyword_plan_campaign_service.py
|
Python
|
apache-2.0
| 6,083
| 0.000493
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.resources.types import keyword_plan_campaign
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.services",
marshal="google.ads.googleads.v9",
manifest={
"GetKeywordPlanCampaignRequest",
"MutateKeywordPlanCampaignsRequest",
"KeywordPlanCampaignOperation",
"MutateKeywordPlanCampaignsResponse",
"MutateKeywordPlanCampaignResult",
},
)
class GetKeywordPlanCampaignRequest(proto.Message):
r"""Request message for
[KeywordPlanCampaignService.GetKeywordPlanCampaign][google.ads.googleads.v9.services.KeywordPlanCampaignService.GetKeywordPlanCampaign].
Attributes:
resource_name (str):
Required. The resource name of the Keyword
Plan campaign to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class MutateKeywordPlanCampaignsRequest(proto.Message):
r"""Request message for
[KeywordPlanCampaignService.MutateKeywordPlanCampaigns][google.ads.googleads.v9.services.KeywordPlanCampaignService.MutateKeywordPlanCampaigns].
Attributes:
customer_id (str):
Required. The ID of the customer whose
Keyword Plan campaigns are being modified.
operations (Sequence[google.ads.googleads.v9.services.types.KeywordPlanCampaignOperation]):
Required. The list of operations to perform
on individual Keyword Plan campaigns.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="KeywordPlanCampaignOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
class KeywordPlanCampaignOperation(proto.Message):
r"""A single operation (create, update, remove) on a Keyword Plan
campaign.
This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v9.resources.types.KeywordPlanCampaign):
Create operation: No resource name is
expected for the new Keyword Plan campaign.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v9.resources.types.KeywordPlanCampaign):
Update operation: The Keyword Plan campaign
is expected to have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the removed Keyword
Plan campaign is expected, in this format:
``customers/{customer_id}/keywordPlanCampaigns/{keywordPlan_campaign_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=keyword_plan_campaign.KeywordPlanCampaign,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=keyword_plan_campaign.KeywordPlanCampaign,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateKeywordPlanCampaignsResponse(proto.Message):
r"""Response message for a Keyword Plan campaign mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v9.services.types.MutateKeywordPlanCampaignResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateKeywordPlanCampaignResult",
)
class MutateKeywordPlanCampaignResult(proto.Message):
r"""The result for the Keyword Plan campaign mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
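# Illustrative sketch (not part of the generated code): assembling a removal
# request; the customer and campaign IDs below are placeholders.
def _example_remove_request():
    operation = KeywordPlanCampaignOperation(
        remove="customers/1234567890/keywordPlanCampaigns/111"
    )
    return MutateKeywordPlanCampaignsRequest(
        customer_id="1234567890", operations=[operation]
    )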
|
henrysher/imagefactory
|
imagefactory-plugins/EC2Cloud/EC2Cloud.py
|
Python
|
apache-2.0
| 70,484
| 0.006626
|
#
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zope
import oz.Fedora
import oz.TDL
import subprocess
import os
import re
import guestfs
import string
import libxml2
import traceback
import ConfigParser
import boto.ec2
import sys
from time import *
from tempfile import *
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.ReservationManager import ReservationManager
from boto.s3.connection import S3Connection
from boto.s3.connection import Location
from boto.exception import *
from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping
from imgfac.CloudDelegate import CloudDelegate
# Boto is very verbose - shut it up
logging.getLogger('boto').setLevel(logging.INFO)
def subprocess_check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *popenargs, **kwargs)
stdout, stderr = process.communicate()
retcode = process.poll()
if retcode:
cmd = ' '.join(*popenargs)
raise ImageFactoryException("'%s' failed(%d): %s" % (cmd, retcode, stderr))
return (stdout, stderr, retcode)
class EC2Cloud(object):
zope.interface.implements(CloudDelegate)
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def __init__(self):
# Note that we are now missing ( template, target, config_block = None):
super(EC2Cloud, self).__init__()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
config_obj = ApplicationConfiguration()
self.app_config = config_obj.configuration
self.oz_config = ConfigParser.SafeConfigParser()
self.oz_config.read("/etc/oz/oz.cfg")
self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
if "ec2" in config_obj.jeos_images:
self.ec2_jeos_amis = config_obj.jeos_images['ec2']
else:
self.log.warning("No JEOS amis defined for ec2. Snapshot builds will not be possible.")
self.ec2_jeos_amis = {}
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_should_create_target_image() called on EC2Cloud plugin - returning True')
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
# Nothing really to do here
pass
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_did_create_target_image() called in EC2Cloud plugin')
# The bulk of what is done here is EC2 specific
# There are OS conditionals thrown in at the moment
# For now we are putting everything into the EC2 Cloud plugin
# TODO: Revisit this, and the plugin interface, to see if there are ways to
# make the separation cleaner
# This lets our logging helper know what image is being operated on
self.builder = builder
self.active_image = self.builder.target_image
try:
# TODO: More convenience vars - revisit
self.template = template
self.target = target
self.tdlobj = oz.TDL.TDL(xmlstring=self.template.xml, rootpw_required=True)
self._get_os_helper()
# Add in target specific content
self.add_target_content()
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.target_image.identifier
# This lets our logging helper know what image is being operated on
self.activity("Initializing Oz environment")
# Create a name combining the TDL name and the UUID for use when tagging EC2 AMIs
self.longname = self.tdlobj.name + "-" + self.new_image_id
# Oz assumes unique names - TDL built for multiple backends guarantees they are not unique
# We don't really care about the name so just force uniqueness
self.tdlobj.name = "factory-build-" + self.new_image_id
# populate a config object to pass to OZ; this allows us to specify our
# own output dir but inherit other Oz behavior
self.oz_config = ConfigParser.SafeConfigParser()
self.oz_config.read("/etc/oz/oz.cfg")
self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
# make this a property to enable quick cleanup on abort
self.instance = None
# OK great, we now have a customized KVM image
# Now we do some target specific transformation
# None of these things actually require anything other than the TDL object
# and the original disk image
# At this point our builder has a target_image and a base_image
# OS plugin has already provided the initial file for us to work with
# which we can currently assume is a raw KVM compatible image
self.image = builder.target_image.data
self.modify_oz_filesystem()
self.ec2_copy_filesystem()
self.ec2_modify_filesystem()
except:
self.log_exc()
self.status="FAILED"
raise
self.percent_complete=100
self.status="COMPLETED"
def _get_os_helper(self):
# For now we are adopting a 'mini-plugin' approach to OS specific code within the EC2 plugin
# In theory, this could live in the OS plugin - however, the code in question is very tightly
# related to the EC2 plugin, so it probably should stay here
try:
# Change RHEL-6 to RHEL6, etc.
os_name = self.tdlobj.distro.translate(None, '-')
class_name = "%s_ec2_Helper" % (os_name)
module_name = "imagefactory_plugins.EC2Cloud.EC2CloudOSHelpers"
__import__(module_name)
os_helper_class = getattr(sys.modules[module_name], class_name)
self.os_helper = os_helper_class(self)
except:
self.log_exc()
raise ImageFactoryException("Unable to create EC2 OS helper object for distro (%s) in TDL" % (self.tdlobj.distro) )
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
self.log.info('push_image_to_provider() called in EC2Cloud')
self.builder = builder
self.active_image = self.builder.provider_image
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.provider_image.identifier
self.tdlobj = oz.TDL.TDL(xmlstring=builder.target_image.template, rootpw_required=True)
self._get_os_helper()
self.push_image_upload(target_image, provider, credentials)
def delete_from_provider(self, builder, provider, credentials, target, parameters):
self.log.debug("Deleting AMI (%s)" % (self.builder.provider_image.identifier_on_provider))
self.activity("Preparing EC2 region details")
region=provider
region_conf=self.
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
|
Python
|
mit
| 19,348
| 0.001551
|
"""
Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate
Gradient Method (LOBPCG), see
http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/
License: BSD
Authors: Robert Cimrman, Andrew Knyazev
Examples in tests directory contributed by Nils Wagner.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import assert_allclose
from scipy._lib.six import xrange
from scipy.linalg import inv, eigh, cho_factor, cho_solve, cholesky
from scipy.sparse.linalg import aslinearoperator, LinearOperator
__all__ = ['lobpcg']
@np.deprecate(new_name='eigh')
def symeig(mtxA, mtxB=None, select=None):
return eigh(mtxA, b=mtxB, eigvals=select)
def pause():
# Used only when verbosity level > 10.
input()
def save(ar, fileName):
# Used only when verbosity level > 10.
from numpy import savetxt
savetxt(fileName, ar, precision=8)
def _assert_symmetric(M, rtol=1e-5, atol=1e-8):
assert_allclose(M.T, M, rtol=rtol, atol=atol)
##
# 21.05.2007, c
def as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeOperator(operatorInput, expectedShape):
"""Takes a dense numpy array or a sparse matrix or
a function and makes an operator performing matrix * blockvector
products.
Examples
--------
>>> A = _makeOperator( arrayA, (n, n) )
>>> vectorB = A( vectorX )
"""
if operatorInput is None:
def ident(x):
return x
operator = LinearOperator(expectedShape, ident, matmat=ident)
else:
operator = aslinearoperator(operatorInput)
if operator.shape != expectedShape:
raise ValueError('operator has invalid shape')
return operator
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in place."""
gramYBV = np.dot(blockVectorBY.T, blockVectorV)
tmp = cho_solve(factYBY, gramYBV)
blockVectorV -= np.dot(blockVectorY, tmp)
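# Annotation (not part of the original module): the in-place update above is
# V <- V - Y (Y^T B Y)^{-1} (B Y)^T V, i.e. it subtracts the B-orthogonal
# projection of V onto span(Y), keeping the iterates B-orthogonal to Y.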
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
if blockVectorBV is None:
if B is not None:
blockVectorBV = B(blockVectorV)
else:
blockVectorBV = blockVectorV # Shared data!!!
gramVBV = np.dot(blockVectorV.T, blockVectorBV)
gramVBV = cholesky(gramVBV)
gramVBV = inv(gramVBV, overwrite_a=True)
# gramVBV is now R^{-1}.
blockVectorV = np.dot(blockVectorV, gramVBV)
if B is not None:
blockVectorBV = np.dot(blockVectorBV, gramVBV)
if retInvR:
return blockVectorV, blockVectorBV, gramVBV
else:
return blockVectorV, blockVectorBV
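# Illustrative check (not part of the original module): with B=None the
# returned block vectors have orthonormal columns, e.g.
#   V, _ = _b_orthonormalize(None, np.random.rand(50, 3))
#   np.allclose(np.dot(V.T, V), np.eye(3))  # -> True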
def lobpcg(A, X,
B=None, M=None, Y=None,
tol=None, maxiter=20,
largest=True, verbosityLevel=0,
retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive
definite (SPD) generalized eigenproblems.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The symmetric linear operator of the problem, usually a
sparse matrix. Often called the "stiffness matrix".
X : array_like
Initial approximation to the k eigenvectors. If A has
        shape=(n,n) then X should have shape=(n,k).
B : {dense matrix, sparse matrix, LinearOperator}, optional
the right hand side operator in a generalized eigenproblem.
by default, B = Identity
often called the "mass matrix"
M : {dense matrix, sparse matrix, LinearOperator}, optional
preconditioner to A; by default M = Identity
M should approximate the inverse of A
Y : array_like, optional
n-by-sizeY matrix of constraints, sizeY < n
The iterations will be performed in the B-orthogonal complement
of the column-space of Y. Y must be full rank.
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors. V has the same shape as X.
Other Parameters
----------------
tol : scalar, optional
Solver tolerance (stopping criterion)
by default: tol=n*sqrt(eps)
maxiter : integer, optional
maximum number of iterations
by default: maxiter=min(n,20)
largest : bool, optional
when True, solve for the largest eigenvalues, otherwise the smallest
verbosityLevel : integer, optional
controls solver output. default: verbosityLevel = 0.
retLambdaHistory : boolean, optional
whether to return eigenvalue history
retResidualNormsHistory : boolean, optional
whether to return history of residual norms
Examples
--------
Solve A x = lambda B x with constraints and preconditioning.
>>> from scipy.sparse import spdiags, issparse
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
>>> n = 100
>>> vals = [np.arange(n, dtype=np.float64) + 1]
>>> A = spdiags(vals, 0, n, n)
>>> A.toarray()
array([[ 1., 0., 0., ..., 0., 0., 0.],
[ 0., 2., 0., ..., 0., 0., 0.],
[ 0., 0., 3., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 98., 0., 0.],
[ 0., 0., 0., ..., 0., 99., 0.],
[ 0., 0., 0., ..., 0., 0., 100.]])
Constraints.
>>> Y = np.eye(n, 3)
Initial guess for eigenvectors, should have linearly independent
columns. Column dimension = number of requested eigenvalues.
>>> X = np.random.rand(n, 3)
Preconditioner -- inverse of A (as an abstract linear operator).
>>> invA = spdiags([1./vals[0]], 0, n, n)
>>> def precond( x ):
... return invA * x
>>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)
Here, ``invA`` could of course have been used directly as a preconditioner.
Let us then solve the problem:
>>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, tol=1e-4, maxiter=40, largest=False)
>>> eigs
array([ 4., 5., 6.])
Note that the vectors passed in Y are the eigenvectors of the 3 smallest
eigenvalues. The results returned are orthogonal to those.
Notes
-----
If both retLambdaHistory and retResidualNormsHistory are True,
the return tuple has the following format
(lambda, V, lambda history, residual norms history).
In the following ``n`` denotes the matrix size and ``m`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size 3``m`` on every
iteration by calling the "standard" dense eigensolver, so if ``m`` is not
small enough compared to ``n``, it does not make sense to call the LOBPCG
code, but rather one should use the "standard" eigensolver,
e.g. numpy or scipy function in this case.
If one calls the LOBPCG algorithm for 5``m``>``n``,
it will most likely break internally, so the code tries to call the standard
function instead.
It is not that n should be large for the LOBPCG to work, but rather the
    ratio ``n``/``m`` should be large. If you call the LOBPCG code with ``m``=1
and ``n``=10, it should work, though ``n`` is small. The method is intended
for extremely large ``n``/``m``, see e.g., reference [28] in
http://arxiv.org/abs/0705.2626
The convergence speed depends basically on two factors:
1. How well relatively separated the seeking eigenvalues are
from the rest of the eigenvalues.
    One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
    preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
|
stvstnfrd/edx-platform
|
openedx/features/calendar_sync/tests/test_views.py
|
Python
|
agpl-3.0
| 2,051
| 0.002438
|
"""
Tests for Calendar Sync views.
"""
import ddt
from django.test import TestCase
from django.urls import reverse
from openedx.features.calendar_sync.api import SUBSCRIBE, UNSUBSCRIBE
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
TEST_PASSWORD = 'test'
@ddt.ddt
class TestCalendarSyncView(SharedModuleStoreTestCase, TestCase):
"""Tests for the calendar sync view."""
@classmethod
def setUpClass(cls):
""" Set up any course data """
super(TestCalendarSyncView, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCalendarSyncView, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user = self.create_user_for_course(self.course)
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.calendar_sync_url = reverse('openedx.calendar_sync', args=[self.course.id])
@ddt.data(
# Redirect on successful subscribe
[{'tool_data': "{{'toggle_data': '{}'}}".format(SUBSCRIBE)}, 302, ''],
# Redirect on successful unsubscribe
        [{'tool_data': "{{'toggle_data': '{}'}}".format(UNSUBSCRIBE)}, 302, ''],
# 422 on unknown toggle_data
[{'tool_data': "{{'toggle_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no toggle_data
[{'tool_data': "{{'random_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no tool_data
        [{'nonsense': "{{'random_data': '{}'}}".format('gibberish')}, 422, 'Tool data was not provided.'],
)
@ddt.unpack
def test_course_dates_fragment(self, data, expected_status_code, contained_text):
response = self.client.post(self.calendar_sync_url, data)
assert response.status_code == expected_status_code
assert contained_text in str(response.content)
|
mapr-demos/wifi-sensor-demo
|
python/mpu6050/all_constants.py
|
Python
|
apache-2.0
| 15,010
| 0.022252
|
# constants extracted from
# https://github.com/jrowberg/i2cdevlib/blob/master/Arduino/MPU6050/MPU6050.h
MPU6050_ADDRESS_AD0_LOW = 0x68
MPU6050_ADDRESS_AD0_HIGH = 0x69
MPU6050_DEFAULT_ADDRESS = MPU6050_ADDRESS_AD0_LOW
MPU6050_RA_XG_OFFS_TC = 0x00
MPU6050_RA_YG_OFFS_TC = 0x01
MPU6050_RA_ZG_OFFS_TC = 0x02
MPU6050_RA_X_FINE_GAIN = 0x03
MPU6050_RA_Y_FINE_GAIN = 0x04
MPU6050_RA_Z_FINE_GAIN = 0x05
MPU6050_RA_XA_OFFS_H = 0x06
MPU6050_RA_XA_OFFS_L_TC = 0x07
MPU6050_RA_YA_OFFS_H = 0x08
MPU6050_RA_YA_OFFS_L_TC = 0x09
MPU6050_RA_ZA_OFFS_H = 0x0A
MPU6050_RA_ZA_OFFS_L_TC = 0x0B
MPU6050_RA_SELF_TEST_X = 0x0D
MPU6050_RA_SELF_TEST_Y = 0x0E
MPU6050_RA_SELF_TEST_Z = 0x0F
MPU6050_RA_SELF_TEST_A = 0x10
MPU6050_RA_XG_OFFS_USRH = 0x13
MPU6050_RA_XG_OFFS_USRL = 0x14
MPU6050_RA_YG_OFFS_USRH = 0x15
MPU6050_RA_YG_OFFS_USRL     = 0x16
MPU6050_RA_ZG_OFFS_USRH = 0x17
MPU6050_RA_ZG_OFFS_USRL = 0x18
MPU6050_RA_SMPLRT_DIV = 0x19
MPU6050_RA_CONFIG = 0x1A
MPU6050_RA_GYRO_CONFIG = 0x1B
MPU6050_RA_ACCEL_CONFIG = 0x1C
MPU6050_RA_FF_THR = 0x1D
MPU6050_RA_FF_DUR = 0x1E
MPU6050_RA_MOT_THR = 0x1F
MPU6050_RA_MOT_DUR = 0x20
MPU6050_RA_ZRMOT_THR = 0x21
MPU6050_RA_ZRMOT_DUR = 0x22
MPU6050_RA_FIFO_EN = 0x23
MPU6050_RA_I2C_MST_CTRL = 0x24
MPU6050_RA_I2C_SLV0_ADDR = 0x25
MPU6050_RA_I2C_SLV0_REG = 0x26
MPU6050_RA_I2C_SLV0_CTRL = 0x27
MPU6050_RA_I2C_SLV1_ADDR = 0x28
MPU6050_RA_I2C_SLV1_REG = 0x29
MPU6050_RA_I2C_SLV1_CTRL = 0x2A
MPU6050_RA_I2C_SLV2_ADDR = 0x2B
MPU6050_RA_I2C_SLV2_REG = 0x2C
MPU6050_RA_I2C_SLV2_CTRL = 0x2D
MPU6050_RA_I2C_SLV3_ADDR = 0x2E
MPU6050_RA_I2C_SLV3_REG = 0x2F
MPU6050_RA_I2C_SLV3_CTRL = 0x30
MPU6050_RA_I2C_SLV4_ADDR = 0x31
MPU6050_RA_I2C_SLV4_REG = 0x32
MPU6050_RA_I2C_SLV4_DO = 0x33
MPU6050_RA_I2C_SLV4_CTRL = 0x34
MPU6050_RA_I2C_SLV4_DI = 0x35
MPU6050_RA_I2C_MST_STATUS = 0x36
MPU6050_RA_INT_PIN_CFG = 0x37
MPU6050_RA_INT_ENABLE = 0x38
MPU6050_RA_DMP_INT_STATUS = 0x39
MPU6050_RA_INT_STATUS = 0x3A
MPU6050_RA_ACCEL_XOUT_H = 0x3B
MPU6050_RA_ACCEL_XOUT_L = 0x3C
MPU6050_RA_ACCEL_YOUT_H = 0x3D
MPU6050_RA_ACCEL_YOUT_L = 0x3E
MPU6050_RA_ACCEL_ZOUT_H = 0x3F
MPU6050_RA_ACCEL_ZOUT_L = 0x40
MPU6050_RA_TEMP_OUT_H = 0x41
MPU6050_RA_TEMP_OUT_L = 0x42
MPU6050_RA_GYRO_XOUT_H = 0x43
MPU6050_RA_GYRO_XOUT_L = 0x44
MPU6050_RA_GYRO_YOUT_H = 0x45
MPU6050_RA_GYRO_YOUT_L = 0x46
MPU6050_RA_GYRO_ZOUT_H = 0x47
MPU6050_RA_GYRO_ZOUT_L = 0x48
MPU6050_RA_EXT_SENS_DATA_00 = 0x49
MPU6050_RA_EXT_SENS_DATA_01 = 0x4A
MPU6050_RA_EXT_SENS_DATA_02 = 0x4B
MPU6050_RA_EXT_SENS_DATA_03 = 0x4C
MPU6050_RA_EXT_SENS_DATA_04 = 0x4D
MPU6050_RA_EXT_SENS_DATA_05 = 0x4E
MPU6050_RA_EXT_SENS_DATA_06 = 0x4F
MPU6050_RA_EXT_SENS_DATA_07 = 0x50
MPU6050_RA_EXT_SENS_DATA_08 = 0x51
MPU6050_RA_EXT_SENS_DATA_09 = 0x52
MPU6050_RA_EXT_SENS_DATA_10 = 0x53
MPU6050_RA_EXT_SENS_DATA_11 = 0x54
MPU6050_RA_EXT_SENS_DATA_12 = 0x55
MPU6050_RA_EXT_SENS_DATA_13 = 0x56
MPU6050_RA_EXT_SENS_DATA_14 = 0x57
MPU6050_RA_EXT_SENS_DATA_15 = 0x58
MPU6050_RA_EXT_SENS_DATA_16 = 0x59
MPU6050_RA_EXT_SENS_DATA_17 = 0x5A
MPU6050_RA_EXT_SENS_DATA_18 = 0x5B
MPU6050_RA_EXT_SENS_DATA_19 = 0x5C
MPU6050_RA_EXT_SENS_DATA_20 = 0x5D
MPU6050_RA_EXT_SENS_DATA_21 = 0x5E
MPU6050_RA_EXT_SENS_DATA_22 = 0x5F
MPU6050_RA_EXT_SENS_DATA_23 = 0x60
MPU6050_RA_MOT_DETECT_STATUS = 0x61
MPU6050_RA_I2C_SLV0_DO = 0x63
MPU6050_RA_I2C_SLV1_DO = 0x64
MPU6050_RA_I2C_SLV2_DO = 0x65
MPU6050_RA_I2C_SLV3_DO = 0x66
MPU6050_RA_I2C_MST_DELAY_CTRL = 0x67
MPU6050_RA_SIGNAL_PATH_RESET = 0x68
MPU6050_RA_MOT_DETECT_CTRL = 0x69
MPU6050_RA_USER_CTRL = 0x6A
MPU6050_RA_PWR_MGMT_1 = 0x6B
MPU6050_RA_PWR_MGMT_2 = 0x6C
MPU6050_RA_BANK_SEL = 0x6D
MPU6050_RA_MEM_START_ADDR = 0x6E
MPU6050_RA_MEM_R_W = 0x6F
MPU6050_RA_DMP_CFG_1 = 0x70
MPU6050_RA_DMP_CFG_2 = 0x71
MPU6050_RA_FIFO_COUNTH = 0x72
MPU6050_RA_FIFO_COUNTL = 0x73
MPU6050_RA_FIFO_R_W = 0x74
MPU6050_RA_WHO_AM_I = 0x75
MPU6050_SELF_TEST_XA_1_BIT = 0x07
MPU6050_SELF_TEST_XA_1_LENGTH = 0x03
MPU6050_SELF_TEST_XA_2_BIT = 0x05
MPU6050_SELF_TEST_XA_2_LENGTH = 0x02
MPU6050_SELF_TEST_YA_1_BIT = 0x07
MPU6050_SELF_TEST_YA_1_LENGTH = 0x03
MPU6050_SELF_TEST_YA_2_BIT = 0x03
MPU6050_SELF_TEST_YA_2_LENGTH = 0x02
MPU6050_SELF_TEST_ZA_1_BIT = 0x07
MPU6050_SELF_TEST_ZA_1_LENGTH = 0x03
MPU6050_SELF_TEST_ZA_2_BIT = 0x01
MPU6050_SELF_TEST_ZA_2_LENGTH = 0x02
MPU6050_SELF_TEST_XG_1_BIT = 0x04
MPU6050_SELF_TEST_XG_1_LENGTH = 0x05
MPU6050_SELF_TEST_YG_1_BIT = 0x04
MPU6050_SELF_TEST_YG_1_LENGTH = 0x05
MPU6050_SELF_TEST_ZG_1_BIT = 0x04
MPU6050_SELF_TEST_ZG_1_LENGTH = 0x05
MPU6050_TC_PWR_MODE_BIT = 7
MPU6050_TC_OFFSET_BIT = 6
MPU6050_TC_OFFSET_LENGTH = 6
MPU6050_TC_OTP_BNK_VLD_BIT = 0
MPU6050_VDDIO_LEVEL_VLOGIC = 0
MPU6050_VDDIO_LEVEL_VDD = 1
MPU6050_CFG_EXT_SYNC_SET_BIT = 5
MPU6050_CFG_EXT_SYNC_SET_LENGTH = 3
MPU6050_CFG_DLPF_CFG_BIT = 2
MPU6050_CFG_DLPF_CFG_LENGTH = 3
MPU6050_EXT_SYNC_DISABLED = 0x0
MPU6050_EXT_SYNC_TEMP_OUT_L = 0x1
MPU6050_EXT_SYNC_GYRO_XOUT_L = 0x2
MPU6050_EXT_SYNC_GYRO_YOUT_L = 0x3
MPU6050_EXT_SYNC_GYRO_ZOUT_L = 0x4
MPU6050_EXT_SYNC_ACCEL_XOUT_L = 0x5
MPU6050_EXT_SYNC_ACCEL_YOUT_L = 0x6
MPU6050_EXT_SYNC_ACCEL_ZOUT_L = 0x7
MPU6050_DLPF_BW_256 = 0x00
MPU6050_DLPF_BW_188 = 0x01
MPU6050_DLPF_BW_98 = 0x02
MPU6050_DLPF_BW_42 = 0x03
MPU6050_DLPF_BW_20 = 0x04
MPU6050_DLPF_BW_10 = 0x05
MPU6050_DLPF_BW_5 = 0x06
MPU6050_GCONFIG_FS_SEL_BIT = 4
MPU6050_GCONFIG_FS_SEL_LENGTH = 2
MPU6050_GYRO_FS_250 = 0x00
MPU6050_GYRO_FS_500 = 0x01
MPU6050_GYRO_FS_1000 = 0x02
MPU6050_GYRO_FS_2000 = 0x03
MPU6050_ACONFIG_XA_ST_BIT = 7
MPU6050_ACONFIG_YA_ST_BIT = 6
MPU6050_ACONFIG_ZA_ST_BIT = 5
MPU6050_ACONFIG_AFS_SEL_BIT = 4
MPU6050_ACONFIG_AFS_SEL_LENGTH = 2
MPU6050_ACONFIG_ACCEL_HPF_BIT = 2
MPU6050_ACONFIG_ACCEL_HPF_LENGTH = 3
MPU6050_ACCEL_FS_2 = 0x00
MPU6050_ACCEL_FS_4 = 0x01
MPU6050_ACCEL_FS_8 = 0x02
MPU6050_ACCEL_FS_16 = 0x03
MPU6050_DHPF_RESET = 0x00
MPU6050_DHPF_5 = 0x01
MPU6050_DHPF_2P5 = 0x02
MPU6050_DHPF_1P25
|
drpaneas/linuxed.gr
|
lib/python2.7/site-packages/dateutil/rrule.py
|
Python
|
mit
| 47,634
| 0.000084
|
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import sys
from six import advance_iterator, integer_types
from six.moves import _thread
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n == 0")
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
            self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
            return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
    # __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
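# Illustrative semantics of the three queries above (annotation, not part of
# the original module), for a DAILY rule r over 2014-01-01 .. 2014-01-05:
#   r.before(datetime.datetime(2014, 1, 3))  -> the Jan 2 occurrence
#   r.after(datetime.datetime(2014, 1, 3))   -> the Jan 4 occurrence
#   r.between(jan1, jan5)                    -> [Jan 2, Jan 3, Jan 4]
# With inc=True the boundary datetimes themselves become eligible.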
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
|
baverman/cakeplant
|
cakeplant/bank/model.py
|
Python
|
mit
| 573
| 0.012216
|
import calendar
db = [None]
def get_month_transaction_days(acc, year, month):
    monthdays = calendar.monthrange(year, month)[1]  # monthrange() -> (first weekday, days in month)
result = db[0].view('bank/transaction_days', startkey=[acc._id, year, month, 1],
endkey=[acc._id, year, month, monthdays], group=True, group_level=4).all()
return [r['key'][-1] for r in result]
def get_what_choice():
result = db[0].view('bank/what_choice', group=True)
return [r['key'] for r in result]
def get_who_choice():
result = db[0].view('bank/who_choice', group=True)
    return [r['key'] for r in result]
|
Lucky0604/algorithms
|
sort/bubble-sort.py
|
Python
|
mit
| 1,125
| 0.002361
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Bubble sort: each pass starts at the first element and compares it with the
element after it, swapping the two whenever the earlier one is larger, and
repeating until that element has moved as far as it can. Every pass therefore
places the largest of the remaining elements at the "end" of the sequence
(not counting the elements already sorted behind it). Note the check for
whether the list is already sorted, which allows an early exit. Time
complexity is O(n^2).
'''
def short_bubble_sort(a_list):
    exchange = True
pass_num = len(a_list) - 1
while pass_num > 0 and exchange:
exchange = False
for i in range(pass_num):
if a_list[i] > a_list[i + 1]:
exchange = True
# temp = a_list[i]
# a_list[i] = a_list[i + 1]
# a_list[i + 1] = temp
a_list[i], a_list[i + 1] = a_list[i + 1], a_list[i]
pass_num = pass_num - 1
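# Illustrative trace of one pass (annotation, not in the original): starting
# from [3, 1, 2], the comparisons go (3,1) -> swap -> [1, 3, 2], then
# (3,2) -> swap -> [1, 2, 3]; the largest value has bubbled to the end, so
# pass_num shrinks and the sorted tail is never revisited.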
if __name__ == '__main__':
a_list = [20, 40, 50, 22, 100, 90]
short_bubble_sort(a_list)
print(a_list) # [20, 22, 40, 50, 90, 100]
|
taosheng/jarvis
|
socialBrainTest/python/brain.py
|
Python
|
apache-2.0
| 1,645
| 0.015309
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import requests
class Brain():
def think(self, userSession):
pass
class WikiBrain(Brain):
maxN = ''
maxX = ''
wikiAPI = u'https://zh.wikipedia.org/w/api.php?uselang=zh_tw&action=query&prop=extracts&format=xml&exintro=&titles='
def load(self, info):
print("load information")
def think(self, userSession):
word_n = {}
word_x = {}
        result = u'不懂你的意思'  # i.e. "I don't understand what you mean"
for word in userSession["lastWords"]:
# print(word)
if word.flag == 'n' or word.flag =='x':
wikiResult = self.findWiki(word)
if wikiResult == '':
return result
else:
return wikiResult
else:
pass
return result
def findWiki(self, word):
# print(word)
r = requests.get( self.wikiAPI+word.word )
# print(r.encoding)
#print(dir(r))
return self.getExtract(r.text)
    def getExtract(self, wikiApiRes):
        if wikiApiRes.count('<extract') == 0:
return ""
result = wikiApiRes.split('<extract')[1].split('</extract>')[0]
result = result.replace('xml:space="preserve">','')
result = result.replace('<','')
result = result.replace('p>','')
result = result.replace('/b>','')
result = result.replace('b>','')
result = result.replace('/p>','')
result = result.replace('>','')
result = result.replace('br>','')
return result
|
uwosh/uwosh.filariasis
|
uwosh/filariasis/skins/uwosh.filariasis/indexByBMalayi.py
|
Python
|
gpl-2.0
| 393
| 0.007634
|
if hasattr(context, 'portal_type') and context.portal_type == 'FormSaveData2ContentEntry':
index = None
    if context.getValue('brugia-malayi') != 'None' or ('B. malayi' in context.getValue('a-aegypti-infected-with-filariae')):
if context.getValue('brugia-malayi') == '':
index = None
else:
index = 'BM'
return index
else:
return None
|
yehnan/project_euler_python
|
p019.py
|
Python
|
mit
| 1,291
| 0.015492
|
# Problem 19: Counting Sundays
# https://projecteuler.net/problem=19
def is_leapyear(year):
if year%4 == 0 and year%100 != 0 or year%400 == 0:
return 1
else:
return 0
month = [31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31]
def days_of_month(m, y):
return month[m-1] + (is_leapyear(y) if m == 2 else 0)
def days_of_year(y):
return sum(month) + is_leapyear(y)
# date 1 Jan 1900 represented as (1, 1, 1900)
# 1 Jan 1900 was Monday, days is 1
# 7 Jan 1900 was Sunday, days is 7
def date_to_days(date):
dy, mn, yr = date
days = dy
for y in range(1900, yr):
days += days_of_year(y)
for m in range(1, mn):
days += days_of_month(m, yr)
return days
def is_sunday(days):
return days % 7 == 0
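# Sanity check of the numbering above (annotation, not in the original):
# 7 Jan 1900 was a Sunday, date_to_days((7, 1, 1900)) == 7, and 7 % 7 == 0.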
def cs():
count = 0
for y in range(1901, 2000+1):
        for m in range(1, 12+1):
days = date_to_days((1, m, y))
if is_sunday(days):
count += 1
return count
#
def test():
return 'No test'
def main():
return cs()
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
print(test())
else:
print(main())
|
tastyproject/tasty
|
tasty/tests/functional/protocols/mul/garbled_server_server_client/protocol_setup_server.py
|
Python
|
gpl-3.0
| 756
| 0.005291
|
from tasty.types import conversions
from tasty.types import *
from tasty.types.driver import TestDriver
__params__ = {'la': 32, 'lb': 32, 'da': 10}
driver = TestDriver()
def protocol(client, server, params):
server.ga = Garbled(val=Unsigned(bitlen=764, dim=[1], signed=False, passive=True, empty=True), signed=False, bitlen=764, dim=[1])
server.gb = Garbled(val=Unsigned(bitlen=764, dim=[1], signed=False, passive=True, empty=True), signed=False, bitlen=764, dim=[1])
conversions.Garbled_Garbled_send(server.ga, client.ga, 764, [1], False)
    conversions.Garbled_Garbled_send(server.gb, client.gb, 764, [1], False)
client.gc = client.ga * client.gb
client.c = Unsigned(val=client.gc, passive=True, signed=False, bitlen=1528, dim=[1])
|
daspecster/google-cloud-python
|
speech/google/cloud/speech/client.py
|
Python
|
apache-2.0
| 4,677
| 0
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic client for Google Cloud Speech API."""
import os
from google.cloud.client import Client as BaseClient
from google.cloud.environment_vars import DISABLE_GRPC
from google.cloud.speech._gax import GAPICSpeechAPI
from google.cloud.speech._http import HTTPSpeechAPI
from google.cloud.speech.sample import Sample
_USE_GAX = not os.getenv(DISABLE_GRPC, False)
class Client(BaseClient):
"""Client to bundle configuration needed for API requests.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``http`` object is
passed), falls back to the default inferred from the
environment.
:type http: :class:`~httplib2.Http`
:param http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`~httplib2.Http.request`. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
:type use_gax: bool
:param use_gax: (Optional) Explicitly specifies whether
to use the gRPC transport (via GAX) or HTTP. If unset,
falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC`` environment
variable
"""
SCOPE = ('https://www.googleapis.com/auth/cloud-platform',)
"""The scopes required for authenticating as an API consumer."""
_speech_api = None
def __init__(self, credentials=None, http=None, use_gax=None):
super(Client, self).__init__(credentials=credentials, http=http)
# Save on the actual client class whether we use GAX or not.
if use_gax is None:
self._use_gax = _USE_GAX
else:
self._use_gax = use_gax
def sample(self, content=None, source_uri=None, stream=None, encoding=None,
sample_rate=None):
"""Factory: construct Sample to use when making recognize requests.
:type content: bytes
:param content: (Optional) Bytes containing audio data.
:type source_uri: str
:param source_uri: (Optional) URI that points to a file that contains
audio data bytes as specified in RecognitionConfig.
Currently, only Google Cloud Storage URIs are
supported, which must be specified in the following
format: ``gs://bucket_name/object_name``.
:type stream: file
:param stream: (Optional) File like object to stream.
:type encoding: str
:param encoding: encoding of audio data sent in all RecognitionAudio
messages, can be one of: :attr:`~.Encoding.LINEAR16`,
:attr:`~.Encoding.FLAC`, :attr:`~.Encoding.MULAW`,
:attr:`~.Encoding.AMR`, :attr:`~.Encoding.AMR_WB`
:type sample_rate: int
:param sample_rate: Sample rate in Hertz of the audio data sent in all
requests. Valid values are: 8000-48000. For best
results, set the sampling rate of the audio source
to 16000 Hz. If that's not possible, use the
native sample rate of the audio source (instead of
re-sampling).
:rtype: :class:`~google.cloud.speech.sample.Sample`
:returns: Instance of ``Sample``.
"""
return Sample(content=content, source_uri=source_uri, stream=stream,
encoding=encoding, sample_rate=sample_rate, client=self)
@property
def speech_api(self):
"""Helper for speech-related API calls."""
if self._speech_api is None:
if self._use_gax:
self._speech_api = GAPICSpeechAPI(self)
else:
self._speech_api = HTTPSpeechAPI(self)
return self._speech_api
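# Illustrative usage (not part of the original module); the bucket and object
# names below are placeholders:
#   client = Client()
#   sample = client.sample(source_uri='gs://my-bucket/recording.flac',
#                          encoding='FLAC', sample_rate=16000)
#   api = client.speech_api  # GAX- or HTTP-backed, depending on ``use_gax``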
|
oconnor663/peru
|
tests/test_cache.py
|
Python
|
mit
| 18,779
| 0
|
import asyncio
import os
import time
import peru.cache
from shared import assert_contents, create_dir, make_synchronous, PeruTest
class CacheTest(PeruTest):
@make_synchronous
def setUp(self):
self.cache = yield from peru.cache.Cache(create_dir())
self.content = {
'a': 'foo',
'b/c': 'bar',
'b/d': 'baz',
}
self.content_dir = create_dir(self.content)
self.content_tree = yield from self.cache.import_tree(self.content_dir)
@make_synchronous
def test_basic_export(self):
export_dir = create_dir()
yield from self.cache.export_tree(self.content_tree, export_dir)
assert_contents(export_dir, self.content)
@make_synchronous
def test_export_force_with_preexisting_files(self):
# Create a working tree with a conflicting file.
dirty_content = {'a': 'junk'}
export_dir = create_dir(dirty_content)
# Export should fail by default.
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(self.content_tree, export_dir)
assert_contents(export_dir, dirty_content)
        # But it should succeed with the force flag.
yield from self.cache.export_tree(
self.content_tree, export_dir, force=True)
assert_contents(export_dir, self.content)
@make_synchronous
def test_export_force_with_changed_files(self):
export_dir = create_dir()
yield from self.cache.export_tree(self.content_tree, export_dir)
# If we dirty a file, a resync should fail.
with open(os.path.join(export_dir, 'a'), 'w') as f:
f.write('dirty')
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(
self.content_tree, export_dir, previous_tree=self.content_tree)
# But it should succeed with the --force flag.
yield from self.cache.export_tree(
self.content_tree, export_dir, force=True,
previous_tree=self.content_tree)
assert_contents(export_dir, self.content)
@make_synchronous
def test_multiple_imports(self):
new_content = {'fee/fi': 'fo fum'}
new_tree = yield from self.cache.import_tree(create_dir(new_content))
export_dir = create_dir()
yield from self.cache.export_tree(new_tree, export_dir)
assert_contents(export_dir, new_content)
@make_synchronous
def test_import_with_gitignore(self):
# Make sure our git imports don't get confused by .gitignore files.
new_content = {'fee/fi': 'fo fum', '.gitignore': 'fee/'}
new_tree = yield from self.cache.import_tree(create_dir(new_content))
export_dir = create_dir()
yield from self.cache.export_tree(new_tree, export_dir)
assert_contents(export_dir, new_content)
@make_synchronous
def test_import_with_files(self):
all_content = {'foo': '',
'bar': '',
'baz/bing': ''}
test_dir = create_dir(all_content)
tree = yield from self.cache.import_tree(
test_dir, picks=['foo', 'baz'])
expected_content = {'foo': '',
'baz/bing': ''}
out_dir = create_dir()
yield from self.cache.export_tree(tree, out_dir)
assert_contents(out_dir, expected_content)
@make_synchronous
def test_export_with_existing_files(self):
# Create a dir with an existing file that doesn't conflict.
more_content = {'untracked': 'stuff'}
export_dir = create_dir(more_content)
yield from self.cache.export_tree(self.content_tree, export_dir)
expected_content = self.content.copy()
expected_content.update(more_content)
assert_contents(export_dir, expected_content)
# But if we try to export twice, the export_dir will now have
# conflicting files, and export_tree() should throw.
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(self.content_tree, export_dir)
# By default, git's checkout safety doesn't protect files that are
# .gitignore'd. Make sure we still throw the right errors in the
# presence of a .gitignore file.
with open(os.path.join(export_dir, '.gitignore'), 'w') as f:
f.write('*\n') # .gitignore everything
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(self.content_tree, export_dir)
@make_synchronous
def test_previous_tree(self):
export_dir = create_dir(self.content)
# Create some new content.
new_content = self.content.copy()
new_content['a'] += ' different'
new_content['newfile'] = 'newfile stuff'
new_dir = create_dir(new_content)
new_tree = yield from self.cache.import_tree(new_dir)
# Now use cache.export_tree to move from the original content to the
# different content.
yield from self.cache.export_tree(
new_tree, export_dir, previous_tree=self.content_tree)
assert_contents(export_dir, new_content)
# Now do the same thing again, but use a dirty working copy. This
# should cause an error.
dirty_content = self.content.copy()
dirty_content['a'] += ' dirty'
dirty_dir = create_dir(dirty_content)
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(
new_tree, dirty_dir, previous_tree=self.content_tree)
# But if the file is simply missing, it should work.
os.remove(os.path.join(dirty_dir, 'a'))
yield from self.cache.export_tree(
new_tree, dirty_dir, previous_tree=self.content_tree)
assert_contents(dirty_dir, new_content)
# Make sure we get an error even if the dirty file is unchanged between
# the previous tree and the new one.
no_conflict_dirty_content = self.content.copy()
no_conflict_dirty_content['b/c'] += ' dirty'
no_conflict_dirty_dir = create_dir(no_conflict_dirty_content)
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
yield from self.cache.export_tree(new_tree, no_conflict_dirty_dir,
previous_tree=self.content_tree)
@make_synchronous
def test_missing_files_in_previous_tree(self):
'''Export should allow missing files, and it should recreate them.'''
export_dir = create_dir()
# Nothing in content_tree exists yet, so this export should be the same
# as if previous_tree wasn't specified.
yield from self.cache.export_tree(
self.content_tree, export_dir, previous_tree=self.content_tree)
assert_contents(export_dir, self.content)
# Make sure the same applies with just a single missing file.
os.remove(os.path.join(export_dir, 'a'))
yield from self.cache.export_tree(
self.content_tree, export_dir, previous_tree=self.content_tree)
assert_contents(export_dir, self.content)
    @make_synchronous
def test_merge_trees(self):
merged_tree = yield from self.cache.merge_trees(
self.content_tree, self.content_tree, 'subdir')
expected_content = dict(self.content)
for path, content in self.content.items():
expected_content[os.path.join('subdir', path)] = content
export_dir = create_dir()
yield from self.cache.export_tree(merged_tree, export_dir)
assert_contents(export_dir, expected_content)
        with self.assertRaises(peru.cache.MergeConflictError):
# subdir/ is already populated, so this merge should throw.
yield from self.cache.merge_trees(
merged_tree, self.content_tree, 'subdir')
@make_synchronous
def test_merge_with_deep_prefix(self):
'''This test was inspired by a bug on Windows where we would give git a
backslash-separated merge prefix, even though git demands
|