repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
jdfreder/testandroid | helpguide/rich.py | Python | mit | 4,830 | 0.003934 | """Contains RichPage class"""
from __future__ import print_function
from kivy.uix.listview import ListView, CompositeListItem, ListItemButton, ListItemLabel
from kivy.adapters.simplelistadapter import SimpleListAdapter
from kivy.uix.label import Label
from kivy.uix.rst import RstDocument
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.anchorlayout import AnchorLayout
from backwards import BackwardsPage
class RichPage(BackwardsPage):
"""A link of links to other pages."""
singletons = {}
@classmethod
def get_page(cls, app, history, backend, page_id):
if page_id not in cls.singletons:
cls.singletons[page_id] = cls(app, history, backend, page_id)
return cls.singletons[page_id]
def __init__(self, app, history, backend, page_id):
"""Constructor
Parameters
----------
app: Kivy App instance
history: PageBase instance
Reference to the history so we can return to it.
backend: BackendBase instance"""
super(RichPage, self).__init__(app, history)
self._backend = backend
self._page_id = page_id
self.contents = BoxLayout(orientation='vertical')
self.body.add_widget(self.contents)
self._richtext = RstDocument(text='', size_hint=(1., None), height=50)
self.contents.add_widget(self._richtext)
self._simple_list_adapter = SimpleListAdapter(
data=[],
args_converter=self._render_link,
selection_mode='single',
cls=CompositeListItem)
self.list_view = ListView(adapter=self._simple_list_adapter)
self.contents.add_widget(self.list_view)
title_float = FloatLayout()
self.body.add_widget(title_float)
title_anchor = AnchorLayout(anchor_x='left', anchor_y='top', size_hint=(1., 1.), pos=(200., 30.))
title_float.add_widget(title_anchor)
self._title = Label(text='', size_hint=(None, None), markup=True, halign='left')
title_anchor.add_widget(self._title)
self.reload()
def reload(self):
"""Reloads the page's contents."""
self._simple_list_adapter.data = self._backend.get_page_links(self._page_id) or []
self._title.text = '[color=2299ff][size=40] %s[/size][/color]' % self._backend.get_page_title(self._page_id) or ''
contents = self._backend.get_page_contents(self._page_id) or ''
self._richtext.text = contents
# Try to be smart about the page layout. Look at the number of lines in
# the contents. If it's more than 4, assume the contents are the
# primary interest of this page and size the rich text accordingly, else
# the links are the primary interest.
lines = len(contents.split('\n'))
if lines > 4:
self._richtext.size_hint = (1., 0.8)
self.list_view.size_hint = (1., 0.2)
self._richtext.colors['background'] = 'eaeaea'
self._richtext.colors['paragraph'] = '202020'
else:
self._richtext.size_hint = (1., None)
self._richtext.height = 20 + lines * 20
| self.list_view.size_hint = (1., 1.)
self._richtext.col | ors['background'] = '000000'
self._richtext.colors['paragraph'] = '005599'
def _render_link(self, row_index, link_page_id):
def _on_open(list_adapter, *args):
if not isinstance(self.history, list):
new_history = [self.history, self]
else:
new_history = self.history + [self]
RichPage.get_page(self.app, new_history, self._backend, link_page_id).show(new_history)
self.hide()
return {
'text': link_page_id,
'size_hint_y': None,
'height': 25, 'index': row_index,
'halign': 'left',
'cls_dicts': [
{
'cls': ListItemLabel,
'kwargs': {
'text': "[b]{0}[/b]".format(self._backend.get_page_title(link_page_id) or 'Untitled'),
'size_hint_x': 0.1,
'halign': 'left',
'markup': True
}
}, {
'cls': ListItemLabel,
'kwargs': {
'text': "{0}".format(self._backend.get_page_subtitle(link_page_id) or ''),
'is_representing_cls': True,
'size_hint_x': 0.8,
'halign': 'left'
}
}, {
'cls': ListItemButton,
'kwargs': { 'text': 'Open', 'on_press': _on_open, 'size_hint_x': 0.1}
}
]}
|
vkuznet/rep | rep/metaml/stacking.py | Python | apache-2.0 | 4,905 | 0.002243 | """
This module contains stacking strategies (meta-algorithms of machine learning).
"""
from __future__ import division, print_function, absolute_import
import numpy
from sklearn.base import clone
from ..estimators import Classifier
from ..estimators.utils import check_inputs, _get_features
__author__ = 'Alex Rogozhnikov'
class FeatureSplitter(Classifier):
"""
Dataset is split by values of `split_feature`,
for each value of feature, new classifier is trained.
When building predictions, classifier predicts the events with
the same value of `split_feature` it was trained on.
:param str split_feature: the name of key feature,
:param base_estimator: the classifier, its' copies are trained on parts of dataset
:param list[str] features: list of columns classifier uses
.. note:: `split_feature` must be in list of `features`
"""
def __init__(self, split_feature, base_estimator, train_features=None):
self.base_estimator = base_estimator
self.split_feature = split_feature
self.train_features = train_features
Classifier.__init__(self, features=self._features())
def _features(self):
if self.train_features is None:
return None
else:
return list(self.train_features) + [self.split_feature]
def _get_features(self, X, allow_nans=False):
"""
:param pandas.DataFrame X: train dataset
:return: pandas.DataFrame with used features
"""
split_column_values, _ = _get_features([self.split_feature], X, allow_nans=allow_nans)
split_column_values = numpy.ravel(numpy.array(split_column_values))
X_prepared, self.train_features = _get_features(self.train_features, X, allow_nans=allow_nans)
self.features = self._features()
return split_column_values, X_prepared
def fit(self, X, y, sample_weight=None):
"""
Fit dataset.
:param X: pandas.DataFrame of shape [n_samples, n_features] with features
:param y: array-like of shape [n_samples] with targets
:param sample_weight: array-like of shape [n_samples] with events weights or None.
:return: self
"""
if hasattr(self.base_estimator, 'features'):
assert self.base_estimator.features is None, 'Base estimator must have None features! ' \
'Use features parameter in Folding to fix it'
X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=True)
# TODO cover the case of missing labels in subsets.
split_column_values, X = self._get_features(X)
self._set_classes(y)
self.base_estimators = {}
for value in numpy.unique(split_column_values):
rows = numpy.array(split_column_values) == value
base_classifier = clone(self.base_estimator)
if sample_weight is None:
base_classifier.fit(X.iloc[rows, :], y[rows])
else:
base_classifier.fit(X.iloc[rows, :], y[rows], sample_weight=sample_weight[rows])
self.base_estimators[value] = base_classifier
return self
def predict_proba(self, X):
"""
Predict probabilities. Each event will be predicted by the classifier
with trained on corresponding value of `split_feature`
:param X: pandas.DataFrame of shape [n_samples, n_features]
:return: probabilities of shape [n_samples, n_classes]
"""
s | plit_col | umn_values, X = self._get_features(X)
result = numpy.zeros([len(X), self.n_classes_])
for value, estimator in self.base_estimators.items():
mask = split_column_values == value
result[mask, :] = estimator.predict_proba(X.loc[mask, :])
return result
def staged_predict_proba(self, X):
"""
Predict probabilities after each stage of base classifier.
Each event will be predicted by the classifier
with trained on corresponding value of `split_feature`
:param X: pandas.DataFrame of shape [n_samples, n_features]
:return: iterable sequence of numpy.arrays of shape [n_samples, n_classes]
"""
split_column_values, X = self._get_features(X)
result = numpy.zeros([len(X), self.n_classes_])
masks_iterators = []
for value, estimator in self.base_estimators.items():
mask = split_column_values == value
prediction_iterator = estimator.staged_predict_proba(X.loc[mask, :])
masks_iterators.append([mask, prediction_iterator])
try:
while True:
for mask, prediction_iterator in masks_iterators:
result[mask, :] = next(prediction_iterator)
yield result
except StopIteration:
pass
|
thaim/ansible | test/units/modules/network/check_point/test_cp_mgmt_install_policy.py | Python | mit | 2,564 | 0.00156 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more detail | s.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
f | rom __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_install_policy
PAYLOAD = {
"access": True,
"targets": [
"corporate-gateway"
],
"policy_package": "standard",
"threat_prevention": True,
"wait_for_task": False
}
RETURN_PAYLOAD = {
"task-id": "53de74b7-8f19-4cbe-99fc-a81ef0759bad"
}
command = 'install-policy'
failure_msg = '{command failed}'
class TestCheckpointInstallPolicy(object):
module = cp_mgmt_install_policy
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_command(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, RETURN_PAYLOAD)
result = self._run_module(PAYLOAD)
assert result['changed']
assert RETURN_PAYLOAD == result[command]
def test_command_fail(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(PAYLOAD)
except Exception as e:
result = e.args[0]
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
aferrari07/devops-aula07 | src/testes_inic.py | Python | apache-2.0 | 355 | 0.028169 | import jogovelha
import sys
erroInicializar = False
jogovelha.inicializar()
jogo = jogovelha.tabulei | ro()
if len(jogo) != 3:
erroInicializar = True
else:
for linha in jogo:
if len(linha) != 3:
erroInicializar = True
else:
for elemento in | linha:
if elemento != '.':
erroInicializar = True
if erroInicializar:
print('Erro!')
sys.exit(1)
else:
sys.exit(0)
|
dtklein/vFense | tp/src/scripts/make_api_calls.py | Python | lgpl-3.0 | 650 | 0.003077 | import requests
import json
import cookiel | ib
url = 'https://online-demo.toppatch.com'
api_version = '/api/v1'
api_call = '/agents'
login_uri = '/login'
creds = {'username': 'admin', 'password': 'toppatch'}
session = requests.session()
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
jar = cookielib.CookieJar()
authenticated = session.post(url + login_uri, data=json.dumps(creds), verify=False, headers=headers, cookies=jar)
if aut | henticated.ok:
print 'authenticated'
data = session.get(url + api_version + api_call, verify=False, headers=headers, cookies=jar)
if data.ok:
print json.loads(data.content)
|
flavio-casacurta/Nat2Py | Adabas/demo/LobDemoCenter/settings.py | Python | mit | 4,386 | 0.013452 | """settings.py - LOB Demo Center settings file
Defines databases and other resources for the operation of the
LOB Demo Center (LDC) application.
Note: The paramters in the settings module must adhere to the normal Python syntax
otherwise errors will be reported from the interpreter
$Date: 2008-08-22 19:52:28 +0200 (Fri, 22 Aug 2008) $
$Rev: 58 $
"""
# Copyright 2004-2008 Software AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing | permissions and
# limitations under the License.
# MAXIMAGESIZE defines maximum image size
# Make sure that the Adabas nuclei and NetWork use adequate parameters
# for LU and NAB
MAXIMAGESIZE=5100000
# MAXADATCP maximum size for ADATCP
# ADATCP is currenlty limited to 999,9 | 99 bytes
MAXADATCP=999999
# HTTP Proxy
# Define the proxies to reach the outer world from within the firewall
proxies={
'http': 'http://httpprox.example.com:8080/',
}
noproxy=('localhost','.exa','.example.de')
# List of databases that can be selected in the LDC main menu
# each entry is a list of
# DBID, file-number, adatcpTrueFalse, display text
#
# adatcpTrueFalse: may have the values 0, 1 or False, True for
# databases with ADATCP. If True or 1: will use MAXADATCP as max.
# record buffer size
# an entry with DBID==0 may be used for grouping following entries
#
DATABASES=(
( 8, False, '00008 - v81 - z/OS 1.7 on EXAF - WCP61'),
( 0, 0, 'Demo Environment'),
( 2, False, '00002 - v61 - win32 - local'),
( 104, False, '00104 - v81 - z/OS 1.7 on EXAA - WCP61 '),
( 0, 0, 'Development environment - ... not available all the time ...'),
( 12, False, '00012 - v61 - pc01 win32 - local '),
# ( 61, False, '00061 - v61 - sunsol9/64 on SUN3 - local'),
( 220, False, '00220 - v81 - z/OS 1.7 on EXAF - WCP61'),
(22081, False, '22081 - v81 - z/OS 1.7 on EXAF - WCP61 '),
(51081, True, '51081 - v81 - z/OS 1.7 on EXAF - ADATCP'),
)
# List of file numbers that can be selected in the LDC main menu
FILES=( 88, 86)
# List of images/urls that can be selected in the LDC main menu
# each entry is a list of
# group-flag, description, url
#
IMGLOCL="c|/ADA/WEB/LobDemoCenter/images" # path to images for this application, omitting starting '/'
# "FS/fs1234/sun3/prog/exa/adapy/adabas/demo/LobDemoCenter/images"
IMAGES=(
(1, '< 10000 bytes',''),
(0, 'local - adabas.gif (2781 b)','file:///%s/adabas.gif'%IMGLOCL),
(0, 'http://www.apache.org/images/asf_logo_wide.gif (5866 b)','http://www.apache.org/images/asf_logo_wide.gif'),
(1, '< 100000 bytes',''),
(0, 'local - adabas2006.jpg (13506 b)','file:///%s/adabas2006.jpg'%IMGLOCL),
(0, 'local - natureofbusiness.jpg (21501 b)','file:///%s/natureofbusiness.jpg'%IMGLOCL),
(1, '< 1,0 MB',''),
(0, 'http://antwrp.gsfc.nasa.gov/...earthlights02_dmsp_big.jpg (207685 b)','http://antwrp.gsfc.nasa.gov/apod/image/0610/earthlights02_dmsp_big.jpg'),
(0, 'local - softwareag-hq-in-summer.jpg (206903 b)','file:///%s/softwareag-hq-in-summer.jpg'%IMGLOCL),
(0, 'local - softwareag-hq-in-winter.jpg (444101 b)','file:///%s/softwareag-hq-in-winter.jpg'%IMGLOCL),
(1, '< 2,0 MB',''),
(0, 'http://antwrp.gsfc.nasa.gov/.../robinson_sts114_big.jpg (1270197 b)','http://antwrp.gsfc.nasa.gov/apod/image/0605/robinson_sts114_big.jpg'),
(0, 'http://upload.wikimedia.org/.../Acinonyx_jubatus_walking_edit.jpg (1526510 b)','http://upload.wikimedia.org/wikipedia/commons/4/42/Acinonyx_jubatus_walking_edit.jpg'),
(1, '< 4,0 MB',''),
(0, 'http://antwrp.gsfc.nasa.gov/.../skylab_nasa_big.jpg (2289866 b)','http://antwrp.gsfc.nasa.gov/apod/image/0604/skylab_nasa_big.jpg'),
)
# Path to store temporary files like thumbnails and readFDT output
# default on WIN-environment is your 'Apache2' folder
TEMPPATH=''
# TEMPPATH='/FS/fs1234//sun3/prog/exa/apache2/tmp/'
|
takuan-osho/yael | yael/container.py | Python | mit | 6,350 | 0.001102 | #!/usr/bin/env python
# coding=utf-8
"""
The `META-INF/container.xml` file, storing:
1. the Rendition objects
2. the Rendition Mapping Document
"""
from yael.element import Element
from yael.jsonable import JSONAble
from yael.mediatype import MediaType
from yael.namespace import Namespace
from yael.rendition import Rendition
from yael.rmdocument import RMDocument
import yael.util
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2015, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.9"
__email__ = "alberto@albertopettarin.it"
__status__ = "Development"
class Container(Element):
"""
Build the `META-INF/container.xml` file
or parse it from `string` or `obj`.
"""
A_ACCESSMODE = "accessMode"
A_FULL_PATH = "full-path"
A_HREF = "href"
A_LABEL = "label"
A_LANGUAGE = "language"
A_LAYOUT = "layout"
A_MEDIA = "media"
A_MEDIA_TYPE = "media-type"
A_MEDIA_TYPE = "media-type"
A_REL = "rel"
A_NS_ACCESSMODE = "{{{0}}}{1}".format(Namespace.RENDITION, A_ACCESSMODE)
A_NS_LABEL = "{{{0}}}{1}".format(Namespace.RENDITION, A_LABEL)
A_NS_LANGUAGE = "{{{0}}}{1}".format(Namespace.RENDITION, A_LANGUAGE)
A_NS_LAYOUT = "{{{0}}}{1}".format(Namespace.RENDITION, A_LAYOUT)
A_NS_MEDIA = "{{{0}}}{1}".format(Namespace.RENDITION, A_MEDIA)
E_CONTAINER = "container"
E_LINK = "link"
E_ROOTFILE = "rootfile"
E_ROOTFILES = "rootfiles"
V_ACCESSMODE_AUDITORY = "auditory"
V_ACCESSMODE_TACTILE = "tactile"
V_ACCESSMODE_TEXTUAL = "textual"
V_ACCESSMODE_VISUAL = "visual"
V_LAYOUT_PRE_PAGINATED = "pre-paginated"
V_LAYOUT_REFLOWABLE = "reflowable"
V_REL_MAPPING = "mapping"
def __init__(self, internal_path=None, obj=None, string=None):
self.renditions = []
self.rm_document = None
Element.__init__(
self,
internal_path=internal_path,
obj=obj,
string=string)
def json_object(self, recursive=True):
obj = {
"internal_path": self.internal_path,
"renditions": len(self.renditions),
"rm_document": (self.rm_document == None),
}
if recursive:
obj["renditions"] = JSONAble.safe(self.renditions)
obj["rm_document"] = JSONAble.safe(self.rm_document)
return obj
def parse_object(self, obj):
try:
# locate `<container>` element
container_arr = yael.util.query_xpath(
obj=obj,
query="/{0}:{1}",
args=['c', Container.E_CONTAINER],
nsp={'c': Namespace.CONTAINER},
required=Container.E_CONTAINER)
container = container_arr[0]
# locate `<rootfile>` elements
rootfile_arr = yael.util.query_xpath(
obj=container,
query="{0}:{1}/{0}:{2}",
args=['c', Container.E_ROOTFILES, Container.E_ROOTFILE],
nsp={'c': Namespace.CONTAINER},
required=None)
for rootfile in rootfile_arr:
self._parse_rootfile(rootfile)
# locate `<link>` optional element
link_arr = yael.util.query_xpath(
obj=container,
query="{0}:{1}",
args=['c', Container.E_LINK],
nsp={'c': Namespace.CONTAINER},
required=None)
for link in link_arr:
self._parse_link(link)
except:
raise Exception("Error while parsing the given object")
def add_rendition(self, rendition):
"""
Add a Rendition to this Container.
:param rendition: Rendition to be added
:type rendition: :class:`yael.rendition.Rendition`
"""
self.renditions.append(rendition)
@property
def renditions(self):
"""
The list of Rendition objects in this Container.
:rtype: list of :class:`yael.rendition.Rendition`
"""
return self.__renditions
@renditions.setter
def renditions(self, renditions):
self.__renditions = renditions
@property
def rm_document(self):
"""
The Rendition Mapping Document object in this Container,
or None if it is not present.
:rtype: :class:`yael.rmdocument.RMDocument`
"""
return self.__rm_document
@rm_document.setter
def rm_document(self, rm_document):
self.__rm_document = rm_document
@property
def default_rendition(self):
"""
The Default Rendition object in this Container,
or None if there are no Renditions.
:rtype: :class:`yael.rendition.Rendition`
"""
return yael.util.safe_first(self.renditions)
def _parse_rootfile(self, obj):
"""
Parse the given `<rootfile>` node object,
and append the parsed Rendition to this Container.
"""
# required attributes
full_path = obj.get(Container.A_FULL_PATH)
media_type = obj.get(Container.A | _MEDIA_TYPE)
if (full_path != None) and (media_type != None):
r_obj = Rendition(internal_path=full_path)
r_obj.v_full_path = full_path
r_obj.v_media_type = media_type
# multiple renditions
r_obj.v_rendition_accessmode = obj.get(Container.A_NS_ACCESSMODE)
r_obj.v_rendition_label = obj.get(Container.A_NS_LABEL)
r_ | obj.v_rendition_language = obj.get(Container.A_NS_LANGUAGE)
r_obj.v_rendition_layout = obj.get(Container.A_NS_LAYOUT)
r_obj.v_rendition_media = obj.get(Container.A_NS_MEDIA)
self.renditions.append(r_obj)
def _parse_link(self, obj):
"""
Parse the given `<link>` node object,
and append the parsed RMDocument
to this Container.
"""
# required attributes for rendition mapping document
rel = obj.get(Container.A_REL)
href = obj.get(Container.A_HREF)
media_type = obj.get(Container.A_MEDIA_TYPE)
if ((rel == Container.V_REL_MAPPING) and
(media_type == MediaType.XHTML) and
(href != None)):
self.rm_document = RMDocument(internal_path=href)
return None
|
srmcc/survival_factor_model | docs/conf.py | Python | gpl-3.0 | 8,651 | 0.006358 | # -*- coding: utf-8 -*-
#
# Survival Factor Analysis documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 19 15:12:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
##
sys.path.insert(0, os.path.abspath(".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Survival Factor Analysis'
copyright = u'2016, Shannon McCurdy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# | The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an | image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SurvivalFactorAnalysisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SurvivalFactorAnalysis.tex', u'Survival Factor Analysis Documentation',
u'Shannon McCurdy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'survivalfactoranalysis', u'Survival Factor Analysis Documentation',
[u'Shannon McCurdy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SurvivalFactorAnalysis', u'Survival Factor Analysis Documentation',
u'Shannon McCurdy', 'SurvivalFactorAnalysis', 'One line description of project.',
'Miscellaneous'),
]
# Documents t |
sk89q/Plumeria | orchard/graphviz.py | Python | mit | 2,670 | 0.001124 | """Generate directed and non-directed graphs using Graphviz."""
import asyncio
import io
import os
import subprocess
import threading
import dot_parser
from dot_parser import graph_definition
from pyparsing import ParseException
from plumeria.command import commands, CommandError
from plumeria.message import Response, MemoryAttachment
from plumeria.util.message import strip_markdown_code
from | plumeria.util.ratelimit import rate_limit
lock = threading.RLock()
def parse_dot_data(s):
with lock:
dot_parser.top_graphs = [] # Clear list of existing graphs because this module is bad
parser = graph_definition | ()
parser.parseWithTabs()
tokens = parser.parseString(s)
return list(tokens)
def render_dot(graph, format="png"):
program = 'dot'
if os.name == 'nt' and not program.endswith('.exe'):
program += '.exe'
p = subprocess.Popen(
[program, '-T' + format],
env={'SERVER_NAME': 'plumeria',
'GV_FILE_PATH': '/dev/null'},
shell=False,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = p.communicate(input=graph.to_string().encode('utf-8'))
if p.returncode != 0:
raise Exception("Received non-zero return code from grapviz\n\nError: {}".format(stderr.decode('utf-8')))
return stdout
async def handle_request(message, type):
content = strip_markdown_code(message.content.strip())
def execute():
# Use parser as a rudimentary validator
graph = parse_dot_data(type + " G {\n" + content + "\n}")[0]
buf = io.BytesIO()
buf.write(render_dot(graph, format="png"))
return buf
try:
buf = await asyncio.get_event_loop().run_in_executor(None, execute)
return Response("", attachments=[MemoryAttachment(buf, "graph.png", "image/png")])
except ParseException as e:
raise CommandError("Parse error: {}".format(str(e)))
@commands.create("graph", category="Graphing")
@rate_limit()
async def graph(message):
"""
Generates a non-directed graph using DOT syntax and drawn using Graphviz.
Example::
/graph
a -- b
b -- c
c -- a
"""
return await handle_request(message, "graph")
@commands.create("digraph", category="Graphing")
@rate_limit()
async def digraph(message):
"""
Generates a directed graph using DOT syntax and drawn using Graphviz.
Example::
/digraph
a -> b
b -> c
c -> a
"""
return await handle_request(message, "digraph")
def setup():
commands.add(graph)
commands.add(digraph)
|
oblitum/ycmd | ycmd/tests/clang/testdata/test-include/.ycm_extra_conf.py | Python | gpl-3.0 | 212 | 0.066038 | import os.path
def FlagsForFile( filename, **kwargs ):
  """ycm_extra_conf hook: return compile flags for *filename*.

  Adds the sibling 'quote' directory as a quoted-include path and the
  sibling 'system' directory as a regular include path.
  """
  d = os.path.dirname( filename )
  return { 'flags': [ '-iquote', os.path.join( d, 'quote' ),
                      '-I', os.path.join( d, 'system' ) ] }
RedHatInsights/insights-core | insights/parsers/tests/test_net_namespace.py | Python | apache-2.0 | 1,815 | 0.001102 | import doctest
from insights.parsers import net_namespace
from insights.parsers.net_namespace import NetworkNamespace
from insights.tests import context_wrap
from insights.parsers import SkipException
import pytest
LIST_NAMESPACE = """
temp_netns temp_netns_2 temp_netns_3
""".strip()
LIST_NAMESPACE_2 = """
temp_netns
""".strip()
LIST_NAMESPACE_3 = """
""".strip()
CMD_LIST_NAMESPACE = """
temp_netns_3
temp_netns_2
temp_netns
""".strip()
CMD_LIST_NAMESPACE_2 = """
temp_netns_3
""".strip()
CMD_LIST_NAMESPACE_3 = """
""".strip()
def test_netstat_doc_examples():
    # Run the parser module's docstring examples via doctest, injecting a
    # pre-built NetworkNamespace as `netns_obj` so the examples can use it.
    env = {
        'netns_obj': NetworkNamespace(context_wrap(LIST_NAMESPACE))
    }
    failed, total = doctest.testmod(net_namespace, globs=env)
    assert failed == 0
def test_bond_class():
    """Parse both space-separated and line-separated namespace listings."""
    netns_obj = NetworkNamespace(context_wrap(LIST_NAMESPACE))
    # Original compared `a.sort() == b.sort()`: list.sort() returns None, so
    # that assertion was vacuously True. Compare sorted copies instead.
    assert sorted(netns_obj.netns_list) == sorted(['temp_netns', 'temp_netns_2', 'temp_netns_3'])
    assert len(netns_obj.netns_list) == 3
    netns_obj = NetworkNamespace(context_wrap(LIST_NAMESPACE_2))
    assert netns_obj.netns_list == ['temp_netns']
    assert len(netns_obj.netns_list) == 1
    netns_obj = NetworkNamespace(context_wrap(CMD_LIST_NAMESPACE))
    assert sorted(netns_obj.netns_list) == sorted(['temp_netns', 'temp_netns_2', 'temp_netns_3'])
    assert len(netns_obj.netns_list) == 3
    netns_obj = NetworkNamespace(context_wrap(CMD_LIST_NAMESPACE_2))
    assert netns_obj.netns_list == ['temp_netns_3']
    assert len(netns_obj.netns_list) == 1
def test_abnormal():
    # Empty input must make the parser raise SkipException.
    with pytest.raises(SkipException) as pe:
        NetworkNamespace(context_wrap(LIST_NAMESPACE_3))
        assert "Nothing to parse." in str(pe)
    with pytest.raises(SkipException) as pe:
        NetworkNamespace(context_wrap(CMD_LIST_NAMESPACE_3))
        assert "Nothing to parse." in str(pe)
|
veltzer/demos-python | src/examples/short/ftp/ftp_rmdir.py | Python | gpl-3.0 | 1,089 | 0.001837 | #!/usr/bin/env python
import ftplib
import os.path
import sys
p_debug = False
def ftp_rmdir(ftp, folder, remove_toplevel, dontremove):
    """Recursively delete the contents of *folder* on the FTP server.

    Parameters
    ----------
    ftp: ftplib.FTP
        An authenticated FTP connection supporting MLSD.
    folder: str
        Remote folder whose contents are deleted.
    remove_toplevel: bool
        When True, remove *folder* itself after emptying it.
    dontremove: set
        File names to leave in place.
    """
    for filename, attr in ftp.mlsd(folder):
        if attr['type'] == 'file' and filename not in dontremove:
            if p_debug:
                print(
                    'removing file [{0}] from folder [{1}]'.format(filename, folder))
            ftp.delete(os.path.join(folder, filename))
        if attr['type'] == 'dir':
            # Recurse with the full remote path; the original passed the bare
            # name, which only worked for entries directly under the cwd.
            ftp_rmdir(ftp, os.path.join(folder, filename), True, dontremove)
    if remove_toplevel:
        if p_debug:
            print('removing folder [{0}]'.format(folder))
        ftp.rmd(folder)
def main():
    """Entry point: ``ftp_rmdir.py <host> <user> <password> <directory>``.

    Connects, logs in, removes the directory's contents (keeping the top-level
    directory itself) and quits.
    """
    p_host = sys.argv[1]
    p_user = sys.argv[2]
    p_pass = sys.argv[3]
    p_dir = sys.argv[4]
    if p_debug:
        print(p_host)
        print(p_user)
        print(p_pass)
        print(p_dir)
    ftp = ftplib.FTP(p_host)
    ftp.login(user=p_user, passwd=p_pass)
    # ftp_rmdir(ftp, p_dir, False, set(['.ftpquota']))
    ftp_rmdir(ftp, p_dir, False, set())
    ftp.quit()
if __name__ == '__main__':
main()
|
LittleRichard/sormtger | server/utils/StringUtils.py | Python | gpl-3.0 | 305 | 0 | from psyco | pg2._psycopg import adapt
class StringUtils(object):
@staticmethod
def adapt_to_str_for_orm(value):
value = (value.replace('%', '')
.replace(':', '')
)
adapted_value = adapt(value)
return adapted_value | .getquoted()[1:-1]
|
tusharmakkar08/SQL_PARSER | aggregate.py | Python | mit | 3,641 | 0.043944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# aggregate.py
#
# Copyright 2013 tusharmakkar08 <tusharmakkar08@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
""" Importing Modules """
import csv
import os
import logging
import collections
import sys
""" Main Code Starts Here """
# Truncate the log file left over from a previous run; ignore failure to open.
try:
    open('example1.log', 'w').close()
except IOError:
    pass
# Log with timestamps to example1.log at DEBUG level.
logging.basicConfig(filename='example1.log',format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',level=logging.DEBUG)
def groupby(att,filen):
    # Group the CSV rows of `filen` by the column named `att` (Python 2 code).
    logging.debug("Entered groupby with filename=%s attr=%s"%(filen,att))
    sreader=csv.reader(open(filen,"rb"))
    co=0
    # Read the header row to learn the column names.
    for row in sreader:
        if co==0:
            row_name=row
            break
    sreader=csv.reader(open(filen,"rb"))
    # Find the index `co` of the requested column.
    for i in row_name:
        if i==att:
            break
        co+=1
    sreader=csv.reader(open(filen,"rb"))
    k={}
    # Bucket every row (header included) by its value in column `co`.
    for row in sreader:
        try:
            k[row[co]].append(row)
        except KeyError :
            k[row[co]]=[]
            k[row[co]].append(row)
    od = collections.OrderedDict(sorted(k.items()))
    lo=raw_input("Do you want to see output ? Y/N \n")
    if(lo=='Y'):
        print od
    # Dump the groups to a scratch file, removed again at the end.
    swriter=csv.writer(open("tring.csv","wb"))
    for ke,v in od.iteritems():
        swriter.writerow(v)
    # Interactive loop: print per-group counts until the user enters 0.
    while(1):
        tik=raw_input("1 for printing count for each group and 0 for break\n")
        if(tik=='0'):
            break;
        else:
            for ke,v in od.iteritems():
                if(ke=='id'):
                    break
                print ke,len(v)
    os.system("rm tring.csv")
def rename(filen,attr,newfilen,newattr):
logging.debug("Entered Rename with filename=%s attr=%s newfilename=%s newattr=%s"%(filen,attr,newfilen,newattr))
if attr=='-1':
| k="mv "+filen+" "+newfilen
os.system(k)
print "------Done-----"
else:
k="mv "+filen+" "+newfilen
os.system(k)
| sreader=csv.reader(open(newfilen,"rb"))
swriter=csv.writer(open("tring1.csv","wb"))
j=0
for row in sreader:
if j==0:
ti=newattr.split(',')
print ti
swriter.writerow(ti)
else:
swriter.writerow(row)
j+=1
sreader=csv.reader(open("tring1.csv","rb"))
swriter=csv.writer(open(newfilen,"wb"))
for row in sreader:
swriter.writerow(row)
os.system("rm tring1.csv")
def main():
    # Interactive REPL: read commands until the user enters -1.
    # Accepted forms: "groupby <attr> <file>" and
    # "rename <file> <attr|-1> <newfile> <newattrs>".
    while 1:
        a=raw_input("Enter Regular Expression for Groupby or Renaming Statement and -1 for exit \n").strip().split()
        if a[0]=="-1":
            break
        if(a[0]=="groupby"):
            groupby(a[1],a[2])
        if(a[0]=="rename"):
            rename(a[1],a[2],a[3],a[4])
        logging.debug("---------------------------------------------------------------------")
    return 0
if __name__ == '__main__':
main()
"""
Format:
- groupby attr filename
- rename tablename attrname new_tablename new_attrname
"""
"""
Example Queries:
- groupby id sortdata.csv
- rename sortindex.csv -1 sortindex1.csv -1
- rename sortindex.csv id,Algorithm_Name,Best_Case_Running_Time,Average_Case_Running_Time,Worst_Case_Running_Time,Worst_Case_Space_Complexity sortindex1.csv ID,Algorithm_Name,Best_Case_Running_Time,Average_Case_Running_Time,Worst_Case_Running_Time,Worst_Case_Space_Complexity
"""
|
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/reshape/reshape.py | Python | mit | 45,575 | 0.000044 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import re
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notnull
import pandas.core.dtypes.concat as _concat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.frame import _shared_docs
from pandas.util._decorators import Appender
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
    def __init__(self, values, index, level=-1, value_columns=None,
                 fill_value=None):
        # If the input is a Categorical, remember it so get_result can
        # reconstruct categorical columns, and work on the plain ndarray.
        self.is_categorical = None
        if values.ndim == 1:
            if isinstance(values, Categorical):
                self.is_categorical = values
                values = np.array(values)
            # Always work with 2-D values internally.
            values = values[:, np.newaxis]
        self.values = values
        self.value_columns = value_columns
        self.fill_value = fill_value
        if value_columns is None and values.shape[1] != 1:  # pragma: no cover
            raise ValueError('must pass column labels for multi-column data')
        self.index = index
        if isinstance(self.index, MultiIndex):
            # A level reference that matches several index names is ambiguous.
            if index._reference_duplicate_name(level):
                msg = ("Ambiguous reference to {0}. The index "
                       "names are not unique.".format(level))
                raise ValueError(msg)
        self.level = self.index._get_level_number(level)
        # when index includes `nan`, need to lift levels/strides by 1
        self.lift = 1 if -1 in self.index.labels[self.level] else 0
        self.new_index_levels = list(index.levels)
        self.new_index_names = list(index.names)
        # The unstacked level leaves the index and becomes columns.
        self.removed_name = self.new_index_names.pop(self.level)
        self.removed_level = self.new_index_levels.pop(self.level)
        self._make_sorted_values_labels()
        self._make_selectors()
    def _make_sorted_values_labels(self):
        # Group-sort rows so that rows sharing all *remaining* index levels
        # are contiguous, with the unstacked level (moved last) varying fastest.
        v = self.level
        labs = list(self.index.labels)
        levs = list(self.index.levels)
        to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
        sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
        comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
        ngroups = len(obs_ids)
        indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
        indexer = _ensure_platform_int(indexer)
        self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
        self.sorted_labels = [l.take(indexer) for l in to_sort]
    def _make_selectors(self):
        # Compute the output shape (ngroups x stride) and a flat boolean mask
        # marking which (group, unstacked-label) cells are actually observed.
        new_levels = self.new_index_levels
        # make the mask
        remaining_labels = self.sorted_labels[:-1]
        level_sizes = [len(x) for x in new_levels]
        comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
        ngroups = len(obs_ids)
        comp_index = _ensure_platform_int(comp_index)
        # self.lift accounts for a possible NaN slot in the unstacked level.
        stride = self.index.levshape[self.level] + self.lift
        self.full_shape = ngroups, stride
        selector = self.sorted_labels[-1] + stride * comp_index + self.lift
        mask = np.zeros(np.prod(self.full_shape), dtype=bool)
        mask.put(selector, True)
        # Fewer marked cells than index rows means two rows mapped to the
        # same cell, i.e. the index has duplicates.
        if mask.sum() < len(self.index):
            raise ValueError('Index contains duplicate entries, '
                             'cannot reshape')
        self.group_index = comp_index
        self.mask = mask
        self.unique_groups = obs_ids
        self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
| if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1] | )]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{}".format(name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype |
kharazi/summarize | summarize/summarize.py | Python | gpl-3.0 | 2,018 | 0.000991 | # -*- coding: utf-8 -*-
import math
from nltk.probability import FreqDist
from nltk.corpus import stopwords
from hazm import sent_tokenize, word_tokenize
from hazm import Normalizer


class Summarizer(object):
    """Extractive summarizer for Persian text (Python 2 code).

    Picks the sentences containing the most frequent non-stopword terms and
    returns them in original document order.
    """

    def __init__(self):
        self.normalizer = Normalizer()

    def summarize(self, input):
        """Normalize *input*, tokenize it, and return the selected sentences."""
        self.input = self.normalizer.normalize(input)
        self.base_words = word_tokenize(self.input)
        self.working_sentences = sent_tokenize(self.input)
        self.sentences_number = len(self.working_sentences)
        return self._get_summarize(num_sentences=self._find_num_sentences())

    def _find_num_sentences(self):
        # Heuristic: roughly log(n)^2 sentences for long texts, everything
        # for texts shorter than six sentences.
        return (int(math.log(self.sentences_number) ** 2 + 1) + 1) if self.sentences_number >= 6 else self.sentences_number
        # return int(self.sentences_number - 0.2 * self.sentences_number)

    def _get_summarize(self, num_sentences):
        # Keep only non-stopword tokens, then rank words by frequency.
        words = [word for word in self.base_words if word not in stopwords.words('persian')]
        word_frequencies = FreqDist(words)
        most_frequent_words = [pair[0] for pair in
                               word_frequencies.items()[:100]]
        actual_sentences = sent_tokenize(self.input)
        output_sentences = []
        # For each frequent word, pick the first not-yet-selected sentence
        # containing it, until num_sentences are collected.
        for word in most_frequent_words:
            for i in range(0, len(self.working_sentences)):
                if (word in self.working_sentences[i]
                        and actual_sentences[i] not in output_sentences):
                    output_sentences.append(actual_sentences[i])
                    break
                if len(output_sentences) >= num_sentences:
                    break
            if len(output_sentences) >= num_sentences:
                break
        return self._reorder_sentences(output_sentences)

    def _reorder_sentences(self, output_sentences):
        # Restore document order by position of each sentence in the text
        # (Python 2 cmp-style sort).
        output_sentences.sort(lambda s1, s2:
                              self.input.find(s1) - self.input.find(s2))
        return output_sentences
|
theo-l/django | tests/datatypes/models.py | Python | bsd-3-clause | 779 | 0 | """
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models
class Donut(models.Model):
    """Model exercising boolean and date/time column types on all backends."""
    name = models.CharField(max_length=100)
    is_frosted = models.BooleanField(default=False)
    has_sprinkles = models.BooleanField(null=True)
    has_sprinkles_old = models.NullBooleanField()
    baked_date = models.DateField(null=True)
    baked_time = models.TimeField(null=True)
    consumed_at = models.DateTimeField(null=True)
    review = models.TextField()

    class Meta:
        ordering = ('consumed_at',)
class RumBaba(models.Model):
    # Exercises auto_now_add on both DateField and DateTimeField.
    baked_date = models.DateField(auto_now_add=True)
    baked_timestamp = models.DateTimeField(auto_now_add=True)
|
idlead/pydebitoor | pydebitoor/client.py | Python | mit | 7,309 | 0 | # -*- coding: utf-8 -*-
import json
import logging
import requests
from requests.exceptions import ConnectionError, HTTPError
from .errors import RequestError, NotFoundError
from .services import (CustomerService, InvoiceService, DraftService,
TaxService)
logger = logging.getLogger('pydebitoor')
DEFAULT_API_URL = 'https://api.debitoor.com/api'
SERVICE_MAPPING = {
'CustomerService': CustomerService,
'DraftService': DraftService,
'InvoiceService': InvoiceService,
'TaxService': TaxService
}
class DebitoorClient(object):
    """Thin HTTP client for the Debitoor REST API.

    Adds the ``x-token`` credential header to every request, serializes dict
    payloads to JSON, translates HTTP errors into package exceptions, and
    hands out the service objects listed in SERVICE_MAPPING.
    """

    def __init__(self, access_token, base_url=None):
        """Store credentials and verify them with a probe request.

        Parameters
        ----------
        access_token: str
            Debitoor API token, sent as the ``x-token`` header.
        base_url: str, optional
            API root URL; defaults to DEFAULT_API_URL.
        """
        self.access_token = access_token
        self.base_url = base_url or DEFAULT_API_URL
        self.__ensure_credentials()

    def __ensure_credentials(self):
        """
        Check if credentials are valid by performing a base query.

        Raises
        ------
        ConnectionError if credentials are invalid.
        """
        try:
            self.get('/environment/v1')
        except HTTPError as exc:
            logger.exception('Could not connect to the API')
            raise ConnectionError(exc)

    def __make_url(self, uri):
        """
        Build URL from URI.

        Parameters
        ----------
        uri: Resource identifier

        Returns
        -------
        URL to query
        """
        return '{}{}'.format(self.base_url, uri)

    def __make_header(self):
        """
        Generate credential headers.

        Returns
        -------
        Dict representing API header with credential.
        """
        return {'x-token': self.access_token,
                'Content-Type': 'application/json'}

    def __execute(self, method, url, **kwargs):
        """
        Execute API call.

        Parameters
        ----------
        method: str
            REST method, one of PUT, POST, PATCH, GET, DELETE
        kwargs: dict
            request call kwargs

        Returns
        -------
        Call response, deserialized from Content-type.
        If JSON, perform a json.loads transformation.
        Else, return response body as a string.

        Raises
        ------
        RequestError: If API call is invalid (Response code 400)
        NotFoundError: If url is invalid (Response code 404)
        HTTPError: For any other error
        """
        headers = self.__make_header()
        # Caller-supplied headers win over the defaults.
        headers.update(kwargs.pop('headers', {}))
        response = requests.request(method, url, headers=headers, **kwargs)
        if 200 <= response.status_code <= 299:
            if 'application/json' in response.headers['content-type']:
                return response.json()
            return response.content
        elif response.status_code == 400:
            logger.debug('Invalid request: %s', response.text)
            raise RequestError(response=response)
        elif response.status_code == 404:
            logger.debug('Invalid API endpoint: %s', url)
            raise NotFoundError(response=response)
        logger.debug('API Error [HTTP Code %s]: %s',
                     response.status_code, response.text)
        response.raise_for_status()

    def post(self, uri, payload, **params):
        """
        Perform a POST call to the debitoor API.

        Parameters
        ----------
        uri: str
            URI of the resource (without API base url)
        payload: dict or str
            POST body; dicts are serialized to JSON.
        params: dict
            Querystring parameters

        Returns
        -------
        Call response as a dict

        Raises
        ------
        RequestError, NotFoundError, HTTPError (see __execute)
        """
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        return self.__execute('POST', self.__make_url(uri),
                              data=payload, params=params)

    def get(self, uri, **params):
        """
        Perform a GET call to the debitoor API.

        Parameters
        ----------
        uri: str
            URI of the resource (without API base url)
        params: dict
            Querystring parameters

        Returns
        -------
        Call response as a dict

        Raises
        ------
        RequestError, NotFoundError, HTTPError (see __execute)
        """
        return self.__execute('GET', self.__make_url(uri), params=params)

    def put(self, uri, payload, **params):
        """
        Perform a PUT call to the debitoor API.

        Parameters
        ----------
        uri: str
            URI of the resource (without API base url)
        payload: dict or str
            PUT body; dicts are serialized to JSON.
        params: dict
            Querystring parameters

        Returns
        -------
        Call response as a dict

        Raises
        ------
        RequestError, NotFoundError, HTTPError (see __execute)
        """
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        return self.__execute('PUT', self.__make_url(uri), data=payload,
                              params=params)

    def delete(self, uri, **params):
        """
        Perform a DELETE call to the debitoor API.

        Parameters
        ----------
        uri: str
            URI of the resource (without API base url)
        params: dict
            Querystring parameters

        Returns
        -------
        Call response as a dict

        Raises
        ------
        RequestError, NotFoundError, HTTPError (see __execute)
        """
        return self.__execute('DELETE', self.__make_url(uri), params=params)

    def patch(self, uri, payload, **params):
        """
        Perform a PATCH call to the debitoor API.

        Parameters
        ----------
        uri: str
            URI of the resource (without API base url)
        payload: dict or str
            PATCH body; dicts are serialized to JSON.
        params: dict
            Querystring parameters

        Returns
        -------
        Call response as a dict

        Raises
        ------
        RequestError, NotFoundError, HTTPError (see __execute)
        """
        # Serialize dicts like post/put do; the Content-Type header already
        # declares application/json, so sending a raw dict (form-encoded by
        # requests) was inconsistent.
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        return self.__execute('PATCH', self.__make_url(uri),
                              data=payload, params=params)

    def get_service(self, service_name):
        """
        Get Debitoor service from name.

        Parameters
        ----------
        service_name:
            Name of service. Must be one of CustomerService,
            DraftService, InvoiceService, TaxService.

        Returns
        -------
        Service instance.

        Raises
        ------
        ValueError if the service does not exist
        """
        try:
            return SERVICE_MAPPING[service_name](self)
        except KeyError:
            raise ValueError('Unknown service: %s' % service_name)
|
michaelgallacher/intellij-community | python/testData/inspections/PyPropertyAccessInspection/inheritedClassAttrAssignmentAndOwnWithAttrAndInheritedSlots.py | Python | apache-2.0 | 243 | 0.032922 | class B(object):
attr = 'baz'
__slots__ = ['f', 'b']
class C | (B):
__slots__ = ['attr', 'bar']
C.attr = 'spam'
print(C.attr)
c = C()
<warning descr="'C' object attribute 'attr' is read-only">c.attr</warning> = 'spam'
print(c.a | ttr) |
oblique-labs/pyVM | rpython/jit/metainterp/optimizeopt/test/test_guard.py | Python | mit | 12,148 | 0.011278 | import py
from rpython.jit.metainterp import compile
from rpython.jit.metainterp.history import (TargetToken, JitCellToken,
TreeLoop, Const)
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.optimizeopt.vector import (Pack,
NotAProfitableLoop, VectorizingOptimizer)
from rpython.jit.metainterp.optimizeopt.dependency import (Node,
DependencyGraph, IndexVar)
from rpython.jit.metainterp.optimizeopt.guard import (GuardStrengthenOpt,
Guard)
from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
from rpython.jit.metainterp.optimizeopt.test.test_schedule import SchedulerBaseTest
from rpython.jit.metainterp.optimizeopt.test.test_vecopt import (FakeMetaInterpStaticData,
FakeJitDriverStaticData, FakeLoopInfo)
from rpython.jit.metainterp.resoperation import (rop,
ResOperation, InputArgInt)
from rpython.jit.tool.oparser_model import get_model
class FakeMemoryRef(object):
    """Fake memory reference: two refs are adjacent when they point into the
    same array object and their index variables (named like 'i0', 'i1', ...)
    differ by exactly one."""

    def __init__(self, array, iv):
        self.index_var = iv
        self.array = array

    def is_adjacent_to(self, other):
        if self.array is not other.array:
            return False
        # 'iN' -> N; i0/i1 are adjacent, i1/i0 too, but not i0/i2.
        mine = int(str(self.index_var.var)[1:])
        theirs = int(str(other.index_var.var)[1:])
        return abs(theirs - mine) == 1
class FakeOp(object):
    """Fake comparison operation exposing only what Guard needs: the opnum,
    the comparison's boolinverse, and symbolic 'lhs'/'rhs' argument keys."""

    def __init__(self, cmpop):
        # Build a real ResOperation once, purely to read its boolinverse.
        self.boolinverse = ResOperation(cmpop, [box(0), box(0)], None).boolinverse
        self.cmpop = cmpop

    def getopnum(self):
        return self.cmpop

    def getarg(self, index):
        assert index in (0, 1)
        return 'lhs' if index == 0 else 'rhs'
class FakeResOp(object):
    # Minimal fake of a resoperation: records an opnum and returns it.
    def __init__(self, opnum):
        self.opnum = opnum
    def getopnum(self):
        return self.opnum
def box(value):
    # Wrap an int in a JIT input-argument box.
    return InputArgInt(value)
def const(value):
    # Wrap a value in a metainterp Const.
    return Const._new(value)
def iv(value, coeff=(1,1,0)):
    # Build an IndexVar over `value` with (coefficient_mul, coefficient_div,
    # constant) taken from `coeff`.
    var = IndexVar(value)
    var.coefficient_mul = coeff[0]
    var.coefficient_div = coeff[1]
    var.constant = coeff[2]
    return var
def guard(opnum):
    # Factory: returns a helper that builds a Guard of the given guard opnum
    # from a comparison opnum and its two operand index variables.
    def guard_impl(cmpop, lhs, rhs):
        guard = Guard(0, FakeResOp(opnum), FakeOp(cmpop), {'lhs': lhs, 'rhs': rhs})
        return guard
    return guard_impl
guard_true = guard(rop.GUARD_TRUE)
guard_false = guard(rop.GUARD_FALSE)
del guard
class GuardBaseTest(SchedulerBaseTest):
    def optguards(self, loop, user_code=False):
        # Run the guard-strengthening pass over `loop`: snapshot it, give every
        # guard a CompileLoopVersionDescr, build the dependency graph, and
        # propagate. Returns the optimizer for inspection.
        info = FakeLoopInfo(loop)
        info.snapshot(loop)
        for op in loop.operations:
            if op.is_guard():
                op.setdescr(compile.CompileLoopVersionDescr())
        dep = DependencyGraph(loop)
        opt = GuardStrengthenOpt(dep.index_vars)
        opt.propagate_all_forward(info, loop, user_code)
        return opt
    def assert_guard_count(self, loop, count):
        # Count guards across prefix + operations; dump the trace before
        # failing so a mismatch is easy to diagnose.
        guard = 0
        for op in loop.operations + loop.prefix:
            if op.is_guard():
                guard += 1
        if guard != count:
            self.debug_print_operations(loop)
        assert guard == count
def assert_contains_sequence(self, loop, instr):
class Glob(object):
next = None
prev = None
def __repr__(self):
return '*'
from rpython.jit.tool.oparser import OpParser, default_fail_descr
parser = OpParser(instr, self.cpu, self.namespace, None, default_fail_descr, True, None)
parser.vars = { arg.repr_short(arg._repr_memo) : arg for arg in loop.inputargs}
operations = []
last_glob = None
prev_op = None
f | or line in instr.splitlines():
line = line.strip()
if | line.startswith("#") or \
line == "":
continue
if line.startswith("..."):
last_glob = Glob()
last_glob.prev = prev_op
operations.append(last_glob)
continue
op = parser.parse_next_op(line)
if last_glob is not None:
last_glob.next = op
last_glob = None
operations.append(op)
def check(op, candidate, rename):
m = 0
if isinstance(candidate, Glob):
if candidate.next is None:
return 0 # consumes the rest
if op.getopnum() != candidate.next.getopnum():
return 0
m = 1
candidate = candidate.next
if op.getopnum() == candidate.getopnum():
for i,arg in enumerate(op.getarglist()):
oarg = candidate.getarg(i)
if arg in rename:
assert rename[arg].same_box(oarg)
else:
rename[arg] = oarg
if not op.returns_void():
rename[op] = candidate
m += 1
return m
return 0
j = 0
rename = {}
ops = loop.finaloplist()
for i, op in enumerate(ops):
candidate = operations[j]
j += check(op, candidate, rename)
if isinstance(operations[-1], Glob):
assert j == len(operations)-1, self.debug_print_operations(loop)
else:
assert j == len(operations), self.debug_print_operations(loop)
    def test_basic(self):
        # Two guards on i1 < 42 and i1+1 < 42: the pass should merge them
        # into the single stronger guard on i1+1.
        loop1 = self.parse_trace("""
        i10 = int_lt(i1, 42)
        guard_true(i10) []
        i101 = int_add(i1, 1)
        i102 = int_lt(i101, 42)
        guard_true(i102) []
        """)
        opt = self.optguards(loop1)
        self.assert_guard_count(loop1, 1)
        self.assert_contains_sequence(loop1, """
        ...
        i101 = int_add(i1, 1)
        i12 = int_lt(i101, 42)
        guard_true(i12) []
        ...
        """)
    def test_basic_sub(self):
        # Mirror of test_basic with subtraction: i1 > 42 and i1-1 > 42
        # collapse into the single guard on i1-1.
        loop1 = self.parse_trace("""
        i10 = int_gt(i1, 42)
        guard_true(i10) []
        i101 = int_sub(i1, 1)
        i12 = int_gt(i101, 42)
        guard_true(i12) []
        """)
        opt = self.optguards(loop1)
        self.assert_guard_count(loop1, 1)
        self.assert_contains_sequence(loop1, """
        ...
        i101 = int_sub(i1, 1)
        i12 = int_gt(i101, 42)
        guard_true(i12) []
        ...
        """)
    def test_basic_mul(self):
        # Guards on i1*4 < 42 and i1*4+1 < 42 collapse into the single
        # stronger guard on i1*4+1.
        loop1 = self.parse_trace("""
        i10 = int_mul(i1, 4)
        i20 = int_lt(i10, 42)
        guard_true(i20) []
        i12 = int_add(i10, 1)
        i13 = int_lt(i12, 42)
        guard_true(i13) []
        """)
        opt = self.optguards(loop1)
        self.assert_guard_count(loop1, 1)
        self.assert_contains_sequence(loop1, """
        ...
        i101 = int_mul(i1, 4)
        i12 = int_add(i101, 1)
        i13 = int_lt(i12, 42)
        guard_true(i13) []
        ...
        """)
    def test_compare(self):
        # IndexVar.compare returns (comparable, constant-difference).
        # (False, 0) means the two expressions cannot be ordered statically.
        key = box(1)
        incomparable = (False, 0)
        # const const
        assert iv(const(42)).compare(iv(const(42))) == (True, 0)
        assert iv(const(-400)).compare(iv(const(-200))) == (True, -200)
        assert iv(const(0)).compare(iv(const(-1))) == (True, 1)
        # var const
        assert iv(key, coeff=(1,1,0)).compare(iv(const(42))) == incomparable
        assert iv(key, coeff=(5,70,500)).compare(iv(const(500))) == incomparable
        # var var
        assert iv(key, coeff=(1,1,0)).compare(iv(key,coeff=(1,1,0))) == (True, 0)
        assert iv(key, coeff=(1,7,0)).compare(iv(key,coeff=(1,7,0))) == (True, 0)
        assert iv(key, coeff=(4,7,0)).compare(iv(key,coeff=(3,7,0))) == incomparable
        assert iv(key, coeff=(14,7,0)).compare(iv(key,coeff=(2,1,0))) == (True, 0)
        assert iv(key, coeff=(14,7,33)).compare(iv(key,coeff=(2,1,0))) == (True, 33)
        assert iv(key, coeff=(15,5,33)).compare(iv(key,coeff=(3,1,33))) == (True, 0)
def test_imply_basic(self):
key = box(1)
# if x < 42 <=> x < 42
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(c |
akiokio/centralfitestoque | src/.pycharm_helpers/pydev/pydevd_import_class.py | Python | bsd-2-clause | 1,825 | 0.014795 | #Note: code gotten from importsTipper.
import sys
def _imp(name, log=None):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
if log is not None:
log.AddContent('Unable to import', name, 'trying with', sub)
log.AddException()
return _imp(sub, log)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
if log is not None:
log.AddContent(s)
log.AddException()
raise ImportError(s)
IS_IPY = False
if sys.platform == 'cli':
    # IronPython: .NET assemblies must be referenced via clr before Python
    # imports can see them, so wrap _imp accordingly.
    IS_IPY = True
    _old_imp = _imp
    def _imp(name, log=None):
        #We must add a reference in clr for .Net
        import clr #@UnresolvedImport
        initial_name = name
        # Try to AddReference the longest dotted prefix that works.
        while '.' in name:
            try:
                clr.AddReference(name)
                break #If it worked, that's OK.
            except:
                name = name[0:name.rfind('.')]
        else:
            try:
                clr.AddReference(name)
            except:
                pass #That's OK (not dot net module).
        # Delegate to the plain importer with the original full name.
        return _old_imp(initial_name, log)
def ImportName(name, log=None):
    """Import dotted *name* and return the leaf module/attribute.

    __import__ returns the top-level package, so walk the remaining dotted
    components with getattr. An AttributeError is retried once per component
    because a submodule can shadow access right after the first import.
    """
    mod = _imp(name, log)
    components = name.split('.')
    old_comp = None
    for comp in components[1:]:
        try:
            #this happens in the following case:
            #we have mx.DateTime.mxDateTime.mxDateTime.pyd
            #but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
            mod = getattr(mod, comp)
        except AttributeError:
            if old_comp != comp:
                raise
            old_comp = comp
    return mod
Zhang-RQ/OI_DataBase | BZOJ/2729.py | Python | mit | 498 | 0.01004 | # def A(n,m):
#     ret=1
# while m!=0:
# ret=ret*n
# n-=1
# m-=1
# return ret
# def mul(n):
# ret=1
# while n!=1:
# ret*=n
# n-=1
# return ret
# n,m=raw_input().split()
# n=int(n)
# m=int(m)
# print(mul(n)*(A(n+1,2)*A(n+3,m)+2*m*(n+1)*A(n+2,m-1)))
def mul(x, y):
    """Return the product x * (x+1) * ... * y, or 1 when the range is empty."""
    product = 1
    factor = x
    while factor <= y:
        product *= factor
        factor += 1
    return product
# Read n and m from stdin and print the closed-form answer (Python 2 code;
# the formula matches the commented-out A()/mul() derivation above).
n, m = raw_input().split()
n = int(n); m = int(m)
print(mul(1,n+1)*mul(n+4-m,n+2)*(n*(n+3)+2*m))
|
Nitrokey/libnitrokey | unittest/test_storage.py | Python | lgpl-3.0 | 27,100 | 0.00417 | """
Copyright (c) 2015-2019 Nitrokey UG
This file is part of libnitrokey.
libnitrokey is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
libnitrokey is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with libnitrokey. If not, see <http://www.gnu.org/licenses/>.
SPDX-License-Identifier: LGPL-3.0
"""
import dataclasses
import pprint
import secrets
from datetime import datetime, timedelta
from time import sleep
import pytest
from hypothesis import given, strategies as st, settings, Verbosity, assume
from conftest import skip_if_device_version_lower_than
from constants import DefaultPasswords, DeviceErrorCode
from misc import gs, wait, ffi, bb
pprint = pprint.PrettyPrinter(indent=4).pprint
def get_dict_from_dissect(status):
    """Parse libnitrokey's dissected status dump into a dict.

    Each useful line looks like ``key:\\t (int) value``. Tabs and the literal
    ``' (int) '`` marker are stripped, lines without exactly one ':' are
    skipped, and both key and value are whitespace-trimmed.
    """
    pairs = []
    for line in status.split('\n'):
        # The original wrapped this body in a bare `except: pass`, which could
        # silently hide real bugs; nothing here raises on str input.
        if ':' not in line:
            continue
        parts = line.replace('\t', '').replace(' (int) ', '').split(':')
        if len(parts) != 2:
            continue
        pairs.append(parts)
    return {k.strip(): v.strip() for k, v in pairs}
def get_status_storage(C):
    """Fetch and parse the Storage status string from the device.

    Returns a (status_dict, last_command_status) tuple; asserts that the
    fetch itself succeeded and returned a non-empty string.
    """
    raw_pointer = C.NK_get_status_storage_as_string()
    assert C.NK_get_last_command_status() == DeviceErrorCode.STATUS_OK
    text = gs(raw_pointer)
    assert len(text) > 0
    parsed = get_dict_from_dissect(text.decode('ascii'))
    # assert int(status_dict['AdminPwRetryCount']) == default_admin_password_retry_count
    return parsed, C.NK_get_last_command_status()
@pytest.mark.other
@pytest.mark.info
def test_get_status_storage(C):
    """Storage status parses and reports the default admin PIN retry counter."""
    skip_if_device_version_lower_than({'S': 43})
    raw_pointer = C.NK_get_status_storage_as_string()
    assert C.NK_get_last_command_status() == DeviceErrorCode.STATUS_OK
    raw_status = gs(raw_pointer)
    assert len(raw_status) > 0
    parsed = get_dict_from_dissect(raw_status.decode('ascii'))
    expected_admin_retry_count = 3
    assert int(parsed['AdminPwRetryCount']) == expected_admin_retry_count
    print('C.NK_get_major_firmware_version(): {}'.format(C.NK_get_major_firmware_version()))
    print('C.NK_get_minor_firmware_version(): {}'.format(C.NK_get_minor_firmware_version()))
@pytest.mark.other
@pytest.mark.info
def test_sd_card_usage(C):
    """SD usage data is readable and reports a write level of at most 100%."""
    skip_if_device_version_lower_than({'S': 43})
    raw_pointer = C.NK_get_SD_usage_data_as_string()
    assert C.NK_get_last_command_status() == DeviceErrorCode.STATUS_OK
    raw_data = gs(raw_pointer)
    assert len(raw_data) > 0
    usage = get_dict_from_dissect(raw_data.decode("ascii"))
    assert int(usage['WriteLevelMax']) <= 100
@pytest.mark.encrypted
def test_encrypted_volume_unlock(C):
    """Unlocking the encrypted volume with the default user PIN must succeed."""
    skip_if_device_version_lower_than({'S': 43})
    # Lock first so the unlock below always starts from a known device state.
    assert C.NK_lock_device() == DeviceErrorCode.STATUS_OK
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == DeviceErrorCode.STATUS_OK
@pytest.mark.hidden
def test_encrypted_volume_unlock_hidden(C):
    """Create a hidden volume in slot 0 and unlock it with its passphrase."""
    skip_if_device_version_lower_than({'S': 43})
    passphrase = b'hiddenpassword'
    ok = DeviceErrorCode.STATUS_OK
    assert C.NK_lock_device() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    assert C.NK_create_hidden_volume(0, 20, 21, passphrase) == ok
    assert C.NK_unlock_hidden_volume(passphrase) == ok
@pytest.mark.hidden
def test_encrypted_volume_setup_multiple_hidden_lock(C):
    """Set up four hidden volumes and unlock each one after a full device relock.

    A random suffix is appended to the base passphrase so reruns do not
    collide with volumes left over from an earlier session.
    (Repaired two lines garbled by stray ' | ' delimiters.)
    """
    import random
    skip_if_device_version_lower_than({'S': 45}) #hangs device on lower version
    hidden_volume_password = b'hiddenpassword' + bb(str(random.randint(0, 100)))
    p = lambda i: hidden_volume_password + bb(str(i))
    assert C.NK_lock_device() == DeviceErrorCode.STATUS_OK
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == DeviceErrorCode.STATUS_OK
    for i in range(4):
        assert C.NK_create_hidden_volume(i, 20+i*10, 20+i*10+i+1, p(i)) == DeviceErrorCode.STATUS_OK
    for i in range(4):
        assert C.NK_lock_device() == DeviceErrorCode.STATUS_OK
        assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == DeviceErrorCode.STATUS_OK
        assert C.NK_unlock_hidden_volume(p(i)) == DeviceErrorCode.STATUS_OK
@pytest.mark.hidden
@pytest.mark.parametrize("volumes_to_setup", range(1, 5))
def test_encrypted_volume_setup_multiple_hidden_no_lock_device_volumes(C, volumes_to_setup):
    """Hidden volumes survive an encrypted-volume relock (no full device lock)."""
    skip_if_device_version_lower_than({'S': 43})
    base_passphrase = b'hiddenpassword'

    def passphrase(slot):
        return base_passphrase + bb(str(slot))

    ok = DeviceErrorCode.STATUS_OK
    assert C.NK_lock_device() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    for slot in range(volumes_to_setup):
        assert C.NK_create_hidden_volume(slot, 20 + slot * 10, 20 + slot * 10 + slot + 1, passphrase(slot)) == ok
    assert C.NK_lock_encrypted_volume() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    for slot in range(volumes_to_setup):
        assert C.NK_unlock_hidden_volume(passphrase(slot)) == ok
        # TODO mount and test for files
        assert C.NK_lock_hidden_volume() == ok
@pytest.mark.hidden
@pytest.mark.parametrize("volumes_to_setup", range(1, 5))
def test_encrypted_volume_setup_multiple_hidden_no_lock_device_volumes_unlock_at_once(C, volumes_to_setup):
    """Each hidden volume is unlocked right after creation, then again after relock."""
    skip_if_device_version_lower_than({'S': 43})
    base_passphrase = b'hiddenpassword'

    def passphrase(slot):
        return base_passphrase + bb(str(slot))

    ok = DeviceErrorCode.STATUS_OK
    assert C.NK_lock_device() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    for slot in range(volumes_to_setup):
        assert C.NK_create_hidden_volume(slot, 20 + slot * 10, 20 + slot * 10 + slot + 1, passphrase(slot)) == ok
        assert C.NK_unlock_hidden_volume(passphrase(slot)) == ok
        assert C.NK_lock_hidden_volume() == ok
    assert C.NK_lock_encrypted_volume() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    for slot in range(volumes_to_setup):
        assert C.NK_unlock_hidden_volume(passphrase(slot)) == ok
        # TODO mount and test for files
        assert C.NK_lock_hidden_volume() == ok
@pytest.mark.hidden
@pytest.mark.parametrize("use_slot", range(4))
def test_encrypted_volume_setup_one_hidden_no_lock_device_slot(C, use_slot):
    """One hidden volume per parametrized slot; unlockable three times after relock."""
    skip_if_device_version_lower_than({'S': 43})
    base_passphrase = b'hiddenpassword'

    def passphrase(slot):
        return base_passphrase + bb(str(slot))

    ok = DeviceErrorCode.STATUS_OK
    assert C.NK_lock_device() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    i = use_slot
    assert C.NK_create_hidden_volume(i, 20 + i * 10, 20 + i * 10 + i + 1, passphrase(i)) == ok
    assert C.NK_unlock_hidden_volume(passphrase(i)) == ok
    assert C.NK_lock_hidden_volume() == ok
    assert C.NK_lock_encrypted_volume() == ok
    assert C.NK_unlock_encrypted_volume(DefaultPasswords.USER) == ok
    for _ in range(3):
        assert C.NK_unlock_hidden_volume(passphrase(i)) == ok
        # TODO mount and test for files
        assert C.NK_lock_hidden_volume() == ok
@pytest.mark.hidden
@pytest.mark.PWS
def test_password_safe_slot_name_corruption(C):
skip_if_device_version_lower_than({'S': 43})
volumes_to_setup = 4
# connected with encrypted volumes, possible also with hidden
def fill(s, wid):
assert wid >= len(s)
|
manglakaran/TrafficKarmaSent | extras/check_break.py | Python | mit | 361 | 0.049861 | import csv
with open('historical_data.csv', 'rb') as | csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if int(row["TIME"]) == 0 :
save = float(row["Speed"])
else:
if(float(row["Spe | ed"]) - save >= 0.1*save or -float(row["Speed"]) + save >= 0.1*save ):
print row["SER"] + "->" , int(row["TIME"])-1
save = float(row["Speed"])
|
aakashsinha19/Aspectus | Image Segmentation/tf-image-segmentation/tf_image_segmentation/models/fcn_32s.py | Python | apache-2.0 | 6,382 | 0.004701 | from nets import vgg
import tensorflow as tf
from preprocessing import vgg_preprocessing
from ..utils.upsampling import bilinear_upsample_weights
slim = tf.contrib.slim
# Mean values for VGG-16
from preprocessing.vgg_preprocessing import _R_MEAN, _G_MEAN, _B_MEAN
def extract_vgg_16_mapping_without_fc8(vgg_16_variables_mapping):
    """Removes the fc8 variable mapping from the FCN-32s to VGG-16 mapping dict.

    Given the FCN-32s to VGG-16 model mapping dict which is returned by the
    FCN_32s() function, remove the mapping for the fc8 variable. This is done
    because this variable is responsible for final class prediction and differs
    between tasks: the last layer's size depends on the number of classes to be
    predicted. The omitted variables will be randomly initialized later.

    Parameters
    ----------
    vgg_16_variables_mapping : dict {string: variable}
        Dict which maps the FCN-32s model's variables to VGG-16 checkpoint
        variable names. Look at the FCN_32s() function for more details.

    Returns
    -------
    updated_mapping : dict {string: variable}
        The same mapping without any entry whose key mentions the fc8 layer.
    """
    # A single dict comprehension replaces the previous key-list + rebuild steps.
    return {key: value
            for key, value in vgg_16_variables_mapping.items()
            if 'fc8' not in key}
def FCN_32s(image_batch_tensor,
            number_of_classes,
            is_training):
    """Returns the FCN-32s model definition.

    The function returns the model definition of a network that was described
    in 'Fully Convolutional Networks for Semantic Segmentation' by Long et al.
    The network subsamples the input by a factor of 32 and uses a bilinear
    upsampling kernel to upsample the prediction by a factor of 32. This means
    that if the image size is not a multiple of 32, a prediction of a different
    size will be delivered. To adapt the network for any-size input use
    adapt_network_for_any_size_input(FCN_32s, 32). Note: the upsampling kernel
    is fixed in this model definition, because learning it didn't give
    significant improvements according to the aforementioned paper.

    Parameters
    ----------
    image_batch_tensor : [batch_size, height, width, depth] Tensor
        Tensor specifying input image batch
    number_of_classes : int
        An argument specifying the number of classes to be predicted.
        For example, for PASCAL VOC it is 21.
    is_training : boolean
        An argument specifying if the network is being evaluated or trained.
        It affects the work of the underlying dropout layer of VGG-16.

    Returns
    -------
    upsampled_logits : [batch_size, height, width, number_of_classes] Tensor
        Tensor with logits representing predictions for each class.
        Be careful, the output can be of different size compared to input;
        use adapt_network_for_any_size_input to adapt the network for any
        input size. Otherwise, the input image sizes should be multiples of 32.
    vgg_16_variables_mapping : dict {string: variable}
        Dict which maps the FCN-32s model's variables to VGG-16 checkpoint
        variable names. We need this to initialize the weights of the FCN-32s
        model from a VGG-16 checkpoint file.
    """
    with tf.variable_scope("fcn_32s") as fcn_32s_scope:
        upsample_factor = 32
        # Convert image to float32 before subtracting the mean pixel value
        image_batch_float = tf.to_float(image_batch_tensor)
        # Subtract the mean pixel value from each pixel
        mean_centered_image_batch = image_batch_float - [_R_MEAN, _G_MEAN, _B_MEAN]
        upsample_filter_np = bilinear_upsample_weights(upsample_factor,
                                                       number_of_classes)
        upsample_filter_tensor = tf.constant(upsample_filter_np)
        # TODO: make pull request to get this custom vgg feature accepted
        # to avoid using custom slim repo.
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_16(mean_centered_image_batch,
                                            num_classes=number_of_classes,
                                            is_training=is_training,
                                            spatial_squeeze=False,
                                            fc_conv_padding='SAME')
        downsampled_logits_shape = tf.shape(logits)
        # Calculate the output size of the upsampled tensor.
        # NOTE(review): tf.pack was renamed tf.stack in TF 1.0; this file
        # targets an older TensorFlow release — confirm before upgrading.
        upsampled_logits_shape = tf.pack([
            downsampled_logits_shape[0],
            downsampled_logits_shape[1] * upsample_factor,
            downsampled_logits_shape[2] * upsample_factor,
            downsampled_logits_shape[3]
        ])
        # Perform the upsampling with the fixed bilinear kernel
        upsampled_logits = tf.nn.conv2d_transpose(logits,
                                                  upsample_filter_tensor,
                                                  output_shape=upsampled_logits_shape,
                                                  strides=[1, upsample_factor, upsample_factor, 1])
        # Map the original vgg-16 variable names to the variables in our model.
        # This is done to make it possible to use assign_from_checkpoint_fn()
        # while providing this mapping.
        # TODO: make it cleaner
        vgg_16_variables_mapping = {}
        vgg_16_variables = slim.get_variables(fcn_32s_scope)
        for variable in vgg_16_variables:
            # Strip the part of the name belonging to the current variable
            # scope (and the trailing ':0') to recover the checkpoint name.
            original_vgg_16_checkpoint_string = variable.name[len(fcn_32s_scope.original_name_scope):-2]
            vgg_16_variables_mapping[original_vgg_16_checkpoint_string] = variable
        # (Removed a stray ' |' delimiter that corrupted this return line.)
        return upsampled_logits, vgg_16_variables_mapping
jmesteve/saas3 | openerp/addons_extra/account_balance_reporting/account_balance_reporting_report.py | Python | agpl-3.0 | 22,002 | 0.002727 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account balance reporting engine
# Copyright (C) 2009 Pexego Sistemas Informáticos. All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Account balance report objects
Generic account balance report document (with header and detail lines).
Designed following the needs of the
Spanish/Spain localization.
"""
from openerp.osv import orm,fields
from openerp.tools.translate import _
import re
import | time
import openerp.netsvc as netsvc
import logging
# CSS classes for the account line templates
CSS_CLASSES = [('default','Default'),('l1', 'Level 1'), ('l2', 'Level 2'),
('l3', 'Level 3'), ('l4', 'Level 4'), ('l5', 'Level 5')]
class account_balance_reporting(orm.Model):
"""
Account balance report.
It stores the configuration/header fields of an account balance report,
and the linked lines of detail with the values of the accounting concepts
(values generated from the selected template lines of detail formulas).
"""
_name = "account.balance.reporting"
_columns = {
'name': fields.char('Name', size=64, required=True, select=True),
'template_id': fields.many2one('account.balance.reporting.template',
'Template', ondelete='set null', required=True, select=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'calc_date': fields.datetime("Calculation date", readonly=True),
'state': fields.selection([('draft','Draft'),
('calc','Processing'),
('calc_done','Processed'),
('done','Done'),
('canceled','Canceled')], 'State'),
'company_id': fields.many2one('res.company', 'Company',
ondelete='cascade', required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'current_fiscalyear_id': fields.many2one('account.fiscalyear',
'Fiscal year 1', select=True, required=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'current_period_ids': fields.many2many('account.period',
'account_balance_reporting_account_period_current_rel',
'account_balance_reporting_id', 'period_id',
'Fiscal year 1 periods',
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'previous_fiscalyear_id': fields.many2one('account.fiscalyear',
'Fiscal year 2', select=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'previous_period_ids': fields.many2many('account.period',
'account_balance_reporting_account_period_previous_rel',
'account_balance_reporting_id', 'period_id',
'Fiscal year 2 periods',
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'line_ids': fields.one2many('account.balance.reporting.line',
'report_id', 'Lines',
states = {'done': [('readonly', True)]}),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context).company_id.id,
'state': 'draft',
}
    def action_calculate(self, cr, uid, ids, context=None):
        """Called when the user presses the Calculate button.

        It will use the report template to generate lines of detail for the
        report with calculated values. Three passes over the reports are made,
        re-browsing each time so the freshly created lines are visible:
        1) copy the template lines, 2) wire up parent links, 3) compute values.
        """
        if context is None:
            context = {}
        line_obj = self.pool.get('account.balance.reporting.line')
        # Set the state to 'calculating' and stamp the calculation time
        self.write(cr, uid, ids, {
            'state': 'calc',
            'calc_date': time.strftime('%Y-%m-%d %H:%M:%S')
        })
        for report in self.browse(cr, uid, ids, context=context):
            # Clear the report data (unlink the lines of detail)
            line_obj.unlink(cr, uid, [line.id for line in report.line_ids],
                    context=context)
            # Fill the report with a 'copy' of the lines of its template (if it has one)
            if report.template_id:
                for template_line in report.template_id.line_ids:
                    # Values are left as None here; they are computed in pass 3
                    line_obj.create(cr, uid, {
                            'code': template_line.code,
                            'name': template_line.name,
                            'report_id': report.id,
                            'template_line_id': template_line.id,
                            'parent_id': None,
                            'current_value': None,
                            'previous_value': None,
                            'sequence': template_line.sequence,
                            'css_class': template_line.css_class,
                        }, context=context)
        # Set the parents of the lines in the report
        # Note: We reload the reports objects to refresh the lines of detail.
        for report in self.browse(cr, uid, ids, context=context):
            if report.template_id:
                # Set line parents (now that they have been created)
                for line in report.line_ids:
                    tmpl_line = line.template_line_id
                    if tmpl_line and tmpl_line.parent_id:
                        # The parent is looked up by code within the same report
                        parent_line_ids = line_obj.search(cr, uid,
                                [('report_id', '=', report.id),
                                ('code', '=', tmpl_line.parent_id.code)])
                        line_obj.write(cr, uid, line.id, {
                                'parent_id': (parent_line_ids and
                                        parent_line_ids[0] or False),
                            }, context=context)
        # Calculate the values of the lines
        # Note: We reload the reports objects to refresh the lines of detail.
        for report in self.browse(cr, uid, ids, context=context):
            if report.template_id:
                # Refresh the report's lines values
                for line in report.line_ids:
                    line.refresh_values()
                # Set the report as calculated
                self.write(cr, uid, [report.id], {
                        'state': 'calc_done'
                    }, context=context)
            else:
                # Ouch! no template: Going back to draft state.
                self.write(cr, uid, [report.id], {'state': 'draft'},
                        context=context)
        return True
    def action_confirm(self, cr, uid, ids, context=None):
        """Called when the user clicks the confirm button.

        Moves the report to the 'done' state, which makes its lines
        read-only (see the states mapping on the 'line_ids' column).
        """
        self.write(cr, uid, ids, {'state': 'done'}, context=context)
        return True
    def action_cancel(self, cr, uid, ids, context=None):
        """Called when the user clicks the cancel button.

        Moves the report to the 'canceled' state.
        """
        self.write(cr, uid, ids, {'state': 'canceled'}, context=context)
        return True
def action_recover(self, cr, uid, ids, context=None):
"""Called when the user clicks the draft button to create
a new workflow instance."" |
corymintz/mtools | mtools/test/test_util_logevent.py | Python | apache-2.0 | 7,031 | 0.021761 | import sys
from nose.tools import *
from mtools.util.logevent import LogEvent
import time
import datetime
from dateutil import parser
line_ctime_pre24 = "Sun Aug 3 21:52:05 [initandlisten] db version v2.2.4, pdfile version 4.5"
line_ctime = "Sun Aug 3 21:52:05.995 [initandlisten] db version v2.4.5"
line_iso8601_local = "2013-08-03T21:52:05.995+1000 [initandlisten] db version v2.5.2-pre-"
line_iso8601_utc = "2013-08-03T11:52:05.995Z [initandlisten] db version v2.5.2-pre-"
line_getmore = "Mon Aug 5 20:26:32 [conn9] getmore local.oplog.rs query: { ts: { $gte: new Date(5908578361554239489) } } cursorid:1870634279361287923 ntoreturn:0 keyUpdates:0 numYields: 107 locks(micros) r:85093 nreturned:13551 reslen:230387 144ms"
line_253_numYields = "2013-10-21T12:07:27.057+1100 [conn2] query test.docs query: { foo: 234333.0 } ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:1 locks(micros) r:239078 nreturned:0 reslen:20 145ms"
line_246_numYields = "Mon Oct 21 12:14:21.888 [conn4] query test.docs query: { foo: 23432.0 } ntoreturn:0 ntoskip:0 nscanned:316776 keyUpdates:0 numYields: 2405 locks(micros) r:743292 nreturned:2 reslen:2116 451ms"
line_pattern_26_a = """2014-03-18T18:34:30.435+1100 | [conn10] query test.new query: { a: 1.0 } planSumma | ry: EOF ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:103 nreturned:0 reslen:20 0ms"""
line_pattern_26_b = """2014-03-18T18:34:34.360+1100 [conn10] query test.new query: { query: { a: 1.0 }, orderby: { b: 1.0 } } planSummary: EOF ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:55 nreturned:0 reslen:20 0ms"""
line_pattern_26_c = """2014-03-18T18:34:50.777+1100 [conn10] query test.new query: { $query: { a: 1.0 }, $orderby: { b: 1.0 } } planSummary: EOF ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:60 nreturned:0 reslen:20 0ms"""
# fake system.profile documents
profile_doc1 = { "op" : "query", "ns" : "test.foo", "thread": "test.system.profile", "query" : { "test" : 1 }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : 461, "w" :0 }, "timeAcquiringMicros" : { "r" : 4, "w" : 3 } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "ts" : parser.parse("2014-03-20T04:04:21.231Z"), "client" : "127.0.0.1", "allUsers" : [ ], "user" : "" }
profile_doc2 = { "op" : "query", "ns" : "test.foo", "thread": "test.system.profile", "query" : { "query" : { "test" : 1 }, "orderby" : { "field" : 1 } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : 534, "w" : 0 }, "timeAcquiringMicros" : { "r" : 5, "w" : 4 } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "ts" : parser.parse("2014-03-20T04:04:33.775Z"), "client" : "127.0.0.1", "allUsers" : [ ], "user" : "" }
profile_doc3 = { "op" : "query", "ns" : "test.foo", "thread": "test.system.profile", "query" : { "$query" : { "test" : 1 }, "$orderby" : { "field" : 1 } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : 436, "w" : 0 }, "timeAcquiringMicros" : { "r" : 5, "w" : 8 } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "ts" : parser.parse("2014-03-20T04:04:52.791Z"), "client" : "127.0.0.1", "allUsers" : [ ], "user" : "" }
def test_logevent_datetime_parsing():
    """ Check that all four timestamp formats are correctly parsed. """
    le = LogEvent(line_ctime_pre24)
    # ctime-style log lines carry no year, so LogEvent substitutes the current one
    this_year = datetime.datetime.now().year
    le_str = le.line_str
    assert(str(le.datetime) == '%s-08-03 21:52:05+00:00'%this_year)
    assert(le._datetime_format == 'ctime-pre2.4')
    # accessing .datetime must not alter line_str past the 4-char weekday prefix
    assert(le.line_str[4:] == le_str[4:])
    # make sure all datetime objects are timezone aware
    assert(le.datetime.tzinfo != None)
    le = LogEvent(line_ctime)
    le_str = le.line_str
    assert(str(le.datetime) == '%s-08-03 21:52:05.995000+00:00'%this_year)
    assert(le._datetime_format == 'ctime')
    assert(le.line_str[4:] == le_str[4:])
    # make sure all datetime objects are timezone aware
    assert(le.datetime.tzinfo != None)
    le = LogEvent(line_iso8601_utc)
    le_str = le.line_str
    # ISO-8601 lines embed the year, so no substitution happens here
    assert(str(le.datetime) == '2013-08-03 11:52:05.995000+00:00')
    assert(le._datetime_format == 'iso8601-utc')
    assert(le.line_str[4:] == le_str[4:])
    # make sure all datetime objects are timezone aware
    assert(le.datetime.tzinfo != None)
    le = LogEvent(line_iso8601_local)
    le_str = le.line_str
    # local variant keeps its +10:00 offset instead of being normalized to UTC
    assert(str(le.datetime) == '2013-08-03 21:52:05.995000+10:00')
    assert(le._datetime_format == 'iso8601-local')
    assert(le.line_str[4:] == le_str[4:])
    # make sure all datetime objects are timezone aware
    assert(le.datetime.tzinfo != None)
def test_logevent_pattern_parsing():
    """Query pattern extraction is identical across all three 2.6 query forms."""
    for logline in (line_pattern_26_a, line_pattern_26_b, line_pattern_26_c):
        assert LogEvent(logline).pattern == '{"a": 1}'
def test_logevent_sort_pattern_parsing():
    """Sort pattern is absent without orderby, and {"b": 1} for both orderby forms."""
    assert LogEvent(line_pattern_26_a).sort_pattern is None
    for logline in (line_pattern_26_b, line_pattern_26_c):
        assert LogEvent(logline).sort_pattern == '{"b": 1}'
def test_logevent_profile_pattern_parsing():
    """Pattern extraction from system.profile docs matches the log-line format."""
    for doc in (profile_doc1, profile_doc2, profile_doc3):
        assert LogEvent(doc).pattern == '{"test": 1}'
def test_logevent_profile_sort_pattern_parsing():
    """Sort pattern from system.profile docs: None for doc1, {"field": 1} otherwise."""
    assert LogEvent(profile_doc1).sort_pattern is None
    for doc in (profile_doc2, profile_doc3):
        assert LogEvent(doc).sort_pattern == '{"field": 1}'
def test_logevent_extract_new_and_old_numYields():
    """numYields is parsed from both the 2.4.6 and the 2.5.3 log formats."""
    for logline, expected in ((line_246_numYields, 2405), (line_253_numYields, 1)):
        assert LogEvent(logline).numYields == expected
def test_logevent_value_extraction():
    """ Check for correct value extraction of all fields. """
    le = LogEvent(line_getmore)
    expected = {
        'thread': 'conn9',
        'operation': 'getmore',
        'namespace': 'local.oplog.rs',
        'duration': 144,
        'numYields': 107,
        'r': 85093,
        'ntoreturn': 0,
        'nreturned': 13551,
        'pattern': '{"ts": 1}',
    }
    for attr, value in expected.items():
        assert getattr(le, attr) == value
def test_logevent_lazy_evaluation():
    """ Check that all LogEvent variables are evaluated lazily. """
    lazy_fields = ['_thread', '_operation', '_namespace', '_duration',
                   '_numYields', '_r', '_ntoreturn', '_nreturned', '_pattern']
    le = LogEvent(line_getmore)
    # before parsing, every member variable must still be None
    assert all(getattr(le, attr) is None for attr in lazy_fields)
    # after parsing, all of them must be filled out
    le.parse_all()
    assert all(getattr(le, attr) is not None for attr in lazy_fields)
|
cogu/autosar | autosar/parser/datatype_parser.py | Python | mit | 25,123 | 0.00617 | import sys
from autosar.parser.parser_base import ElementParser
import autosar.datatype
class DataTypeParser(ElementParser):
def __init__(self,version=3.0):
super().__init__(version)
if self.version >= 3.0 and self.version < 4.0:
self.switcher = {'ARRAY-TYPE': self.parseArrayType,
'BOOLEAN-TYPE': self.parseBooleanType,
'INTEGER-TYPE': self.parseIntegerType,
'REAL-TYPE': self.parseRealType,
'RECORD-TYPE': self.parseRecordType,
'STRING-TYPE': self.parseStringType}
elif self.version >= 4.0:
self.switcher = {
'DATA-CONSTR': self.parseDataConstraint,
'IMPLEMENTATION-DATA-TYPE': self.parseImplementationDataType,
'SW-BASE-TYPE': self.parseSwBaseType,
'DATA-TYPE-MAPPING-SET': self.parseDataTypeMappingSet,
'APPLICATION-PRIMITIVE-DATA-TYPE': self.parseApplicationPrimitiveDataType,
'APPLICATION-ARRAY-DATA-TYPE' : self.parseApplicationArrayDataType,
'APPLICATION-RECORD-DATA-TYPE': self.parseApplicationRecordDataTypeXML,
}
   def getSupportedTags(self):
      """Return the XML tag names this parser can handle (dict key view)."""
      return self.switcher.keys()
def parseElement(self, xmlElement, parent = None):
parseFunc = self.switcher.get(xmlElement.tag)
if parseFunc is not None:
return parseFunc(xmlElement,parent)
else:
return None
   def parseIntegerType(self,root,parent=None):
      """Parse an INTEGER-TYPE element into an IntegerDataType (AUTOSAR 3 only).

      Raises NotImplementedError for any SW-DATA-DEF-PROPS child other than
      COMPU-METHOD-REF. Note: missing LOWER-LIMIT/UPPER-LIMIT would raise
      AttributeError here (find() returns None) — presumably the schema
      guarantees both; verify against the AUTOSAR 3 XSD.
      """
      if self.version>=3.0:
         name=root.find("./SHORT-NAME").text
         minval = int(root.find("./LOWER-LIMIT").text)
         maxval = int(root.find("./UPPER-LIMIT").text)
         dataDefXML = root.find('./SW-DATA-DEF-PROPS')
         dataType = autosar.datatype.IntegerDataType(name,minval,maxval)
         self.parseDesc(root,dataType)
         if dataDefXML is not None:
            for elem in dataDefXML.findall('./*'):
               if elem.tag=='COMPU-METHOD-REF':
                  dataType.compuMethodRef=self.parseTextNode(elem)
               else:
                  # fail loudly on unsupported properties instead of dropping them
                  raise NotImplementedError(elem.tag)
         return dataType
def parseRecordType(self,root,parent=None):
if self.version>=3.0:
elements = []
name=root.find("./SHORT-NAME").text
for elem in root.findall('./ELEMENTS/RECORD-ELEMENT'):
elemName = self.parseTextNode(elem.find("./SHORT-NAME"))
elemTypeRef = self.parseTextNode(elem.find("./TYPE-TREF"))
elements.append(autosar.datatype.RecordTypeElement(elemName, elemTypeRef))
dataType=autosar.datatype.RecordDataType(name,elements);
self.parseDesc(root,dataType)
return dataType
def parseArrayType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
length=int(root.find('ELEMENT/MAX-NUMBER-OF-ELEMENTS').text)
typeRef=root.find('ELEMENT/TYPE-TREF').text
dataType=autosar.datatype.ArrayDataType(name,typeRef,length)
self.parseDesc(root,dataType)
return dataType;
de | f parseBooleanType(self,root,parent=None):
if self.version>=3:
name=root.find("./SHORT-NAME").text
dataType=autosar.datatype.BooleanDataType(name)
self.parseDesc(root,dataType)
return dataType
def parseStringType(self,r | oot,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
length=int(root.find('MAX-NUMBER-OF-CHARS').text)
encoding=root.find('ENCODING').text
dataType=autosar.datatype.StringDataType(name,length,encoding)
self.parseDesc(root,dataType)
return dataType
def parseRealType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
elem = root.find("./LOWER-LIMIT")
if elem is not None:
minval = elem.text
minvalType = elem.attrib['INTERVAL-TYPE']
elem = root.find("./UPPER-LIMIT")
if elem is not None:
maxval = elem.text
maxvalType = elem.attrib['INTERVAL-TYPE']
hasNaNText = self.parseTextNode(root.find("./ALLOW-NAN"))
hasNaN = True if (hasNaNText is not None and hasNaNText == 'true') else False
encoding = self.parseTextNode(root.find("./ENCODING"))
dataType=autosar.datatype.RealDataType(name,minval,maxval,minvalType,maxvalType,hasNaN,encoding)
self.parseDesc(root,dataType)
return dataType
   def parseDataConstraint(self, xmlRoot, parent=None):
      """Parse a DATA-CONSTR element into a DataConstraint (AUTOSAR 4).

      Collects internal/physical constraint rules and an optional
      CONSTR-LEVEL; any other rule child raises NotImplementedError.
      Uses the push/pop context from ElementParser to gather name/adminData.
      """
      assert (xmlRoot.tag == 'DATA-CONSTR')
      rules=[]
      constraintLevel = None
      self.push()
      for xmlElem in xmlRoot.findall('./*'):
         if xmlElem.tag == 'DATA-CONSTR-RULES':
            for xmlChildElem in xmlElem.findall('./DATA-CONSTR-RULE/*'):
               if xmlChildElem.tag == 'INTERNAL-CONSTRS':
                  rules.append(self._parseDataConstraintRule(xmlChildElem, 'internalConstraint'))
               elif xmlChildElem.tag == 'PHYS-CONSTRS':
                  rules.append(self._parseDataConstraintRule(xmlChildElem, 'physicalConstraint'))
               elif xmlChildElem.tag == 'CONSTR-LEVEL':
                  constraintLevel = self.parseIntNode(xmlChildElem)
               else:
                  raise NotImplementedError(xmlChildElem.tag)
         else:
            # common children (SHORT-NAME, ADMIN-DATA, ...) handled generically
            self.defaultHandler(xmlElem)
      elem = autosar.datatype.DataConstraint(self.name, rules, constraintLevel, parent, self.adminData)
      self.pop(elem)
      return elem
def _parseDataConstraintRule(self, xmlElem, constraintType):
lowerLimitXML = xmlElem.find('./LOWER-LIMIT')
upperLimitXML = xmlElem.find('./UPPER-LIMIT')
lowerLimit = None if lowerLimitXML is None else self.parseNumberNode(lowerLimitXML)
upperLimit = None if upperLimitXML is None else self.parseNumberNode(upperLimitXML)
lowerLimitType = 'CLOSED'
upperLimitType = 'CLOSED'
key = 'INTERVAL-TYPE'
if lowerLimitXML is not None and key in lowerLimitXML.attrib and lowerLimitXML.attrib[key]=='OPEN':
lowerLimitType='OPEN'
if upperLimitXML is not None and key in upperLimitXML.attrib and upperLimitXML.attrib[key]=='OPEN':
upperLimitType='OPEN'
return {
'type': constraintType,
'lowerLimit': lowerLimit,
'upperLimit': upperLimit,
'lowerLimitType': lowerLimitType,
'upperLimitType': upperLimitType}
def parseImplementationDataType(self, xmlRoot, parent=None):
assert (xmlRoot.tag == 'IMPLEMENTATION-DATA-TYPE')
variantProps, typeEmitter, parseTextNode, dynamicArraySizeProfile, subElementsXML, symbolProps = None, None, None, None, None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
elif xmlElem.tag == 'TYPE-EMITTER':
typeEmitter = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'DYNAMIC-ARRAY-SIZE-PROFILE':
dynamicArraySizeProfile = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'SUB-ELEMENTS':
subElementsXML = xmlElem
elif xmlElem.tag == 'SYMBOL-PROPS':
symbolProps = self.parseSymbolProps(xmlElem)
else:
self.defaultHandler(xmlElem)
dataType = autosar.datatype.ImplementationDataType(
self.name,
variantProps = variantProps,
dynamicArraySizeProfile = dynamicAr |
sakhuja/cookie_lover | tests/test_models.py | Python | bsd-3-clause | 1,655 | 0.001813 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pyt | est
from cookie_flaskApp.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
    """Unit tests for the User model (run against the ``db`` fixture)."""

    def test_get_by_id(self):
        """A saved user can be fetched back by its primary key."""
        user = User('foo', 'foo@bar.com')
        user.save()
        retrieved = User.get_by_id(user.id)
        assert retrieved == user

    def test_created_at_defaults_to_datetime(self):
        """created_at is auto-populated with a datetime on save."""
        user = User(username='foo', email='foo@bar.com')
        user.save()
        assert bool(user.created_at)
        assert isinstance(user.created_at, dt.datetime)

    def test_password_is_nullable(self):
        """A user created without a password stores None."""
        user = User(username='foo', email='foo@bar.com')
        user.save()
        assert user.password is None

    def test_factory(self):
        """UserFactory produces a valid, active, non-admin user."""
        user = UserFactory(password="myprecious")
        assert bool(user.username)
        assert bool(user.email)
        assert bool(user.created_at)
        assert user.is_admin is False
        assert user.active is True
        assert user.check_password('myprecious')

    def test_check_password(self):
        """check_password accepts the right password and rejects a wrong one."""
        user = User.create(username="foo", email="foo@bar.com",
                           password="foobarbaz123")
        assert user.check_password('foobarbaz123') is True
        assert user.check_password("barfoobaz") is False

    def test_full_name(self):
        """full_name joins first and last name."""
        user = UserFactory(first_name="Foo", last_name="Bar")
        assert user.full_name == "Foo Bar"

    def test_roles(self):
        """Roles appended to user.roles are persisted.

        (Removed a stray ' |' delimiter that corrupted the final assert line.)
        """
        role = Role(name='admin')
        role.save()
        u = UserFactory()
        u.roles.append(role)
        u.save()
        assert role in u.roles
DanteOnline/free-art | project/free_art/item/urls.py | Python | gpl-3.0 | 273 | 0.007326 | from django.conf.urls import url
from item.views i | mport CategoryDetailView, ScriptDetailView
urlpatterns = [
url(r'^category/(?P<pk>\d+)?$', CategoryDetailView.as_view(), name='category'),
url(r'^script/(?P<pk>\d+)?$' | , ScriptDetailView.as_view(), name='script'),
] |
vfonov/ITK | Wrapping/Generators/Python/Tests/getNameOfClass.py | Python | apache-2.0 | 3,326 | 0.000301 | # ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
# a short program to check the value returned by the GetNameOfClass() methods
import itk
import sys
itk.auto_progress(2)
# must force the load to return all the names with dir(itk)
itk.force_load()
# itk.ImageToImageFilter
def wrongClassName(cl, name):
o = cl.New()
# be sure that the type of the instantiated object is the same
# than the one of the class. It can be different if the class
# is an "abstract" one and don't provide any New() method.
# In that case, the one of the superclass is used.
return o.GetNameOfClass() != name and itk.class_(o) == cl
# | a list of classes to exclude. Typically, the classes with a custom New()
# method, which return a subclass of the current class
exclude = [
"ForwardFFTImageFilter",
"Forward1DFFTImageFilter",
"InverseFFTImageFilter",
"Inverse1DFFTImageFilter",
"OutputWindow",
"MultiThreaderBase",
"FFTComplexToComplexImageFilter",
"ComplexToComplexFFTImageFilter",
"ComplexToComplex1DImageFilter",
"templated_class",
"HalfHermitianToRealInverseFFTImageFilter",
| "RealToHalfHermitianForwardFFTImageFilter",
"CustomColormapFunction",
"ScanlineFilterCommon", # Segfault
"cvar",
]
wrongName = 0
totalName = 0
for t in dir(itk):
if t not in exclude:
T = itk.__dict__[t]
# first case - that's a templated class
if isinstance(T, itk.Vector.__class__) and len(T) > 0:
# use only the first specialization - all of them return the same
# name
i = T.values()[0]
# GetNameOfClass() is a virtual method of the LightObject class,
# so we must instantiate an object with the New() method
if "New" in dir(i) and "GetNameOfClass" in dir(i):
totalName += 1
if wrongClassName(i, t):
msg = f"{T}: wrong class name: {t}"
print(msg, file=sys.stderr)
wrongName += 1
else:
if "New" in dir(T) and "GetNameOfClass" in dir(T):
totalName += 1
if wrongClassName(T, t):
msg = f"{T}: wrong class name: {t}"
print(msg, file=sys.stderr)
o = T.New()
print(itk.class_(o), file=sys.stderr)
print(o.GetNameOfClass(), file=sys.stderr)
wrongName += 1
print(f"{totalName} classes checked.")
if wrongName:
print(f"{wrongName} classes are not providing the correct name.", file=sys.stderr)
sys.exit(1)
|
pixyj/pramod.io | blog/migrations/0002_auto_20160513_1114.py | Python | mit | 587 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-13 11:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
| dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='content',
new_name='markdown_content',
),
migrations.AddField(
model_name='post',
name='is_published',
field=models.BooleanField(default=True),
| ),
]
|
mvidalgarcia/indico | indico/legacy/common/Conversion.py | Python | mit | 261 | 0 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LIC | ENSE file for more details.
|
from indico.util.fossilize.conversion import *
|
pluser/nikola_plugins | v7/less/less.py | Python | mit | 4,899 | 0.001633 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import codecs
import glob
import os
import sys
import subprocess
from nikola.plugin_categories import Task
from nikola import utils
class BuildLess(Task):
"""Generate CSS out of LESS sources."""
name = "build_less"
sources_folder = "less"
sources_ext = ".less"
def gen_tasks(self):
"""Generate CSS out of LESS sources."""
self.compiler_name = self.site.config['LESS_COMPILER']
self.compiler_options = self.site.config['LESS_OPTIONS']
kw = {
'cache_folder': self.site.config['CACHE_FOLDER'],
'themes': self.site.THEMES,
}
tasks = {}
# Find where in the theme chain we define the LESS targets
# There can be many *.less in the folder, but we only will build
# the ones listed in less/targets
if os.path.isfile(os.path.join(self.sources_folder, "targets")):
targets_path = os.path.join(self.sources_folder, "targets")
else:
targets_path = utils.get_asset_path(os.path.join(self.sources_folder, "targets"), self.site.THEMES)
try:
with codecs.open(targets_path, "rb", "utf-8") as inf:
targets = [x.strip() for x in inf.readlines()]
except Exception:
targets = []
for task in utils.copy_tree(self.sources_folder, os.path.join(kw['cache_folder'], self.sources_folder)):
if task['name'] in tasks:
continue
task['basename'] = 'prepare_less_sources'
tasks[task['name']] = task
yield task
for theme_name in kw['themes']:
src = os.path.join(utils.get_theme_path(theme_name), self.sources_folder)
for task in utils.copy_tree(src, os.path.join(kw['cache_folder'], self.sources_folder)):
task['basename'] = 'prepare_less_sources'
yield task
# Build targets and write CSS files
base_path = utils.get_theme_path(self.site.THEMES[0])
dst_dir = os.path.join(self.site.config['OUTPUT_FOLDER'], 'assets', 'css')
# Make everything depend on all sources, rough but enough
deps = []
if os.path.isfile(os.path.join(self.sources_folder, "targets")):
deps += glob.glob(os.path.join(kw['cache_folder'], self.sources_folder,
'*{0}'.format(self.sources_ext)))
else:
deps += glob.glob(os.path.join(base_path, self.sources_folder,
'*{0}'.format(self.sources_ext)))
def compile_target(target, dst):
| utils.makedirs(dst_dir)
src = os.path.join(kw['cache_folder'], self.sources_folder, target)
run_in_shell = sys.platform == 'win32'
try:
compiled = subprocess.check | _output([self.compiler_name] + self.compiler_options + [src], shell=run_in_shell)
except OSError:
utils.req_missing([self.compiler_name],
'build LESS files (and use this theme)',
False, False)
with open(dst, "wb+") as outf:
outf.write(compiled)
yield self.group_task()
for target in targets:
dst = os.path.join(dst_dir, target.replace(self.sources_ext, ".css"))
yield {
'basename': self.name,
'name': dst,
'targets': [dst],
'file_dep': deps,
'task_dep': ['prepare_less_sources'],
'actions': ((compile_target, [target, dst]), ),
'uptodate': [utils.config_changed(kw)],
'clean': True
}
|
jardiacaj/finem_imperii | turn/public_order.py | Python | agpl-3.0 | 1,600 | 0 | import random
from unit.models import WorldUnit
from world.models.geography import World, Settlement
def worldwide_public_order(world: World):
for tile in world.tile_set.all():
for settlement in tile.settlement_set.all():
do_settlement_public_order_update(settlement)
def do_settlement_public_order_update(settlement: Settlement):
hunger_percent = settlement.get_hunger_percentage()
if hunger_percent > 20:
settlement.public_order -= (hunger_percent - 20) * 5
elif hunger_percent < 10:
settlement.public_order += (10 - hunger_percent) * 10
settlement.make_public_order_in_range()
non_barbarian_units = WorldUnit.objects.filter(
location=settlement,
owner_character__isnull=False
)
public_order_contributing_units = []
settlement_vm = settlement.tile.controlled_by.get_violence_monopoly()
for non_barbarian_ | unit in non_barbarian_units:
char_vm = non_barbarian_unit.owner_character.get_violence_monopoly()
if char_vm == settlement_vm:
public_order_contributing_units.append(
non_barbarian_unit
)
contributing_soldiers = sum(
[unit.soldier.count() for unit in public_order_contributing_units]
)
soldier_to_pop_ratio = contributing_soldiers / settlement.population
settle | ment.public_order += soldier_to_pop_ratio * 500
settlement.make_public_order_in_range()
if soldier_to_pop_ratio < 0.05:
settlement.public_order -= random.randint(0, 70)
settlement.make_public_order_in_range()
settlement.save()
|
Jumpscale/jumpscale6_core | apps/agentcontroller/jumpscripts/extended/system/backup_osis.py | Python | bsd-2-clause | 2,202 | 0.003633 | from JumpScale import j
descr = """
Creates a targz of the backup under {var directory}/backup/osis/{timestamp}.tgz
"""
organization = "jumpscale"
author = "khamisr@codescalers.com"
license = "bsd"
version = "1.0"
category = "system.backup.osis"
period = 60*60*24
enable = True
async = True
roles = ["admin"]
queue ='io'
def action():
import JumpScale.grid.osis
import tarfile
"""
"""
backuppath = j.system.fs.joinPaths(j.dirs.tmpDir, 'backup', 'osis')
timestamp = j.base.time.getTimeEpoch()
timestamp = j.base.time.formatTime(timestamp, "%Y%m%d_%H%M%S")
try:
oscl = j.core.osis.getClientByInstance('main')
namespaces = oscl.listNamespaces()
if j.system.fs.exists(backuppath):
j.system.fs.removeDirTree(backuppath)
for namespace in namespaces:
categories = oscl.listNamespaceCategories(namespace)
for category in categories:
if namespace == 'system' and category in ['stats', 'lo | g', 'sessioncache']:
continue
outputpath = j.system.fs.joinPaths(backuppath, namespace, category)
j.system.fs.createDir(outputpath)
oscl.export(namespace, category, outputpath)
#targz
backupdir = j.system.fs.joinPaths(j.dirs.varDir, 'backup', 'osis')
j.system.fs.createDir(backupdir)
| outputpath = j.system.fs.joinPaths(backupdir, '%s.tar.gz' % timestamp)
with tarfile.open(outputpath, "w:gz") as tar:
tar.add(backuppath)
j.system.fs.removeDirTree(backuppath)
except Exception:
import JumpScale.baselib.mailclient
import traceback
error = traceback.format_exc()
message = '''
OSIS backup at %s failed on %s.%s
Data should have been backed up to %s on the admin node.
Exception:
-----------------------------
%s
-----------------------------
''' % (timestamp, j.application.whoAmI.gid, j.application.whoAmI.nid, backuppath, error)
message = message.replace('\n', '<br/>')
j.clients.email.send('support@mothership1.com', 'monitor@mothership1.com', 'OSIS backup failed', message)
if __name__ == '__main__':
action()
|
rika/precip | examples/gcloud/hello-world.py | Python | apache-2.0 | 2,375 | 0.004632 | #!/usr/bin/python
import os
import time
from pprint import pprint
from precip import *
exp = None
PROJECT = ''
ZONE = 'us-central1-f'
USER = 'precip'
IMAGE_PROJECT = 'ubuntu-os-cloud' # look at https://cloud.google.com/compute/docs/operating-systems/linux-os
IMAGE_NAME = 'ubuntu-1504-vivid-v20150422' # to list images: gcloud compute images list --project [IMAGE_PROJECT]
SOURCE_DISK_IMAGE = 'projects/%s/global/images/%s' % (IMAGE_PROJECT, IMAGE_NAME)
MACHINE_TYPE = 'zones/%s/machineTypes/n1-standard-1' % ZONE
MACHINE_TYPE = 'zones/%s/machineTypes/f1-micro' % ZONE
# Use try/except liberally in your experiments - the api is set up to
# raise ExperimentException on most errors
try:
# Create a new OpenStack based experiment. In this case we pick
# up endpoints and access/secret cloud keys from the environment
# as exposing those is the common setup on FutureGrid
exp = GCloudExperiment(PROJECT, ZONE, USER)
# Provision an instance based on the ami-0000004c. Note that tags are
# used throughout the api to identify and manipulate instances. You
# can give an instance an arbitrary number of tags.
exp.provision(SOURCE_DISK_IMAGE, MACHINE_TYPE, tags=["test1"], count=1)
# Wait for all instances to boot | and become accessible. The provision
# method only starts the provisioning, and can be used to start a large
# number of instances at the same time. The wait method provides a
# barrier to when it is safe to start the actual experiment.
exp.wait()
# Print out the details of the instance. The details include instance id,
# private and public hostnames, and tags both defined by you and some
# added by the api
pprint( | exp.list())
# Run a command on the instances having the "test1" tag. In this case we
# only have one instance, but if you had multiple instances with that
# tag, the command would run on each one.
exp.run(["test1"], "echo 'Hello world from a experiment instance'", USER)
except ExperimentException as e:
# This is the default exception for most errors in the api
print "ERROR: %s" % e
finally:
# Be sure to always deprovision the instances we have started. Putting
# the deprovision call under finally: make the deprovisioning happening
# even in the case of failure.
if exp is not None:
exp.deprovision()
|
MWers/sd-coldfusion-plugin | plugins/ColdFusion.py | Python | mit | 3,975 | 0.000252 | """
Server Density Plugin
ColdFusion stats
https://github.com/MWers/sd-coldfusion-plugin/
Version: 1.0.2
"""
import os
import platform
import subprocess
class ColdFusion:
sd_cfstat_opt = 'coldfusion_cfstat_path'
cfstat_locations = ['/opt/ColdFusion11/bin/cfstat',
'/opt/coldfusion10/bin/cfstat',
'/opt/ColdFusion9/bin/cfstat',
'/opt/coldfusion8/bin/cfstat',
'/opt/coldfusionmx7/bin/cfstat']
cfstat_keys_long = ['Pg/s Now', 'Pg/s Hi', 'DB/s Now', 'DB/s Hi',
'CP/s Now', 'CP/s Hi', 'Reqs Q''ed', 'Reqs Run''g',
'Reqs TO''ed', 'Tpl Q''ed', 'Tpl Run''g', 'Tpl TO''ed',
'Flash Q''ed', 'Flash Run''g', 'Flash TO''ed',
'CFC Q''ed', 'CFC Run''g', 'CFC TO''ed',
'WebSvc Q''ed', 'WebSvc Run''g', 'WebSvc TO''ed',
'Avg Q Time', 'Avg Req Time', 'Avg DB Time',
'Bytes In/s', 'Bytes Out/s']
cfstat_keys_short = ['Pg/s Now', 'Pg/s Hi', 'DB/s Now', 'DB/s Hi',
'CP/s Now', 'CP/s Hi', 'Reqs Q''ed', 'Reqs Run' | 'g',
'Reqs TO''ed', 'Avg Q Time', 'Avg Req Time',
'Avg DB Time', 'Bytes In/s', 'Bytes Out/s']
python_version = platform.python_version_tuple()
def __init__(self, agent_config, checks_logger, raw_config):
s | elf.agent_config = agent_config
self.checks_logger = checks_logger
self.raw_config = raw_config
def run(self):
# Determine location of cfstat and make sure it's executable
cfstat = None
if 'Main' in self.raw_config and \
self.sd_cfstat_opt in self.raw_config['Main']:
cfstat = self.raw_config['Main'][self.sd_cfstat_opt]
if not os.access(cfstat, os.X_OK):
self.checks_logger.error('ColdFusion: The location of cfstat '
'given in config (%s) either does '
'not exist or is not executable'
% cfstat)
return False
else:
self.checks_logger.debug('ColdFusion: Using cfstat defined in '
'config: %s' % cfstat)
else:
self.checks_logger.debug('ColdFusion: cfstat path not in config, '
'checking standard locations')
for location in self.cfstat_locations:
if os.access(location, os.X_OK):
cfstat = location
self.checks_logger.debug('ColdFusion: Using cfstat found '
'here: %s' % cfstat)
break
if not cfstat:
self.checks_logger.error('ColdFusion: Could not find cfstat')
return False
# Run cfstat and collect its data
self.checks_logger.debug('ColdFusion: Getting stats from ColdFusion')
proc = subprocess.Popen([cfstat, '-x', '-n', '-s'],
stdout=subprocess.PIPE, close_fds=True)
stats = proc.communicate()[0]
if int(self.python_version[1]) >= 6:
try:
proc.kill()
except Exception, e:
self.checks_logger.debug('ColdFusion: cfstat process already '
'terminated')
stats = stats.split()
if len(stats) == 26:
keys = self.cfstat_keys_long
elif len(stats) == 14:
keys = self.cfstat_keys_short
else:
self.checks_logger.error('ColdFusion: Received unexpected '
'response from cfstat: %s' % str(stats))
return False
data = dict(zip(keys, stats))
return data
|
Zolomon/reversi-ai | tests/board.py | Python | mit | 7,995 | 0.001126 | from game.board import Board
from game.settings import *
__author__ = 'bengt'
import unittest
class TestBoard(unittest.TestCase):
def setUp(self):
pass
def test_init(self):
b = Board(False)
self.assertEqual(len(b.get_move_pieces(WHITE)), 0)
self.assertEqual(len(b.get_move_pieces(BLACK)), 0)
def test_draw(self):
b = Board(False)
b.set_black(0, 0)
b.set_black(1, 0)
b.set_white(1, 1)
b.set_move(0, 1)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 BBBB............1
2 MMWW............2
3 ................3
4 ................4
5 ................5
6 ................6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b.clear_moves()
b.mark_moves(BLACK)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 BBBB............1
2 ..WW............2
3 ..MMMM..........3
4 ................4
5 ................5
6 ................6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b.clear_moves()
b = Board(False)
b.set_white(3, 3)
b.set_white(3, 4)
b.set_white(4, 4)
b.set_white(4, 3)
b.set_black(2, 2)
b.set_black(3, 2)
b.set_black(4, 2)
b.set_black(5, 2)
b.set_black(2, 3)
b.set_black(5, 3)
b.set_black(2, 4)
b.set_black(5, 4)
b.set_black(2, 5)
b.set_black(3, 5)
b.set_black(4, 5)
b.set_black(5, 5)
b.clear_moves()
b.mark_moves(WHITE)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 ................1
2 ..MMMMMMMMMMMM..2
3 ..MMBBBBBBBBMM..3
4 ..MMBBWWWWBBMM..4
5 ..MMBBWWWWBBMM..5
6 ..MMBBBBBBBBMM..6
7 ..MMMMMMMMMMMM..7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b = Board(False)
b.set_white(3, 3)
b.set_white(4, 4)
b.set_black(3, 4)
b.set_black(4, 3)
b.mark_moves(BLACK)
result = b.draw()
| canvas = """ a.b.c.d.e.f.g.h.
1 ................1
2 ................2
3 ......MM........3
4 ....MMWWBB......4
5 ......BBWWMM....5
6 ........MM......6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
b.clear_moves()
b.make_move((3, 2), BLACK)
b.mark_moves(WHITE)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 .............. | ..1
2 ................2
3 ....MMBBMM......3
4 ......BBBB......4
5 ....MMBBWW......5
6 ................6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
b.clear_moves()
b.make_move((2, 2), WHITE)
b.mark_moves(BLACK)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 ................1
2 ................2
3 ..MMWWBB........3
4 ....MMWWBB......4
5 ......BBWWMM....5
6 ........MM......6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
b = Board(False)
b.set_white(6, 0)
b.set_white(5, 1)
b.set_white(3, 1)
b.set_white(3, 2)
b.set_white(4, 2)
b.set_white(5, 2)
b.set_white(3, 3)
b.set_white(4, 3)
b.set_black(2, 3)
b.set_black(2, 4)
b.set_black(3, 4)
b.set_black(4, 4)
b.set_black(1, 5)
b.set_black(4, 5)
b.mark_moves(BLACK)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 ......MM....WW..1
2 ......WWMMWWMM..2
3 ....MMWWWWWW....3
4 ....BBWWWWMM....4
5 ....BBBBBB......5
6 ..BB....BB......6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b = Board(False)
b.set_white(3, 4)
b.set_white(4, 0)
b.set_white(4, 3)
b.set_white(4, 4)
b.set_white(5, 0)
b.set_white(5, 1)
b.set_white(5, 3)
b.set_white(6, 0)
b.set_white(6, 1)
b.set_white(7, 0)
b.set_black(2, 0)
b.set_black(3, 1)
b.set_black(4, 1)
b.set_black(7, 1)
b.set_black(3, 2)
b.set_black(4, 2)
b.set_black(5, 2)
b.set_black(6, 2)
b.set_black(7, 2)
b.set_black(3, 3)
b.set_black(2, 3)
b.set_black(2, 4)
b.set_black(1, 5)
b.mark_moves(BLACK)
result = b.draw()
#0 1 2 3 4 5 6 7
canvas = """ a.b.c.d.e.f.g.h.
1 ....BB..WWWWWWWW1
2 ......BBBBWWWWBB2
3 ......BBBBBBBBBB3
4 ....BBBBWWWWMM..4
5 ....BBWWWWMMMM..5
6 ..BBMMMMMMMM....6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b = Board(False)
b.set_white(3, 4)
b.set_white(4, 0)
b.set_white(4, 3)
b.set_white(4, 4)
b.set_white(5, 0)
b.set_white(5, 1)
b.set_white(5, 3)
b.set_white(6, 0)
b.set_white(6, 1)
b.set_white(7, 0)
b.set_black(2, 0)
b.set_black(3, 1)
b.set_black(4, 1)
b.set_black(7, 1)
b.set_black(3, 2)
b.set_black(4, 2)
b.set_black(5, 2)
b.set_black(6, 2)
b.set_black(7, 2)
b.set_black(3, 3)
b.set_black(2, 3)
b.set_black(2, 4)
b.set_black(1, 5)
b.mark_moves(BLACK)
result = b.draw()
#0 1 2 3 4 5 6 7
canvas = """ a.b.c.d.e.f.g.h.
1 ....BB..WWWWWWWW1
2 ......BBBBWWWWBB2
3 ......BBBBBBBBBB3
4 ....BBBBWWWWMM..4
5 ....BBWWWWMMMM..5
6 ..BBMMMMMMMM....6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b = Board(False)
b.set_white(3, 2)
b.set_white(4, 1)
b.set_white(4, 2)
b.set_white(5, 1)
b.set_white(5, 2)
b.set_white(6, 0)
b.set_white(6, 1)
b.set_white(6, 2)
b.set_white(6, 3)
b.set_black(1, 3)
b.set_black(1, 5)
b.set_black(2, 0)
b.set_black(2, 3)
b.set_black(2, 4)
b.set_black(3, 1)
b.set_black(3, 3)
b.set_black(3, 4)
b.set_black(3, 5)
b.set_black(4, 3)
b.set_black(4, 4)
b.set_black(5, 3)
b.set_black(5, 5)
b.set_black(6, 4)
b.set_black(7, 0)
b.mark_moves(BLACK)
result = b.draw()
#0 1 2 3 4 5 6 7
canvas = """ a.b.c.d.e.f.g.h.
1 ....BB..MMMMWWBB1
2 ....MMBBWWWWWWMM2
3 ......WWWWWWWW..3
4 ..BBBBBBBBBBWWMM4
5 ....BBBBBB..BB..5
6 ..BB..BB..BB....6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
b.clear_moves()
b.mark_moves(WHITE)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 ....BBMM....WWBB1
2 ....MMBBWWWWWW..2
3 ......WWWWWWWW..3
4 MMBBBBBBBBBBWW..4
5 ..MMBBBBBBMMBB..5
6 ..BBMMBBMMBBMMMM6
7 MM..MMMM........7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
##b.clear_moves()
##b.mark_moves(WHITE)
b.clear_moves()
b.make_move((0, 3), WHITE)
result = b.draw()
#0 1 2 3 4 5 6 7
canvas = """ a.b.c.d.e.f.g.h.
1 ....BB......WWBB1
2 ......BBWWWWWW..2
3 ......WWWWWWWW..3
4 WWWWWWWWWWWWWW..4
5 ....BBBBBB..BB..5
6 ..BB..BB..BB....6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
def test_ai(self):
b = Board(False)
b.set_black(4, 3)
b.set_black(3, 4)
b.set_white(4, 4)
b.set_white(3, 3)
b.clear_moves()
b.mark_moves(BLACK)
result = b.draw()
canvas = """ a.b.c.d.e.f.g.h.
1 ................1
2 ................2
3 ......MM........3
4 ....MMWWBB......4
5 ......BBWWMM....5
6 ........MM......6
7 ................7
8 ................8
a.b.c.d.e.f.g.h."""
self.assertEqual(result, canvas)
#b.make_move()
if __name__ == '__main__':
unittest.main()
|
LLNL/spack | var/spack/repos/builtin/packages/examl/package.py | Python | lgpl-2.1 | 2,255 | 0.004878 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Examl(MakefilePackage):
"""
Exascale Maximum Likelihood (ExaML) code for phylogenetic inference
| using MPI. This code implements the popular RAxML search algorithm
for maximum likelihood based inference of phylogenetic trees.
"""
homepage = "https://github.com/stamatak/ExaML"
url = "https://github. | com/stamatak/ExaML/archive/v3.0.22.tar.gz"
maintainers = ['robqiao']
version('3.0.22', sha256='802e673b0c2ea83fdbe6b060048d83f22b6978933a04be64fb9b4334fe318ca3')
version('3.0.21', sha256='6c7e6c5d7bf4ab5cfbac5cc0d577885272a803c142e06b531693a6a589102e2e')
version('3.0.20', sha256='023681248bbc7f19821b509948d79301e46bbf275aa90bf12e9f4879639a023b')
version('3.0.19', sha256='3814230bf7578b8396731dc87ce665d0b1a671d8effd571f924c5b7936ae1c9e')
version('3.0.18', sha256='1bacb5124d943d921e7beae52b7062626d0ce3cf2f83e3aa3acf6ea26cf9cd87')
version('3.0.17', sha256='90a859e0b8fff697722352253e748f03c57b78ec5fbc1ae72f7e702d299dac67')
version('3.0.16', sha256='abc922994332d40892e30f077e4644db08cd59662da8e2a9197d1bd8bcb9aa5f')
version('3.0.15', sha256='da5e66a63d6fa34b640535c359d8daf67f23bd2fcc958ac604551082567906b0')
version('3.0.14', sha256='698b538996946ae23a2d6fa1e230c210832e59080da33679ff7d6b342a9e6180')
version('3.0.13', sha256='893aecb5545798235a17975aa07268693d3526d0aee0ed59a2d6e791248791ed')
variant('mpi', default=True, description='Enable MPI parallel support')
depends_on('mpi', when='+mpi')
def build(self, spec, prefix):
#####################
# Build Directories #
#####################
with working_dir('examl'):
make('-f', 'Makefile.SSE3.gcc')
with working_dir('parser'):
make('-f', 'Makefile.SSE3.gcc')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install("examl/examl", prefix.bin)
install("parser/parse-examl", prefix.bin)
install_tree("manual", prefix.manual)
install_tree("testData", prefix.testData)
|
anushreejangid/csm-ut | csmpe/core_plugins/csm_node_status_check/ios_xe/plugin.py | Python | bsd-2-clause | 2,529 | 0.000791 | # =============================================================================
# asr9k
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTW | ARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCI | DENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from csmpe.plugins import CSMPlugin
from plugin_lib import parse_show_platform
class Plugin(CSMPlugin):
"""This plugin checks the states of all nodes"""
name = "Node Status Check Plugin"
platforms = {'ASR900'}
phases = {'Pre-Upgrade', 'Post-Upgrade'}
def run(self):
output = self.ctx.send("show platform")
inventory = parse_show_platform(self.ctx, output)
valid_state = [
'ok',
'ok, active',
'ok, standby',
'ps, fail',
'out of service',
'N/A'
]
for key, value in inventory.items():
if value['state'] not in valid_state:
self.ctx.warning("{}={}: {}".format(key, value, "Not in valid state for upgrade"))
break
else:
self.ctx.save_data("node_status", inventory)
self.ctx.info("All nodes in valid state for upgrade")
return True
self.ctx.error("Not all nodes in correct state. Upgrade can not proceed")
|
duyet-website/api.duyet.net | lib/gensim/test/test_coherencemodel.py | Python | mit | 9,464 | 0.006762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <ra | dimrehurek@sezna | m.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.ldamodel import LdaModel
from gensim.models.wrappers import LdaMallet
from gensim.models.wrappers import LdaVowpalWabbit
from gensim.corpora.dictionary import Dictionary
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
boolean_document_based = ['u_mass']
sliding_window_based = ['c_v', 'c_uci', 'c_npmi']
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
def checkCoherenceMeasure(topics1, topics2, coherence):
"""Check provided topic coherence algorithm on given topics"""
if coherence in boolean_document_based:
cm1 = CoherenceModel(topics=topics1, corpus=corpus, dictionary=dictionary, coherence=coherence)
cm2 = CoherenceModel(topics=topics2, corpus=corpus, dictionary=dictionary, coherence=coherence)
else:
cm1 = CoherenceModel(topics=topics1, texts=texts, dictionary=dictionary, coherence=coherence)
cm2 = CoherenceModel(topics=topics2, texts=texts, dictionary=dictionary, coherence=coherence)
return cm1.get_coherence() > cm2.get_coherence()
class TestCoherenceModel(unittest.TestCase):
def setUp(self):
# Suppose given below are the topics which two different LdaModels come up with.
# `topics1` is clearly better as it has a clear distinction between system-human
# interaction and graphs. Hence both the coherence measures for `topics1` should be
# greater.
self.topics1 = [['human', 'computer', 'system', 'interface'],
['graph', 'minors', 'trees', 'eps']]
self.topics2 = [['user', 'graph', 'minors', 'system'],
['time', 'graph', 'survey', 'minors']]
self.ldamodel = LdaModel(corpus=corpus, id2word=dictionary, num_topics=2, passes=0, iterations=0)
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
if self.mallet_path:
self.malletmodel = LdaMallet(mallet_path=self.mallet_path, corpus=corpus, id2word=dictionary, num_topics=2, iterations=0)
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
msg = "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping sanity checks for LDA Model"
logging.info(msg)
self.vw_path = None
else:
self.vw_path = vw_path
self.vwmodel = LdaVowpalWabbit(self.vw_path, corpus=corpus, id2word=dictionary, num_topics=2, passes=0)
def testUMass(self):
"""Test U_Mass topic coherence algorithm on given topics"""
self.assertTrue(checkCoherenceMeasure(self.topics1, self.topics2, 'u_mass'))
def testCv(self):
"""Test C_v topic coherence algorithm on given topics"""
self.assertTrue(checkCoherenceMeasure(self.topics1, self.topics2, 'c_v'))
def testCuci(self):
"""Test C_uci topic coherence algorithm on given topics"""
self.assertTrue(checkCoherenceMeasure(self.topics1, self.topics2, 'c_uci'))
def testCnpmi(self):
    """c_npmi coherence must rank the well-separated topics1 above topics2."""
    ranked_higher = checkCoherenceMeasure(self.topics1, self.topics2, 'c_npmi')
    self.assertTrue(ranked_higher)
def testUMassLdaModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA Model."""
    # Note that this is just a sanity check because LDA does not guarantee a better coherence
    # value on the topics if iterations are increased. This can be seen here:
    # https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde
    # The old `try: ... except: raise` added nothing (a bare re-raise is a
    # no-op); just construct the model and let any exception fail the test.
    CoherenceModel(model=self.ldamodel, corpus=corpus, coherence='u_mass')
def testCvLdaModel(self):
    """Perform sanity check to see if c_v coherence works with LDA Model."""
    # `try: ... except: raise` was a no-op wrapper; any exception raised here
    # fails the test on its own.
    CoherenceModel(model=self.ldamodel, texts=texts, coherence='c_v')
def testCuciLdaModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA Model."""
    # `try: ... except: raise` was a no-op wrapper; any exception raised here
    # fails the test on its own.
    CoherenceModel(model=self.ldamodel, texts=texts, coherence='c_uci')
def testCnpmiLdaModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA Model."""
    # `try: ... except: raise` was a no-op wrapper; any exception raised here
    # fails the test on its own.
    CoherenceModel(model=self.ldamodel, texts=texts, coherence='c_npmi')
def testUMassMalletModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        # Mallet is optional; silently skip when MALLET_HOME is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.malletmodel, corpus=corpus, coherence='u_mass')
def testCvMalletModel(self):
    """Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        # Mallet is optional; silently skip when MALLET_HOME is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.malletmodel, texts=texts, coherence='c_v')
def testCuciMalletModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        # Mallet is optional; silently skip when MALLET_HOME is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.malletmodel, texts=texts, coherence='c_uci')
def testCnpmiMalletModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        # Mallet is optional; silently skip when MALLET_HOME is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.malletmodel, texts=texts, coherence='c_npmi')
def testUMassVWModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        # Vowpal Wabbit is optional; skip when VOWPAL_WABBIT_PATH is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.vwmodel, corpus=corpus, coherence='u_mass')
def testCvVWModel(self):
    """Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        # Vowpal Wabbit is optional; skip when VOWPAL_WABBIT_PATH is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.vwmodel, texts=texts, coherence='c_v')
def testCuciVWModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        # Vowpal Wabbit is optional; skip when VOWPAL_WABBIT_PATH is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.vwmodel, texts=texts, coherence='c_uci')
def testCnpmiVWModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        # Vowpal Wabbit is optional; skip when VOWPAL_WABBIT_PATH is not set.
        return
    # `try: ... except: raise` was a no-op wrapper; construction alone is the test.
    CoherenceModel(model=self.vwmodel, texts=texts, coherence='c_npmi')
def testErrors(self):
"""Test if errors are raised on bad input"""
# not |
byaka/flaskJSONRPCServer | flaskJSONRPCServer/gmultiprocessing.py | Python | apache-2.0 | 6,393 | 0.022055 | # -*- coding: utf-8 -*-
"""
Add compatibility for gevent and multiprocessing.
Source based on project GIPC 0.6.0
https://bitbucket.org/jgehrcke/gipc/
"""
import os, sys, signal, multiprocessing, multiprocessing.process, multiprocessing.reduction
gevent=None
geventEvent=None
def _tryGevent():
    """Import gevent lazily, caching the modules in module globals.

    Returns True on success; raises ValueError when gevent is not installed.
    Callers rely only on the exception, but the old code returned False when
    the modules were already cached and True otherwise -- now consistently True.
    """
    global gevent, geventEvent
    if gevent and geventEvent:
        return True
    try:
        import gevent
        from gevent import event as geventEvent
        return True
    except ImportError:
        raise ValueError('gevent not found')
def Process(target, args=(), kwargs=None, name=None):  # daemon support intentionally disabled
    """Create a gevent-compatible process running `target(*args, **kwargs)`.

    Falls back to a plain multiprocessing.Process when gevent is unavailable.
    `kwargs` defaults to None (not a shared mutable {}) and is normalized here.
    """
    if kwargs is None:
        kwargs = {}
    # Check if gevent is available; otherwise degrade gracefully.
    try:
        _tryGevent()
    except ValueError:
        # parenthesized print works under both Python 2 and 3
        print('Gevent not found, switching to native')
        return multiprocessing.Process(target=target, args=args, kwargs=kwargs, name=name)
    if int(gevent.__version__[0]) < 1:
        raise NotImplementedError('Gmultiprocessing supports only gevent>=1.0, your version %s' % gevent.__version__)
    if not isinstance(args, tuple):
        raise TypeError('<args> must be a tuple')
    if not isinstance(kwargs, dict):
        raise TypeError('<kwargs> must be a dict')
    p = _GProcess(
        target=_child,
        name=name,
        kwargs={"target": target, "args": args, "kwargs": kwargs}
    )
    # if daemon is not None: p.daemon = daemon
    return p
def _child(target, args, kwargs):
    """Wrapper function that runs in child process. Resets gevent/libev state
    and executes user-given function.

    After fork the child inherits the parent's hub and libev loop; both must
    be torn down and recreated so the child owns a fresh default loop.
    """
    _tryGevent()
    _reset_signal_handlers()
    gevent.reinit()
    hub = gevent.get_hub()
    # Drop the inherited threadpool so gevent lazily creates a new one.
    del hub.threadpool
    hub._threadpool = None
    # Destroy the inherited hub together with its (forked) event loop.
    hub.destroy(destroy_loop=True)
    h = gevent.get_hub(default=True)
    assert h.loop.default, 'Could not create libev default event loop.'
    target(*args, **kwargs)
class _GProcess(multiprocessing.Process):
    """
    Compatible with the ``multiprocessing.Process`` API.

    Child termination is observed through a libev SIGCHLD watcher instead of
    multiprocessing's blocking wait, so join()/is_alive() cooperate with
    gevent greenlets.
    """
    # Neutralize multiprocessing's own child polling: letting both libev's
    # SIGCHLD watcher and Popen.poll() reap the child would race. Popen moved
    # between Python 3.3 and 3.4, hence the two import paths.
    try:
        from multiprocessing.forking import Popen as mp_Popen
    except ImportError:
        # multiprocessing's internal structure has changed from 3.3 to 3.4.
        from multiprocessing.popen_fork import Popen as mp_Popen
    # Monkey-patch and forget about the name.
    mp_Popen.poll = lambda *a, **b: None
    del mp_Popen

    def start(self):
        _tryGevent()
        # Start grabbing SIGCHLD within libev event loop.
        gevent.get_hub().loop.install_sigchld()
        # Run new process (based on `fork()` on POSIX-compliant systems).
        super(_GProcess, self).start()
        # The occurrence of SIGCHLD is recorded asynchronously in libev.
        # This guarantees proper behavior even if the child watcher is
        # started after the child exits. Start child watcher now.
        self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
        self._returnevent = gevent.event.Event()
        self._sigchld_watcher.start(self._on_sigchld, self._sigchld_watcher)

    def _on_sigchld(self, watcher):
        """Callback of libev child watcher. Called when libev event loop
        catches corresponding SIGCHLD signal.
        """
        watcher.stop()
        # Status evaluation copied from `multiprocessing.forking` in Py2.7.
        if os.WIFSIGNALED(watcher.rstatus):
            self._popen.returncode = -os.WTERMSIG(watcher.rstatus)
        else:
            assert os.WIFEXITED(watcher.rstatus)
            self._popen.returncode = os.WEXITSTATUS(watcher.rstatus)
        # Wake up any greenlet blocked in join().
        self._returnevent.set()

    def is_alive(self):
        # returncode is set exclusively by _on_sigchld above.
        assert self._popen is not None, "Process not yet started."
        if self._popen.returncode is None:
            return True
        return False

    @property
    def exitcode(self):
        # None until the SIGCHLD watcher has recorded the child's status.
        if self._popen is None:
            return None
        return self._popen.returncode

    def __repr__(self):
        # Mirrors multiprocessing.Process.__repr__ status reporting.
        exitcodedict = multiprocessing.process._exitcode_to_name
        status = 'started'
        if self._parent_pid != os.getpid(): status = 'unknown'
        elif self.exitcode is not None: status = self.exitcode
        if status == 0: status = 'stopped'
        elif isinstance(status, int):
            status = 'stopped[%s]' % exitcodedict.get(status, status)
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self.daemon and ' daemon' or '')

    def join(self, timeout=None):
        """
        Wait cooperatively until child process terminates or timeout occurs.

        :arg timeout: ``None`` (default) or a a time in seconds. The method
            simply returns upon timeout expiration. The state of the process
            has to be identified via ``is_alive()``.
        """
        assert self._parent_pid == os.getpid(), "I'm not parent of this child."
        assert self._popen is not None, 'Can only join a started process.'
        # Resemble multiprocessing's join() method while replacing
        # `self._popen.wait(timeout)` with
        # `self._returnevent.wait(timeout)`
        self._returnevent.wait(timeout)
        if self._popen.returncode is not None:
            # Deregister the finished child, as multiprocessing would.
            if hasattr(multiprocessing.process, '_children'):  # This is for Python 3.4.
                kids = multiprocessing.process._children
            else:  # For Python 2.6, 2.7, 3.3.
                kids = multiprocessing.process._current_process._children
            kids.discard(self)
# Inspect signal module for signals whose action is to be restored to the default action right after fork.
_signals_to_reset = [getattr(signal, s) for s in
set([s for s in dir(signal) if s.startswith("SIG")]) -
# Exclude constants that are not signals such as SIG_DFL and SIG_BLOCK.
set([s for s in dir(signal) if s.startswith("SIG_")]) -
# Leave handlers for SIG(STOP/KILL/PIPE) untouched.
set(['SIGSTOP', 'SIGKILL', 'SIGPIPE'])]
def _reset_signal_handlers():
    """Restore the default disposition for every signal in _signals_to_reset."""
    for signum in _signals_to_reset:
        # Skip constants at or above NSIG, which signal.signal() would reject.
        if signum < signal.NSIG:
            signal.signal(signum, signal.SIG_DFL)
# Version-dependent re-raise helper (same approach as the `six` library):
# Python 2's `raise tp, value, tb` is a syntax error under Python 3, so the
# py2 variant must be built via exec() from a string.
PY3 = sys.version_info[0] == 3
if PY3:
    def _reraise(tp, value, tb=None):
        # Re-raise `value` preserving the original traceback `tb`.
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def __exec(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # Define the py2 three-argument raise inside an exec string so this file
    # still parses under Python 3.
    __exec("""def _reraise(tp, value, tb=None): raise tp, value, tb""")
|
stpettersens/sublimetext-buildtools | ApiGen/apigen_all.py | Python | mit | 451 | 0.02439 | # Wrappe | r for ApiGen All (PHP)
import sys
import platform
import os
import glob
from subprocess import call
def invokeApiGen(dir, out_dir):
    """Run ApiGen over the PHP sources in *dir*, writing docs to *out_dir*.

    Bug fix: the old loop appended '--destination <out_dir>' and invoked the
    subprocess once per PHP file, so the argument list grew on every
    iteration and ApiGen re-documented the whole source dir N times.
    Now the command is built once and executed once (only if PHP files exist,
    matching the old no-op behavior for an empty directory).
    """
    cmd = ['apigen', '--quiet', '--source', dir]
    if platform.system() == 'Windows':
        cmd[0] = 'apigen.cmd'  # Windows needs the .cmd shim
    cmd.extend(['--destination', out_dir])
    os.chdir(dir)
    php_files = glob.glob('*.php')
    for php in php_files:
        # Informational only: ApiGen itself scans the whole --source dir.
        print('ApiGen ~ Documenting PHP file: {0}'.format(php))
    if php_files:
        call(cmd)
if __name__ == '__main__':
    # Guarded entry point so importing this module no longer runs ApiGen.
    # Usage: apigen_all.py <source_dir> <output_dir>
    invokeApiGen(sys.argv[1], sys.argv[2])
|
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/clients/rospy/src/rospy/impl/udpros.py | Python | bsd-3-clause | 11,612 | 0.006631 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
from __future__ import print_function
"""
UDPROS connection protocol.
"""
## UDPROS connection protocol.
# http://ros.org/wiki/ROS/UDPROS
#
import socket
import threading

import rosgraph.network

import rospy.impl.registration
import rospy.impl.transport
def get_max_datagram_size():
    """Return the maximum UDPROS datagram payload size, in bytes."""
    # TODO: make this configurable instead of hard-coded.
    return 1024
class UDPROSHandler(rospy.transport.ProtocolHandler):
"""
rospy protocol handler for UDPROS. Stores the datagram server if necessary.
"""
def __init__(self, port=0):
    """
    ctor

    @param port: UDP port to bind the datagram server to (0 = ephemeral)
    @type  port: int
    """
    self.port = port
    self.buff_size = get_max_datagram_size()
    # Bug fix: init_server() and shutdown() test self.server, but it was
    # never initialized, causing AttributeError on first use.
    self.server = None
def init_server(self):
    """
    Initialize and start the server thread, if not already initialized.

    Binds a UDP datagram socket (IPv6 when configured) and spawns a daemon
    thread running self.run().
    """
    if self.server is not None:
        return
    if rosgraph.network.use_ipv6():
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((rosgraph.network.get_bind_address(), self.port))
    if self.port == 0:
        # Ephemeral port requested: record the port the OS actually assigned.
        self.port = s.getsockname()[1]
    self.server = s
    # Bug fix: `threading.start_new_thread` does not exist (that was the
    # Python 2 `thread` module API). Use a daemon Thread so the receive loop
    # cannot keep the process alive on shutdown.
    t = threading.Thread(target=self.run)
    t.daemon = True
    t.start()
def run(self):
    """Receive loop for the UDP server socket; runs in its own thread until
    rospy signals shutdown. Any socket error simply ends the loop."""
    # NOTE(review): local is never used below (recvfrom is called with
    # self.buff_size directly) -- confirm intent.
    buff_size = self.buff_size
    try:
        while not rospy.core.is_shutdown():
            # NOTE(review): recvfrom returns a (payload, address) tuple, so
            # `data` holds the pair here, not just the payload -- confirm.
            data = self.server.recvfrom(self.buff_size)
            print("received packet")
            #TODO
    except:
        # Deliberate best-effort: a closed socket during shutdown raises and
        # terminates the loop.
        #TODO: log
        pass
def shutdown(self):
    """Close the UDP server socket, if one was created."""
    # Bug fix: the socket is stored in self.server (set by init_server);
    # there is no self.sock attribute on this class.
    if self.server is not None:
        self.server.close()
def create_transport(self, topic_name, pub_uri, protocol_params):
    """
    Connect to topic resolved_name on Publisher pub_uri using UDPROS.
    @param resolved_name str: resolved topic name
    @type resolved_name: str
    @param pub_uri: XML-RPC URI of publisher
    @type pub_uri: str
    @param protocol_params: protocol parameters to use for connecting
    @type protocol_params: [XmlRpcLegal]
    @return: code, message, debug
    @rtype: (int, str, int)
    """
    # Validate protocol params = [UDPROS, address, port, headers]
    # NOTE(review): UDPROS is not defined in this module's visible imports --
    # presumably a module-level constant elsewhere; confirm.
    if type(protocol_params) != list or len(protocol_params) != 4:
        return 0, "ERROR: invalid UDPROS parameters", 0
    if protocol_params[0] != UDPROS:
        return 0, "INTERNAL ERROR: protocol id is not UDPROS: %s"%id, 0
    #TODO: get connection_id and buffer size from params
    id, dest_addr, dest_port, headers = protocol_params
    self.init_server()
    #TODO: parse/validate headers
    sub = rospy.registration.get_topic_manager().get_subscriber_impl(topic_name)
    # Create Transport
    # TODO: create just a single 'connection' instance to represent
    # all UDP connections. 'connection' can take care of unifying
    # publication if addresses are the same
    # NOTE(review): `protocol` and UDPTransport are undefined in this scope
    # (likely should be built from protocol_params) -- this line would raise
    # NameError if reached; confirm against the TCPROS counterpart.
    transport = UDPTransport(protocol, topic_name, sub.receive_callback)
    # Attach connection to _SubscriberImpl
    if sub.add_connection(transport): #pass udp connection to handler
        return 1, "Connected topic[%s]. Transport impl[%s]"%(topic_name, transport.__class__.__name__), dest_port
    else:
        transport.close()
        return 0, "ERROR: Race condition failure: duplicate topic subscriber [%s] was created"%(topic_name), 0
def supports(self, protocol):
    """
    @param protocol: name of protocol
    @type protocol: str
    @return: True if protocol is supported
    @rtype: bool
    """
    # Only the UDPROS transport is handled by this protocol handler.
    return protocol == UDPROS
def get_supported(self):
    """
    Get supported protocols

    @return: list with a single entry for UDPROS (list-of-lists format
        expected by the protocol negotiation API)
    """
    return [[UDPROS]]
def init_publisher(self, topic_name, protocol_params):
    """
    Initialize this node to start publishing to a new UDP location.

    @param resolved_name: topic name
    @type resolved__name: str
    @param protocol_params: requested protocol
    parameters. protocol[0] must be the string 'UDPROS'
    @type protocol_params: [str, value*]
    @return: (code, msg, [UDPROS, addr, port])
    @rtype: (int, str, list)
    """
    if protocol_params[0] != UDPROS:
        # Bug fix: the old message interpolated the undefined name
        # `protocol`, raising NameError on this error path.
        return 0, "Internal error: protocol does not match UDPROS: %s" % protocol_params[0], []
    #TODO
    # NOTE(review): this unpack expects 5 elements while create_transport
    # validates 4-element params -- confirm the negotiated tuple shape.
    _, header, host, port, max_datagram_size = protocol_params
    #TODO: connection_id, max_datagraph_size
    return 1, "ready", [UDPROS]
def topic_connection_handler(self, sock, client_addr, header):
"""
Process incoming topic connection. Reads in topic name from
handshake and creates the appropriate L{TCPROSPub} handler for the
connection.
@param sock: socket connection
@type sock: socket.socket
@param client_addr: client address
@type client_addr: (str, int)
@param header: key/value pairs from handshake header
@type header: dict
@return: error string or None
@rtype: str
"""
for required in ['topic', 'md5sum', 'callerid']:
if not required in header:
return "Missing required '%s' field"%required
else:
resolved_topic_name = header['topic']
md5sum = header['md5sum']
tm = rospy.registration.get_topic_manager()
topic = tm.get_publisher_impl(resolved_topic_name)
if not topic:
return "[%s] is not a publisher of [%s]. Topics are %s"%(rospy.names.get_caller_id(), resolved_topic_name, tm.get_publications())
elif md5sum != rospy.names.TOPIC_ANYTYPE and md5sum != topic.data_class._md5sum:
actual_type = topic.data_class._type
# check to see if subscriber sent 'type' header. If they did, check that
# types are same first as this provides a better debugging message
if 'type' in header:
requested_type = header['type']
if requested_type != actual_type:
return "topic types do not match: [%s] vs. [%s]"%(requested_type, actual_type)
else:
# defaults to actual type
requested_type = actual_type
return "Client [%s] wants topic [%s] to have datatype/md5sum [%s/%s], but our version has [%s/%s] Dropping connection."%(header['callerid'], resolved_topic_name, requested_type, md5sum, actual_type, topic.data_class. |
JasperGerth/Mopidy-GPIOcont | tests/test_extension.py | Python | apache-2.0 | 507 | 0.003945 | from __future__ import unicode_literals
from mopidy_gpiocont im | port Extension, frontend as frontend_lib
def test_get_default_config():
    """The default config must declare the [gpiocont] section and enable it."""
    extension = Extension()
    default_config = extension.get_default_config()
    assert '[gpiocont]' in default_config
    assert 'enabled = true' in default_config
def test_get_config_schema():
    """Smoke test: obtaining the config schema must not raise."""
    extension = Extension()
    schema = extension.get_config_schema()
    # TODO Test the content of your config schema
    #assert 'username' in schema
    #assert 'password' in schema
# TODO Write more tests
|
jawilson/home-assistant | tests/components/balboa/test_config_flow.py | Python | apache-2.0 | 5,737 | 0.001046 | """Test the Balboa Spa Client config flow."""
from unittest.mock import patch
from home | assistant import config_entries, data_entry_flow
from homeassistant.components.balboa.const import CONF_SYNC_T | IME, DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import BalboaMock
from tests.common import MockConfigEntry
# Minimal user input for the config flow: only the spa host is required.
TEST_DATA = {
    CONF_HOST: "1.1.1.1",
}
# Unique id used to detect duplicate config entries.
TEST_ID = "FakeBalboa"
async def test_form(hass: HomeAssistant) -> None:
    """Test we get the form."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Patch the full pybalboa surface exercised by a successful connect so no
    # real spa is contacted; BalboaMock fakes each coroutine.
    with patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
        new=BalboaMock.connect,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
        new=BalboaMock.disconnect,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.listen",
        new=BalboaMock.listen,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.send_mod_ident_req",
        new=BalboaMock.send_mod_ident_req,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.send_panel_req",
        new=BalboaMock.send_panel_req,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.spa_configured",
        new=BalboaMock.spa_configured,
    ), patch(
        "homeassistant.components.balboa.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            TEST_DATA,
        )
        await hass.async_block_till_done()

    # Submitting the host must create an entry and trigger setup exactly once.
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["data"] == TEST_DATA
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # broken_connect simulates a spa that refuses the connection.
    with patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
        new=BalboaMock.broken_connect,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
        new=BalboaMock.disconnect,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            TEST_DATA,
        )

    # The form is redisplayed with the cannot_connect error.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_unknown_error(hass: HomeAssistant) -> None:
    """Test we handle unknown error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Any unexpected exception during connect must map to the generic
    # "unknown" form error instead of crashing the flow.
    with patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            TEST_DATA,
        )

    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}
async def test_already_configured(hass: HomeAssistant) -> None:
    """Test when provided credentials are already configured."""
    # Pre-register an entry with the same unique id the flow will derive.
    MockConfigEntry(domain=DOMAIN, data=TEST_DATA, unique_id=TEST_ID).add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == SOURCE_USER

    with patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
        new=BalboaMock.connect,
    ), patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
        new=BalboaMock.disconnect,
    ), patch(
        "homeassistant.components.balboa.async_setup_entry",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            TEST_DATA,
        )
        await hass.async_block_till_done()

    # Duplicate entries must abort rather than create a second config entry.
    assert result2["type"] == RESULT_TYPE_ABORT
    assert result2["reason"] == "already_configured"
async def test_options_flow(hass):
    """Test specifying non default settings using options flow."""
    config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_DATA, unique_id=TEST_ID)
    config_entry.add_to_hass(hass)

    # Rather than mocking out 15 or so functions, we just need to mock
    # the entire library, otherwise it will get stuck in a listener and
    # the various loops in pybalboa.
    with patch(
        "homeassistant.components.balboa.config_flow.BalboaSpaWifi",
        new=BalboaMock,
    ), patch(
        "homeassistant.components.balboa.BalboaSpaWifi",
        new=BalboaMock,
    ):
        # Entry must be fully set up before its options flow can start.
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

        result = await hass.config_entries.options.async_init(config_entry.entry_id)

        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"

        result = await hass.config_entries.options.async_configure(
            result["flow_id"],
            user_input={CONF_SYNC_TIME: True},
        )

        # Submitting the form persists the option on the entry.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {CONF_SYNC_TIME: True}
|
cryptapus/electrum | electrum/gui/qt/qrcodewidget.py | Python | mit | 3,675 | 0.003265 | import os
import qrcode
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import (
QApplication, QVBoxLayout, QTextEdit, QHBoxLayout, QPushButton, QWidget)
import electrum
from electrum.i18n import _
from .util import WindowModalDialog
class QRCodeWidget(QWidget):
    """Widget that renders its ``data`` string as a QR code.

    data: payload to encode (None/empty paints nothing).
    fixedSize: if truthy, a pixel size the widget is locked to; otherwise the
        minimum size follows the QR matrix (5 px per module).
    """

    def __init__(self, data=None, fixedSize=False):
        QWidget.__init__(self)
        self.data = None
        self.qr = None
        self.fixedSize = fixedSize
        if fixedSize:
            self.setFixedSize(fixedSize, fixedSize)
        self.setData(data)

    def setData(self, data):
        """Re-encode and schedule a repaint only when the data changed."""
        if self.data != data:
            self.data = data
            if self.data:
                self.qr = qrcode.QRCode()
                self.qr.add_data(self.data)
                if not self.fixedSize:
                    # Reserve 5 px per QR module so the code stays scannable.
                    k = len(self.qr.get_matrix())
                    self.setMinimumSize(k * 5, k * 5)
            else:
                self.qr = None
            self.update()

    def paintEvent(self, e):
        if not self.data:
            return
        black = QColor(0, 0, 0, 255)
        white = QColor(255, 255, 255, 255)
        if not self.qr:
            # Data present but no QR object: blank the viewport.
            qp = QtGui.QPainter()
            qp.begin(self)
            qp.setBrush(white)
            qp.setPen(white)
            r = qp.viewport()
            qp.drawRect(0, 0, r.width(), r.height())
            qp.end()
            return
        matrix = self.qr.get_matrix()
        k = len(matrix)
        qp = QtGui.QPainter()
        qp.begin(self)
        r = qp.viewport()
        margin = 10
        framesize = min(r.width(), r.height())
        boxsize = int((framesize - 2 * margin) / k)
        size = k * boxsize
        # Bug fix: use integer division -- "/" yields floats under Python 3
        # and QPainter.drawRect() requires int coordinates.
        left = (r.width() - size) // 2
        top = (r.height() - size) // 2
        # Make a white margin around the QR in case of dark theme use
        qp.setBrush(white)
        qp.setPen(white)
        qp.drawRect(left - margin, top - margin, size + (margin * 2), size + (margin * 2))
        qp.setBrush(black)
        qp.setPen(black)
        # Renamed loop variables: the old code reused `r`, shadowing the
        # viewport rect bound above.
        for row in range(k):
            for col in range(k):
                if matrix[row][col]:
                    qp.drawRect(left + col * boxsize, top + row * boxsize, boxsize - 1, boxsize - 1)
        qp.end()
class QRDialog(WindowModalDialog):
    """Modal dialog showing a QR code with Copy/Save/Close actions."""

    def __init__(self, data, parent=None, title="", show_text=False):
        WindowModalDialog.__init__(self, parent, title)
        vbox = QVBoxLayout()
        qrw = QRCodeWidget(data)
        # Screen handle used below to rasterize the widget for copy/save.
        qscreen = QApplication.primaryScreen()
        vbox.addWidget(qrw, 1)
        if show_text:
            # Also show the raw payload (read-only) under the QR code.
            text = QTextEdit()
            text.setText(data)
            text.setReadOnly(True)
            vbox.addWidget(text)
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        config = electrum.get_config()
        if config:
            # Copy/Save need a writable location; only offered when a config
            # (and therefore a data directory) is available.
            filename = os.path.join(config.path, "qrcode.png")
            def print_qr():
                # Grab the rendered widget as a pixmap and write it as PNG.
                p = qscreen.grabWindow(qrw.winId())
                p.save(filename, 'png')
                self.show_message(_("QR code saved to file") + " " + filename)
            def copy_to_clipboard():
                p = qscreen.grabWindow(qrw.winId())
                QApplication.clipboard().setPixmap(p)
                self.show_message(_("QR code copied to clipboard"))
            b = QPushButton(_("Copy"))
            hbox.addWidget(b)
            b.clicked.connect(copy_to_clipboard)
            b = QPushButton(_("Save"))
            hbox.addWidget(b)
            b.clicked.connect(print_qr)
        b = QPushButton(_("Close"))
        hbox.addWidget(b)
        b.clicked.connect(self.accept)
        b.setDefault(True)
        vbox.addLayout(hbox)
        self.setLayout(vbox)
renskiy/marnadi | marnadi/http/headers.py | Python | mit | 3,678 | 0 | import collections
import itertools

try:  # Python 3.3+: the abstract base classes live in collections.abc
    import collections.abc as collections_abc
except ImportError:  # Python 2: they live on the collections module itself
    import collections as collections_abc

from marnadi.utils import cached_property, CachedDescriptor
class Header(collections_abc.Mapping):
    """HTTP header value with optional parameters.

    Compares (and hashes) by its primary value only, so
    ``Header('gzip') == 'gzip'``; parameters are exposed via the Mapping
    protocol. ``str()`` renders ``value; key=value; ...``.

    Bug fix: the base class now comes from ``collections_abc`` because
    ``collections.Mapping`` was removed in Python 3.10.
    """

    __slots__ = 'value', 'params'

    def __init__(self, *value, **params):
        assert len(value) == 1  # exactly one positional value expected
        self.value = value[0]
        self.params = params

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        # Equality deliberately ignores params.
        return self.value == other

    def __ne__(self, other):
        return self.value != other

    def __str__(self):
        return self.stringify()

    def __bytes__(self):
        value = self.stringify()
        if isinstance(value, bytes):  # python 2.x
            return value
        return value.encode(encoding='latin1')

    def __getitem__(self, item):
        return self.params[item]

    def __iter__(self):
        return iter(self.params)

    def __len__(self):
        return len(self.params)

    def __bool__(self):
        # A header is always truthy, even with a falsy value and no params.
        return True

    def __nonzero__(self):  # python 2.x truthiness hook
        return self.__bool__()

    def stringify(self):
        """Render as ``value`` or ``value; param=value; ...``."""
        if not self.params:
            return str(self.value)
        return '{value}; {params}'.format(
            value=self.value,
            params='; '.join(
                '%s=%s' % (attr_name, attr_value)
                for attr_name, attr_value in self.params.items()
            ),
        )
class HeadersMixin(collections_abc.Mapping):
    """Read-only mapping view over a ``{Title-Case-name: [values]}`` store.

    Bug fix: the base class now comes from ``collections_abc`` because
    ``collections.Mapping`` was removed in Python 3.10.
    """

    # Python 2's ABCs lack __slots__; only declare ours when the base has it.
    if hasattr(collections_abc.Mapping, '__slots__'):
        __slots__ = '__weakref__',

    def __getitem__(self, header):
        # Header names are normalized to Title-Case on every access.
        return self._headers[header.title()]

    def __len__(self):
        return len(self._headers)

    def __iter__(self):
        return iter(self._headers)

    # Mapping would derive equality from items; identity semantics are
    # intended here, so restore object's implementations.
    __hash__ = object.__hash__
    __eq__ = object.__eq__
    __ne__ = object.__ne__

    @cached_property
    def _headers(self):
        raise ValueError("This property must be set before using")

    def items(self, stringify=False):
        """Yield (header, value) pairs, one pair per stored value."""
        for header, values in self._headers.items():
            for value in values:
                yield header, str(value) if stringify else value

    def values(self, stringify=False):
        """Yield every stored value across all headers."""
        for values in self._headers.values():
            for value in values:
                yield str(value) if stringify else value
class ResponseHeaders(HeadersMixin, collections_abc.MutableMapping):
    """Mutable header store for a response.

    Bug fix: uses ``collections_abc.MutableMapping`` because
    ``collections.MutableMapping`` was removed in Python 3.10.
    """

    __slots__ = ()

    def __init__(self, default_headers):
        self._headers = default_headers

    def __delitem__(self, header):
        del self._headers[header.title()]

    def __setitem__(self, header, value):
        # Assignment replaces every existing value for the header.
        self._headers[header.title()] = [value]

    def append(self, header_item):
        """Add one (header, value) pair, keeping existing values."""
        header, value = header_item
        self._headers[header.title()].append(value)

    def extend(self, headers):
        for header in headers:
            self.append(header)

    def setdefault(self, header, default=None):
        return self._headers.setdefault(header.title(), [default])

    def clear(self, *headers):
        """Remove the named headers (missing ones ignored), or all if none given."""
        if headers:
            for header in headers:
                try:
                    del self[header]
                except KeyError:
                    pass
        else:
            self._headers.clear()
class Headers(CachedDescriptor, HeadersMixin):
    """Descriptor holding default headers; get_value() hands each instance a
    fresh ResponseHeaders backed by a copy of the defaults."""

    __slots__ = ()

    def __init__(self, *default_headers, **kw_default_headers):
        super(Headers, self).__init__()
        # Values are stored per Title-Cased header name, multiple per header.
        self._headers = collections.defaultdict(list)
        for header, value in itertools.chain(
            default_headers,
            kw_default_headers.items(),
        ):
            self._headers[header.title()].append(value)

    def get_value(self, instance):
        # A shallow copy keeps the shared defaults immutable from the
        # instance's point of view (value lists are still shared, though).
        return ResponseHeaders(default_headers=self._headers.copy())
|
google-research/google-research | social_rl/gym_multigrid/manual_control_multiagent.py | Python | apache-2.0 | 3,290 | 0.010334 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Script used for debugging the environment via command line.
The env is rendered as a string so it can be used over ssh.
"""
import argparse
import gym
import matplotlib.pyplot as plt
import numpy as np
# Import needed to trigger env registration, so pylint: disable=unused-import
from social_rl import gym_multigrid
def parse_args():
    """Parse command-line flags for the manual-control script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--env_name',
        type=str,
        default='MultiGrid-DoorKey-8x8-v0',
        help='Name of multi-agent environment.')
    return parser.parse_args()
def get_user_input(env):
  """Validates user keyboard input to obtain valid actions for all agents.

  Args:
    env: Instance of MultiGrid environment

  Returns:
    An array of integer actions, or False when the user quits ('q'), or -1
    when the user requests a reset ('r').
  """
  max_action = max(env.Actions).value
  min_action = min(env.Actions).value

  # Print action commands for user convenience.
  print('Actions are:')
  for act in env.Actions:
    print('\t', str(act.value) + ':', act.name)

  prompt = 'Enter actions for ' + str(env.n_agents) + \
      ' agents separated by commas, or r to reset, or q to quit: '

  # Check user input
  while True:
    user_cmd = input(prompt)
    if user_cmd == 'q':
      # NOTE(review): False (quit) is indistinguishable from an empty list
      # for callers using `if not actions` -- fine today because a validated
      # action list is never empty.
      return False
    # reset
    if user_cmd == 'r':
      return -1

    actions = user_cmd.split(',')
    if len(actions) != env.n_agents:
      print('Uh oh, you entered commands for', len(actions),
            'agents but there are', str(env.n_agents) + '. Try again?')
      continue

    valid = True
    for i, a in enumerate(actions):
      if not a.isdigit() or int(a) > max_action or int(a) < min_action:
        print('Uh oh, action', i, 'is invalid.')
        valid = False
    if valid:
      break
    else:
      print('All actions must be an integer between', min_action, 'and',
            max_action)

  return [int(a) for a in actions if a]
def main(args):
  """Interactive loop: render the env, read actions, step, repeat."""
  # This code will only work with MultiGrid environments
  assert 'MultiGrid' in args.env_name
  env = gym.make(args.env_name)
  env.reset()
  reward_hist = []

  # Environment interaction loop
  while True:
    # plt.imshow(env.render('rgb_array'))
    print(env)
    actions = get_user_input(env)
    if not actions:
      # User chose to quit (get_user_input returned False).
      return
    # Reset
    if actions == -1:
      env.reset()
      reward_hist = []
      continue
    _, rewards, done, _ = env.step(actions)
    reward_hist.append(rewards)
    plt.imshow(env.render('rgb_array'))
    print('Rewards:', rewards)
    print('Collective reward history:', reward_hist)
    print('Cumulative collective reward:', np.sum(reward_hist))
    if done:
      print('Game over')
      break
# Script entry point: parse flags and run the interactive loop.
if __name__ == '__main__':
  main(parse_args())
|
DES-SL/EasyLens | docs/conf.py | Python | mit | 8,340 | 0.007554 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Bug fix: derive the package root from this conf.py's location instead of
# os.getcwd(), so the docs also build when sphinx is invoked from elsewhere.
docs_dir = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(docs_dir)
sys.path.insert(0, parent)

import lensDES
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc pulls docstrings from the package; viewcode links rendered docs to
# highlighted source; mathjax renders LaTeX math in HTML output.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE(review): project name 'DESlens' differs from the imported module name
# 'lensDES' -- confirm which spelling is intended.
project = u'DESlens'
copyright = u'2015, ETH Zurich, Institute for Astronomy'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = lensDES.__version__
# The full version, including alpha/beta/rc tags.
release = lensDES.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" wi | ll overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sideba | r templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lensDESdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'lensDES.tex', u'DESlens Documentation',
u'Simon Birrer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'lensDES', u'DESlens Documentation',
[u'Simon Birrer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'lensDES', u'DESlens Documentation',
u'Simon Birrer', 'lensDES', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
try:
import sp |
gc3-uzh-ch/easybuild-framework | easybuild/tools/build_details.py | Python | gpl-2.0 | 2,178 | 0.001377 | # Copyright 2014-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
All required to provide details of build environment
and allow for reproducable builds
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import time
from easybuild.tools.filetools import det_size
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.systemtools import get_system_info
from easybuild.tools.version import EASYBLOCKS_VERSION, FRAMEWORK_VERSION
def get_build_stats(app, start_time, command_line):
"""
Return build statistics for this build
"""
time_now = time.time()
build_time = round(time_now - start_time, 2)
buildstats = OrderedDict([
('easybuild-framework_version', str(FRAMEWORK_VERSION)),
('easybuild-easyblocks_version', str(EASYBLOCKS_VERSION)),
(' | timestamp', int(time_now)),
('build_time', build_time),
('install_size', det_size(app.installdir)),
('command_line', command_line),
('modules_tool', app.modules_tool.buildstats()),
| ])
for key, val in sorted(get_system_info().items()):
buildstats.update({key: val})
return buildstats
|
greasypizza/grpc | src/python/grpcio/grpc_core_dependencies.py | Python | bsd-3-clause | 28,918 | 0.000069 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/lib/profiling/basic_timers.c',
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
'src/core/lib/support/cpu_iphone.c',
'src/core/lib/support/cpu_linux.c',
'src/core/lib/support/cpu_posix.c',
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
'src/core/lib/support/env_windows.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
'src/core/lib/channel/channel_stack.c',
'src/core/lib/channel/channel_stack_builder.c',
'src/core/lib/channel/compress_filter.c',
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/deadline_filter.c',
'src/core/lib/channel/handshaker.c',
'src/core/lib/channel/handshaker_factory.c',
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/channel/http_client_filter.c',
'src/core/lib/channel/http_server_filter.c',
'src/core/lib/channel/message_size_filter.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
'src/core/lib/iomgr/executor.c',
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
'src/core/lib/iomgr/socket_utils_uv.c',
'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/i | omgr/tcp_client_posix.c',
'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_ | server_posix.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
'src/core/lib/surface/byte_buffer.c',
'src/core/lib/surface/byte_buffer_reader.c',
'src/core/lib/surface/call.c',
'src/core/lib/surface/call_details.c',
'src/core/lib/surface/call_log_batch.c',
'src/core/lib/surface/channel.c',
'src/core/lib/surface/channel_init.c',
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.c',
'src/core/lib/surface/metadata_array.c',
'src/core/lib/surface/server.c',
'src/core/lib/surface/validate_metadata.c',
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/service_config.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/ext/transport/chttp2/server/secure/server_secu |
uva-its/awstools | cli/ec2_cli.py | Python | mit | 912 | 0.004386 | import argparse
def processCommand(mgr, args):
parser = argparse.ArgumentParser(usage='''maws ec2 <subcommand> [<args>]
maws ec2 help''')
parser.add_argu | ment('subcommand', help='ec2 subcommand',
choices=[ 'help', 'create' ])
args = parser.parse_args(args)
if args.subcommand == "help":
print("""
The 'ec2' subcommand performs high-level operations on ec2 instances. Each
command will update DNS entries and SimpleDB items | as needed.
Sub-commands:
create <name> Run a new instance for the first time
launch <name> Run an instance that has been previously created and terminated
rebuild <name> Terminate and re-launch and instance
start <name> Restart a stopped instance
stop <name> Stop a running instance
reconcile Update SimpleDB tables from the running environment, useful
for bootstrapping or updating after manual operations""")
|
MaxVanDeursen/tribler | Tribler/Test/Core/Modules/RestApi/test_downloads_endpoint.py | Python | lgpl-3.0 | 14,983 | 0.005273 | import json
import os
from binascii import hexlify
from urllib import pathname2url
from Tribler.Core.DownloadConfig import DownloadStartupConfig
from Tribler.Core.Utilities.network_utils import get_random_port
from Tribler.Test.Core.Modules.RestApi.base_api_test import AbstractApiTest
from Tribler.Test.common import UBUNTU_1504_INFOHASH, TESTS_DATA_DIR
from Tribler.Test.twisted_thread import deferred
class TestDownloadsEndpoint(AbstractApiTest):
def setUpPreSession(self):
    """Extend the base pre-session setup: enable libtorrent and the megacache in the test config."""
    super(TestDownloadsEndpoint, self).setUpPreSession()
    self.config.set_libtorrent(True)
    self.config.set_megacache(True)
@deferred(timeout=10)
def test_get_downloads_no_downloads(self):
"""
Testing whether the API returns an empty list when downloads are fetched but no downloads are active
"""
return self.do_request('downloads?get_peers=1&get_pieces=1', expected_code=200, expected_json={"downloads": []})
@deferred(timeout=20)
def test_get_downloads(self):
    """
    Testing whether the API returns the right download when a download is added
    """
    def check_response(response_body):
        # Both downloads started below should be listed.
        parsed = json.loads(response_body)
        self.assertEqual(len(parsed['downloads']), 2)

    # Start one download from a locally created torrent definition...
    video_tdef, _ = self.create_local_torrent(os.path.join(TESTS_DATA_DIR, 'video.avi'))
    self.session.start_download_from_tdef(video_tdef, DownloadStartupConfig())
    # ...and a second one from a file: URI.
    torrent_uri = "file:" + pathname2url(os.path.join(TESTS_DATA_DIR, "bak_single.torrent"))
    self.session.start_download_from_uri(torrent_uri)
    self.should_check_equality = False
    request = self.do_request('downloads?get_peers=1&get_pieces=1', expected_code=200)
    return request.addCallback(check_response)
@deferred(timeout=10)
def test_start_download_no_uri(self):
"""
Testing whether an error is returned when we start a torrent download and do not pass any URI
"""
self.should_check_equality = False
return self.do_request('downloads', expected_code=400, request_type='PUT')
@deferred(timeout=10)
def test_start_download_bad_params(self):
"""
Testing whether an error is returned when we start a torrent download and pass wrong data
"""
self.should_check_equality = False
post_data = {'anon_hops': 1, 'safe_seeding': 0, 'uri': 'abcd'}
return self.do_request('downloads', expected_code=400, request_type='PUT', post_data=post_data)
@deferred(timeout=10)
def test_start_download_bad_uri(self):
"""
Testing whether an error is returned when we start a download from a bad URI
"""
post_data = {'uri': 'abcd', 'destination': 'a/b/c', 'selected_files[]': '1'}
return self.do_request('downloads', expected_code=500, request_type='PUT', post_data=post_data,
expected_json={'error': 'invalid uri'})
@deferred(timeout=10)
def test_start_download_from_file(self):
"""
Testing whether we can start a download from a file
"""
def verify_download(_):
self.assertGreaterEqual(len(self.session.get_downloads()), 1)
post_data = {'uri': 'file:%s' % os.path.join(TESTS_DATA_DIR, 'video.avi.torrent')}
expected_json = {'started': True, 'infohash': '42bb0a78d8a10bef4a5aee3a7d9f1edf9941cee4'}
return self.do_request('downloads', expected_code=200, request_type='PUT', post_data=post_data,
expected_json=expected_json).addCallback(verify_download)
@deferred(timeout=10)
def test_start_download_from_magnet(self):
"""
Testing whether we can start a download from a magnet
"""
def verify_download(_):
self.assertGreaterEqual(len(self.session.get_downloads()), 1)
self.assert | Equal(self.session.get_downloads()[0].get_def().get_name(), 'Unknown name')
post_data = {'uri': 'magnet:?xt=urn | :btih:%s' % (hexlify(UBUNTU_1504_INFOHASH))}
expected_json = {'started': True, 'infohash': 'fc8a15a2faf2734dbb1dc5f7afdc5c9beaeb1f59'}
return self.do_request('downloads', expected_code=200, request_type='PUT', post_data=post_data,
expected_json=expected_json).addCallback(verify_download)
@deferred(timeout=10)
def test_start_download_from_bad_url(self):
"""
Testing whether starting a download from a unexisting URL gives an error
"""
post_data = {'uri': 'http://localhost:%d/test.torrent' % get_random_port()}
self.should_check_equality = False
return self.do_request('downloads', expected_code=500, request_type='PUT', post_data=post_data)
@deferred(timeout=10)
def test_remove_download_no_remove_data_param(self):
"""
Testing whether the API returns error 400 if the remove_data parameter is not passed
"""
self.should_check_equality = False
return self.do_request('downloads/abcd', expected_code=400, request_type='DELETE')
@deferred(timeout=10)
def test_remove_download_wrong_infohash(self):
"""
Testing whether the API returns error 404 if a non-existent download is removed
"""
self.should_check_equality = False
return self.do_request('downloads/abcd', post_data={"remove_data": True},
expected_code=404, request_type='DELETE')
@deferred(timeout=10)
def test_remove_download(self):
    """
    Testing whether the API returns 200 if a download is being removed
    """
    def assert_no_downloads_left(_):
        self.assertEqual(len(self.session.get_downloads()), 0)

    # Add a download so there is something to remove.
    video_tdef, _ = self.create_local_torrent(os.path.join(TESTS_DATA_DIR, 'video.avi'))
    self.session.start_download_from_tdef(video_tdef, DownloadStartupConfig())
    infohash = video_tdef.get_infohash().encode('hex')
    deferred_request = self.do_request('downloads/%s' % infohash,
                                       post_data={"remove_data": True},
                                       expected_code=200,
                                       expected_json={"removed": True},
                                       request_type='DELETE')
    return deferred_request.addCallback(assert_no_downloads_left)
@deferred(timeout=10)
def test_stop_download_wrong_infohash(self):
"""
Testing whether the API returns error 404 if a non-existent download is stopped
"""
self.should_check_equality = False
return self.do_request('downloads/abcd', expected_code=404, post_data={"state": "stop"}, request_type='PATCH')
@deferred(timeout=10)
def test_stop_download(self):
"""
Testing whether the API returns 200 if a download is being stopped
"""
video_tdef, _ = self.create_local_torrent(os.path.join(TESTS_DATA_DIR, 'video.avi'))
download = self.session.start_download_from_tdef(video_tdef, DownloadStartupConfig())
infohash = video_tdef.get_infohash().encode('hex')
original_stop = download.stop
def mocked_stop():
download.should_stop = True
download.stop = original_stop
def verify_removed(_):
self.assertEqual(len(self.session.get_downloads()), 1)
download = self.session.get_downloads()[0]
self.assertTrue(download.should_stop)
download.stop = mocked_stop
request_deferred = self.do_request('downloads/%s' % infohash, post_data={"state": "stop"},
expected_code=200, expected_json={"modified": True}, request_type='PATCH')
return request_deferred.addCallback(verify_removed)
@deferred(timeout=10)
def test_select_download_file_range(self):
"""
Testing whether an error is returned when we toggle a file for inclusion out of range
"""
video_tdef, _ = self.create_local_torrent(os.path.join(TESTS_DATA_DIR, 'video.avi'))
self.session.start_download_from_tdef(video_tdef, DownloadStartupConfig())
infohash = video_tdef.get_infohash().encode('hex')
self.should_check_equality = False
return self.do_request('downloads/%s' % infohash, |
portnov/assethub | assethub/assets/email.py | Python | bsd-3-clause | 1,412 | 0.001416 | from django.core.mail import send_mail
from notifications.signals import notify
from django_comments.models import Comment
events_registry = []
class Event(object):
model = None
parent = None
verb = None
@staticmethod
def get_email_subject(instance, parent, actor, recipient):
raise NotImplementedError
@staticmethod
def get_email_body_template(instance, parent, actor, recipient):
"""Should return template name to render email body."""
raise NotImplementedError
@staticmethod
def get_template_data(instance, parent, actor, recipient):
| """Should return a tuple:
(template name, context)
"""
raise NotImplementedError
@staticmethod
def is_user_subscribed(recipient):
raise NotImplementedError |
@staticmethod
def register(event):
global events_registry
events_registry.append(event)
@staticmethod
def get(model, parent, verb):
global events_registry
for event in events_registry:
model_ok = event.model is None or model == event.model
parent_ok = event.parent is None or parent == event.parent
verb_ok = event.verb is None or verb == event.verb
if model_ok and parent_ok and verb_ok:
return event
return None
class CommentPosted(Event):
model = Comment
parent = Asset
|
mkieszek/jobsplus | jobsplus_recruitment/jp_project.py | Python | agpl-3.0 | 1,475 | 0.019661 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 12:06:12 2013
@author: mbereda
"""
from openerp.osv import fields, osv
from openerp.tools.translate import _
import pdb
class jp_project(osv.Model):
_name = 'jp.project'
_description = 'Project'
_columns = {
'project_from' : fields.date('From'),
'project_to': fields.date('To'),
'deal_id': fields.many2one('jp.deal', 'Deal'),
'file_data': fields.binary('File', required=True),
'file_name': fields.char('File name', size=64),
}
def create(self, cr, uid, vals, context=None):
project_id = super(jp_project, self).create(cr, uid, vals, context=context)
project = self.browse(cr, uid, project_id)
jp_config_obj = self.pool.get('jp.config.settings')
jp_config_id = jp_config_obj.search(cr, uid, [])[-1]
jp_crm = jp_config_obj.browse(cr, uid, jp_config_id).jobsplus_crm
url = ("http://%s/?db=%s#id=%s&view_type=form&model=jp.deal")%(jp_crm, cr.dbname, project.deal_id.id)
subject = _("Utw | orzono nowy projekt")
body = _("Utworzono nowy projekt dla deal'a: %s<br/><a href='%s'>Link do deal'a</a>")%(project.deal_id.title, url)
self.pool.get('jp.deal').message_post(cr, uid, project.deal_id.id, body=body, subject=subject, type='email', subtype='mail.mt_comment',
parent_id=False, attachments=None, context=context, content_subtype='html') |
return project_id |
peshay/tpm | tpm.py | Python | mit | 32,755 | 0.000916 | #! /usr/bin/env python
"""Team Password Manager API
To simplify usage of Team Password Manager API.
You can authenticate with username and password
>>> import tpm
>>> URL = "https://mypasswordmanager.example.com"
>>> USER = 'MyUser'
>>> PASS = 'Secret'
>>> tpmconn = tpm.TpmApiv5(URL, username=USER, password=PASS)
Or with Private/Public Key
>>> pubkey = '3726d93f2a0e5f0fe2cc3a6e9e3ade964b43b07f897d579466c28b7f8ff51cd0'
>>> privkey = '87324bedead51af96a45271d217b8ad5ef3f220da6c078a9bce4e4318729189c'
>>> tpmconn = tpm.TpmApiv5(URL, private_key=privkey, public_key=pubkey)
With the connection object you can use all TPM functions, like list all passwords:
>>> tpmconn.list_passwords()
All API functions from Team Password Manager are included.
see http://teampasswordmanager.com/docs/api/
:copyright: (c) 2021 by Andreas Hubert.
:license: The MIT License (MIT), see LICENSE for more details.
"""
__version__ = '4.1'
import hmac
import hashlib
import time
import requests
import re
import json
import logging
import base64
import os.path
from urllib.parse import quote_plus
# set logger
log = logging.getLogger(__name__)
# disable unsecure SSL warning
requests.packages.urllib3.disable_warnings()
class TPMException(Exception):
    """Base exception for errors raised by this module."""
    pass
class TpmApi(object):
"""Settings needed for the connec | tion to Team Password Manager."""
class ConfigError(Exception):
"""To throw Exception based on wrong Settings."""
def __init__(self | , value):
self.value = value
log.critical(value)
def __str__(self):
return repr(self.value)
def __init__(self, api, base_url, kwargs):
    """Validate connection settings and prepare the API endpoint.

    :param api: API version string (e.g. ``'v5'``); used to build the
        ``api/<version>/`` path fragment.
    :param base_url: base URL of the Team Password Manager installation;
        must be a well-formed http(s) URL.
    :param kwargs: credential options -- either ``private_key`` and
        ``public_key``, or ``username`` and ``password``; optionally an
        ``unlock_reason`` string.
    :raises ConfigError: if ``base_url`` is malformed or no complete
        credential pair is supplied.
    """
    # Regex used to sanity-check that base_url looks like a real http(s)
    # URL: optional user:pass@ part, IPv4 address or hostname, optional
    # port, optional path/query/fragment.
    REGEXurl = "^" \
               "(?:(?:https?)://)" \
               "(?:\\S+(?::\\S*)?@)?" \
               "(?:" \
               "(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])" \
               "(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}" \
               "(?:\\.(?:[1-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))" \
               "|" \
               "(?:(?:[a-z\\u00a1-\\uffff0-9]-*)*[a-z\\u00a1-\\uffff0-9]+)" \
               "(?:\\.(?:[a-z\\u00a1-\\uffff0-9]-*)*[a-z\\u00a1-\\uffff0-9]+)*" \
               "(?:\\.(?:[a-z\\u00a1-\\uffff]{2,}))?" \
               ".?" \
               ")" \
               "(?::\\d{2,5})?" \
               "(?:[/?#]\\S*)?" \
               "$"
    # Path fragment selecting the API version, e.g. 'api/v5/'.
    self.apiurl = 'api/' + api + '/'
    log.debug('Set as apiurl: {}'.format(self.apiurl))
    self.api = self.apiurl
    # Validate the base URL and derive the full endpoint URL from it.
    if re.match(REGEXurl, base_url):
        self.base_url = base_url + '/index.php/'
        log.debug('Set Base URL to {}'.format(self.base_url))
        self.url = self.base_url + self.apiurl
        log.debug('Set URL to {}'.format(self.url))
    else:
        raise self.ConfigError('Invalid URL: {}'.format(base_url))
    # Default headers sent with every API request.
    self.headers = {'Content-Type': 'application/json; charset=utf-8',
                    'User-Agent': 'tpm.py/' + __version__
                    }
    log.debug('Set header to {}'.format(self.headers))
    # Collect credentials from kwargs: either key-pair or user/password.
    self.private_key = False
    self.public_key = False
    self.username = False
    self.password = False
    self.unlock_reason = False
    for key in kwargs:
        if key == 'private_key':
            self.private_key = kwargs[key]
        elif key == 'public_key':
            self.public_key = kwargs[key]
        elif key == 'username':
            self.username = kwargs[key]
        elif key == 'password':
            self.password = kwargs[key]
        elif key == 'unlock_reason':
            self.unlock_reason = kwargs[key]
    # Exactly one authentication scheme must be fully specified.
    if self.private_key is not False and self.public_key is not False and\
            self.username is False and self.password is False:
        log.debug('Using Private/Public Key authentication.')
    elif self.username is not False and self.password is not False and\
            self.private_key is False and self.public_key is False:
        log.debug('Using Basic authentication.')
    else:
        raise self.ConfigError('No authentication specified'
                               ' (user/password or private/public key)')
def request(self, path, action, data=''):
"""To make a request to the API."""
# Check if the path includes URL or not.
head = self.base_url
if path.startswith(head):
path = path[len(head):]
path = quote_plus(path, safe='/')
if not path.startswith(self.api):
path = self.api + path
log.debug('Using path {}'.format(path))
# If we have data, convert to JSON
if data:
data = json.dumps(data)
log.debug('Data to sent: {}'.format(data))
# In case of key authentication
if self.private_key and self.public_key:
timestamp = str(int(time.time()))
log.debug('Using timestamp: {}'.format(timestamp))
unhashed = path + timestamp + str(data)
log.debug('Using message: {}'.format(unhashed))
self.hash = hmac.new(str.encode(self.private_key),
msg=unhashed.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest()
log.debug('Authenticating with hash: {}'.format(self.hash))
self.headers['X-Public-Key'] = self.public_key
self.headers['X-Request-Hash'] = self.hash
self.headers['X-Request-Timestamp'] = timestamp
auth = False
# In case of user credentials authentication
elif self.username and self.password:
auth = requests.auth.HTTPBasicAuth(self.username, self.password)
# Set unlock reason
if self.unlock_reason:
self.headers['X-Unlock-Reason'] = self.unlock_reason
log.info('Unlock Reason: {}'.format(self.unlock_reason))
url = head + path
# Try API request and handle Exceptions
try:
if action == 'get':
log.debug('GET request {}'.format(url))
self.req = requests.get(url, headers=self.headers, auth=auth,
verify=False)
elif action == 'post':
log.debug('POST request {}'.format(url))
self.req = requests.post(url, headers=self.headers, auth=auth,
verify=False, data=data)
elif action == 'put':
log.debug('PUT request {}'.format(url))
self.req = requests.put(url, headers=self.headers,
auth=auth, verify=False,
data=data)
elif action == 'delete':
log.debug('DELETE request {}'.format(url))
self.req = requests.delete(url, headers=self.headers,
verify=False, auth=auth)
if self.req.content == b'':
result = None
log.debug('No result returned.')
else:
result = self.req.json()
if 'error' in result and result['error']:
raise TPMException(result['message'])
except requests.exceptions.RequestException as e:
log.critical("Connection error for " + str(e))
raise TPMException("Connection error for " + str(e))
except ValueError as e:
if self.req.status_code == 403:
log.warning(url + " forbidden")
raise TPMException(url + " forbidden")
elif self.req.status_code == 404:
log.warning(url + " forbidden")
raise TPMException(url + " not found")
else:
message = ('{}: {} {}'.format(e, self.req.url, self.req.text))
log.de |
Caoimhinmg/PmagPy | programs/foldtest.py | Python | bsd-3-clause | 6,056 | 0.024108 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
from past.utils import old_div
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pylab
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
"""
NAME
foldtest.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
dec inc dip_direction dip
SYNTAX
foldtest.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE file with input data
-F FILE for confidence bounds on fold | test
-u ANGLE (circular standard deviation) for uncertainty on bedding po | les
-b MIN MAX bounds for quick search of percent untilting [default is -10 to 150%]
-n NB number of bootstrap samples [default is 1000]
-fmt FMT, specify format - default is svg
-sav save figures and quit
INPUT FILE
Dec Inc Dip_Direction Dip in space delimited file
OUTPUT PLOTS
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a post-tilt magnetization is indicated
If the 95% conf bounds include 100, then a pre-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
Geographic: is an equal area projection of the input data in
OPTIONAL OUTPUT FILE:
The output file has the % untilting within the 95% confidence bounds
nd the number of bootstrap samples
"""
kappa=0
fmt,plot='svg',0
nb=1000 # number of bootstraps
min,max=-10,150
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=open(sys.argv[ind+1],'w')
else:
outfile=""
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
DIDDs=numpy.loadtxt(file)
else:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
if '-b' in sys.argv:
ind=sys.argv.index('-b')
min=int(sys.argv[ind+1])
max=int(sys.argv[ind+2])
if '-n' in sys.argv:
ind=sys.argv.index('-n')
nb=int(sys.argv[ind+1])
if '-u' in sys.argv:
ind=sys.argv.index('-u')
csd=float(sys.argv[ind+1])
kappa=(old_div(81.,csd))**2
#
# get to work
#
PLTS={'geo':1,'strat':2,'taus':3} # make plot dictionary
pmagplotlib.plot_init(PLTS['geo'],5,5)
pmagplotlib.plot_init(PLTS['strat'],5,5)
pmagplotlib.plot_init(PLTS['taus'],5,5)
pmagplotlib.plotEQ(PLTS['geo'],DIDDs,'Geographic')
D,I=pmag.dotilt_V(DIDDs)
TCs=numpy.array([D,I]).transpose()
pmagplotlib.plotEQ(PLTS['strat'],TCs,'Stratigraphic')
if plot==0:pmagplotlib.drawFIGS(PLTS)
Percs=list(range(min,max))
Cdf,Untilt=[],[]
pylab.figure(num=PLTS['taus'])
print('doing ',nb,' iterations...please be patient.....')
for n in range(nb): # do bootstrap data sets - plot first 25 as dashed red line
if n%50==0:print(n)
Taus=[] # set up lists for taus
PDs=pmag.pseudo(DIDDs)
if kappa!=0:
for k in range(len(PDs)):
d,i=pmag.fshdev(kappa)
dipdir,dip=pmag.dodirot(d,i,PDs[k][2],PDs[k][3])
PDs[k][2]=dipdir
PDs[k][3]=dip
for perc in Percs:
tilt=numpy.array([1.,1.,1.,0.01*perc])
D,I=pmag.dotilt_V(PDs*tilt)
TCs=numpy.array([D,I]).transpose()
ppars=pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n<25:pylab.plot(Percs,Taus,'r--')
Untilt.append(Percs[Taus.index(numpy.max(Taus))]) # tilt that gives maximum tau
Cdf.append(old_div(float(n),float(nb)))
pylab.plot(Percs,Taus,'k')
pylab.xlabel('% Untilting')
pylab.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
pylab.plot(Untilt,Cdf,'g')
lower=int(.025*nb)
upper=int(.975*nb)
pylab.axvline(x=Untilt[lower],ymin=0,ymax=1,linewidth=1,linestyle='--')
pylab.axvline(x=Untilt[upper],ymin=0,ymax=1,linewidth=1,linestyle='--')
tit= '%i - %i %s'%(Untilt[lower],Untilt[upper],'Percent Unfolding')
print(tit)
print('range of all bootstrap samples: ', Untilt[0], ' - ', Untilt[-1])
pylab.title(tit)
outstring= '%i - %i; %i\n'%(Untilt[lower],Untilt[upper],nb)
if outfile!="":outfile.write(outstring)
files={}
for key in list(PLTS.keys()):
files[key]=('foldtest_'+'%s'%(key.strip()[:2])+'.'+fmt)
if plot==0:
pmagplotlib.drawFIGS(PLTS)
ans= input('S[a]ve all figures, <Return> to quit ')
if ans!='a':
print("Good bye")
sys.exit()
pmagplotlib.saveP(PLTS,files)
main()
|
ctsit/nacculator | nacc/uds3/np/builder.py | Python | bsd-2-clause | 5,859 | 0 | ###############################################################################
# Copyright 2015-2020 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
from nacc.uds3.np import forms as np_forms
from nacc.uds3 import packet as np_packet
def build_uds3_np_form(record):
packet = np_packet.Packet()
np = np_forms.FormNP()
np.NPFORMMO = record['npformmo']
np.NPFORMDY = record['npformdy']
np.NPFORMYR = record['npformyr']
np.NPID = record['npid']
np.NPSEX = record['npsex']
np.NPDAGE = record['npdage']
np.NPDODMO = record['npdodmo']
np.NPDODDY = record['npdoddy']
np.NPDODYR = record['npdodyr']
np.NPPMIH = record['nppmih']
np.NPFIX = record['npfix']
np.NPFIXX = record['npfixx']
np.NPWBRWT = record['npwbrwt']
np.NPWBRF = record['npwbrf']
np.NPGRCCA = record['npgrcca']
np.NPGRLA = record['npgrla']
np.NPGRHA = record['npgrha']
np.NPGRSNH = record['npgrsnh']
np.NPGRLCH = record['npgrlch']
np.NPAVAS = record['npavas']
np.NPTAN = record['nptan']
np.NPTANX = record['nptanx']
np.NPABAN = record['npaban']
np.NPABANX = record['npabanx']
np.NPASAN = record['npasan']
np.NPASANX = record['npasanx']
np.NPTDPAN = record['nptdpan']
np.NPTDPANX = record['nptdpanx']
np.NPHISMB = record['nphismb']
np.NPHISG = record['nphisg']
np.NPHISSS = record['nphisss']
np.NPHIST = record['nphist']
np.NPHISO = record['nphiso']
np.NPHISOX = record['nphisox']
np.NPTHAL = record['npthal']
np.NPBRAAK = record['npbraak']
np.NPNEUR = record['npneur']
np.NPADNC = record['npadnc']
np.NPDIFF = record['npdiff']
np.NPAMY = record['npamy']
np.NPINF = record['npinf']
np.NPINF1A = record['npinf1a']
np.NPINF1B = record['npinf1b']
np.NPINF1D = record['npinf1d']
np.NPINF1F = record['npinf1f']
np.NPINF2A = record['npinf2a']
np.NPINF2B = record['npinf2b']
np.NPINF2D = record['npinf2d']
np.NPINF2F = record['npinf2f']
np.NPINF3A = record['npinf3a']
np.NPINF3B = record['npinf3b']
np.NPINF3D = record['npinf3d']
np.NPINF3F = record['npinf3f']
np.NPINF4A = record['npinf4a']
np.NPINF4B = record['npinf4b']
np.NPINF4D = record['npinf4d']
np.NPINF4F = record['npinf4f']
np.NPHEMO = record['nphemo']
np.NPHEMO1 = record['nphemo1']
np.NPHEMO2 = record['nphemo2']
np.NPHEMO3 = record['nphemo3']
np.NPOLD = record['npold']
np.NPOLD1 = record['npold1']
np.NPOLD2 = record['npold2']
np.NPOLD3 = record['npold3']
np.NPOLD4 = record['npold4']
np.NPOLDD = record['npoldd']
np.NPOLDD1 = record['npoldd1']
np.NPOLDD2 = record['npoldd2']
np.NPOLDD3 = record['npoldd3']
np.NPOLDD4 = record['npoldd4']
np.NPARTER = record['nparter']
np.NPWMR = record['npwmr']
np.NPPATH = record['nppath']
np.NPNEC = record['npnec']
np.NPPATH2 = record['nppath2']
np.NPPATH3 = record['nppath3']
np.NPPATH4 = record['nppath4']
np.NPPATH5 = record['nppath5']
np.NPPATH6 = record['nppath6']
np.NPPATH7 = record['nppath7']
np.NPPATH8 = record['nppath8']
np.NPPATH9 = record['nppath9']
np.NPPATH10 = record['nppath10']
np.NPPATH11 = record['nppath11']
np.NPPATHO = record['nppatho']
np.NPPATHOX = record['nppathox']
np.NPLBOD = record['nplbod']
np.NPNLOSS = record['npnloss']
np.NPHIPSCL = record['nphipscl']
np.NPTDPA = record['nptdpa']
np.NPTDPB = record['nptdpb']
np.NPTDPC = record['nptdpc']
np.NPTDPD = record['nptdpd']
np.NPTDPE = record['nptdpe']
np.NPFTDTAU = record['npftdtau']
np.NPPICK = record['nppick']
np.NPFTDT2 = record['npftdt2']
np.NPCORT = record['npcort']
np.NPPROG = record['npprog']
np.NPFTDT5 = record['npftdt5']
np.NPFTDT6 = record['npftdt6']
np.NPFTDT7 = record['npftdt7']
np.NPFTDT8 = record['npftdt8']
np.NPFTDT9 = record['npftdt9']
np.NPFTDT10 = record['npftdt10']
np.NPFTDTDP = record['npftdtdp']
np.NPALSMND = record['npalsmnd']
np.NPOFTD = record['npoftd']
np.NPOFTD1 = record['npoftd1']
np.NPOFTD2 = record['npoftd2']
np.NPOFTD3 = record['npoftd3']
np.NPOFTD4 = record['npoftd4']
np.NPOFTD5 = record['npoftd5']
np.NPPDXA = record['nppdxa']
np.NPPDXB = record['nppdxb']
np.NPPDXC = record['nppdxc']
np.NPPDXD = record['nppdxd']
np.NPPDXE = record['nppdxe']
np.NPPDXF = record['nppdxf']
np.NPPDXG = record['nppdxg']
np.NPPDXH = record['nppdxh']
np.NPPDXI = record['nppdxi']
np.NPPDXJ = record['nppdxj']
np.NPPDXK = record['nppdxk']
np.NPPDXL = record['nppdxl']
np.NPPDXM = record['nppdxm']
np.NPPDXN = record['nppdxn']
np.NPPDXO = record['nppdxo']
np.NPPDXP = record['nppdxp']
np.NPPDXQ = record['nppdxq']
np.NPPDXR = record['nppdxr']
np.NPPDXRX = record['nppdxrx']
np.NPPDXS = record['nppdxs']
np.NPPDXSX = record['nppdxsx']
np.NPPDXT = record['nppdxt']
np.NPPDXTX = record['nppdxtx']
np.NPBNKA = record['npbnka']
np.NPBNKB = record['npbnkb']
np.NPBNKC = record['npbnkc']
np.NPBNKD = record['npbnkd']
| np.NPBNKE = record['npbnke']
np.NPBNKF = record['npbnkf']
np.NPBNKG = rec | ord['npbnkg']
np.NPFAUT = record['npfaut']
np.NPFAUT1 = record['npfaut1']
np.NPFAUT2 = record['npfaut2']
np.NPFAUT3 = record['npfaut3']
np.NPFAUT4 = record['npfaut4']
packet.append(np)
update_header(record, packet)
return packet
def update_header(record, packet):
for header in packet:
header.FORMVER = 10
header.ADCID = record['adcid']
header.PTID = record['ptid']
|
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/django/db/models/fields/related.py | Python | mit | 114,783 | 0.002004 | from __future__ import unicode_literals
import | warnings
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
| from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.lookups import IsNull
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_registered_model raises a LookupError, it means
# that the related model isn't loaded yet, so we need to pend the relation
# until the class is prepared.
try:
model = cls._meta.apps.get_registered_model(app_label, model_name)
except LookupError:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
else:
operation(field, model, cls)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.rel.to
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_related_name_is_valid())
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_related_name_is_valid(self):
import re
import keyword
related_name = self.rel.related_name
if not related_name:
return []
is_valid_id = True
if keyword.iskeyword(related_name):
is_valid_id = False
if six.PY3:
if not related_name.isidentifier():
is_valid_id = False
else:
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
is_valid_id = False
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.rel.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_relation_model_exists(self):
rel_is_missing = self.rel.to not in apps.get_models()
rel_is_string = isinstance(self.rel.to, six.string_types)
model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.rel.to not in apps.get_models() and
not isinstance(self.rel.to, six.string_types) and
self.rel.to._meta.swapped):
model = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
""" Check accessor and reverse query name clashes. """
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.rel.to` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.rel.to, ModelBase):
return []
# If the field doesn't install backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.rel.is_hidden():
return []
try:
self.rel
except AttributeError:
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.rel.to._meta
# rel_opts.object_name == "Target"
rel_name = self.rel.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor |
ulikoehler/UliEngineering | tests/Utils/TestZIP.py | Python | apache-2.0 | 411 | 0.012165 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from numpy.testing import assert_approx_equ | al, assert_allclose, assert_array_equal
from UliEngineering.Utils.ZIP import *
from UliEngineering.Utils.Temporary import *
import unittest
class TestFileUtils(unittest.TestCase):
def setUp(self):
self.tmp = AutoDeleteTempfileGenerator()
def create_zip_from_directory(self):
| pass #TODO |
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/addons/stock/wizard/stock_return_picking.py | Python | agpl-3.0 | 8,537 | 0.002811 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_return_picking_line(osv.osv_memory):
_name = "stock.return.picking.line"
_rec_name = 'product_id'
_columns = {
'product_id': fields.many2one('product.product', string="Product", required=True),
'quantity': fields.float("Quantity", digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'wizard_id': fields.many2one('stock.return.picking', string="Wizard"),
'move_id': fields.many2one('stock.move', "Move"),
'lot_id': fields.many2one('stock.production.lot', 'Serial Number', help="Used to choose the lot/serial number of the product returned"),
}
class stock_return_picking(osv.osv_memory):
_name = 'stock.return.picking'
_description = 'Return Picking'
_columns = {
'product_return_moves': fields.one2many('stock.return.picking.line', 'wizard_id', 'Moves'),
'move_dest_exists': fields.boolean('Chained Move Exists', readonly=True, help="Technical field used to hide help tooltip if not needed"),
}
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary with default values for all field in ``fields``
"""
result1 = []
if context is None:
context = {}
res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
uom_obj = self.pool.get('product.uom')
pick_obj = self.pool.get('stock.picking')
pick = pick_obj.browse(cr, uid, record_id, context=context)
quant_obj = self.pool.get("stock.quant")
chained_move_exist = False
if pick:
if pick.state != 'done':
raise osv.except_osv(_('Warning!'), _("You may only return pickings that are Done!"))
for move in pick.move_lines:
if move.move_dest_id:
chained_move_exist = True
#Sum the quants in that location that can be returned (they should have been moved by the moves that were included in the returned picking)
qty = 0
quant_search = quant_obj.search(cr, uid, [('history_ids', 'in', move.id), ('qty', '>', 0.0), ('location_id', 'child_of', move.location_dest_id.id)], context=context)
for quant in quant_obj.browse(cr, uid, quant_search, context=context):
if not quant.reservation_id or quant.reservation_id.origin_returned_move_id.id != move.id:
qty += quant.qty
qty = uom_obj._compute_qty(cr, uid, move.product_id.uom_id.id, qty, move.product_uom.id)
result1.append({'product_id': move.product_id.id, 'quantity': qty, 'move_id': move.id})
if len(result1) == 0:
raise osv.except_osv(_('Warning!'), _("No products to return (only lines in Done state and not fully returned yet can be returned)!"))
if 'product_return_moves' in fields:
res.update({'product_return_moves': result1})
if 'move_dest_exists' in fields:
res.update({'move_dest_exists': chained_move_exist})
return res
def _create_returns(self, cr, uid, ids, context=None):
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
move_obj = self.pool.get('stock.move')
pick_obj = self.pool.get('stock.picking')
uom_obj = self.pool.get('product.uom')
data_obj = self.pool.get('stock.return.picking.line')
pick = pick_obj.browse(cr, uid, record_id, context=context)
data = self.read(cr, uid, ids[0], context=context)
returned_lines = 0
# Cancel assignment of existing chained assigned moves
#Create new picking for returned products
pick_type_id = pick.picking_type_id.return_picking_type_id and pick.picking_type_id.return_picking_type_id.id or pick.picking_type_id.id
new_picking = pick_obj.copy(cr, uid, pick.id, {
'move_lines': [],
'picking_type_id': pick_type_id,
'state': 'draft',
'origin': pick.name,
}, context=context)
for data_get in data_obj.browse(cr, uid, data['product_return_moves'], context=context):
move = data_get.move_id
if not move:
raise osv.except_osv(_('Warning !'), _("You have manually created product lines, please delete them to proceed"))
new_qty = data_get.quantity
if new_qty:
returned_lines += 1
move_obj.copy(cr, uid, move.id, {
'product_id': data_get.product_id.id,
'product_uom_qty': new_qty,
'product_uos_qty': uom_obj._compute_qty(cr, uid, move.product_uom.id, new_qty, move.product_uos.id),
'picking_id': new_picking,
'state': 'draft',
'location_id': move.location_dest_id.id,
'location_dest_id': move.location_id.id,
'origin_returned_move_id': move.id,
'procure_method': 'make_to_stock',
'restrict_lot_id': data_get.lot_id.id,
'price_unit': move.product_id.standard_price,
'unit_price': move.product_id.standard_price,
'cost': move.product_id.standard_price,
})
if not returned_lines:
raise osv.except_osv(_('Warning!'), _("Please specify at least one non-zero quantity."))
pick_obj.action_confirm(cr, uid, [new_picking], context=context)
pick_obj.action_assign(cr, uid, [new_picking], context)
return new_picking, pick_type_id
def create_returns(self, cr, uid, ids, context=None):
"""
Creates return picking.
@param self: The object pointer.
| @param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of ids selected
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
new_picki | ng_id, pick_type_id = self._create_returns(cr, uid, ids, context=context)
# Override the context to disable all the potential filters that could have been set previously
ctx = {
'search_default_picking_type_id': pick_type_id,
'search_default_draft': False,
'search_default_assigned': False,
'search_default_confirmed': False,
'search_default_ready': False,
'search_default_late': False,
'search_default_available': False,
}
return {
'domain': |
thelabnyc/django-oscar-api-checkout | src/oscarapicheckout/email.py | Python | isc | 229 | 0 | from oscar.core.loading import get_class
OrderPlacementMixin = get | _class("checkout.mixins", "OrderPlacementMixin")
class OrderMessageSender(OrderPlacementMixin):
def __init__(self, request):
self.request = request
| |
DrSkippy/Gnacs | acscsv/acscsv.py | Python | bsd-2-clause | 13,140 | 0.011948 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Scott Hendrickson"
__license__="Simplified BSD"
import sys
import datetime
import fileinput
from io import StringIO
# Experimental: Use numba to speed up some fo the basic function
# that are run many times per record
# from numba import jit
# use fastest option available
try:
import ujson as json
except ImportError:
try:
import json
except ImportError:
import simplejson as json
gnipError = "GNIPERROR"
gnipRemove = "GNIPREMOVE"
gnipDateTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")
INTERNAL_EMPTY_FIELD = "GNIPEMPTYFIELD"
class _Field(object):
"""
Base class for extracting the desired value at the end of a series of keys in a JSON Activity
Streams payload. Set the application-wide default value (for e.g. missing values) here,
but also use child classes to override when necessary. Subclasses also need to define the
key-path (path) to the desired location by overwriting the path attr.
"""
# set some default values; these can be overwritten in custom classes
# twitter format
default_t_fmt = "%Y-%m-%dT%H:%M:%S.000Z"
default_value = INTERNAL_EMPTY_FIELD
path = [] # dict key-path to follow for desired value
label = 'DummyKeyPathLabel' # this must match if-statement in constructor
def __init__(self, json_record):
if self.label == 'DummyKeyPathLabel':
self.label = ':'.join(self.path)
self.value = None # str representation of the field, often = str( self.value_list )
if json_record is not None:
self.value = self.walk_path(json_record)
else:
self.value = self.default_value
def __repr__(self):
return unicode(self.value)
def walk_path(self, json_record, path=None):
res = json_record
if path is None:
path = self.path
for k in path:
if res is None:
break
if k not in res or ( type(res[k]) is list and len(res[k]) == 0 ):
# parenthetical clause for values with empty lists e.g. twitter_entities
return self.default_value
res = res[k]
# handle the special case where the walk_path found null (JSON) which converts to
# a Python None. Only use "None" (str version) if it's assigned to self.default_value
res = res if res is not None else self.default_value
return res
def walk_path_slower(self, json_record, path=None):
"""Slower version fo walk path. Depricated."""
if path is None:
path = self.path
try:
execstr = "res=json_record" + '["{}"]'*len(path)
exec(execstr.format(*path))
except (KeyError, TypeError):
res = None
if res is None:
res = self.default_value
return res
def fix_length(self, iterable, limit=None):
"""
Take an iterable (typically a list) and an optional maximum length (limit).
If limit is not given, and the input iterable is not equal to self.default_value
(typically "None"), the input iterable is returned. If limit is given, the return
value is a list that is either truncated to the first limit items, or padded
with self.default_value until it is of size limit. Note: strings are iterables,
so if you pass this function a string, it will (optionally) truncate the
number of characters in the string according to limit.
"""
res = []
if limit is None:
# no limits on the length of the result, so just return the original iterable
res = iterable
else:
#if len(iterable) == 0:
if iterable == self.default_value or len(iterable) == 0:
# if walk_path() finds the final key, but the value is an empty list
# (common for e.g. the contents of twitter_entities)
# overwrite self.value with a list of self.default_value and of length limit
res = [ self.default_value ]*limit
else:
# found something useful in the iterable, either pad the list or truncate
# to end up with something of the proper length
current_length = len( iterable )
if current_length < limit:
res = iterable + [ self.default_value
for _ in range(limit - current_length) ]
else:
res = iterable[:limit]
return res
class _LimitedField(_Field):
    """
    Takes a JSON record (in python dict form) and optionally a maximum length
    (``limit``, default 1). Uses parent class _Field() to assign the appropriate
    value to self.value. When self.value is a list of dictionaries, inheriting
    from the _LimitedField() class allows for the extraction and combination of
    an arbitrary number of fields within self.value into self.value_list.
    Ex: if your class would lead to having
    self.value = [ {'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6} ], and what you'd like
    is a list that looks like [ 1, 2, 4, 5 ], inheriting from _LimitedField() allows you
    to overwrite the fields list ( fields=["a", "b"] ) to obtain this result.
    Finally, self.value is set to a string representation of the final self.value_list.
    """
    #TODO: is there a better way that this class and the fix_length() method in _Field class
    #   could be combined?
    #TODO: set limit=None by default and just return as many as there are, otherwise (by specifying
    #   limit), return a maximum of limit.
    # TODO:
    # - consolidate _LimitedField() & fix_length() if possible
    def __init__(self, json_record, limit=1):
        # NOTE(review): self.fields is None here, so the block below only runs
        # when _Field.__init__() (or a subclass) populates it — confirm callers.
        self.fields = None
        super(
            _LimitedField
            , self).__init__(json_record)
        # self.value is possibly a list of dicts for each activity media object
        if self.fields:
            # start with default list full of the default_values
            self.value_list = [ self.default_value ]*( len(self.fields)*limit )
            if self.value != self.default_value:
                for i,x in enumerate(self.value):  # iterate over the dicts in the list
                    if i < limit:  # ... up until you reach limit
                        for j,y in enumerate(self.fields):  # iterate over the dict keys
                            self.value_list[ len( self.fields )*i + j ] = x[ self.fields[j] ]
            # finally, str-ify the list
            self.value = str( self.value_list )
class AcsCSV(object):
"""Base class for all delimited list objects. Basic delimited list utility functions"""
    def __init__(self, delim, options_keypath):
        """Store the output delimiter and the options keypath.

        delim: string used to separate output fields.
        options_keypath: keypath into the record options (semantics defined by
            subclasses — not visible here).
        """
        self.delim = delim
        if delim == "":
            # an empty delimiter makes the output columns unrecoverable
            print >>sys.stderr, "Warning - Output has Null delimiter"
        # characters to strip from field values so they cannot corrupt the output
        self.rmchars = "\n\r {}".format(self.delim)
        self.options_keypath = options_keypath
    def string_hook(self, record_string, mode_dummy):
        """
        Returns a file-like StringIO object built from the activity record in
        record_string. This is ultimately passed down to the FileInput.readline()
        method. The mode_dummy parameter is only included so the signature
        matches the other open-style hooks (e.g. fileinput.hook_compressed).
        """
        return StringIO( record_string )
def file_reader(self, options_filename=None, json_string=None):
"""
Read arbitrary input file(s) or standard Python str. When passing file_reader() a
JSON string, assign it to the json_string arg. Yields a tuple of (line number, record).
"""
line_number = 0
if json_string is not None:
hook = self.string_hook
options_filename = json_string
else:
hook = fileinput.hook_compressed |
johnlb/strange_wp | strange_bak/document.py | Python | gpl-3.0 | 28,054 | 0.000071 | # coding: utf-8
"""
weasyprint.document
-------------------
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import io
import math
import shutil
import functools
import cairocffi as cairo
import gdspy
from . import CSS
from . import images
from .logger import LOGGER
from .css import get_all_computed_styles
from .formatting_structure import boxes
from .formatting_structure.build import build_formatting_structure
from .layout import layout_document
from .layout.backgrounds import percentage
from .draw import draw_page, draw_page_gds, stacked
from .pdf import write_pdf_metadata
from .compat import izip, iteritems, FILESYSTEM_ENCODING
def _get_matrix(box):
    """Return the matrix for the CSS transforms on this box.

    :param box: a laid-out box carrying a ``style`` attribute.
    :returns: a :class:`cairocffi.Matrix` object, or :obj:`None` when the box
        has no transform (or is an inline box, which cannot be transformed).

    Side effect: stores the matrix on ``box.transformation_matrix``.
    """
    # "Transforms apply to block-level and atomic inline-level elements,
    # but do not apply to elements which may be split into
    # multiple inline-level boxes."
    # http://www.w3.org/TR/css3-2d-transforms/#introduction
    if box.style.transform and not isinstance(box, boxes.InlineBox):
        border_width = box.border_width()
        border_height = box.border_height()
        # Resolve the transform origin (possibly percentages of the border
        # box) to absolute page coordinates.
        origin_x, origin_y = box.style.transform_origin
        origin_x = box.border_box_x() + percentage(origin_x, border_width)
        origin_y = box.border_box_y() + percentage(origin_y, border_height)
        matrix = cairo.Matrix()
        matrix.translate(origin_x, origin_y)
        for name, args in box.style.transform:
            if name == 'scale':
                matrix.scale(*args)
            elif name == 'rotate':
                matrix.rotate(args)
            elif name == 'translate':
                translate_x, translate_y = args
                matrix.translate(
                    percentage(translate_x, border_width),
                    percentage(translate_y, border_height),
                )
            else:
                # skew / raw matrix: express as a 6-tuple affine and compose.
                if name == 'skewx':
                    args = (1, 0, math.tan(args), 1, 0, 0)
                elif name == 'skewy':
                    args = (1, math.tan(args), 0, 1, 0, 0)
                else:
                    assert name == 'matrix'
                matrix = cairo.Matrix(*args) * matrix
        # Undo the origin shift so the transform is about transform-origin.
        matrix.translate(-origin_x, -origin_y)
        box.transformation_matrix = matrix
        return matrix
def rectangle_aabb(matrix, pos_x, pos_y, width, height):
    """Apply a transformation matrix to an axis-aligned rectangle and return
    the axis-aligned bounding box of the result as ``(x, y, width, height)``.
    """
    # Transform all four corners, then take extrema per axis.
    corners = [
        matrix.transform_point(x, y)
        for x in (pos_x, pos_x + width)
        for y in (pos_y, pos_y + height)
    ]
    all_x = [point[0] for point in corners]
    all_y = [point[1] for point in corners]
    min_x = min(all_x)
    min_y = min(all_y)
    return min_x, min_y, max(all_x) - min_x, max(all_y) - min_y
class _TaggedTuple(tuple):
    """A tuple with a :attr:`sourceline` attribute.

    ``sourceline`` is the line number in the HTML source for whatever the
    tuple represents; it is assigned by the code creating the instance.
    """
def _gather_links_and_bookmarks(box, bookmarks, links, anchors, matrix):
    """Recursively walk the box tree, appending bookmarks/links and recording
    anchors into the mutable ``bookmarks``/``links``/``anchors`` arguments.

    ``matrix`` is the accumulated CSS transform so far (or None); positions
    are mapped through it so results are in page coordinates.
    """
    transform = _get_matrix(box)
    if transform:
        # Compose this box's transform with the inherited one.
        matrix = transform * matrix if matrix else transform
    bookmark_label = box.style.bookmark_label
    if box.style.bookmark_level == 'none':
        bookmark_level = None
    else:
        bookmark_level = box.style.bookmark_level
    link = box.style.link
    anchor_name = box.style.anchor
    has_bookmark = bookmark_label and bookmark_level
    # 'link' is inherited but redundant on text boxes
    has_link = link and not isinstance(box, boxes.TextBox)
    # In case of duplicate IDs, only the first is an anchor.
    has_anchor = anchor_name and anchor_name not in anchors
    is_attachment = hasattr(box, 'is_attachment') and box.is_attachment
    if has_bookmark or has_link or has_anchor:
        pos_x, pos_y, width, height = box.hit_area()
        if has_link:
            link_type, target = link
            if isinstance(target, bytes):
                # Links are filesystem_encoding/utf-8 bytestrings in Python 2
                # and ASCII unicode in Python 3. See ``iri_to_uri`` and
                # standard library's ``quote`` source.
                target = target.decode(
                    FILESYSTEM_ENCODING if target.startswith('file:')
                    else 'utf-8')
            if link_type == 'external' and is_attachment:
                link_type = 'attachment'
            # Store the (possibly transformed) clickable rectangle.
            if matrix:
                link = _TaggedTuple(
                    (link_type, target, rectangle_aabb(
                        matrix, pos_x, pos_y, width, height)))
            else:
                link = _TaggedTuple(
                    (link_type, target, (pos_x, pos_y, width, height)))
            link.sourceline = box.sourceline
            links.append(link)
        if matrix and (has_bookmark or has_anchor):
            pos_x, pos_y = matrix.transform_point(pos_x, pos_y)
        if has_bookmark:
            bookmarks.append((bookmark_level, bookmark_label, (pos_x, pos_y)))
        if has_anchor:
            anchors[anchor_name] = pos_x, pos_y
    for child in box.all_children():
        _gather_links_and_bookmarks(child, bookmarks, links, anchors, matrix)
class Page(object):
"""Represents a single rendered page.
.. versionadded:: 0.15
Should be obtained from :attr:`Document.pages` but not
instantiated directly.
"""
    def __init__(self, page_box, enable_hinting=False):
        """Collect size, bookmarks, links and anchors from a laid-out page box.

        :param page_box: the root box of a laid-out page.
        :param enable_hinting: stored for use when the page is painted.
        """
        #: The page width, including margins, in CSS pixels.
        self.width = page_box.margin_width()
        #: The page height, including margins, in CSS pixels.
        self.height = page_box.margin_height()
        #: A list of ``(bookmark_level, bookmark_label, target)`` tuples.
        #: :obj:`bookmark_level` and :obj:`bookmark_label` are respectively
        #: an integer and an Unicode string, based on the CSS properties
        #: of the same names. :obj:`target` is a ``(x, y)`` point
        #: in CSS pixels from the top-left of the page.
        self.bookmarks = bookmarks = []
        #: A list of ``(link_type, target, rectangle)`` tuples.
        #: A rectangle is ``(x, y, width, height)``, in CSS pixels from
        #: the top-left of the page. :obj:`link_type` is one of two strings:
        #:
        #: * ``'external'``: :obj:`target` is an absolute URL
        #: * ``'internal'``: :obj:`target` is an anchor name (see
        #:   :attr:`Page.anchors`).
        #    The anchor might be defined in another page,
        #    in multiple pages (in which case the first occurence is used),
        #    or not at all.
        #: * ``'attachment'``: :obj:`target` is an absolute URL and points
        #:   to a resource to attach to the document.
        self.links = links = []
        #: A dict mapping anchor names to their target, ``(x, y)`` points
        #: in CSS pixels form the top-left of the page.)
        self.anchors = anchors = {}
        # Walk the whole box tree once, filling the three containers above.
        _gather_links_and_bookmarks(
            page_box, bookmarks, links, anchors, matrix=None)
        self._page_box = page_box
        self._enable_hinting = enable_hinting
def paint(self, cairo_context, left_x=0, top_y=0, scale=1, clip=False):
"""Paint the page in cairo, on any type of surface.
:param cairo_context:
Any :class:`cairocffi.Context` object.
.. note::
In case you get a :class:`cairo.Context` object
(eg. form PyGTK),
it is possible to :ref:`convert it to cairocffi
<converting_pycairo>`.
:param left_x:
X coordinate of the left of the page, in cairo u |
# Default output filenames used when the caller does not override them.
defaultFileNames = {
    "caCert": "ca.crt",
    "userCert": "user.crt",
    "privateKey": "private.key",
    "tlsAuth": "tls.key",
    "configOutput": "client.new.ovpn",
}

# Regular expressions capturing each inlined section of an .ovpn file.
parserMatchers = {
    "caCert": "<ca>([\s\S]*?)<\/ca>",
    "userCert": "<cert>([\s\S]*?)<\/cert>",
    "privateKey": "<key>([\s\S]*?)<\/key>",
    "tlsAuth": "<tls-auth>([\s\S]*?)<\/tls-auth>",
}

# Captures the key-direction value (0 or 1) from the config.
keyDirMatcher = "key-direction\s+([10])"

# Directive name written back into the config for each extracted section.
textToInsertRefs = {
    "caCert": "ca",
    "userCert": "cert",
    "privateKey": "key",
    "tlsAuth": "tls-auth",
}

# Marker after which the file references are inserted into the config.
insertLocationMatcher = "(## -----BEGIN \w+ SIGNATURE-----)"
tony-rasskazov/meteo | weewx/bin/user/installer/amphibian/install.py | Python | mit | 1,727 | 0.002316 | # $Id: install.py 1169 2014-12-07 14:39:20Z mwall $
# installer for amphibian
# Copyright 2014 Matthew Wall
from setup import ExtensionInstaller
def loader():
    """Return a new AmphibianInstaller instance (installer entry point)."""
    installer = AmphibianInstaller()
    return installer
class AmphibianInstaller(ExtensionInstaller):
    """Installer for the amphibian skin: registers the report under
    StdReport and lists the skin files to copy into place."""

    def __init__(self):
        super(AmphibianInstaller, self).__init__(
            version="0.11",
            name='amphibian',
            description='Skin that looks a bit like a wet frog.',
            author="Matthew Wall",
            author_email="mwall@users.sourceforge.net",
            config={
                'StdReport': {
                    'amphibian': {
                        'skin': 'amphibian',
                        'HTML_ROOT': 'amphibian'}}},
            files=[('skins/amphibian',
                    ['skins/amphibian/almanac.html.tmpl',
                     'skins/amphibian/amphibian.css',
                     'skins/amphibian/amphibian.js',
                     'skins/amphibian/charts.inc',
                     'skins/amphibian/day.html.tmpl',
                     'skins/amphibian/favicon.ico',
                     'skins/amphibian/footer.inc',
                     'skins/amphibian/header.inc',
                     'skins/amphibian/index.html.tmpl',
                     'skins/amphibian/month-table.html.tmpl',
                     'skins/amphibian/month.html.tmpl',
                     'skins/amphibian/skin.conf',
                     'skins/amphibian/week-table.html.tmpl',
                     'skins/amphibian/week.html.tmpl',
                     'skins/amphibian/weewx_rss.xml.tmpl',
                     'skins/amphibian/year-table.html.tmpl',
                     'skins/amphibian/year.html.tmpl']),
                   ]
        )
bp-kelley/rdkit | rdkit/Chem/MolDb/FingerprintUtils.py | Python | bsd-3-clause | 3,612 | 0.001384 | # $Id$
#
# Copyright (C) 2009 Greg Landrum
# All Rights Reserved
#
import pickle
from rdkit import Chem, DataStructs
# Map each supported similarity-method name to the DataStructs vector class
# used to rebuild its fingerprints from pickles (see DepickleFP()).
similarityMethods = {
    'RDK': DataStructs.ExplicitBitVect,
    'AtomPairs': DataStructs.IntSparseIntVect,
    'TopologicalTorsions': DataStructs.LongSparseIntVect,
    'Pharm2D': DataStructs.SparseBitVect,
    'Gobbi2D': DataStructs.SparseBitVect,
    'Morgan': DataStructs.UIntSparseIntVect,
    'Avalon': DataStructs.ExplicitBitVect,
}
# The supported method names, as a plain list.
supportedSimilarityMethods = list(iter(similarityMethods))
class LayeredOptions:
    """Configuration and helpers for RDKit layered fingerprints.

    All state is class-level; the static methods are namespaced utilities.
    """
    # Layer flags used when fingerprinting for loading vs. searching.
    loadLayerFlags = 0xFFFFFFFF
    searchLayerFlags = 0x7
    # Bond-path lengths considered when enumerating subgraphs.
    minPath = 1
    maxPath = 6
    # Fingerprint length in bits and its packing into 32-bit words.
    fpSize = 1024
    wordSize = 32
    nWords = fpSize // wordSize

    @staticmethod
    def GetFingerprint(mol, query=True):
        """Return the layered fingerprint for mol (query vs. load flags)."""
        if query:
            flags = LayeredOptions.searchLayerFlags
        else:
            flags = LayeredOptions.loadLayerFlags
        return Chem.LayeredFingerprint(mol, layerFlags=flags, minPath=LayeredOptions.minPath,
                                       maxPath=LayeredOptions.maxPath, fpSize=LayeredOptions.fpSize)

    @staticmethod
    def GetWords(mol, query=True):
        """Return the fingerprint packed as a list of 32-bit integers."""
        txt = LayeredOptions.GetFingerprint(mol, query=query).ToBitString()
        return [int(txt[x:x + 32], 2) for x in range(0, len(txt), 32)]

    @staticmethod
    def GetQueryText(mol, query=True):
        """Return a SQL condition string AND-matching each non-zero word
        against its Col_<i> column."""
        words = LayeredOptions.GetWords(mol, query=query)
        colqs = []
        for idx, word in enumerate(words):
            if not word:
                continue
            colqs.append(f'{word}&Col_{idx + 1}={word}')
        return ' and '.join(colqs)
def BuildSigFactory(options=None, fdefFile=None,
                    bins=[(2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 100)],
                    skipFeats=('LumpedHydrophobe', 'ZnBinder')):
    """Build a 2D-pharmacophore SigFactory from a feature-definition file.

    options: optional object whose ``fdefFile`` attribute overrides fdefFile.
    bins: distance bins for the signature. NOTE: mutable default — acceptable
        here only because it is never mutated, just handed to SetBins().
    skipFeats: feature families to exclude.
    Raises ValueError when no fdef file is provided.
    """
    if options:
        fdefFile = options.fdefFile
    if not fdefFile:
        raise ValueError('bad fdef file')
    from rdkit.Chem import ChemicalFeatures
    from rdkit.Chem.Pharm2D import SigFactory
    featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
    sigFactory = SigFactory.SigFactory(featFactory, skipFeats=skipFeats, trianglePruneBins=False)
    sigFactory.SetBins(bins)
    return sigFactory
def BuildAtomPairFP(mol):
    """Return the atom-pair fingerprint of mol as a sparse int vector.

    The total count is cached on the fingerprint (``_sumCache``) so later
    similarity code does not need to recompute it.
    """
    from rdkit.Chem.AtomPairs import Pairs
    fp = Pairs.GetAtomPairFingerprintAsIntVect(mol)
    fp._sumCache = fp.GetTotalVal()
    return fp
def BuildTorsionsFP(mol):
    """Return the topological-torsion fingerprint of mol as a sparse int
    vector, with the total count cached on ``_sumCache``."""
    from rdkit.Chem.AtomPairs import Torsions
    fp = Torsions.GetTopologicalTorsionFingerprintAsIntVect(mol)
    fp._sumCache = fp.GetTotalVal()
    return fp
def BuildRDKitFP(mol):
    """Return the RDKit topological fingerprint (one bit per hash)."""
    return Chem.RDKFingerprint(mol, nBitsPerHash=1)
def BuildPharm2DFP(mol):
    """Return the 2D-pharmacophore fingerprint of mol.

    Uses the module-level ``sigFactory`` (must be initialized first, e.g. via
    BuildSigFactory()). On IndexError the offending SMILES is printed and the
    exception re-raised.
    """
    global sigFactory
    from rdkit.Chem.Pharm2D import Generate
    try:
        fp = Generate.Gen2DFingerprint(mol, sigFactory)
    except IndexError:
        print('FAIL:', Chem.MolToSmiles(mol, True))
        raise
    return fp
def BuildMorganFP(mol):
    """Return the radius-2 Morgan fingerprint of mol, with the total count
    cached on ``_sumCache``."""
    from rdkit.Chem import rdMolDescriptors
    fp = rdMolDescriptors.GetMorganFingerprint(mol, 2)
    fp._sumCache = fp.GetTotalVal()
    return fp
def BuildAvalonFP(mol, smiles=None):
    """Return the Avalon fingerprint: from ``smiles`` when given (computed
    directly from the SMILES string), otherwise from the molecule object."""
    from rdkit.Avalon import pyAvalonTools
    if smiles is None:
        return pyAvalonTools.GetAvalonFP(mol)
    return pyAvalonTools.GetAvalonFP(smiles, True)
def DepickleFP(pkl, similarityMethod):
    """Reconstruct a fingerprint object from its pickled representation.

    The vector class registered for ``similarityMethod`` is tried first; on
    any failure the traceback is printed and a plain ``pickle.loads`` is
    attempted instead.
    """
    payload = pkl if isinstance(pkl, (bytes, str)) else str(pkl)
    try:
        vector_cls = similarityMethods[similarityMethod]
        result = vector_cls(payload)
    except Exception:
        import traceback
        traceback.print_exc()
        result = pickle.loads(payload)
    return result
|
jmesteve/saas3 | openerp/addons/account/wizard/account_report_account_balance.py | Python | agpl-3.0 | 1,729 | 0.001735 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_balance_report(osv.osv_memory):
    """Wizard options for the Trial Balance report."""
    _inherit = "account.common.account.report"
    _name = 'account.balance.report'
    _description = 'Trial Balance Report'

    _columns = {
        'journal_ids': fields.many2many('account.journal', 'account_balance_report_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'journal_ids': [],
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        """Prepare the wizard data and launch the trial-balance report."""
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        return {'type': 'ir.actions.report.xml', 'report_name': 'account.account.balance', 'datas': data}
# vim:expandtab:smartindent: | tabstop=4:softtabstop=4:shiftwidth=4:
|
jeremyosborne/python | scope/scope.py | Python | mit | 548 | 0.005474 | """ Python expresses functional and modular scope | for variables.
"""
# Global to the module, not global in the builtin sense.
# Module-level binding: read by f1() and rebound by f2().
x = 5
def f1():
    """Return the module-global ``x``: no local binding, so the name lookup
    falls back to module scope."""
    return x
def f2():
    """Rebind the module-global ``x`` to 3 and return it."""
    global x
    x = 3
    return x
return x
# Should print 5: f1() reads the module global.
print f1()
# Should print 3: f2() rebinds the global before returning it.
print f2()
# Should print 3: the rebinding done by f2() persists.
print x
# When done, open the python interpreter and import this module.
# Note the output when importing.
# Note that our "global" x is only available via reference of scope.x.
|
gimunu/mopidy-lcdplate | mopidy_lcdplate/__init__.py | Python | apache-2.0 | 1,698 | 0.003534 | from __future__ import unicode_literals
import logging
import os
# TODO: Remove entirely if you don't register GStreamer elements below
import pygst
pygst.require('0.10')
import gst
import gobject
from mopidy import config, ext
__version__ = '0.1.0'
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
    """Mopidy extension wiring for the LCD-plate add-on."""

    dist_name = 'Mopidy-Lcdplate'
    ext_name = 'lcdplate'
    version = __version__

    def get_default_config(self):
        """Read the bundled ext.conf file as the default configuration."""
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)

    def get_config_schema(self):
        """Return the config schema (credentials can be enabled below)."""
        schema = super(Extension, self).get_config_schema()
        # TODO: Comment in and edit, or remove entirely
        #schema['username'] = config.String()
        #schema['password'] = config.Secret()
        return schema

    def setup(self, registry):
        """Register frontend/backend/mixer/static-web parts with Mopidy."""
        # You will typically only implement one of the following things
        # in a single extension.

        # TODO: Edit or remove entirely
        from .frontend import FoobarFrontend
        registry.add('frontend', FoobarFrontend)

        # TODO: Edit or remove entirely
        from .backend import FoobarBackend
        registry.add('backend', FoobarBackend)

        # TODO: Edit or remove entirely
        from .mixer import FoobarMixer
        gobject.type_register(FoobarMixer)
        gst.element_register(FoobarMixer, 'foobarmixer', gst.RANK_MARGINAL)

        # TODO: Edit or remove entirely
        registry.add('http:static', {
            'name': self.ext_name,
            'path': os.path.join(os.path.dirname(__file__), 'static'),
        })
turbokongen/home-assistant | homeassistant/components/homekit/type_switches.py | Python | apache-2.0 | 7,993 | 0.001126 | """Class to hold all switch accessories."""
import logging
from pyhap.const import (
CATEGORY_FAUCET,
CATEGORY_OUTLET,
CATEGORY_SHOWER_HEAD,
CATEGORY_SPRINKLER,
CATEGORY_SWITCH,
)
from homeassistant.components.switch import DOMAIN
from homeassistant.components.vacuum import (
DOMAIN as VACUUM_DOMAIN,
SERVICE_RETURN_TO_BASE,
SERVICE_START,
STATE_CLEANING,
SUPPORT_RETURN_HOME,
SUPPORT_START,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_TYPE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import callback, split_entity_id
from homeassistant.helpers.event import call_later
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_ACTIVE,
CHAR_IN_USE,
CHAR_ON,
CHAR_OUTLET_IN_USE,
CHAR_VALVE_TYPE,
SERV_OUTLET,
SERV_SWITCH,
SERV_VALVE,
TYPE_FAUCET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_VALVE,
)
_LOGGER = logging.getLogger(__name__)

# Map each configured valve type to its HomeKit accessory category and the
# numeric value fed to the CHAR_VALVE_TYPE characteristic (see Valve).
VALVE_TYPE = {
    TYPE_FAUCET: (CATEGORY_FAUCET, 3),
    TYPE_SHOWER: (CATEGORY_SHOWER_HEAD, 2),
    TYPE_SPRINKLER: (CATEGORY_SPRINKLER, 1),
    TYPE_VALVE: (CATEGORY_FAUCET, 0),
}
@TYPES.register("Outlet")
class Outlet(HomeAccessory):
    """Generate an Outlet accessory."""

    def __init__(self, *args):
        """Initialize an Outlet accessory object."""
        super().__init__(*args, category=CATEGORY_OUTLET)
        state = self.hass.states.get(self.entity_id)

        serv_outlet = self.add_preload_service(SERV_OUTLET)
        self.char_on = serv_outlet.configure_char(
            CHAR_ON, value=False, setter_callback=self.set_state
        )
        # "outlet in use" is initialized True and never updated in this class.
        self.char_outlet_in_use = serv_outlet.configure_char(
            CHAR_OUTLET_IN_USE, value=True
        )
        # Set the state so it is in sync on initial
        # GET to avoid an event storm after homekit startup
        self.async_update_state(state)

    def set_state(self, value):
        """Move switch state to value if call came from HomeKit."""
        _LOGGER.debug("%s: Set switch state to %s", self.entity_id, value)
        params = {ATTR_ENTITY_ID: self.entity_id}
        service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
        self.call_service(DOMAIN, service, params)

    @callback
    def async_update_state(self, new_state):
        """Update switch state after state changed."""
        current_state = new_state.state == STATE_ON
        if self.char_on.value is not current_state:
            _LOGGER.debug("%s: Set current state to %s", self.entity_id, current_state)
            self.char_on.set_value(current_state)
@TYPES.register("Switch")
class Switch(HomeAccessory):
    """Generate a Switch accessory."""

    def __init__(self, *args):
        """Initialize a Switch accessory object."""
        super().__init__(*args, category=CATEGORY_SWITCH)
        self._domain = split_entity_id(self.entity_id)[0]
        state = self.hass.states.get(self.entity_id)

        # "activate only" entities (scenes) can be turned on but not off.
        self.activate_only = self.is_activate(self.hass.states.get(self.entity_id))

        serv_switch = self.add_preload_service(SERV_SWITCH)
        self.char_on = serv_switch.configure_char(
            CHAR_ON, value=False, setter_callback=self.set_state
        )
        # Set the state so it is in sync on initial
        # GET to avoid an event storm after homekit startup
        self.async_update_state(state)

    def is_activate(self, state):
        """Check if entity is activate only."""
        if self._domain == "scene":
            return True
        return False

    def reset_switch(self, *args):
        """Reset switch to emulate activate click."""
        _LOGGER.debug("%s: Reset switch to off", self.entity_id)
        if self.char_on.value is not False:
            self.char_on.set_value(False)

    def set_state(self, value):
        """Move switch state to value if call came from HomeKit."""
        _LOGGER.debug("%s: Set switch state to %s", self.entity_id, value)
        if self.activate_only and not value:
            # Scenes cannot be turned off; ignore the request.
            _LOGGER.debug("%s: Ignoring turn_off call", self.entity_id)
            return

        params = {ATTR_ENTITY_ID: self.entity_id}
        service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
        self.call_service(self._domain, service, params)

        if self.activate_only:
            # Flip the HomeKit switch back off shortly after activation.
            call_later(self.hass, 1, self.reset_switch)

    @callback
    def async_update_state(self, new_state):
        """Update switch state after state changed."""
        self.activate_only = self.is_activate(new_state)
        if self.activate_only:
            _LOGGER.debug(
                "%s: Ignore state change, entity is activate only", self.entity_id
            )
            return

        current_state = new_state.state == STATE_ON
        if self.char_on.value is not current_state:
            _LOGGER.debug("%s: Set current state to %s", self.entity_id, current_state)
            self.char_on.set_value(current_state)
@TYPES.register("Vacuum")
class Vacuum(Switch):
    """Generate a Switch accessory for a vacuum entity."""

    def set_state(self, value):
        """Move switch state to value if call came from HomeKit."""
        _LOGGER.debug("%s: Set switch state to %s", self.entity_id, value)
        state = self.hass.states.get(self.entity_id)
        features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        # Prefer the vacuum-specific start/return services when supported,
        # falling back to the generic turn_on/turn_off.
        if value:
            sup_start = features & SUPPORT_START
            service = SERVICE_START if sup_start else SERVICE_TURN_ON
        else:
            sup_return_home = features & SUPPORT_RETURN_HOME
            service = SERVICE_RETURN_TO_BASE if sup_return_home else SERVICE_TURN_OFF

        self.call_service(VACUUM_DOMAIN, service, {ATTR_ENTITY_ID: self.entity_id})

    @callback
    def async_update_state(self, new_state):
        """Update switch state after state changed."""
        # A cleaning vacuum is shown as "on" in HomeKit.
        current_state = new_state.state in (STATE_CLEANING, STATE_ON)
        if self.char_on.value is not current_state:
            _LOGGER.debug("%s: Set current state to %s", self.entity_id, current_state)
            self.char_on.set_value(current_state)
@TYPES.register("Valve")
class Valve(HomeAccessory):
    """Generate a Valve accessory."""

    def __init__(self, *args):
        """Initialize a Valve accessory object."""
        super().__init__(*args)
        state = self.hass.states.get(self.entity_id)
        # Category and HAP valve-type value both come from the configured type.
        valve_type = self.config[CONF_TYPE]
        self.category = VALVE_TYPE[valve_type][0]

        serv_valve = self.add_preload_service(SERV_VALVE)
        self.char_active = serv_valve.configure_char(
            CHAR_ACTIVE, value=False, setter_callback=self.set_state
        )
        self.char_in_use = serv_valve.configure_char(CHAR_IN_USE, value=False)
        self.char_valve_type = serv_valve.configure_char(
            CHAR_VALVE_TYPE, value=VALVE_TYPE[valve_type][1]
        )
        # Set the state so it is in sync on initial
        # GET to avoid an event storm after homekit startup
        self.async_update_state(state)

    def set_state(self, value):
        """Move value state to value if call came from HomeKit."""
        _LOGGER.debug("%s: Set switch state to %s", self.entity_id, value)
        # Mirror the requested state into "in use" immediately.
        self.char_in_use.set_value(value)
        params = {ATTR_ENTITY_ID: self.entity_id}
        service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
        self.call_service(DOMAIN, service, params)

    @callback
    def async_update_state(self, new_state):
        """Update switch state after state changed."""
        current_state = 1 if new_state.state == STATE_ON else 0
        if self.char_active.value != current_state:
            _LOGGER.debug("%s: Set active state to %s", self.entity_id, current_state)
            self.char_active.set_value(current_state)
        if self.char_in_use.value != current_state:
            _LOGGER.debug("%s: Set in_use state to %s", self.entity_id, current_state)
            self.char_in_use.set_value(current_state)
|
ekaputra07/wpcdesk | wpcdesk/wpcdesk_threads.py | Python | gpl-3.0 | 3,754 | 0.00293 | # -*- coding: utf-8 -*-
# wpcdesk - WordPress Comment Desktop
# Copyright (C) 2012 Eka Putra - ekaputra@balitechy.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui, QtCore
import xmlrpclib
from settings import get_connection_settings
class BaseCommentThread(QtCore.QThread):
    """
    The base thread class for all connection threads used by this application.
    """
    # Messages reused by subclasses when the XML-RPC server is unreachable.
    connection_error_status = 'Connection error...'
    connection_error_msg = 'Failed connecting to server,\nMake sure connection settings is correct and you are connected to internet.'

    def get_connection(self):
        """Load saved settings and set up the XML-RPC proxy and credentials."""
        data = get_connection_settings()
        self.server_url = str(data.get('server', ''))
        self.server = xmlrpclib.Server(self.server_url)
        self.username = str(data.get('username', ''))
        self.password = str(data.get('password', ''))
class GetCommentsThread(BaseCommentThread):
    """ Get latest comments """
    # Signals consumed by the UI.
    response_received = QtCore.pyqtSignal(list)
    error_raised = QtCore.pyqtSignal(str)
    status_updated = QtCore.pyqtSignal(str)
    is_loading = QtCore.pyqtSignal(bool)

    def run(self):
        """Fetch the 20 most recent comments and emit the result signals."""
        self.get_connection()
        self.is_loading.emit(True)
        extra = {'number': 20}
        try:
            self.status_updated.emit('Connecting to %s...' % self.server_url)
            comments = self.server.wp.getComments(1, self.username, self.password, extra)
        except Exception:
            # Network/auth failure: surface it via the error signals.
            self.is_loading.emit(False)
            self.status_updated.emit(self.connection_error_status)
            self.error_raised.emit(self.connection_error_msg)
        else:
            comments_num = len(comments)
            self.status_updated.emit('%s comments received.' % str(comments_num))
            self.response_received.emit(comments)
            self.is_loading.emit(False)
class EditCommentThread(BaseCommentThread):
    """ Edit single comment """
    is_loading = QtCore.pyqtSignal(bool)
    is_success = QtCore.pyqtSignal(bool)

    def set_data(self, data):
        """Store the comment fields to submit to wp.editComment."""
        self.data = data

    def set_comment_id(self, comment_id):
        """Store the id of the comment to edit."""
        self.comment_id = comment_id

    def run(self):
        """Submit the edit and emit is_success accordingly."""
        self.get_connection()
        self.is_loading.emit(True)
        try:
            status = self.server.wp.editComment(1, self.username, self.password, self.comment_id, self.data)
        except Exception:
            self.is_success.emit(False)
        else:
            self.is_success.emit(True)
        self.is_loading.emit(False)
class DeleteCommentThread(BaseCommentThread):
    """ Delete comment """
    is_loading = QtCore.pyqtSignal(bool)
    is_success = QtCore.pyqtSignal(bool)

    def __init__(self, data, *args, **kwargs):
        """data: mapping holding the 'comment_id' of the comment to delete."""
        super(DeleteCommentThread, self).__init__(*args, **kwargs)
        self.comment_id = data['comment_id']

    def run(self):
        """Delete the comment on the server and emit is_success accordingly."""
        self.get_connection()
        self.is_loading.emit(True)
        try:
            status = self.server.wp.deleteComment(1, self.username, self.password, int(self.comment_id))
        except:
            self.is_success.emit(False)
        else:
            self.is_success.emit(True)
        self.is_loading.emit(False)
|
pythontech/ptscrape | ptscrape.py | Python | lgpl-2.1 | 2,668 | 0.003373 | #=======================================================================
# Screen-scraping framework
#=======================================================================
import logging
try:
import bs4 as soup
except ImportError:
import BeautifulSoup as soup
import urllib2
from urllib import urlencode
from urlparse import urljoin
import cookielib
import os
import re
_log = logging.getLogger(__name__)
class PageSource(object):
    """Fetch pages over HTTP with cookie support, optionally caching raw
    responses on disk so a scraping session can be replayed offline."""

    def __init__(self, cachedir=None, replay=False):
        self.cachedir = cachedir
        self.replay = replay
        self.jar = cookielib.CookieJar()
        self.agent = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.jar))
        # urllib2.HTTPRedirectHandler())

    def get(self, url, query=None, tag=None):
        '''HTTP GET request on a URL with optional query'''
        if query:
            url += '?' + urlencode(query)
        _log.info('GET %s', url)
        return self._transact(url, tag=tag)

    def post(self, url, query=None, tag=None):
        '''HTTP POST request on a URL with optional query'''
        _log.info('POST %s', url)
        data = ''
        if query:
            data = urlencode(query)
        return self._transact(url, data, tag=tag)

    def _transact(self, url, data=None, tag=None):
        '''Perform an HTTP request, or fetch page from cache.

        tag names the cache entry; defaults to the URL basename.'''
        if tag is None:
            tag = os.path.basename(url)
        if self.replay:
            content = self.read_cache(tag)
        else:
            doc = self.agent.open(url, data)
            _log.info('info %r', doc.info())
            content = doc.read()
            if self.cachedir:
                self.write_cache(tag, content)
        doc = soup.BeautifulSoup(content)
        return Page(url, doc)

    def read_cache(self, tag):
        '''Return the raw cached bytes stored under tag.'''
        cachefile = os.path.join(os.path.expanduser(self.cachedir), tag)
        with open(cachefile, 'rb') as f:
            content = f.read()
        return content

    def write_cache(self, tag, content):
        '''Store raw bytes under tag in the cache directory.'''
        cachefile = os.path.join(os.path.expanduser(self.cachedir), tag)
        with open(cachefile, 'wb') as f:
            f.write(content)
class Page(object):
    """A fetched page: the URL it came from plus its parsed document."""

    def __init__(self, url, doc):
        self.url = url
        self.doc = doc
def bs_cdata(tag):
    '''Get the character data inside a BeautifulSoup element, ignoring all markup'''
    text_nodes = tag.findAll(text=True)
    return ''.join(text_nodes)
if __name__=='__main__':
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('--replay', action='store_true')
ap.add_argument('url')
args = ap.parse_args()
logging.basicConfig(level=logging.INFO)
|
DONIKAN/django | tests/defer/tests.py | Python | bsd-3-clause | 11,262 | 0.000533 | from __future__ import unicode_literals
from django.db.models.query_utils import DeferredAttribute, InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
    def assert_delayed(self, obj, num):
        """
        Instances with deferred fields look the same as normal instances when
        we examine attribute values. Therefore, this method counts the
        deferred fields on the instance and asserts there are exactly ``num``.
        """
        deferred = sum(
            1 for field in obj._meta.fields
            if isinstance(obj.__class__.__dict__.get(field.attname),
                          DeferredAttribute)
        )
        self.assertEqual(deferred, num)
class DeferTests(AssertionMixin, TestCase):
    @classmethod
    def setUpTestData(cls):
        # One Secondary row and a Primary pointing at it, shared by all tests.
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
    def test_defer(self):
        # defer() delays exactly the named local fields; deferring a related
        # model's field defers nothing on the primary model itself.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        self.assert_delayed(qs.defer("related__first")[0], 0)
        self.assert_delayed(qs.defer("name").defer("value")[0], 2)
    def test_only(self):
        # only() defers every field except the ones named (plus the pk).
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name")[0], 2)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
        # A later only() replaces the earlier field list.
        self.assert_delayed(qs.only("name").only("value")[0], 2)
        self.assert_delayed(qs.only("related__first")[0], 2)
        # Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key see #15494
        self.assert_delayed(qs.only("pk")[0], 3)
        # You can use 'pk' with reverse foreign key lookups.
        self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 3)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = 'Cannot pass None as an argument to only().'
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
# User values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(Primary.objects.defer("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_only_values_does_not_defer(self):
self.assertEqual(Primary.objects.only("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_select_related_raises_invalid_query(self):
# When we defer a field and also select_related it, the query is
# invalid and raises an exception.
with self.assertRaises(InvalidQuery):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
with self.assertRaises(InvalidQuery):
Primary.objects.only("name").select_related("related")[0]
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# With a depth-based select_related, all deferred ForeignKeys are
# deferred instead of traversed.
with self.assertNumQueries(3):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
| "p1", "a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Chi | ld.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# on an inherited model, its PK is also fetched, hence '3' deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
    @classmethod
    def setUpTestData(cls):
        # One Secondary row plus a single BigChild referencing it; every test
        # in this class looks the instance up by name="b1".
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
    def test_defer_baseclass_when_subclass_has_added_field(self):
        # You can defer a field on a baseclass
        obj = BigChild.objects.defer("value").get(name="b1")
        # Only the deferred base-class field counts as delayed; accessing it
        # afterwards still returns the stored value.
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
    def test_defer_subclass(self):
        # You can defer a field on a subclass
        obj = BigChild.objects.defer("other").get(name="b1")
        # The subclass-specific field is delayed but still readable on demand.
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
# when inherited model, its PK is also fetched, hence '4' deferred fields.
self.assert_delayed(obj, 4)
|
freakboy3742/pyxero | xero/__init__.py | Python | bsd-3-clause | 59 | 0 | from .api import Xero # NOQA: F401
# Package version string (garbled column separators removed).
__version__ = "0.9.3"
|
ashang/calibre | src/calibre/devices/mtp/defaults.py | Python | gpl-3.0 | 1,629 | 0.006139 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import traceback, re
from calibre.constants import iswindows
class DeviceDefaults(object):
    """Per-vendor default device settings, matched by USB vendor/product id.

    Calling an instance with a device returns the settings dict of the first
    matching rule, or {} when no rule matches.
    """

    def __init__(self):
        # Each rule is a (match-tests, settings) pair; the first rule whose
        # tests all pass supplies the defaults.
        self.rules = (
            # Amazon devices
            ({'vendor': 0x1949}, {
                'format_map': ['azw3', 'mobi', 'azw',
                               'azw1', 'azw4', 'pdf'],
                'send_to': ['documents', 'books', 'kindle'],
                }
             ),
        )

    def __call__(self, device, driver):
        if iswindows:
            # On Windows the device is an identifier string; parse the
            # hex vendor/product ids out of it.
            vid = pid = 0xffff
            m = re.search(r'(?i)vid_([0-9a-fA-F]+)&pid_([0-9a-fA-F]+)', device)
            if m is not None:
                try:
                    vid, pid = int(m.group(1), 16), int(m.group(2), 16)
                except:
                    traceback.print_exc()
        else:
            vid, pid = device.vendor_id, device.product_id

        for rule in self.rules:
            tests = rule[0]
            matches = True
            for k, v in tests.iteritems():
                if k == 'vendor' and v != vid:
                    matches = False
                    break
                if k == 'product' and v != pid:
                    matches = False
                    break
            if matches:
                return rule[1]
        # No rule applied: no defaults for this device.
        return {}
|
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/papyon/service/AddressBook/scenario/contacts/check_pending_invite.py | Python | gpl-3.0 | 107 | 0.018692 | ../../../../../../../../share/pyshared/papyon/service/AddressBook/scenario/contac | ts/check_pen | ding_invite.py |
geofrenzy/utm-mbsb | ros-src/catkin_ws/build/catkin_generated/order_packages.py | Python | apache-2.0 | 323 | 0.003096 | # generat | ed from catkin/cmake/template/order_packages.context.py.in
# Values injected into this template by the catkin CMake configure step.
source_root_dir = "/opt/geofrenzy/src/catkin_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/opt/ros/kinetic".split(';') if "/opt/ros/kinetic" != "" else []
|
doduytrung/odoo-8.0 | openerp/addons/base/ir/ir_model.py | Python | agpl-3.0 | 61,894 | 0.006301 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
# Selection helper used by ir.model.fields: list the concrete, non-deprecated
# field classes defined in openerp.osv.fields as sorted (name, name) pairs.
def _get_fields_type(self, cr, uid, context=None):
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # break on it. See bug 939653.
    return sorted([(k,k) for k,v in fields.__dict__.iteritems()
                   if type(v) == types.TypeType and \
                      issubclass(v, fields._column) and \
                      v != fields._column and \
                      not v._deprecated and \
                      not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
    #pseudo-method used by fields.function in ir.model/ir.model.fields
    # For each record, build a comma-separated, sorted list of the installed
    # modules that declare an XML id for it.
    module_pool = self.pool["ir.module.module"]
    installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
    installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
    installed_modules = set(x['name'] for x in installed_module_names)
    result = {}
    xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
    for k, v in xml_ids.iteritems():
        # The module part of an XML id is everything before the first dot.
        result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
    return result
class unknown(models.AbstractModel):
    """
    Abstract model used as a substitute for relational fields with an unknown
    comodel.
    """
    # Placeholder technical name registered in the model registry.
    _name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
    def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
        # Search support for the computed 'osv_memory' flag; only equality
        # tests make sense for a boolean, so other operators are rejected.
        if not domain:
            return []
        __, operator, value = domain[0]
        if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
        value = bool(value) if operator == '=' else not bool(value)
        # Compute the flag for every model and translate it into an id domain.
        all_model_ids = self.search(cr, uid, [], context=context)
        is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
        return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
    def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
        # Function field: ir.model ids of the models each record _inherits
        # from (delegation inheritance), or [] when there are none.
        res = {}
        for model in self.browse(cr, uid, ids, context=context):
            res[model.id] = []
            inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
            if inherited_models:
                res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
        return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
    def _check_model_name(self, cr, uid, ids, context=None):
        # Constraint: manual (custom) models must be named 'x_...', and every
        # model name may only contain letters, digits, dots and underscores.
        for model in self.browse(cr, uid, ids, context=context):
            if model.state=='manual':
                if not model.model.startswith('x_'):
                    return False
            if not re.match('^[a-z_A-Z0-9.]+$',model.model):
                return False
        return True
    def _model_name_msg(self, cr, uid, ids, context=None):
        # Error message displayed when _check_model_name rejects a record.
        return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
    # overridden to allow searching both on model name (model field)
    # and model description (name field)
    def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        """Match *name* against either the technical name or the description."""
        if args is None:
            args = []
        domain = args + ['|', ('model', operator, name), ('name', operator, name)]
        return self.name_get(cr, name_get_uid or uid,
                             super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
                             context=context)
    def _drop_table(self, cr, uid, ids, context=None):
        # Drop the database table (or view) backing each model.
        # NOTE(review): the table name is interpolated into the SQL string; it
        # comes from the registry (model_pool._table), not directly from user
        # input — confirm before exposing this path to user-controlled data.
        for model in self.browse(cr, uid, ids, context):
            model_pool = self.pool[model.model]
            cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
            result = cr.fetchone()
            if result and result[0] == 'v':
                cr.execute('DROP view %s' % (model_pool._table,))
            elif result and result[0] == 'r':
                cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
        return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
id |
Napoleon314/Venus3D | generate_projects.py | Python | mit | 1,373 | 0.020393 | import os, sys, multiprocessing, subprocess
from build_util import *
if __name__ == "__main__":
    # Parse the command line and derive the per-compiler build description.
    cfg = cfg_from_argv(sys.argv)
    bi = build_info(cfg.compiler, cfg.archs, cfg.cfg)
    print("Starting build project: " + build_cfg.project_name + " ...")
    # Forward every configured name/path to CMake as a cache variable.
    additional_options = "-DCFG_PROJECT_NAME:STRING=\"%s\"" % build_cfg.project_name
    additional_options += " -DCFG_BINARY_PATH:STRING=\"%s\"" % build_cfg.binary_path
    additional_options += " -DCFG_BUILD_PATH:STRING=\"%s\"" % build_cfg.build_path
    additional_options += " -DCFG_DEPENDENT_PATH:STRING=\"%s\"" % build_cfg.dependent_path
    additional_options += " -DCFG_DOCUMENT_PATH:STRING=\"%s\"" % build_cfg.document_path
    additional_options += " -DCFG_EXTERNAL_PATH:STRING=\"%s\"" % build_cfg.external_path
    additional_options += " -DCFG_INCLUDE_PATH:STRING=\"%s\"" % build_cfg.include_path
    additional_options += " -DCFG_SOURCE_PATH:STRING=\"%s\"" % build_cfg.source_path
    additional_options += " -DCFG_TEST_PATH:STRING=\"%s\"" % build_cfg.test_path
    additional_options += " -DCFG_INSTALL_PATH:STRING=\"%s\"" % build_cfg.install_path
    additional_options += " -DCFG_INTRINSICS_LEVEL:STRING=\"%d\"" % build_cfg.intrinsics_level
    print("Generating %s..." % (build_cfg.project_name))
    # Generate one project per configured compiler.
    for info in bi.compilers:
        build_project(build_cfg.project_name, build_cfg.build_path, bi, "../cmake", info, False, False, additional_options)
|
DavideTonin99/pygameoflife | main.py | Python | mit | 7,534 | 0.006769 | """
Simulation of Game of Life with pygame
Instructions:
Press ESC or F4 to quit the game
Press RETURN to restart the game
Press SPACE to stop or resume the game
Press "p" or "+" to zoom in
Press "m" or "-" to zoom out
Press one of the letters below to change the color of the alive cells:
- r: red
- b: blue
- g: green
- c: cyan
- w: white
When the game is stopped, you can click and move the mouse to select new cells
"""
import numpy as np
import pygame
import random
from pygame.locals import *
__author__ = "Davide Tonin"
# Global simulation state shared by the helper functions below.
game_ended = False      # set True to leave the main loop
game_stop = False       # paused: board frozen, cells editable with the mouse
board_changed = False   # redraw once without advancing a generation
CELL_SIZE = 20          # pixel size of one cell (changed by zooming)
FPS = 60
total_cells, alive_cells = 0, 0
game_board = None       # 2-D numpy array; 0 = dead, 1..6 = alive (cell age)
color = "red"           # current color scheme for alive cells
mouse_clicked = False
def init_board():
    """Fill the global board with a random 0/1 grid sized to the window."""
    global game_board
    rows, cols = HEIGHT // CELL_SIZE, WIDTH // CELL_SIZE
    game_board = np.random.randint(0, 2, size=(rows, cols))
def game_board_transition():
    """Advance the global board by one generation and recount alive cells.

    Cell values encode age (0 = dead, 1..6 = alive).  Neighbour counts are
    taken from a snapshot of the board made before any cell is updated, so
    the update is simultaneous for the whole grid.
    """
    global game_board, alive_cells
    snapshot = np.copy(game_board)
    rows = snapshot.shape[0]
    alive_cells = 0
    for r in range(rows):
        cols = snapshot[r].shape[0]
        for c in range(cols):
            neighbours = 0
            for dr in (-1, 0, 1):
                for dc in (-1, 0, 1):
                    if dr == 0 and dc == 0:
                        continue
                    rr, cc = r + dr, c + dc
                    if 0 <= rr < rows and 0 <= cc < cols and snapshot[rr][cc] > 0:
                        neighbours += 1
            if game_board[r][c] > 0:
                # Survival with 2 or 3 neighbours ages the cell (capped at 6);
                # anything else kills it.
                if neighbours == 2 or neighbours == 3:
                    if game_board[r][c] < 6:
                        game_board[r][c] += 1
                else:
                    game_board[r][c] = 0
            elif neighbours == 3:
                # Birth: exactly three living neighbours revive a dead cell.
                game_board[r][c] = 1
            if game_board[r][c] > 0:
                alive_cells += 1
def resize_board(action):
    """Grow ("+") or shrink the cell size by one pixel and rebuild the board.

    Cells that exist on both the old and new grid keep their state; cells
    that are new (the new grid can be larger than the old one) are seeded
    randomly.
    """
    global game_board, CELL_SIZE
    CELL_SIZE += 1 if action == "+" else -1
    new_game_board = np.zeros((HEIGHT // CELL_SIZE, WIDTH // CELL_SIZE), dtype=int)
    for row in range(new_game_board.shape[0]):
        for column in range(new_game_board[row].shape[0]):
            try:
                new_game_board[row][column] = game_board[row][column]
            except IndexError:
                # The cell did not exist on the old board: seed it randomly.
                # (Was a bare `except:`, which hid unrelated errors.)
                new_game_board[row][column] = random.randint(0, 1)
    game_board = np.copy(new_game_board)
def draw_game_board():
    """Render each living cell as a filled square, brightness scaled by age.

    Fix: removed the unused `draw_array` local that the original allocated
    and never read.
    """
    global game_window, game_board, color
    for row in range(game_board.shape[0]):
        for column in range(game_board[row].shape[0]):
            if game_board[row][column] > 0:
                # Channel intensity grows with the cell's age (1..6 -> 40..240).
                if color == "red":
                    alive_color = (game_board[row][column] * 40, 0, 0)
                elif color == "green":
                    alive_color = (0, game_board[row][column] * 40, 0)
                elif color == "blue":
                    alive_color = (0, 0, game_board[row][column] * 40)
                elif color == "cyan":
                    alive_color = (0, game_board[row][column] * 40, game_board[row][column] * 40)
                elif color == "white":
                    alive_color = (
                        game_board[row][column] * 40, game_board[row][column] * 40, game_board[row][column] * 40)
                pygame.draw.rect(game_window, alive_color, [column * CELL_SIZE, row * CELL_SIZE, CELL_SIZE, CELL_SIZE])
def select_cells():
    """Make the cell under the mouse cursor (more) alive while painting.

    Called while the game is paused and the mouse button is held down.
    """
    global alive_cells
    row, col = pygame.mouse.get_pos()[1] // CELL_SIZE, pygame.mouse.get_pos()[0] // CELL_SIZE
    try:
        if game_board[row][col] < 6:
            game_board[row][col] += 1
            if game_board[row][col] == 1:
                alive_cells += 1
    except IndexError:
        # The mouse maps outside the board: nothing to toggle.
        # (Was a bare `except: pass`, which also hid unrelated errors.)
        pass
if __name__ == '__main__':
    # Window/font setup: fullscreen at the desktop resolution.
    pygame.init()
    GAME_RESOLUTION = WIDTH, HEIGHT = pygame.display.Info().current_w, pygame.display.Info().current_h
    game_window = pygame.display.set_mode(GAME_RESOLUTION, FULLSCREEN | HWSURFACE | DOUBLEBUF | HWACCEL)
    pygame.display.set_caption("PyGameOfLife, " + __author__)
    clock = pygame.time.Clock()
    pygame.font.init()
    text_settings = pygame.font.SysFont("Open Sans", 25)
    init_board()
    stop_transition = True
    # Main loop: handle input, advance/edit the board, draw board and HUD.
    while not game_ended:
        # Event handler
        for event in pygame.event.get():
            if event.type == pygame.QUIT: game_ended = True
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE or event.key == K_F4: game_ended = True
                if event.key == K_RETURN: init_board(); game_stop = False; board_changed = True
                if event.key == K_SPACE: game_stop = not game_stop
                if event.key == K_r: color = "red"; board_changed = True
                if event.key == K_g: color = "green"; board_changed = True
                if event.key == K_b: color = "blue"; board_changed = True
                if event.key == K_c: color = "cyan"; board_changed = True
                if event.key == K_w: color = "white"; board_changed = True
                if event.key == K_p or event.key == K_PLUS: resize_board("+"); game_stop = False; board_changed = True
                if event.key == K_m or event.key == K_MINUS: resize_board("-"); game_stop = False; board_changed = True
            if event.type == MOUSEBUTTONDOWN:
                mouse_clicked = True
            if event.type == MOUSEBUTTONUP:
                mouse_clicked = False
        # Clear the frame, then either advance a generation or let the user
        # paint cells while paused.
        pygame.Surface.fill(game_window, (0, 0, 0))
        if not game_stop or board_changed:
            if board_changed:
                board_changed = False
            else:
                game_board_transition()
        elif game_stop:
            if mouse_clicked:
                select_cells()
        draw_game_board()
        # HUD: FPS, total cell count and alive-cell percentage.
        total_cells = (WIDTH // CELL_SIZE) * (HEIGHT // CELL_SIZE)
        game_window.blit(text_settings.render("FPS: " + str(round(clock.get_fps(), 2)), True, (255, 255, 255)),
                         (20, 20))
        game_window.blit(text_settings.render("Total cells: " + str(total_cells), True, (255, 255, 255)), (20, 50))
        game_window.blit(text_settings.render(
            "Alive cells: " + str(alive_cells) + ", " + str(round(alive_cells * 100 / total_cells, 2)) + "%", True,
            (255, 255, 255)), (20, 80))
        pygame.display.flip()
        clock.tick(FPS)
    pygame.quit()
    exit()
|
apache/libcloud | libcloud/test/dns/test_buddyns.py | Python | apache-2.0 | 6,194 | 0.000646 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_BUDDYNS
from libcloud.dns.drivers.buddyns import BuddyNSDNSDriver
from libcloud.utils.py3 import httplib
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.base import Zone
class BuddyNSDNSTests(unittest.TestCase):
    """Driver-level tests for the BuddyNS DNS driver, backed by canned HTTP fixtures."""

    def setUp(self):
        BuddyNSMockHttp.type = None
        BuddyNSDNSDriver.connectionCls.conn_class = BuddyNSMockHttp
        self.driver = BuddyNSDNSDriver(*DNS_PARAMS_BUDDYNS)
        self.test_zone = Zone(
            id="test.com",
            type="master",
            ttl=None,
            domain="test.com",
            extra={},
            driver=self,
        )

    def test_list_zones_empty(self):
        BuddyNSMockHttp.type = "EMPTY_ZONES_LIST"
        zones = self.driver.list_zones()
        self.assertEqual(zones, [])

    def test_list_zones_success(self):
        BuddyNSMockHttp.type = "LIST_ZONES"
        zones = self.driver.list_zones()
        self.assertEqual(len(zones), 2)
        zone = zones[0]
        self.assertEqual(zone.id, "microsoft.com")
        self.assertIsNone(zone.type)
        self.assertEqual(zone.domain, "microsoft.com")
        self.assertIsNone(zone.ttl)
        zone = zones[1]
        self.assertEqual(zone.id, "google.de")
        self.assertIsNone(zone.type)
        self.assertEqual(zone.domain, "google.de")
        self.assertIsNone(zone.ttl)

    def test_delete_zone_zone_does_not_exist(self):
        BuddyNSMockHttp.type = "DELETE_ZONE_ZONE_DOES_NOT_EXIST"
        try:
            self.driver.delete_zone(zone=self.test_zone)
        except ZoneDoesNotExistError as e:
            self.assertEqual(e.zone_id, self.test_zone.id)
        else:
            self.fail("Exception was not thrown")

    def test_delete_zone_success(self):
        BuddyNSMockHttp.type = "DELETE_ZONE_SUCCESS"
        status = self.driver.delete_zone(zone=self.test_zone)
        self.assertTrue(status)

    def test_get_zone_zone_does_not_exist(self):
        BuddyNSMockHttp.type = "GET_ZONE_ZONE_DOES_NOT_EXIST"
        try:
            self.driver.get_zone(zone_id="zonedoesnotexist.com")
        except ZoneDoesNotExistError as e:
            self.assertEqual(e.zone_id, "zonedoesnotexist.com")
        else:
            self.fail("Exception was not thrown")

    def test_get_zone_success(self):
        BuddyNSMockHttp.type = "GET_ZONE_SUCCESS"
        zone = self.driver.get_zone(zone_id="myexample.com")
        self.assertEqual(zone.id, "myexample.com")
        self.assertEqual(zone.domain, "myexample.com")
        self.assertIsNone(zone.type)
        self.assertIsNone(zone.ttl)
        self.assertEqual(zone.driver, self.driver)

    def test_create_zone_success(self):
        BuddyNSMockHttp.type = "CREATE_ZONE_SUCCESS"
        zone = self.driver.create_zone(domain="microsoft.com")
        self.assertEqual(zone.id, "microsoft.com")
        self.assertEqual(zone.domain, "microsoft.com")
        self.assertIsNone(zone.type)
        self.assertIsNone(zone.ttl)

    def test_create_zone_zone_already_exists(self):
        BuddyNSMockHttp.type = "CREATE_ZONE_ZONE_ALREADY_EXISTS"
        try:
            self.driver.create_zone(domain="newzone.com", extra={"master": "13.0.0.1"})
        except ZoneAlreadyExistsError as e:
            self.assertEqual(e.zone_id, "newzone.com")
        else:
            self.fail("Exception was not thrown")
class BuddyNSMockHttp(MockHttp):
    # Canned HTTP responses: each handler name encodes the request path plus
    # the scenario selected via the class-level `type` attribute.
    fixtures = DNSFileFixtures("buddyns")

    def _api_v2_zone_EMPTY_ZONES_LIST(self, method, url, body, headers):
        body = self.fixtures.load("empty_zones_list.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_LIST_ZONES(self, method, url, body, headers):
        body = self.fixtures.load("list_zones.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_zonedoesnotexist_com_GET_ZONE_ZONE_DOES_NOT_EXIST(
        self, method, url, body, headers
    ):
        # 404 drives ZoneDoesNotExistError in the driver.
        body = self.fixtures.load("zone_does_not_exist.json")
        return 404, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_myexample_com_GET_ZONE_SUCCESS(self, method, url, body, headers):
        body = self.fixtures.load("get_zone_success.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_test_com_DELETE_ZONE_SUCCESS(self, method, url, body, headers):
        body = self.fixtures.load("delete_zone_success.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_test_com_DELETE_ZONE_ZONE_DOES_NOT_EXIST(
        self, method, url, body, headers
    ):
        body = self.fixtures.load("zone_does_not_exist.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_CREATE_ZONE_SUCCESS(self, method, url, body, headers):
        body = self.fixtures.load("create_zone_success.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]

    def _api_v2_zone_CREATE_ZONE_ZONE_ALREADY_EXISTS(self, method, url, body, headers):
        body = self.fixtures.load("zone_already_exists.json")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
|
francocurotto/GraphSLAM | src/python-helpers/v3-real-data/data/Parque OHiggins/OHigginsRaw2g2o.py | Python | gpl-3.0 | 3,023 | 0.005624 | # imports
import shlex
import math
def ohigginsRaw2g2o(infoOdomPos, infoOdomAng, infoPointSen, dataDir, dataSkip, dataSize):
    """Convert raw dead-reckoning and range-bearing logs into a g2o graph.

    Reads ``deadReckoning.dat`` and ``measurement.dat`` from *dataDir* (which
    must end with a path separator) and writes ``ohiggins.g2o`` next to them.

    Parameters:
        infoOdomPos, infoOdomAng: information-matrix weights for the odometry
            position and angle components of EDGE_SE2 entries.
        infoPointSen: information-matrix weight for EDGE_SE2_XY landmark edges.
        dataDir: directory containing the input files.
        dataSkip: keep every ``dataSkip``-th pose.
        dataSize: stop adding odometry edges once this many have been written.
    """
    # filenames
    inDeadReckon = dataDir + "deadReckoning.dat"
    inMeasurement = dataDir + "measurement.dat"
    outG2O = dataDir + "ohiggins.g2o"
    # Read both logs fully; use context managers so the handles are closed
    # (the original leaked both input file objects).
    with open(inDeadReckon) as f:
        odometry = [line.rstrip('\n') for line in f]
    with open(inMeasurement) as f:
        measurements = [line.rstrip('\n') for line in f]
    poseID = 0
    landID = len(odometry)  # landmark ids start after the pose id range
    j = 1  # measurement cursor; index 0 is the header line
    count = 0
    with open(outG2O, 'w') as fg2o:
        for i in range(len(odometry)):
            odomLine = odometry[i]
            # skip comment lines, and stop emitting once enough edges exist
            if (odomLine[0] == '#' or count > dataSize):
                continue
            # data skip
            if poseID % dataSkip == 0:
                odomWords = shlex.split(odomLine)
                # check not last pose
                if i + dataSkip < len(odometry):
                    # odometry: express the next kept pose in this pose's frame
                    nextWords = shlex.split(odometry[i + dataSkip])
                    x1 = float(odomWords[2])
                    y1 = float(odomWords[3])
                    a1 = float(odomWords[4])
                    x2 = float(nextWords[2])
                    y2 = float(nextWords[3])
                    a2 = float(nextWords[4])
                    dx = (x2 - x1)*math.cos(a1) + (y2 - y1)*math.sin(a1)
                    dy = -(x2 - x1)*math.sin(a1) + (y2 - y1)*math.cos(a1)
                    # wrap the heading difference into [-pi, pi)
                    dt = ((a2 - a1 + math.pi) % (2*math.pi)) - math.pi
                    fg2o.write("EDGE_SE2 " + str(poseID) + " " + str(poseID+dataSkip) + " " + str(dx) + " " +
                               str(dy) + " " + str(dt) + " " + str(infoOdomPos) + " 0 0 " + str(infoOdomPos) + " 0 " + str(infoOdomAng) + "\n")
                    count = count + 1
                # measurements taken at exactly this pose's timestamp.
                # Guard the cursor: the original indexed measurements[j]
                # unconditionally and crashed once the log was exhausted.
                if j < len(measurements):
                    measWords = shlex.split(measurements[j])
                    odomTime = float(odomWords[0]) + (float(odomWords[1])*1e-9)
                    measTime = float(measWords[0]) + (float(measWords[1])*1e-9)
                    while (j < len(measurements) and measTime <= odomTime):
                        measWords = shlex.split(measurements[j])
                        measTime = float(measWords[0]) + (float(measWords[1])*1e-9)
                        if (measTime == odomTime):
                            # convert range/bearing into the sensor frame x/y
                            mr = float(measWords[2])
                            mt = float(measWords[3])
                            lx = mr*math.cos(mt)
                            ly = mr*math.sin(mt)
                            fg2o.write("EDGE_SE2_XY " + str(poseID) + " " + str(landID) + " " + str(lx) +
                                       " " + str(ly) + " " + str(infoPointSen) + " 0 " + str(infoPointSen) + "\n")
                            landID = landID + 1
                        j = j + 1
            poseID = poseID + 1
        # anchor the first pose so the graph is not gauge-free
        fg2o.write("FIX " + str(0) + "\n")
|
staranjeet/fjord | vendor/packages/translate-toolkit/translate/storage/versioncontrol/darcs.py | Python | bsd-3-clause | 4,465 | 0.002912 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2008,2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
from translate.storage.versioncontrol import GenericRevisionControlSystem
from translate.storage.versioncontrol import run_command, prepare_filelist, youngest_ancestor
def is_available():
    """Return True when the darcs executable can be run."""
    return run_command(["darcs", "--version"])[0] == 0
class darcs(GenericRevisionControlSystem):
"""Class to manage items under revision control of darcs."""
RCS_METADIR = "_darcs"
SCAN_PARENTS = True
def update(self, revision=None, needs_revert=True):
"""Does a clean update of the given path
:param revision: ignored for darcs
"""
output_revert = ""
if needs_revert:
# revert local changes (avoids conflicts)
command = ["darcs", "revert", "--repodir", self.root_dir,
"-a", self.location_rel]
exitcode, output_revert, error = run_command(command)
if exitcode != 0:
raise IOError("[Darcs] error running '%s': %s" % (command, error))
# pull new patches
command = ["darcs", "pull", "--repodir", self.root_d | ir, "-a"]
exitcode, output_pull, error = run_command(command)
if exitcode != 0:
raise IOError("[Darcs] error running | '%s': %s" % (command, error))
return output_revert + output_pull
def add(self, files, message=None, author=None):
"""Add and commit files."""
files = prepare_filelist(files)
command = ["darcs", "add", "--repodir", self.root_dir] + files
exitcode, output, error = run_command(command)
if exitcode != 0:
raise IOError("[Darcs] Error running darcs command '%s': %s" \
% (command, error))
# go down as deep as possible in the tree to avoid accidental commits
# TODO: explicitly commit files by name
ancestor = youngest_ancestor(files)
return output + type(self)(ancestor).commit(message, author)
def commit(self, message=None, author=None):
"""Commits the file and supplies the given commit message if present"""
if message is None:
message = ""
# set change message
command = ["darcs", "record", "-a", "--repodir", self.root_dir,
"--skip-long-comment", "-m", message]
# add the 'author' to the list of arguments if it was given
if author:
command.extend(["--author", author])
# the location of the file is the last argument
command.append(self.location_rel)
exitcode, output_record, error = run_command(command)
if exitcode != 0:
raise IOError("[Darcs] Error running darcs command '%s': %s" \
% (command, error))
# push changes
command = ["darcs", "push", "-a", "--repodir", self.root_dir]
exitcode, output_push, error = run_command(command)
if exitcode != 0:
raise IOError("[Darcs] Error running darcs command '%s': %s" \
% (command, error))
return output_record + output_push
def getcleanfile(self, revision=None):
"""Get a clean version of a file from the darcs repository
:param revision: ignored for darcs
"""
import os
filename = os.path.join(self.root_dir, self.RCS_METADIR, 'pristine',
self.location_rel)
try:
darcs_file = open(filename)
output = darcs_file.read()
darcs_file.close()
except IOError, error:
raise IOError("[Darcs] error reading original file '%s': %s" % \
(filename, error))
return output
|
neuront/pyjhashcode | jhashcode/__init__.py | Python | mit | 594 | 0.003367 | import sys
def _unknown_hash(_):
raise TypeError('Unsupported | type')
def hash_str_unicode(s):
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
def hash_int(i):
return i
if int(sys.version[0]) > 2:
_TP_MAPPING = {
bytes: hash_str_unicode,
str: hash_str_unicode,
int: hash_int,
}
else:
_TP_MAPPING = {
str: hash_str_unicode,
unicode: hash_str_unicode,
int: hash_int,
}
def hashcode | (o):
return _TP_MAPPING.get(type(o), _unknown_hash)(o)
|
jn2840/bitcoin | contrib/devtools/fix-copyright-headers.py | Python | mit | 1,492 | 0.015416 | #!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that | were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Beardcoi | n Core developers
it will change it to
// Copyright (c) 2009-2014 The Beardcoin Core developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Beardcoin/%s The Beardcoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
|
sebastiandev/pyragraph | filters/words.py | Python | mit | 595 | 0.003361 | # -*- coding: utf-8 -*-
from .base import Filter, skip_empty_data, ensure_list_input
class | StopWordFilter(Filter):
"""
Filters stop words from the input tokens. Input is expected to be a list
"""
def __init__(self, stopwords, next_filter=None):
super(StopWordFilter, self).__init__(next_filter)
self._stopwords = set(stopwords) | # sets lookup works as a dict and makes search time O(1)
@skip_empty_data(default=[])
@ensure_list_input
def _apply_filter(self, tokens):
return [t for t in tokens if t not in self._stopwords]
|
EliotBerriot/trax | trax/users/apps.py | Python | mit | 270 | 0 | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'trax.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in: |
Users system checks
Users signal registration
"""
p | ass
|
nickedes/Ni-Algos | Practice/Hr/Data Structures/Stack/Game of two stacks.py | Python | gpl-2.0 | 567 | 0 | def scoring(a, b, n, m, x):
i, j, sumNum = 0, 0, 0
while i < n and (sumNum+a[i]) <= x:
sumN | um += a[i]
i += 1
ans = i
while j < m and i >= 0:
sumNum += b[j]
j += 1
while sumNum > x and i > 0:
i -= 1
sumNum -= a[i]
if sumNum <= x and i+j > ans:
ans = i+j
return ans
g = int(input())
for i in range(g):
n, m, x = list(map(int, input().split()))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(scoring(a, b, n, | m, x))
|
pwollstadt/IDTxl | demos/demo_active_information_storage.py | Python | gpl-3.0 | 591 | 0 | # Im | port classes
from idtxl.active_information_storage import ActiveInformationStorage
from idtxl.data import Data
# a) Generate test data
data = Data()
data.generate_mute_data(n_samples=1000, n_replications=5)
# b) Initialise analysis object and define settings
network_analysis = ActiveInformationStorage()
settings = {'cmi_estimator': 'JidtGaussianCMI',
'max_lag': 5}
# c) Run analysis
results = network_analysis.anal | yse_network(settings=settings, data=data)
# d) Plot list of processes with significant AIS to console
print(results.get_significant_processes(fdr=False))
|
openairproject/sensor-esp32 | bin/firmware_installer.py | Python | gpl-3.0 | 655 | 0.035115 | 1. ask for UART NAME
2.
make TEMP_DIR
git clone https://github.com/espressif/esptool.git -o TEMP_DIR
3.
TEMP_DIR/esptool.py --port /dev/tty.SLAB_USBtoUART --after no_reset chip_id
4.
fetc | h https://openairproject.com/ota/index.txt to TEMP_DIR
parse first line
fetch binaries to TEMP_DIR
test sha
5.
fetch partitions_two_ota.bin
fetch bootloader.bin
6.
python TEMP_DIR/esptool.py --chip esp32 --port /dev/tty.SLAB_USBtoUART --baud 921600 --before default_reset
--after hard_reset write_flash -u --flash_mode dio | --flash_freq 40m --flash_size detect
0x1000 TEMP_DIR/bootloader.bin 0x10000 TEMP_DIR/sensor-esp32.bin 0x8000 TEMP_DIR/partitions_two_ota.bin |
morpheby/levelup-by | common/djangoapps/course_modes/tests/factories.py | Python | agpl-3.0 | 397 | 0.002519 | from course_modes.mode | ls import CourseMode
from factory import DjangoModelFactory
# Factories don't have __init__ methods, and are self documenting
# pylint: disable=W0232
class CourseModeFactory(DjangoModelFactory):
FACTORY_FOR = CourseMode
course_id = u'MITx/999/Robot_Super_Course'
mode_slug = 'audit'
mode_display_name = 'audit | course'
min_price = 0
currency = 'usd'
|
aliparsai/LittleDarwin | utils/NullExperiment/CompareMutantDatabases.py | Python | gpl-3.0 | 1,279 | 0.002346 | import os
import sys
import shelve
import random
import math
import scipy.stats.stats
class MutantResults(object):
def __init__(self, sourceDatabase, resultsDatabase):
# self.resultsDatabase = shelve.open(resultsDatabase, "r")
try:
self.resultsDatabase = shelve.open(resultsDatabase, "r")
self.mutationDatabase = shelve.open(sourceDatabase, "r")
except Exception:
print("Error opening databases.")
# print Exception.message.getter()
sys.exit(1)
self.unitCount = len(self.mutationDatabase.keys())
assert self.unitCount == len(self.resultsDatabase.keys())
self.mutantCount = 0
for key in self.mutationDatabase.keys():
self.mutantCount += len(self.mutationDatabase[key])
def calculateCoverageForClass(self, c):
if not self.resultsDatabase.has_key(c):
return None
survivedList, killedList = self.r | esultsDatabase[ | c]
if len(survivedList) + len(killedList) > 0:
score = float(len(killedList)) / float(len(survivedList) + len(killedList))
else:
score = None
return score
def calculateCoverageForType(self, t):
pass
if __name__ == "__main__":
pass
|
CollabQ/CollabQ | vendor/django/contrib/contenttypes/views.py | Python | apache-2.0 | 2,734 | 0.001463 | from django import http
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
def shortcut(request, content_type_id, object_id):
"Redirect to an object's page based on a content-type ID and an object ID."
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absurl = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
opts = obj._meta
# First, look for an many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
| if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
i | f object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = Site.objects.get_current().domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.is_secure() and 'https' or 'http'
return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
|
Neitsch/ASE4156 | authentication/migrations/0005_auto_20171011_1314.py | Python | apache-2.0 | 616 | 0.001623 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10- | 11 13:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('authentication', '0004_userbank_institution_name'),
]
operations = [
migrations.AlterField(
model_name='userbank',
name='user',
field=models.ForeignKey(on_delete | =django.db.models.deletion.CASCADE, related_name='userbank', to=settings.AUTH_USER_MODEL),
),
]
|
mementum/bfplusplus | bfplusplus/guimods/mainframe/onsizesash.py | Python | gpl-3.0 | 1,795 | 0.004457 | #!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# | This file is part of Bfplusplus
#
# Bfplusplus is a graphical interface to the Betfair Betting Exchange
# Copyright (C) 2010 Daniel Rodriguez (aka Daniel Rodriksson)
# Copyright (C) 2011 Sensible Odds Ltd.
#
# You can learn more and contact the author at:
#
# http://code.google.com/p/bfplusplus/
#
# Bfplusplus is free softw | are: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bfplusplus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bfplusplus. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import wx
if True:
def init(self):
self.Bind(wx.EVT_MOVE, self.OnMove, id=self.GetId())
def RePosOverlays(self):
overlays = [
self.transMsg, self.transMsgError, self.marketStatusOverlay,
self.matchedOverlay, self.stoppedOverlay,
]
for overlay in overlays:
overlay.CalcAndSetPosition()
overlay.Refresh()
def OnMove(self, event):
event.Skip()
self.Refresh()
self.RePosOverlays()
def OnSize(self, event):
event.Skip()
self.Refresh()
self.RePosOverlays()
|
exiahuang/SalesforceXyTools | xlsxwriter/chart_pie.py | Python | apache-2.0 | 6,168 | 0 | ###############################################################################
#
# ChartPie - A class for writing the Excel XLSX Pie charts.
#
# Copyright 2013-2016, John McNamara, jmcnamara@cpan.org
#
from warnings import warn
from . import chart
class ChartPie(chart.Chart):
"""
A class for writing the Excel XLSX Pie charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartPie, self).__init__()
if options is None:
options = {}
self.vary_data_color = 1
self.rotation = 0
# Set the available data label positions for this chart type.
self.label_position_default = 'best_fit'
self.label_positions = {
'center': 'ctr',
'inside_end': 'inEnd',
'outside_end': 'outEnd',
'best_fit': 'bestFit'}
def set_rotation(self, rotation):
"""
Set the Pie/Doughnut chart rotation: the angle of the first slice.
Args:
rotation: First segment angle: 0 <= rotation <= 360.
Returns:
Nothing.
"""
if rotation is None:
return
# Ensure the rotation is in Excel's range.
if rotation < 0 or rotation > 360:
warn("Chart rotation %d outside Excel range: 0 <= rotation <= 360"
% rotation)
return
self.rotation = int(rotation)
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:pieChart element.
self._write_pie_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_pie_chart(self, args):
# Write the <c:pieChart> element. Over-ridden method to remove
# axis_id code since Pie charts don't require val and cat axes.
self._xml_start_tag('c:pieChart')
# Write the c:varyColors element.
self._write_vary_colors()
# Write the series elements.
for data in self.series:
self._write_ser(data)
# Write the c:firstSliceAng element.
self._write_first_slice_ang()
self._xml_end_tag('c:pieChart')
def _write_plot_area(self):
# Over-ridden method to remove the cat_axis() and val_axis() code
# since Pie charts don't require those axes.
#
# Write the <c:plotArea> element.
self._xml_start_tag('c:plotArea')
# Write the c:layout element.
self._wr | ite_layout(self.plotarea.get('layout'), 'plot')
# Write the subclass chart type element.
self._write_chart_type(None)
self._xml_end_tag('c:plotArea')
def _write_legend(self):
# Over-ridden method to add <c:txPr> to legend.
# Write the <c:legend> element.
position = self.legend_position
font = self.legend_font
delete_series = []
overlay = 0
if | (self.legend_delete_series is not None
and type(self.legend_delete_series) is list):
delete_series = self.legend_delete_series
if position.startswith('overlay_'):
position = position.replace('overlay_', '')
overlay = 1
allowed = {
'right': 'r',
'left': 'l',
'top': 't',
'bottom': 'b',
}
if position == 'none':
return
if position not in allowed:
return
position = allowed[position]
self._xml_start_tag('c:legend')
# Write the c:legendPos element.
self._write_legend_pos(position)
# Remove series labels from the legend.
for index in delete_series:
# Write the c:legendEntry element.
self._write_legend_entry(index)
# Write the c:layout element.
self._write_layout(self.legend_layout, 'legend')
# Write the c:overlay element.
if overlay:
self._write_overlay()
# Write the c:txPr element. Over-ridden.
self._write_tx_pr_legend(None, font)
self._xml_end_tag('c:legend')
def _write_tx_pr_legend(self, horiz, font):
# Write the <c:txPr> element for legends.
if font and font.get('rotation'):
rotation = font['rotation']
else:
rotation = None
self._xml_start_tag('c:txPr')
# Write the a:bodyPr element.
self._write_a_body_pr(rotation, horiz)
# Write the a:lstStyle element.
self._write_a_lst_style()
# Write the a:p element.
self._write_a_p_legend(font)
self._xml_end_tag('c:txPr')
def _write_a_p_legend(self, font):
# Write the <a:p> element for legends.
self._xml_start_tag('a:p')
# Write the a:pPr element.
self._write_a_p_pr_legend(font)
# Write the a:endParaRPr element.
self._write_a_end_para_rpr()
self._xml_end_tag('a:p')
def _write_a_p_pr_legend(self, font):
# Write the <a:pPr> element for legends.
attributes = [('rtl', 0)]
self._xml_start_tag('a:pPr', attributes)
# Write the a:defRPr element.
self._write_a_def_rpr(font)
self._xml_end_tag('a:pPr')
def _write_vary_colors(self):
# Write the <c:varyColors> element.
attributes = [('val', 1)]
self._xml_empty_tag('c:varyColors', attributes)
def _write_first_slice_ang(self):
# Write the <c:firstSliceAng> element.
attributes = [('val', self.rotation)]
self._xml_empty_tag('c:firstSliceAng', attributes)
|
jeffleary00/greenery | potnanny/apps/room/api.py | Python | bsd-2-clause | 1,520 | 0.001974 | from flask import Blueprint, request, url_for, jsonify
from flask_restful import Api, Resource
from flask_jwt_extended import jwt_required
from potnanny_core.models impo | rt Room
from .schemas import RoomSchema
from potnanny.crud import CrudInterface
bp = Blueprint('room_api', __name__, url_prefix='/api/1.0/rooms')
api = Api(bp)
ifc = CrudInterface(Room, RoomSchema)
class RoomListApi(Resource):
# @jwt_required
def get(self):
ser, err, code = ifc.get()
| if err:
return err, code
return ser, code
# @jwt_required
def post(self):
data, errors = RoomSchema().load(request.get_json())
if errors:
return errors, 400
ser, err, code = ifc.create(data)
if err:
return err, code
return ser, code
class RoomApi(Resource):
# @jwt_required
def get(self, pk):
ser, err, code = ifc.get(pk, ['environment','environment'])
if err:
return err, code
return ser, code
# @jwt_required
def put(self, pk):
data, errors = RoomSchema().load(request.get_json())
if errors:
return errors, 400
ser, err, code = ifc.edit(pk, data)
if err:
return err, code
return ser, code
# @jwt_required
def delete(self, pk):
ser, err, code = ifc.delete(pk)
if err:
return err, code
return ser, code
api.add_resource(RoomListApi, '')
api.add_resource(RoomApi, '/<int:pk>')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.