repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
hugobuddel/orange3 | Orange/widgets/__init__.py | Python | gpl-3.0 | 791 | 0.002528 | """
"""
import os
import sysconfig
import pkg_resources
import Orange
# Entry point for main Orange categories/widgets discovery
def widget_discovery(discovery):
dist = pkg_resources.get_distribution("Orange")
pkgs = [
"Orange.widgets.data",
"Orange.widgets.visualize",
"Orange.widgets.classify",
"Orange.widgets.regression",
| "Orange.widgets.evaluate",
"Orange.widgets.unsupervised",
]
for pkg in pkgs:
discovery.process_category_package(pkg, distribution=dist)
WIDGET_HELP_PATH = (
("{DEVELOP_ROOT}/doc/build/htmlhelp/index.html", None),
| # os.path.join(sysconfig.get_path("data"),
# "share", "doc", "Orange-{}".format(Orange.__version__)),
("http://docs.orange.biolab.si/3/", "")
)
|
shawon922/django-blog | posts/migrations/0001_initial.py | Python | mit | 1,203 | 0.001663 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-12 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import posts.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=posts.models.upload_location, width_field='width_field')),
('height_field', models.IntegerField | (default=0)),
('width_field', models.IntegerField(default=0)),
('content', models.TextField()),
('updated', models.DateTimeFie | ld(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-timestamp', '-updated'],
},
),
]
|
bdol/bdol-ml | decision_trees/run_decision_tree.py | Python | lgpl-3.0 | 1,993 | 0.001505 | """
An example that runs a single decision tree using MNIST. This single tree can
achieve ~20% error rate on a random 70/30 train/test split on the original MNIST
data (with a depth limit of 10).
==============
Copyright Info
==============
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You | should have received a copy of the GNU General Public License
along with this p | rogram. If not, see <http://www.gnu.org/licenses/>.
Copyright Brian Dolhansky 2014
bdolmail@gmail.com
"""
from decision_tree import DecisionTree
from fast_decision_tree import FastDecisionTree
from sklearn.datasets import fetch_mldata
from data_utils import integral_to_indicator, split_train_test
import numpy as np
print "Loading data..."
mnist = fetch_mldata('MNIST original', data_home='/home/bdol/data')
train_data, test_data, train_target, test_target = split_train_test(mnist.data,
mnist.target)
train_target = integral_to_indicator(train_target)
test_target = integral_to_indicator(test_target)
print "Done!"
np.seterr(all='ignore')
print "Training decision tree..."
# Comment the following two lines and uncomment the two lines following that
# if you want a faster version of the decision tree.
# dt = DecisionTree(6, 10)
# root = dt.train(train_data, train_target)
fast_dt = FastDecisionTree(10, 10, feat_subset=0.3)
root = fast_dt.train(train_data, train_target)
print "Done training!"
print "Testing..."
err = fast_dt.test(root, test_data, test_target)
print "Error rate: {0}".format(err)
print "Done!"
|
mhaze4/jquery-textflow | django-textflow/textflow/templatetags/__init__.py | Python | mit | 37 | 0 | # coding: utf-8
__author__ = 'mh | aze | '
|
cliburn/flow | src/plugins/io/ReadFCS/memoized.py | Python | gpl-3.0 | 662 | 0.036254 | class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self | .func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list | as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
|
uw-it-aca/course-dashboards | coursedashboards/cache.py | Python | apache-2.0 | 815 | 0 | from memcached_clients import RestclientPymemcacheClient
import re
ONE_MINUTE = 60
ONE_HOUR = 60 * 60
class RestClientsCache(RestclientPymemcacheClient):
""" A custom cache implementation | for Course Dashboards """
def get_cache_expiration_time(self, service, url, status=200):
if "sws" == service:
if re.match(r"^/student/v\d/term/\d{4}", url):
return ONE_HOUR * 10
if re.match(r"^/studen | t/v\d/(?:enrollment|registration)", url):
return ONE_HOUR * 2
return ONE_HOUR
if "pws" == service:
return ONE_HOUR * 10
if "gws" == service:
return ONE_MINUTE * 2
if "canvas" == service:
if status == 200:
return ONE_HOUR * 10
return ONE_MINUTE * 5
|
thorsummoner/crowbar | crowbar/baseextension.py | Python | unlicense | 2,406 | 0.002494 | #
# crowbar - a geometry manipulation program
# Copyright (C) 2020 Dylan Scott Grafmyre
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
crowbar - a geometry manipulation program
Copyright (C) 2020 Dylan Scott Grafmyre
"""
import logging
import os
import pkg_resources
from gi.repository import Gtk
class BaseExtension:
builder = None
pkg_res | ources_module = None
pkg_resources_prefix = None
main_glade_object_name = None
def __init__(self):
self.LOGGER = logging.getLogger(self.__class__.__name__)
@property
def glade_filename(self):
raise NotImplementedError
@property
def main_gtk_widget(self):
if self.builder is None:
self.LOGGER.info('Gtk.Builder()')
self.builder = Gtk.Builder()
self.LOGGER.info('Gtk.Builde | r.add_from_file(%s)', self.glade_filename)
self.builder.add_from_file(self.glade_filename)
# builder.connect_signals(Handler())
self.LOGGER.info('Gtk.Builder.get_object(%s)', self.main_glade_object_name)
widget = self.builder.get_object(self.main_glade_object_name)
if widget == None:
raise AssertionError('Gtk.Builder returned None')
return widget
def icons_append_search_path(self):
pass
class CoreExtension(BaseExtension):
pkg_resources_module = 'crowbar'
@property
def main_glade_object_name(self):
return self.__class__.__name__
@property
def glade_filename(self):
return pkg_resources.resource_filename(
self.pkg_resources_module,
os.path.join(
self.pkg_resources_prefix,
'{}.glade'.format(self.__class__.__name__)
)
)
|
rg3/youtube-dl | youtube_dl/extractor/bfmtv.py | Python | unlicense | 4,216 | 0.002137 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import extract_attributes
class BFMTVBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?bfmtv\.com/'
_VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
_VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
def _brightcove_url_result(self, video_id, video_block):
account_id = video_block.get('accountid') or '876450612001'
player_id = video_block.get('playerid') or 'I2qBTln4u'
return self.url_result(
self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
'BrightcoveNew', video_id)
class BFMTVIE(BFMTVBaseIE):
IE_NAME = 'bfmtv'
_VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V'
_TESTS = [{
'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html',
'info_dict': {
'id': '6196747868001',
'ext': 'mp4',
'title': 'Emmanuel Macron: "L\'Islam est une religion qui vit une crise aujourd’hui, partout dans le monde"',
'description': 'Le Président s\'exprime sur la question du séparatisme depuis les Mureaux, dans les Yvelines.',
'uploader_id': '876450610001',
'upload_date': '20201002',
'timestamp': 1601629620,
},
}]
def _real_extract(self, url):
bfmtv_id = self._match_id(url)
webpage = self._download_webpage(url, bfmtv_id)
video_block = extract_attributes(self._search_regex(
self._VIDEO_BLOCK_REGEX, webpage, 'video block'))
return self._brightcove_url_result(video_block['videoid'], video_block)
class BFMTVLiveIE(BFMTVIE):
IE_NAME = 'bfmtv:live'
_VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)'
_TESTS = [{
'url': 'https://www.bfmtv.com/en-direct/',
'info_dict': {
'id': '5615950982001',
'ext': 'mp4',
'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'uploader_id': '876450610001',
'upload_date': '20171018',
'timestamp': 1508329950,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.bfmtv.com/economie/en-direct/',
'only_matching': True,
}]
class BFMTVArticleIE(BFMTVBaseIE):
IE_NAME = 'bfmtv:article'
_VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A'
_TESTS = [{
'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html',
'info_dict': {
'id': '202101060198',
'title': 'Covid-19: un responsable de l\'Institut Pasteur se demande "quand la France va se reconfiner"',
'description': 'md5:947974089c303d3ac6196670ae262843',
},
'playlist_count': 2,
}, {
'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html',
'only_matching': True,
}, {
'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_ | AN-202101060275.html',
'only_matching': True,
}]
def _real_extract(self, url):
bfmtv_id = self._match_id(url | )
webpage = self._download_webpage(url, bfmtv_id)
entries = []
for video_block_el in re.findall(self._VIDEO_BLOCK_REGEX, webpage):
video_block = extract_attributes(video_block_el)
video_id = video_block.get('videoid')
if not video_id:
continue
entries.append(self._brightcove_url_result(video_id, video_block))
return self.playlist_result(
entries, bfmtv_id, self._og_search_title(webpage, fatal=False),
self._html_search_meta(['og:description', 'description'], webpage))
|
cloudkick/libcloud | libcloud/base.py | Python | apache-2.0 | 1,286 | 0.000778 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the | License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex | press or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.base import RawResponse, Response, LoggingConnection
from libcloud.common.base import LoggingHTTPSConnection, LoggingHTTPConnection
from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
from libcloud.compute.base import Node, NodeSize, NodeImage
from libcloud.compute.base import NodeLocation, NodeAuthSSHKey, NodeAuthPassword
from libcloud.compute.base import NodeDriver, is_private_subnet
from libcloud.utils import deprecated_warning
deprecated_warning(__name__)
|
benediktschmitt/py-jsonapi | jsonapi/flask/api.py | Python | mit | 4,513 | 0.001329 | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Benedikt Schmitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
jsonapi.flask.api
=================
"""
# std
import logging
# third party
import flask
import werkzeug
# local
import jsonapi
__all__ = [
"FlaskAPI",
"current_api"
]
LOG = logging.getLogger(__file__)
def get_request():
"""
Transforms the current flask request object in to a jsonapi request object.
"""
uri = flask.request.base_url
if flask.request.query_string:
uri += "?" + flask.request.query_string.decode("utf-8")
method = flask.request.method
headers = dict(flask.request.headers)
body = flask.request.get_data()
return jsonapi.base.Request(uri, method, headers, body)
def to_response(japi_response):
"""
Transforms the jsonapi response object in | to a flask response.
"""
if japi_response.is_file:
flask_response = flask.send_file(japi_response.file)
elif japi_response.has_body:
flask_response = flask.Respons | e(japi_response.body)
else:
flask_response = flask.Response("")
for key, value in japi_response.headers.items():
flask_response.headers[str(key)] = value
flask_response.status_code = japi_response.status
return flask_response
class FlaskAPI(jsonapi.base.api.API):
"""
Implements the API for flask. You can provide the flask application
later via :meth:`init_app`.
"""
def __init__(self, uri, db, settings=None, flask_app=None):
"""
"""
super().__init__(uri=uri, db=db, settings=settings)
self._flask_app = None
if flask_app is not None:
self.init_app(flask_app)
return None
@property
def debug(self):
"""
This is a proxy for the :attr:`flask_app` *debug* attribute. This means,
that you must enable the debug mode on the flask application.
"""
return self._flask_app.debug
@property
def flask_app(self):
"""
The flask application, this API is registered on.
"""
return self._flask_app
def init_app(self, app):
"""
Registers this API on the flask application *app*.
:arg flask.Flask app:
"""
# Avoid double initialization.
if self._flask_app is app:
return None
if self._flask_app is not None:
raise RuntimeError(
"This api has already been registered on a flask application."
)
self._flask_app = app
# Add the url rule.
app.add_url_rule(
rule=self._uri + "/<path:path>",
endpoint="jsonapi",
view_func=self.handle_request,
methods=["get", "post", "patch", "delete", "head"]
)
# Register the jsonapi extension on the flask application.
app.extensions = getattr(app, "extensions", dict())
app.extensions["jsonapi"] = self
# Add the api to the jinja environment
app.jinja_env.globals["jsonapi"] = current_api
return None
def handle_request(self, path=None):
"""
Handles a request to the API.
"""
req = get_request()
resp = super().handle_request(req)
return to_response(resp)
#: Returns the FlaskAPI instance, which is by used by the current flask
#: application.
current_api = werkzeug.local.LocalProxy(
lambda: flask.current_app.extensions["jsonapi"]
)
|
hnakamur/saklient.python | saklient/cloud/resources/routerplan.py | Python | mit | 4,491 | 0.01222 | # -*- coding:utf-8 -*-
from ..client import Client
from .resource import Resource
from ...util import Util
import saklient
# module saklient.cloud.resources.routerplan
class RouterPlan(Resource):
## ルータ帯域プラン情報の1レコードに対応するクラス。
# (instance field) m_id
# (instance field) m_name
# (instance field) m_band_width_mbps
# (instance field) m_service_class
## @private
# @return {str}
def _api_path(self):
return "/product/internet"
## @private
# @return {str}
def _root_key(self):
return "InternetPlan"
## @private
# @return {str}
def _root_key_m(self):
return "InternetPlans"
## @private
# @return {str}
def _class_name(self):
return "RouterPlan"
## @private
# @return {str}
def _id(self):
return self.get_id()
## @ignore
# @param {saklient.cloud.client.Client} client
# @param {any} obj
# @param {bool} wrapped=False
def __init__(self, client, obj, wrapped=False):
super(RouterPlan, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
Util.validate_type(wrapped, "bool")
self.api_deserialize(obj, wrapped)
# (instance field) n_id = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_id(self):
return self.m_id
## ID
id = property(get_id, None, None)
# (instance field) n_name = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_name(self):
return self.m_name
## 名前
name = property(get_name, None, None)
# (instance field) n_band_width_mbps = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {int}
def get_band_width_mbps(self):
return self.m_band_width_mbps
## 帯域幅
band_width_mbps = property(get_band_width_mbps, None, None)
# (instance field) n_service_class = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_service_class(self):
return self.m_service_class
## サービスクラス
service_class = property(get_service_class, None, None)
## (This method is generated in Translator_default#buildImpl)
#
# @param {any} r
def api_deserialize_impl(self, r):
self.is_new = r is None
if self.is_new:
r = {
| }
self.is | _incomplete = False
if Util.exists_path(r, "ID"):
self.m_id = None if Util.get_by_path(r, "ID") is None else str(Util.get_by_path(r, "ID"))
else:
self.m_id = None
self.is_incomplete = True
self.n_id = False
if Util.exists_path(r, "Name"):
self.m_name = None if Util.get_by_path(r, "Name") is None else str(Util.get_by_path(r, "Name"))
else:
self.m_name = None
self.is_incomplete = True
self.n_name = False
if Util.exists_path(r, "BandWidthMbps"):
self.m_band_width_mbps = None if Util.get_by_path(r, "BandWidthMbps") is None else int(str(Util.get_by_path(r, "BandWidthMbps")))
else:
self.m_band_width_mbps = None
self.is_incomplete = True
self.n_band_width_mbps = False
if Util.exists_path(r, "ServiceClass"):
self.m_service_class = None if Util.get_by_path(r, "ServiceClass") is None else str(Util.get_by_path(r, "ServiceClass"))
else:
self.m_service_class = None
self.is_incomplete = True
self.n_service_class = False
## @ignore
# @param {bool} withClean=False
# @return {any}
def api_serialize_impl(self, withClean=False):
Util.validate_type(withClean, "bool")
ret = {
}
if withClean or self.n_id:
Util.set_by_path(ret, "ID", self.m_id)
if withClean or self.n_name:
Util.set_by_path(ret, "Name", self.m_name)
if withClean or self.n_band_width_mbps:
Util.set_by_path(ret, "BandWidthMbps", self.m_band_width_mbps)
if withClean or self.n_service_class:
Util.set_by_path(ret, "ServiceClass", self.m_service_class)
return ret
|
NoXPhasma/sshplus | sshplus.py | Python | gpl-3.0 | 9,010 | 0.001665 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# SSHplus
# A remote connect utlity, sshmenu compatible clone, and application starter.
#
# (C) 2011 Anil Gulecha
# Based on sshlist, incorporating changes by Benjamin Heil's simplestarter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Instructions
#
# 1. Copy file sshplus.py (this file) to /usr/local/bin
# 2. Edit file .sshplus in your home directory to add menu entries, each
# line in the format NAME|COMMAND|ARGS
# 3. Launch sshplus.py
# 4. Or better yet, add it to gnome startup programs list so it's run on login.
import shlex
import sys
import notify2
import os
import gi
gi.require_version("AppIndicator3", "0.1")
from gi.repository import AppIndicator3
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
_VERSION = "1.0"
_SETTINGS_FILE = os.getenv("HOME") + "/.sshplus"
_ABOUT_TXT = """A simple application starter as appindicator.
To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
# this is a comment
label:SSH connections
# create a folder named "Home"
folder:Home
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
# to mark the end of items inside "Home", specify and empty folder:
folder:
# this item appears in the main menu
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>
Copyright 2011 Anil Gulecha
Incorporating changes from simplestarter, Benjamin Heil, http://www.bheil.net
Released under GPL3, http://www.gnu.org/licenses/gpl-3.0.html"""
_EDIT_CONFIG = """To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
# this is a comment
label:SSH connections
# create a folder named "Home"
folder:Home
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
# to mark the end of items inside "Home", specify and empty folder:
folder:
# this item appears in the main menu
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>"""
def menuitem_response(w, item):
if item == "_about":
show_help_dlg(_ABOUT_TXT)
elif item == "_edit":
edit_config_file()
elif item == "_refresh":
newmenu = build_menu()
ind.set_menu(newmenu)
notify2.init("sshplus")
notify2.Notification(
"SSHplus refreshed", '"%s" has been read! Menu list was refreshed!' % _SETTINGS_FILE
).show()
elif item == "_quit":
sys.exit(0)
elif item == "folder":
pass
else:
print(item)
os.spawnvp(os.P_NOWAIT, item["cmd"], [item["cmd"]] + item["args"])
os.wait3(os.WNOHANG)
def show_help_dlg(msg, error=False):
if error:
dlg_icon = Gtk.MessageType.ERROR
md = Gtk.MessageDialog(
None, 0, dlg_icon, Gtk.ButtonsType.OK, "This is an INFO MessageDialog"
)
edit_config_file()
else:
dlg_icon = Gtk.MessageType.INFO
md = Gtk.MessageDialog(
None, 0, dlg_icon, Gtk.ButtonsType.OK, "This is an INFO MessageDialog"
)
try:
md.set_markup("<b>SSHplus %s</b>" % _VERSION)
md.format_secondary_markup(msg)
md.run()
finally:
md.destroy()
def edit_config_file():
if os.path.isfile(_SETTINGS_FILE) is not True:
os.mknod(_SETTINGS_FILE)
show_help_dlg(
"<b>No <i>.sshplus</i> config file found, we created one for you!\n\nPlease edit the"
" file and reload the config.</b>\n\n%s"
% _EDIT_CONFIG,
error=True,
)
os.spawnvp(os.P_NOWAIT, "xdg-open", ["xdg-open", _SETTINGS_FILE])
os.wait3(os.WNOHANG)
def add_separator(menu):
separator = Gtk.SeparatorMenuItem()
separator.show()
menu.append(separator)
def add_menu_item(menu, caption, item=None):
menu_item = Gtk.MenuItem.new_with_label(caption)
if item:
menu_item.connect("activate", menuitem_response, item)
else:
menu_item.set_sensitive(False)
menu_item.show()
me | nu.append(menu_item)
return menu_item
def get_sshplusconfig():
if not os.path.exists(_SETTINGS_FILE):
return []
app_list = []
f = open(_SETTINGS_FILE, "r")
try:
for line in f.readlines():
line = line.rstrip()
if not line or line.startswith("#"):
continue
elif | line == "sep":
app_list.append("sep")
elif line.startswith("label:"):
app_list.append({"name": "LABEL", "cmd": line[6:], "args": ""})
elif line.startswith("folder:"):
app_list.append({"name": "FOLDER", "cmd": line[7:], "args": ""})
else:
try:
name, cmd, args = line.split("|", 2)
app_list.append(
{
"name": name,
"cmd": cmd,
"args": [n.replace("\n", "") for n in shlex.split(args)],
}
)
except ValueError:
print("The following line has errors and will be ignored:\n%s" % line)
finally:
f.close()
return app_list
def build_menu():
if not os.path.exists(_SETTINGS_FILE):
show_help_dlg(
"<b>ERROR: No .sshmenu file found in home directory</b>\n\n%s" % _ABOUT_TXT, error=True
)
sys.exit(1)
app_list = get_sshplusconfig()
menu = Gtk.Menu()
menus = [menu]
for app in app_list:
if app == "sep":
add_separator(menus[-1])
elif app["name"] == "FOLDER" and not app["cmd"]:
if len(menus) > 1:
menus.pop()
elif app["name"] == "FOLDER":
menu_item = add_menu_item(menus[-1], app["cmd"], "folder")
menus.append(Gtk.Menu())
menu_item.set_submenu(menus[-1])
elif app["name"] == "LABEL":
add_menu_item(menus[-1], app["cmd"], None)
else:
add_menu_item(menus[-1], app["name"], app)
# Add SSHplus options folder to the end of the Menu
add_separator(menu)
m |
databricks/spark-deep-learning | sparkdl/horovod/tensorflow/keras.py | Python | apache-2.0 | 903 | 0.001107 | # Copyright 2018 Databricks, Inc.
#
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=logging-format-interpolation
# pylint: disable=invalid-name
import time
from tensorflow import keras
from sparkdl.horovod import log_to_driver
__all__ = ["LogCallback"]
class LogCallback(keras.callbacks.Callback):
"""
A simple Hor | ovodRunner log callback that streams event logs to notebook cell output.
"""
def __init__(self, per_batch_log=False | ):
"""
:param per_batch_log: whether to output logs per batch, default: False.
"""
raise NotImplementedError()
def on_epoch_begin(self, epoch, logs=None):
raise NotImplementedError()
def on_batch_end(self, batch, logs=None):
raise NotImplementedError()
def on_epoch_end(self, epoch, logs=None):
raise NotImplementedError()
|
orbitfp7/nova | nova/scheduler/filters/ram_filter.py | Python | apache-2.0 | 3,894 | 0.002054 | # Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2012 Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
ram_allocation_ratio_opt = cfg.FloatOpt('ram_allocation_ratio',
default=1.5,
help='Virtual ram to physical ram allocation ratio which affects '
'all ram filters. This configuration specifies a global ratio '
'for RamFilter. For AggregateRamFilter, it will fall back to '
'this configuration value if no per-aggregate setting found.')
CONF = cfg.CONF
CONF.register_opt(ram_allocation_ratio_opt)
class BaseRamFilter(filters.BaseHostFilter):
def _get_ram_allocation_ratio(self, host_state, filter_properties):
raise NotImplementedError
def host_passes(self, host_state, filter_properties):
"""Only return hosts with sufficient available RAM."""
instance_type = filter_properties.get('instance_type')
requested_ram = instance_type['memory_mb']
free_ram_mb = host_state.free_ram_mb
total_usable_ram_mb = host_state.total_usable_ram_mb
ram_allocation_ratio = self._get_ram_allocation_ratio(host_state,
filter_properties)
memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio
used_ram_mb = total_usable_ram_mb - free_ram_mb
usable_ram = memory_mb_limit - used_ram_mb
if not usable_ram >= requested_ram:
LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
"usable ram, it only has %(usable_ram)s MB usable ram.",
{'host_state': host_state,
'requested_ram': requested_ram,
'usable_ram': usable_ram})
return False
# save oversubscription limit for | compute node to test | against:
host_state.limits['memory_mb'] = memory_mb_limit
return True
class RamFilter(BaseRamFilter):
"""Ram Filter with over subscription flag."""
ram_allocation_ratio = CONF.ram_allocation_ratio
def _get_ram_allocation_ratio(self, host_state, filter_properties):
return self.ram_allocation_ratio
class AggregateRamFilter(BaseRamFilter):
"""AggregateRamFilter with per-aggregate ram subscription flag.
Fall back to global ram_allocation_ratio if no per-aggregate setting found.
"""
def _get_ram_allocation_ratio(self, host_state, filter_properties):
# TODO(uni): DB query in filter is a performance hit, especially for
# system with lots of hosts. Will need a general solution here to fix
# all filters with aggregate DB call things.
aggregate_vals = utils.aggregate_values_from_db(
filter_properties['context'],
host_state.host,
'ram_allocation_ratio')
try:
ratio = utils.validate_num_values(
aggregate_vals, CONF.ram_allocation_ratio, cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode ram_allocation_ratio: '%s'"), e)
ratio = CONF.ram_allocation_ratio
return ratio
|
kyoheiotsuka/Latent-Dirichlet-Allocation-Variational-Implementation- | sample.py | Python | mit | 976 | 0.050205 | # -*- coding: utf-8 -*-
import numpy as np
import cv2, os, lda
if __name__ == "__main__":
    # Number of topics to extract with LDA.
    nTopics = 8
    # Create folder for saving the learned topic images.
    if not os.path.exists("result"):
        os.mkdir("result")
    # Create folder used to visualize the fitting process.
    if not os.path.exists("visualization"):
        os.mkdir("visualization")
    # Load image files (created by createImage.py): each 4x4 grayscale
    # image becomes one 16-dimensional observation row.
    data = np.zeros((1000,16),dtype=np.uint8)
    for i in range(1000):
        image = cv2.resize(cv2.imread("image/%d.jpg"%i,0),(4,4),interpolation=cv2.INTER_NEAREST)
        data[i,:] = image.reshape((16)).astype(np.uint8)
    # Apply latent Dirichlet allocation.
    model = lda.LDA()
    model.setData(data)
    model.solve(nTopics=nTopics)
    # Save each learned topic, rescaled to [0, 255] and upsampled for viewing.
    # NOTE: the two statements below were garbled by extraction in the source
    # ("to | pic.max()", "c | v2.resize"); restored here.
    for i in range(nTopics):
        topic = model.qPhi[i,:]
        topic = topic/topic.max()*255
        topic = topic.reshape((4,4))
        cv2.imwrite("result/%d.bmp"%i,cv2.resize(topic.astype(np.uint8),(200,200),interpolation=cv2.INTER_NEAREST))
|
fabianmg/Tareas-1-2014 | JuegoOware.py | Python | gpl-2.0 | 12,611 | 0.043731 | # -*- coding: utf-8 -*-
''' oware
para windows :@
alternativa 1 (idea 1, con 2 listas para cada jugador)
open(nombre del archivo,modo)
'''
import os
import random
# 0 1 2 3 4 5 6 -->
j2 = [4,4,4,4,4,4,0] # Esta sera la lista del jugador dos,
j1 = [4,4,4,4,4,4,0] # jugador 1
# 0 1 2 3 4 5 6 --> en el campo 6 se guardaran los puntos de los jugadores
'''
j2 = d.d('i',[4,4,4,4,4,4,0]) # jugador 2
j1 = d.d('i',[4,4,4,4,4,4,0]) # jugador 1
'''
movidas = 0 # lleva la cuenta de movidas o jugadas
x = 0 # x almacena, en la funcion base(x,y) la eleccion del jugador 1
y = 0 # y almacena, en la funcion base(x,y) la eleccion del jugador 2
# se alamacena para llamar a la funcion convBases para el jugador 1
# y convBasesy para el jugador 2. y convertir la base deseada por el jugador
# su eleccion se guarada en x,y.
j2aux = [] # estas dos listas (auxiliares) se usan para guardar las listas correspondientes al
j1aux = [] # jugador uno y dos. Convertidas a las bases anteriormente seleccionadas
'''
def Guardar(j1,j2,movidas):
fileg= open('./owaa.txt')
fileg.write(j1,'\n',j2,'\n',movidas)
fileg.close() '''
def mapa(movidas,j1aux,j2aux): # this function prints the game board
    """Print the Oware board: both players' scores, the move counter and
    the two rows of pits, rendered from the display-converted auxiliary
    rows *j1aux* / *j2aux* (globals j1/j2 hold the raw seed counts)."""
    #Guardar(j1,j2,movidas) # prints the board from the (auxiliary) lists.
    #os.system('clear')
    ###os.system('cls')
    print(" Oware")
    print(" ┌------------------┐ ┌------------------┐")
    print(" |Jugador 1 %3d | | Jugador 2 %3d |" %(j1[6], j2[6]))
    print(" └------------------┘ └------------------┘")
    print("Movida: ", movidas ) # number of moves played so far
    print(" 6 5 4 3 2 1 ")
    print(" ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐")
    print(" | ",j2aux[5]," ",j2aux[4]," ",j2aux[3]," ",j2aux[2]," ",j2aux[1]," ",j2aux[0])
    print(" └-----┘ └-----┘ └-----┘ └-----┘ └-----┘ └-----┘")
    print(" ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐")
    print(" | ",j1aux[0]," ",j1aux[1]," ",j1aux[2]," ",j1aux[3]," ",j1aux[4]," ",j1aux[5])
    print(" └-----┘ └-----┘ └-----┘ └-----┘ └-----┘ └-----┘")
    print(" 1 2 3 4 5 6")
    return " "
def error(lugar):
    """Validate a pit choice, showing a joke BSOD screen on bad input.

    Returns the choice as an ``int``.  Keeps re-prompting (recursively)
    until the player enters something numeric.

    Fix: in Python 3 ``input()`` always returns ``str``, so the original
    ``isinstance(lugar, int)`` test could never succeed for typed input
    and the function recursed into the BSOD screen forever.  We now try
    to coerce the value to ``int`` first.
    """
    os.system('cls')
    os.system('color 1f')
    if not isinstance(lugar,int):
        try:
            # Accept numeric strings coming straight from input().
            return int(lugar)
        except (TypeError, ValueError):
            pass
        print(" Se ha encontrado un problema y Windows ha sido apagado para evitar daños al equipo")
        print(" El probema parece estar originado por el siguiente archivo: Progra Oware. type(Oware) = USB ")
        print(" Ah ingresado un caracter no valido, por favor ingrese solo numeros")
        print(" Si esta es la primera vez que ve esta pantalla de error de detención, reinicie su equipo. ")
        print(" Si los problemas continúan, deshabilite o elimine cualquier hardware o software de capa 8.")
        print(" Deshabilite las opciones de memoria de la BIOS como caché o vigilancia. ")
        print(" Si necesita utilizar el modo a prueba de errores para quitar o deshabilitar componentes, ")
        print(" reinicie su equipo, presione F8 para seleccionar opciones de inicio avanzadas y, a continuación,")
        print(" seleccione Modo a prueba de errores.")
        print(" ")
        print(" 6 5 4 3 2 1 ")
        print(" ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐")
        print(" | %3d | | %3d | | %3d | | %3d | | %3d | | %3d |" %(j2[5], j2[4], j2[3], j2[2], j2[1], j2[0]))
        print(" └-----┘ └-----┘ └-----┘ └-----┘ └-----┘ └-----┘")
        print(" ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐ ┌-----┐")
        print(" | %3d | | %3d | | %3d | | %3d | | %3d | | %3d |" %(j1[0], j1[1], j1[2], j1[3], j1[4], j1[5]))
        print(" └-----┘ └-----┘ └-----┘ └-----┘ └-----┘ └-----┘")
        print(" 1 2 3 4 5 6")
        print(" Eliga una posicion valida!!! ")
        lugar = input("Posicion: " )
        return error(lugar)
    else:
        return lugar
def finjuego(vect):
    """Return True when every pit in *vect* is empty (all zeros).

    Used to detect that a player has run out of seeds; an empty vector
    counts as finished, matching the recursive base case of the original.
    """
    return all(seeds == 0 for seeds in vect)
def ganador(x): # checks whether there is a winner
    """Return False (after announcing the winner) when the game is over,
    True when play should continue.  *x* is the player (1 or 2) whose row
    is checked for being empty; a score above 24 also ends the game."""
    os.system('clear') # clear the terminal / 'cmd'
    if j1[6]>24:
        print("El jugador 1 es el ganador, con ",j1[6]," semillas.")
        return False
    if j2[6]>24:
        print("El jugador 2 es el ganador, con ",j2[6]," semillas.?")
        return False
    if finjuego(j1[:-1]) and x == 1:
        print(mapa(movidas+1,j1aux,j2aux))
        if j1[6]>j2[6]:
            print("El jugador 1 es el ganador, con ",j1[6]," semillas.")
            return False
        else:
            print("El jugador 2 es el ganador, con ",j2[6]," semillas.?")
            return False
    elif finjuego(j2[:-1]) and x == 2:
        print(mapa(movidas+1,j1aux,j2aux))
        if j1[6]>j2[6]:
            print("El jugador 1 es el ganador, con ",j1[6]," semillas.")
            return False
        else:
            print("El jugador 2 es el ganador, con ",j2[6]," semillas.?")
            return False
    else:
        return True
def validarLugar(lugar, jugador):
    """Return True when pit *lugar* of row *jugador* still holds seeds."""
    return jugador[lugar] != 0
''''''
def mismoJugador(vect1, vect2, a):
    """Return True if *vect1* and *vect2* agree on every index from *a*
    up to ``len(vect2) - 1`` (used to tell which player's row a list is).

    *a* acts as the starting index, exactly as in the original recursion.
    """
    while a < len(vect2):
        if vect1[a] != vect2[a]:
            return False
        a += 1
    return True
''''''
def puntos(lugar,jugador,ptsGanados):
    """Capture seeds: if pit *lugar* of *jugador* holds 2 or 3 seeds, add
    them to ``ptsGanados[6]`` (the capturing player's score slot) and empty
    the pit, then keep capturing backwards while the previous pit also
    holds 2 or 3 seeds.

    NOTE(review): when *lugar* reaches 0, ``lugar-1`` wraps to the end of
    the list (including the score slot at index 6) via Python negative
    indexing -- confirm this matches the intended Oware capture rule.
    """
    if jugador[lugar] == 3 or jugador[lugar] == 2:
        print('lugar--->',lugar,"-->",jugador)
        ptsGanados[6] = ptsGanados[6]+jugador[lugar]
        jugador[lugar] = 0
    #if jugador[lugar-1] == 3 or jugador[lugar-1] == 2:
    # ptsGanados[6] = ptsGanados[6]+ jugador[lugar-1]
    # jugador[lugar] = 0
    if jugador[lugar-1] == 3 or jugador[lugar-1] == 2:
        print('lugar--->',lugar-2,"-->",jugador)
        return puntos(lugar-1,jugador,ptsGanados)
    return " "
def mover1(lugar, semillas, jugador, x, actual):
    """Sow *semillas* seeds counter-clockwise starting after pit *lugar*.

    *jugador* is the row currently being filled; *actual* is the row of
    the player who made the move; *x* is 0 only on the first call, which
    empties the chosen pit.  When the last seed lands, capture points are
    resolved via puntos() and True is returned.

    Fix: the increment below appeared as ``lugar += | 1`` in the source
    (extraction garbling); restored to ``lugar += 1``.
    """
    if x == 0:
        # First call only: pick up all seeds from the chosen pit.
        jugador[lugar] = 0
    if semillas == 0:
        # All seeds sown; check whether the final pit yields a capture.
        if mismoJugador(actual, j1, 1):   # the mover is player one
            puntos(lugar, j2, j1)
        else:                             # the mover is player two
            puntos(lugar, j1, j2)
        return True
    elif lugar < 5:
        lugar += 1
        jugador[lugar] += 1
        semillas -= 1
        return mover1(lugar, semillas, jugador, 1, actual)
    elif mismoJugador(jugador, j1, 1):
        # End of player one's row reached: continue on player two's row.
        return mover1(-1, semillas, j2, 1, actual)
    else:
        return mover1(-1, semillas, j1, 1, actual)
def noHaySemillas(semillas,jugador,lugar) :
    """Re-prompt (recursively) until the player picks a pit that actually
    holds seeds; returns the chosen 1-based pit number.  *semillas* is the
    seed count of the currently selected pit."""
    #mapa($movidas)
    if semillas == 0:
        print(" ,__, Eliga una ")
        print(" (oo)____ posicion con semillas: ")
        print(" (__) )\ ")
        print(" ||''|| * ")
        lugar = int(input("posicion: " ))
        return noHaySemillas(jugador[lugar-1],jugador,lugar)
    else:
        return lugar
'''''converciones'''''''''
def toBin(n):
    """Return the binary representation of a non-negative int as a string
    (no prefix, no leading zeros; ``0`` maps to ``'0'``)."""
    if n == 0:
        return "0"
    return format(n, "b")
def toOctal(n):
    """Return the octal representation of a non-negative int as a string
    (no prefix, no leading zeros; ``0`` maps to ``'0'``)."""
    if n == 0:
        return "0"
    return format(n, "o")
def toHex(n):
if n%16==10:
return str(toHex(n//16))+ 'A'
elif n%16==11:
return str(toHex(n//16))+ 'B'
elif n%16==12:
|
bioinfo-center-pasteur-fr/python-course-1 | source/_static/code/homolog.py | Python | cc0-1.0 | 1,585 | 0.003785 |
class Gene:
    """A gene of a secretion system, identified by name and carrying the
    HMM profile used to detect it.

    Fix: the class was declared with ``Class`` (capital C), which is a
    SyntaxError in Python.
    """

    def __init__(self, name, system, loner, profile):
        self.name = name
        self.profile = profile
        self._system = system
        self._loner = loner

    @property
    def system(self):
        """
        :return: the System that owns this Gene
        :rtype: :class:`macsypy.system.System` object
        """
        return self._system

    def __eq__(self, gene):
        """
        :return: True if the gene names (gene.name) are the same, False otherwise.
        :param gene: the query of the test
        :type gene: :class:`macsypy.gene.Gene` object.
        :rtype: boolean.
        """
        return self.name == gene.name
class Homolog(Gene):
    """A gene that is a homolog of a reference gene.

    Fixes: the class declared no base, so ``super(Homolog, self).__init__``
    called ``object.__init__`` with extra arguments (TypeError); it now
    inherits :class:`Gene`.  The ``system``/``__eq__`` overrides referenced
    ``self.gene``, which was never assigned (AttributeError); they now use
    the state stored by ``Gene.__init__``.  Two docstrings were also
    garbled by extraction in the source and are restored.
    """

    def __init__(self, name, system, loner, profile, gene_ref, aligned=False):
        super(Homolog, self).__init__(name, system, loner, profile)
        self.ref = gene_ref        # the reference gene this one is homologous to
        self.aligned = aligned     # whether this homolog is aligned to its reference

    @property
    def system(self):
        """
        :return: the System that owns this Gene
        :rtype: :class:`macsypy.system.System` object
        """
        return self._system

    def __eq__(self, gene):
        """
        :return: True if the gene names (gene.name) are the same, False otherwise.
        :param gene: the query of the test
        :type gene: :class:`macsypy.gene.Gene` object.
        :rtype: boolean.
        """
        return self.name == gene.name

    def is_aligned(self):
        """
        :return: True if this gene homolog is aligned to its reference
            gene, False otherwise.
        :rtype: boolean
        """
        return self.aligned
switch-model/switch-hawaii-studies | models/dr/fuel_markets_expansion.py | Python | apache-2.0 | 4,206 | 0.011888 | # For large systems, each fuel market tier is a category of capacity expansion, and
# it can be built fractionally. For small systems, each fuel market tier is one
# capacity-expansion project, and it must be fully built and/or activated each period.
# To do this, we add binary variables and confine additions and activations to match them.
# Each tier has a capital cost and duration (locked in if it is developed)
# and a fixed and variable cost. Variable costs are already shown in fuel_markets.py,
# and this module adds fixed costs (some economies of scale, but assuming 100% salvage
# value at all times, i.e., projects can be deactivated without losing any capital cost.)
# Later we may add a more complete capital cost system.
import os
from pyomo.environ import *
inf = float('inf')
def define_components(m):
    """Add binary fuel-supply-tier activation decisions and their fixed
    costs to model *m*.

    Creates ``rfm_supply_tier_fixed_cost`` (Param), ``RFMSupplyTierActivate``
    (binary Var), the two activation Constraints, and the
    ``RFM_Fixed_Costs_Annual`` Expression, which is registered in
    ``m.cost_components_annual``.

    Fix: one comment and the first argument of the
    ``Enforce_RFM_Supply_Tier_Activated`` constraint were garbled by
    extraction in the source (stray ``|`` separators); restored below.
    """
    # eventually this should be extended to include capital costs and fixed lifetimes
    # for fuel supply infrastructure, but then it gets fairly complicated (equivalent
    # to the project build / activate / operate system)
    # Maybe we can setup some sort of inheritance system for different types of object
    # -- base capital assets, which could then be power production projects (of which some
    # are generators (fuel-based or intermittent), and some are storage), fuel-supply projects,
    # transmission lines, etc.
    # fixed cost (per mmBtu/year of capacity) of having each tier in service during each period
    # note: this must be zero if a tier has unlimited capacity, to avoid having infinite cost
    m.rfm_supply_tier_fixed_cost = Param(m.RFM_SUPPLY_TIERS, default=0.0,
        validate=lambda m, v, r, p, st: v == 0.0 or m.rfm_supply_tier_limit[r, p, st] < inf)
    # should the tier be activated?
    # Note: in large regions, a tier represents a block of expandable capacity,
    # so this could be continuous, but then you could just lump the fixed cost
    # into the variable cost and not use this module.
    m.RFMSupplyTierActivate = Var(m.RFM_SUPPLY_TIERS, within=Binary)
    # force all unlimited tiers to be activated (since they must have no cost,
    # and to avoid a limit of 0.0 * inf in the constraint below)
    m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint(m.RFM_SUPPLY_TIERS,
        rule=lambda m, r, p, st:
            (m.RFMSupplyTierActivate[r, p, st] == 1) if (m.rfm_supply_tier_limit[r, p, st] == inf)
            else Constraint.Skip
    )
    # only allow delivery from activated tiers
    m.Enforce_RFM_Supply_Tier_Activated = Constraint(
        m.RFM_SUPPLY_TIERS,
        rule=lambda m, r, p, st:
            m.FuelConsumptionByTier[r, p, st]
            <=
            m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st])
    # Eventually, when we add capital costs for capacity expansion, we will need a
    # variable showing how much of each tier to build each period (and then the upper
    # limit could be a lifetime limit rather than a limit on what can be added each
    # period). Then we may want to make the expansion variable Binary for small systems
    # and continuous for large systems. That could be done by building a shadow list
    # of binary variables and constraining the actual decisions to match the binary
    # version if some flag is set in the data.
    m.RFM_Fixed_Costs_Annual = Expression(
        m.PERIODS,
        rule=lambda m, p: sum(
            (
                # note: we dance around projects with unlimited supply and 0.0 fixed cost
                0.0 if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0
                else m.rfm_supply_tier_fixed_cost[rfm_st]
                    * m.RFMSupplyTierActivate[rfm_st] * m.rfm_supply_tier_limit[rfm_st]
            )
            for r in m.REGIONAL_FUEL_MARKET
            for rfm_st in m.RFM_P_SUPPLY_TIERS[r, p]))
    m.cost_components_annual.append('RFM_Fixed_Costs_Annual')
def load_inputs(m, switch_data, inputs_dir):
    """Load optional per-tier fixed costs from fuel_supply_curves.tab in
    *inputs_dir*; missing file or column falls back to the Param default."""
    switch_data.load_aug(
        optional=True,
        filename=os.path.join(inputs_dir, 'fuel_supply_curves.tab'),
        select=('regional_fuel_market', 'period', 'tier', 'fixed_cost'),
        param=(m.rfm_supply_tier_fixed_cost,))
|
gajim/gajim | gajim/gajim_remote.py | Python | gpl-3.0 | 18,904 | 0.003967 | #!/usr/bin/env python3
# Copyright (C) 2005-2006 Dimitur Kirov <dkirov AT gmail.com>
# Nikos Kouremenos <kourem AT gmail.com>
# Copyright (C) 2005-2014 Yann Leboulanger <asterix AT lagaule.org>
# Copyright (C) 2006 Junglecow <junglecow AT gmail.com>
# Travis Shirk <travis AT pobox.com>
# Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org>
# Copyright (C) 2007 Julien Pivotto <roidelapluie AT gmail.com>
#
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
# gajim-remote help will show you the D-BUS API of Gajim
import os
import sys
import locale
import signal
from gajim.common import exceptions
from gajim.common.i18n import _
from gajim.common.i18n import Q_
# Restore default SIGINT handling so ^C exits the application.
# NOTE: both statements below were garbled by extraction in the source
# ("sign | al", "getpreferreden | coding"); restored here.
signal.signal(signal.SIGINT, signal.SIG_DFL)  # ^C exits the application

try:
    PREFERRED_ENCODING = locale.getpreferredencoding()
except Exception:
    # Fall back to UTF-8 if the locale cannot be determined.
    PREFERRED_ENCODING = 'UTF-8'
def send_error(error_message):
    """Write *error_message* (plus a newline) to stderr and exit with status 1."""
    sys.stderr.write('%s\n' % error_message)
    sys.exit(1)
# Fail fast when the D-Bus bindings or a session bus are unavailable,
# since every remote command below requires a working session bus.
try:
    import dbus
    import dbus.service
    # import dbus.glib
    # test if dbus-x11 is installed
    bus = dbus.SessionBus()
except Exception:
    # Broad catch is deliberate: this can raise ImportError (module
    # missing) or a DBusException (no session bus running).
    print(_('D-Bus is not present on this machine or python module is missing'))
    sys.exit(1)
OBJ_PATH = '/org/gajim/dbus/RemoteObject'
INTERFACE = 'org.gajim.dbus.RemoteInterface'
SERVICE = 'org.gajim.Gajim'
BASENAME = 'gajim-remote'
class GajimRemote:
def __init__(self):
self.argv_len = len(sys.argv)
# define commands dict. Prototype :
# {
# 'command': [comment, [list of arguments] ]
# }
#
# each argument is defined as a tuple:
# (argument name, help on argument, is mandatory)
#
self.commands = {
'help': [
_('Shows a help on specific command'),
[
#User gets help for the command, specified by this parameter
(_('command'),
_('show help on command'), False)
]
],
'list_contacts': [
_('Lists all contacts in the contact list, one for each line'),
[
(Q_('?CLI:account'), _('show only contacts of the given account'),
False)
]
],
'list_accounts': [
_('Prints a list of registered accounts'),
[]
],
'change_status': [
_('Changes the status of account(s)'),
[
#offline, online, chat, away, xa, dnd should not be translated
(Q_('?CLI:status'), _('one of: offline, online, chat, away, xa, dnd. If not set, use account\'s previous status'), False),
(Q_('?CLI:message'), _('status message'), False),
(Q_('?CLI:account'), _('change status of account "account". '
'If not specified, try to change status of all accounts that have '
'"sync with global status" option set'), False)
]
],
'set_priority': [
_('Changes the priority of account(s)'),
[
(Q_('?CLI:priority'), _('priority you want to give to the account'),
True),
(Q_('?CLI:account'), _('change the priority of the given account. '
'If not specified, change status of all accounts that have'
' "sync with global status" option set'), False)
]
],
'send_chat_message': [
_('Sends new chat message to a contact in the contact list. Account is optional.'),
[
('jid', _('XMPP Address of the contact that will receive the message'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'send_single_message': [
_('Sends a chat message to someone on your contact list. '
'Account is optional.'),
[
('jid', _('XMPP Address of the contact that will receive the message'), True),
(_('subject'), _('message subject'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'send_groupchat_message': [
_('Sends new message to a group chat you\'ve joined.'),
[
('room_jid', _('XMPP Address of the group chat that will receive the message'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'contact_info': [
_('Gets detailed info on a contact'),
[
('jid', _('XMPP Address of the contact'), True)
]
],
'account_info': [
_('Gets detailed info on a account'),
[
('account', _('Name of the account'), True)
]
],
'send_file': [
_('Sends file to a contact'),
[
(_('file'), _('File path'), True),
('jid', _('XMPP Address of the contact'), True),
(Q_('?CLI:account'), _('if specified, file will be sent using this '
'account'), False)
]
],
'remove_contact': [
_('Removes contact from contact list'),
[
('jid', _('XMPP Address of the contact'), True),
(Q_('?CLI:account'), _('if specified, contact is |
Kefkius/scallop | gui/android.py | Python | gpl-3.0 | 31,779 | 0.008559 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import android
from scallop import SimpleConfig, Wallet, WalletStorage, format_satoshis
from scallop.bitcoin import is_address
from scallop import util
from decimal import Decimal
import datetime, re
def modal_dialog(title, msg = None):
    """Show a blocking Android alert dialog with a single OK button."""
    droid.dialogCreateAlert(title,msg)
    droid.dialogSetPositiveButtonText('OK')
    droid.dialogShow()
    # Block until the user dismisses the dialog, then tear it down.
    droid.dialogGetResponse()
    droid.dialogDismiss()
def modal_input(title, msg, value = None, etype=None):
    """Show a blocking input dialog; return the entered value when OK is
    pressed, None on Cancel.  Retries (recursively) if the SL4A bridge
    returns no result at all."""
    droid.dialogCreateInput(title, msg, value, etype)
    droid.dialogSetPositiveButtonText('OK')
    droid.dialogSetNegativeButtonText('Cancel')
    droid.dialogShow()
    response = droid.dialogGetResponse()
    result = response.result
    droid.dialogDismiss()
    if result is None:
        print "modal input: result is none"
        return modal_input(title, msg, value, etype)
    if result.get('which') == 'positive':
        return result.get('value')
def modal_question(q, msg, pos_text = 'OK', neg_text = 'Cancel'):
droid.dialogCreateAlert(q, msg)
droid.dialogSetPositiveButtonText(pos_text)
droid.dialogSetNegativeButtonText(neg_text)
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
print "modal question: result is none"
return modal_question(q,msg, pos_text, neg_text)
return result.get(' | which') == 'positive'
def edit_label(addr):
    """Prompt for a new label for address *addr*; on OK store it in the
    wallet and refresh the on-screen label view."""
    v = modal_input('Edit label', None, wallet.labels.get(addr))
    if v is not None:
        wallet.set_label(addr, v)
        droid.fullSetProperty("labelTextView", "text", v)
def select_from_contacts():
    """Show the contact list; return the selected contact's stored value,
    the string 'newcontact' if the user pressed "New contact", or None if
    nothing was chosen."""
    title = 'Contacts:'
    droid.dialogCreateAlert(title)
    l = contacts.keys()
    droid.dialogSetItems(l)
    droid.dialogSetPositiveButtonText('New contact')
    droid.dialogShow()
    response = droid.dialogGetResponse().result
    droid.dialogDismiss()
    if response.get('which') == 'positive':
        return 'newcontact'
    result = response.get('item')
    if result is not None:
        # contacts maps label -> (type, value); return just the value.
        t, v = contacts.get(result)
        return v
def protocol_name(p):
    """Map a protocol letter to its display name ('t' -> 'TCP',
    's' -> 'SSL'); any other value yields None."""
    return {'t': 'TCP', 's': 'SSL'}.get(p)
def protocol_dialog(host, protocol, z):
    """Let the user pick a connection protocol for *host* from the port
    dict *z* (keys are protocol letters); return the chosen letter, or
    None when the dialog is cancelled or nothing is selected."""
    droid.dialogCreateAlert('Protocol', host)
    # Only TCP ('t') and SSL ('s') are offered here.
    protocols = filter(lambda x: x in "ts", z.keys())
    l = []
    current = protocols.index(protocol)
    for p in protocols:
        l.append(protocol_name(p))
    droid.dialogSetSingleChoiceItems(l, current)
    droid.dialogSetPositiveButtonText('OK')
    droid.dialogSetNegativeButtonText('Cancel')
    droid.dialogShow()
    response = droid.dialogGetResponse().result
    selected_item = droid.dialogGetSelectedItems().result
    droid.dialogDismiss()
    if not response:
        return
    if not selected_item:
        return
    if response.get('which') == 'positive':
        return protocols[selected_item[0]]
def make_layout(s, scrollable = False):
    """Wrap the Android-XML fragment *s* in the app's standard page layout
    (dark header bar with the "Electrum" title), optionally inside a
    ScrollView, and return the complete layout XML string."""
    content = """
<LinearLayout
android:id="@+id/zz"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:background="#ff222222">
<TextView
android:id="@+id/textElectrum"
android:text="Electrum"
android:textSize="7pt"
android:textColor="#ff4444ff"
android:gravity="left"
android:layout_height="wrap_content"
android:layout_width="match_parent"
/>
</LinearLayout>
%s """%s
    if scrollable:
        # Re-wrap the header + body in a ScrollView when requested.
        content = """
<ScrollView
android:id="@+id/scrollview"
android:layout_width="match_parent"
android:layout_height="match_parent" >
<LinearLayout
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="wrap_content" >
%s
</LinearLayout>
</ScrollView>
"""%content
    return """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/background"
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="#ff000022">
%s
</LinearLayout>"""%content
def main_layout():
    """Build the home-screen layout XML: balance header text plus the 15
    most recent history rows, wrapped in a scrollable page."""
    h = get_history_layout(15)
    l = make_layout("""
<TextView android:id="@+id/balanceTextView"
android:layout_width="match_parent"
android:text=""
android:textColor="#ffffffff"
android:textAppearance="?android:attr/textAppearanceLarge"
android:padding="7dip"
android:textSize="8pt"
android:gravity="center_vertical|center_horizontal|left">
</TextView>
<TextView android:id="@+id/historyTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Recent transactions"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>
%s """%h,True)
    return l
def qr_layout(addr, amount, message):
    """Build the 'receive' screen layout: the address line, optional
    amount and message lines, and the QR-code image previously rendered
    to /sdcard/sl4a/qrcode.bmp."""
    addr_view= """
<TextView android:id="@+id/addrTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="%s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>"""%addr
    if amount:
        amount_view = """
<TextView android:id="@+id/amountTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="Amount: %s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>"""%format_satoshis(amount)
    else:
        amount_view = ""
    if message:
        message_view = """
<TextView android:id="@+id/messageTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="Message: %s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>"""%message
    else:
        message_view = ""
    return make_layout("""
%s
%s
%s
<ImageView
android:id="@+id/qrView"
android:gravity="center"
android:layout_width="match_parent"
android:layout_height="350"
android:antialias="false"
android:src="file:///sdcard/sl4a/qrcode.bmp" />
"""%(addr_view, amount_view, message_view), True)
payto_layout = make_layout("""
<TextView android:id="@+id/recipientTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Pay to:"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="left">
</TextView>
<EditText android:id="@+id/recipient"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:tag="Tag Me" android:inputType="text">
</EditText>
<LinearLayout android:id="@+id/linearLayout1"
andr |
googleapis/python-resource-manager | samples/generated_samples/cloudresourcemanager_v3_generated_folders_update_folder_sync.py | Python | apache-2.0 | 1,634 | 0.000612 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this fi | le except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, | either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateFolder
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-resourcemanager
# [START cloudresourcemanager_v3_generated_Folders_UpdateFolder_sync]
from google.cloud import resourcemanager_v3
def sample_update_folder():
    """Update a folder through the Cloud Resource Manager v3 API and print
    the operation's result."""
    client = resourcemanager_v3.FoldersClient()

    folder = resourcemanager_v3.Folder()
    folder.parent = "parent_value"
    request = resourcemanager_v3.UpdateFolderRequest(folder=folder)

    # update_folder returns a long-running operation; block until it finishes.
    operation = client.update_folder(request=request)
    print("Waiting for operation to complete...")
    outcome = operation.result()
    print(outcome)
# [END cloudresourcemanager_v3_generated_Folders_UpdateFolder_sync]
|
dag/genshi | genshi/template/base.py | Python | bsd-3-clause | 22,687 | 0.002028 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic templating functionality."""
from collections import deque
import os
import sys
from genshi.compat import StringIO, BytesIO
from genshi.core import Attrs, Stream, StreamEventKind, START, TEXT, _ensure
from genshi.input import ParseError
__all__ = ['Context', 'DirectiveFactory', 'Template', 'TemplateError',
'TemplateRuntimeError', 'TemplateSyntaxError', 'BadDirectiveError']
__docformat__ = 'restructuredtext en'
class TemplateError(Exception):
    """Base exception class for errors related to template processing."""

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the exception.

        :param message: the error message
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        :param offset: the column number at which the error occurred
        """
        where = filename if filename is not None else '<string>'
        self.msg = message #: the error message string
        shown = message
        if lineno >= 0 or where != '<string>':
            # Append location info when we know anything about the source.
            shown = '%s (%s, line %d)' % (self.msg, where, lineno)
        Exception.__init__(self, shown)
        self.filename = where #: the name of the template file
        self.lineno = lineno #: the number of the line containing the error
        self.offset = offset #: the offset on the line
class TemplateSyntaxError(TemplateError):
    """Exception raised when an expression in a template causes a Python syntax
    error, or the template is not well-formed.
    """

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the exception

        :param message: the error message
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        :param offset: the column number at which the error occurred
        """
        if isinstance(message, SyntaxError) and message.lineno is not None:
            # Strip Python's own "(line N)" suffix; TemplateError re-adds
            # location info in its canonical form.  (This format string was
            # garbled to "' (line | %d)'" by extraction in the source;
            # restored here.)
            message = str(message).replace(' (line %d)' % message.lineno, '')
        TemplateError.__init__(self, message, filename, lineno)
class BadDirectiveError(TemplateSyntaxError):
    """Exception raised when an unknown directive is encountered when parsing
    a template.

    An unknown directive is any attribute using the namespace for directives,
    with a local name that doesn't match any registered directive.
    """

    def __init__(self, name, filename=None, lineno=-1):
        """Create the exception

        :param name: the name of the directive
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        """
        message = 'bad directive "%s"' % name
        TemplateSyntaxError.__init__(self, message, filename, lineno)
class TemplateRuntimeError(TemplateError):
    """Exception raised when the evaluation of a Python expression in a
    template causes an error.
    """
class Context(object):
"""Container for template input data.
A context provides a stack of scopes (represented by dictionaries).
Template directives such as loops can push a new scope on the stack with
data that should only be available inside the loop. When the loop
terminates, that scope can get popped off the stack again.
>>> ctxt = Context(one='foo', other=1)
>>> ctxt.get('one')
'foo'
>>> ctxt.get('other')
1
>>> ctxt.push(dict(one='frost'))
>>> ctxt.get('one')
'frost'
>>> ctxt.get('other')
1
>>> ctxt.pop()
{'one': 'frost'}
>>> ctxt.get('one')
'foo'
"""
def __init__(self, **data):
"""Initialize the template context with the given keyword arguments as
data.
"""
self.frames = deque([data])
self.pop = self.frames.popleft
self.push = self.frames.appendleft
self._match_templates = []
self._choice_stack = []
# Helper functions for use in expressions
def defined(name):
"""Return whether a variable with the specified name exists in the
expression scope."""
return name in self
def value_of(name, default=None):
"""If a variable of the specified name is defined, return its value.
Otherwise, return the provided default value, or ``None``."""
return self.get(name, default)
data.setdefault('defined', defined)
data.setdefault('value_of', value_of)
def __repr__(self):
return repr(list(self.frames))
def __contains__(self, key):
"""Return whether a variable exists in any of the scopes.
:param key: the name of the variable
"""
return self._find(key)[1] is not None
has_key = __contains__
def __delitem__(self, key):
"""Remove a variable from all scopes.
:param key: the name of the variable
"""
for frame in self.frames:
if key in frame:
del frame[key]
def __getitem__(self, key):
"""Get a variables's value, starting at the current scope and going
upward.
:param key: the name of the variable
:return: the variable value
:raises KeyError: if the requested variable wasn't found in any scope
"""
value, frame = self._find(key)
if frame is None:
raise KeyError(key)
return value
def __len__(self):
"""Return the number of distinctly named variables in the context.
:return: the number of variables in the context
"""
return len(self.items())
def __setitem__(self, key, value):
"""Set a variable in the current scope.
:param key: the name of the variable
:param value: the variable value
"""
self.frames[0][key] = value
def _find(self, key, default=None):
"""Retrieve a given variable's value and the frame it was found in.
Intended primarily for internal use by directives.
:param key: the name of the variable
:param default: the default value to return when the variable is not
found
"""
for frame in self.frames:
if key in frame:
return frame[key], frame
return default, None
def get(self, key, default=None):
"""Get a variable's value, starting at the current scope and going
upward.
:param key: the name of the variable
:param default: the default value to return when the variable is not
found
"""
for frame in self.frames:
if key in frame:
return frame[key]
return default
def keys(self):
"""Return the name of all variables in the context.
:return: a list of variable names
"""
keys = []
for frame in self.frames:
keys += [key for key in frame if key not in keys]
return keys
def items(self):
"""Return a list of ``(name, value)`` tuples for all variables in the
context.
:return: a list of variables
"""
return [(key, self.get(key)) for key in self.keys()]
def update(self, mapping):
"""Update the context from the mapping provided."""
self.frames[0].update(mapping)
def push(self, data):
"""Push a new scope on the stack.
|
tylertian/Openstack | openstack F/horizon/horizon/tests/api_tests/keystone_tests.py | Python | apache-2.0 | 3,893 | 0.000514 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from keystoneclient.v2_0 import client as keystone_client
from horizon import api
from horizon import test
class FakeConnection(object):
pass
class ClientConnectionTests(test.TestCase):
def setUp(self):
super(ClientConnectionTests, self).setUp()
self.mox.StubOutWithMock(keystone_client, "Client")
self.internal_url = api.base.url_for(self.request,
'identi | ty',
endpoint_type='internalURL')
self.admin_url = api.base.url_for(self.request,
'identity',
endpoint_type='adminURL')
self.conn = FakeConnection()
class RoleAPITests(test.APITestCase):
def setUp(self):
super(RoleAPITests, self).setUp()
self.role = self.roles.member
self.roles = | self.roles.list()
def test_remove_tenant_user(self):
"""
Tests api.keystone.remove_tenant_user
Verifies that remove_tenant_user is called with the right arguments
after iterating the user's roles.
There are no assertions in this test because the checking is handled
by mox in the VerifyAll() call in tearDown().
"""
keystoneclient = self.stub_keystoneclient()
tenant = self.tenants.first()
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.roles_for_user(self.user.id,
tenant.id).AndReturn(self.roles)
for role in self.roles:
keystoneclient.roles.remove_user_role(self.user.id,
role.id,
tenant.id)
self.mox.ReplayAll()
api.keystone.remove_tenant_user(self.request, tenant.id, self.user.id)
def test_get_default_role(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.list().AndReturn(self.roles)
self.mox.ReplayAll()
role = api.keystone.get_default_role(self.request)
self.assertEqual(role, self.role)
# Verify that a second call doesn't hit the API again,
# (it would show up in mox as an unexpected method call)
role = api.keystone.get_default_role(self.request)
class ServiceAPITests(test.APITestCase):
def test_service_wrapper(self):
catalog = self.service_catalog
identity_data = api.base.get_service_from_catalog(catalog, "identity")
identity_data['id'] = 1
service = api.keystone.Service(identity_data)
self.assertEqual(unicode(service), u"identity (native backend)")
self.assertEqual(service.region,
identity_data["endpoints"][0]["region"])
self.assertEqual(service.url,
"http://int.keystone.example.com:5000/v2.0")
self.assertEqual(service.host, "int.keystone.example.com")
|
gleb812/pch2csd | tests/test_utils.py | Python | mit | 2,150 | 0.004186 | from unittest import TestCase, skip
from pch2csd.patch import ModuleParameters, Location, Module
from pch2csd.resources import ProjectData
from pch2csd.util import AttrEqMixin
class TestAttrEqMixin(TestCase):
class ThreeSlots(AttrEqMixin):
def __init__(self, one, two, th | ree):
self.one = one
self.two = two
self.three = three
def test_different_args(self):
o1 = self.ThreeSlots(1, [x for x | in 'test'], [x * 2 for x in range(10)])
o2 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
self.assertTrue(o1.attrs_equal(o2))
def test_overloaded_eq(self):
o1 = ModuleParameters(1, 2, [(1, 32), (3, '43')])
o2 = ModuleParameters(1, 2, [x for x in [(1, 32), (3, '43')]])
self.assertTrue(o1 == o2)
def test_modified_arg__fail(self):
o1 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
o2 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
o2.one = 2
self.assertFalse(o1.attrs_equal(o2))
def test_extra_args__fail(self):
o1 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
o2 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
o2.__dict__['mod'] = 1
self.assertFalse(o1.attrs_equal(o2))
def test_different_types__fail(self):
o1 = self.ThreeSlots(1, [x for x in 'test'], [x * 2 for x in range(10)])
o2 = ModuleParameters(1, 2, [x for x in [(1, 32), (3, '43')]])
self.assertFalse(o1.attrs_equal(o2))
@skip('Need new ReprStrMixin examples after adding more properties to the classes')
class TestReprStrMixin(TestCase):
def setUp(self):
self.data = ProjectData()
def test_simple(self):
m = Module(self.data, Location.VOICE_AREA, 1, 1)
expected_str = 'Module(type=1, type_name=Keyboard, id=1, location=Location.VOICE_AREA)'
self.assertEqual(str(m), expected_str)
def test_repr_equal_to_str(self):
m = Module(self.data, Location.VOICE_AREA, 1, 1)
self.assertEqual(m.__repr__(), m.__str__())
|
bretth/woven | woven/deployment.py | Python | bsd-3-clause | 8,823 | 0.012581 | #!/usr/bin/env python
from functools import wraps
from glob import glob
from hashlib import sha1
import os, shutil, sys, tempfile
from django.template.loader import render_to_string
from fabric.state import env
from fabric.operations import run, sudo, put
from fabric.context_managers import cd, settings, hide
from fabric.contrib.files import exists
from fabric.contrib.project import rsync_project
def _backup_file(path):
"""
Backup a file but never overwrite an existing backup file
"""
backup_base = '/var/local/woven-backup'
backup_path = ''.join([backup_base,path])
if not exists(backup_path):
directory = ''.join([backup_base,os.path.split(path)[0]])
sudo('mkdir -p %s'% directory)
sudo('cp %s %s'% (path,backup_path))
def _restore_file(path, delete_backup=True):
"""
Restore a file if it exists and remove the backup
"""
backup_base = '/var/local/woven-backup'
backup_path = ''.join([backup_base,path])
if exists(backup_path):
if delete_backup:
sudo('mv -f %s %s'% (backup_path,path))
else:
sudo('cp -f %s %s'% (backup_path,path))
def _get_local_files(local_dir, pattern=''):
"""
Returns a dictionary with directories as keys, and filenames as values
for filenames matching the glob ``pattern`` under the ``local_dir``
``pattern can contain the Boolean OR | to evaluated multiple patterns into
a combined set.
"""
local_files = {}
if pattern:
cwd = os.getcwd()
os.chdir(local_dir)
patterns = pattern.split('|')
local_list = set([])
for p in patterns: local_list = local_list | set(glob(p))
for path in local_list:
dir, file = os.path.split(path)
if os.path.isfile(path):
local_files[dir] = local_files.get(dir,[])+[file]
elif os.path.isdir(path):
local_files[file] = local_files.get(dir,[])
os.chdir(cwd)
return local_files
def _stage_local_files(local_dir, local_files={}):
"""
Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
Returns a path to the temporary local staging directory
"""
staging_dir = os.path.join(tempfile.mkdtemp(),os.path.basename(local_dir))
os.mkdir(staging_dir)
for root, dirs, files in os.walk(local_dir):
relative_tr | ee = root.replace(local_dir,'')
if relative_tree:
relative_tree = relative_tree[1:]
if local_files:
files = local_files.get(relative_tree,[])
for file in files:
if relative_tre | e:
filepath = os.path.join(relative_tree,file)
if not os.path.exists(os.path.join(staging_dir,relative_tree)):
os.mkdir(os.path.join(staging_dir,relative_tree))
else: filepath = file
shutil.copy2(os.path.join(root,file),os.path.join(staging_dir,filepath))
return staging_dir
def deploy_files(local_dir, remote_dir, pattern = '',rsync_exclude=['*.pyc','.*'], use_sudo=False):
"""
Generic deploy function for cases where one or more files are being deployed to a host.
Wraps around ``rsync_project`` and stages files locally and/or remotely
for network efficiency.
``local_dir`` is the directory that will be deployed.
``remote_dir`` is the directory the files will be deployed to.
Directories will be created if necessary.
Note: Unlike other ways of deploying files, all files under local_dir will be
deployed into remote_dir. This is the equivalent to cp -R local_dir/* remote_dir.
``pattern`` finds all the pathnames matching a specified glob pattern relative
to the local_dir according to the rules used by the Unix shell.
``pattern`` enhances the basic functionality by allowing the python | to include
multiple patterns. eg '*.txt|Django*'
``rsync_exclude`` as per ``rsync_project``
Returns a list of directories and files created on the host.
"""
#normalise paths
if local_dir[-1] == os.sep: local_dir = local_dir[:-1]
if remote_dir[-1] == '/': remote_dir = remote_dir[:-1]
created_list = []
staging_dir = local_dir
#resolve pattern into a dir:filename dict
local_files = _get_local_files(local_dir,pattern)
#If we are only copying specific files or rendering templates we need to stage locally
if local_files: staging_dir = _stage_local_files(local_dir, local_files)
remote_staging_dir = '/home/%s/.staging'% env.user
if not exists(remote_staging_dir):
run(' '.join(['mkdir -pv',remote_staging_dir])).split('\n')
created_list = [remote_staging_dir]
#upload into remote staging
rsync_project(local_dir=staging_dir,remote_dir=remote_staging_dir,exclude=rsync_exclude,delete=True)
#create the final destination
created_dir_list = mkdirs(remote_dir, use_sudo)
if not os.listdir(staging_dir): return created_list
func = use_sudo and sudo or run
#cp recursively -R from the staging to the destination and keep a list
remote_base_path = '/'.join([remote_staging_dir,os.path.basename(local_dir),'*'])
copy_file_list = func(' '.join(['cp -Ruv',remote_base_path,remote_dir])).split('\n')
if copy_file_list[0]: created_list += [file.split(' ')[2][1:-1] for file in copy_file_list if file]
#cleanup any tmp staging dir
if staging_dir <> local_dir:
shutil.rmtree(staging_dir,ignore_errors=True)
return created_list
def mkdirs(remote_dir, use_sudo=False):
"""
Wrapper around mkdir -pv
Returns a list of directories created
"""
func = use_sudo and sudo or run
result = func(' '.join(['mkdir -pv',remote_dir])).split('\n')
#extract dir list from ["mkdir: created directory `example.com/some/dir'"]
if result[0]: result = [dir.split(' ')[3][1:-1] for dir in result if dir]
return result
def upload_template(filename, destination, context={}, use_sudo=False, backup=True, modified_only=False):
"""
Render and upload a template text file to a remote host using the Django
template api.
``filename`` should be the Django template name.
``context`` is the Django template dictionary context to use.
The resulting rendered file will be uploaded to the remote file path
``destination`` (which should include the desired remote filename.) If the
destination file already exists, it will be renamed with a ``.bak``
extension.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
"""
#Replaces the default fabric.contrib.files.upload_template
basename = os.path.basename(filename)
text = render_to_string(filename,context)
func = use_sudo and sudo or run
#check hashed template on server first
if modified_only:
hashfile_dir, hashfile = os.path.split(destination)
hashfile_dir = ''.join(['/var/local/woven-backup',hashfile_dir])
hashfile = '%s.hashfile'% hashfile
hashfile_path = os.path.join(hashfile_dir, hashfile)
hashed = sha1(text).hexdigest()
if hashfile:
if not exists(hashfile_dir): sudo('mkdir -p %s'% hashfile_dir)
sudo('touch %s'% hashfile_path) #store the hash near the template
previous_hashed = sudo('cat %s'% hashfile_path).strip()
if previous_hashed == hashed:
return False
else: sudo('echo %s > %s'% (hashed, hashfile_path))
temp_destination = '/tmp/' + basename
# This temporary file should not be automatically deleted on close, as we
# need it there to upload it (Windows locks the file for reading while open).
tempfile_fd, tempfile_name = tempfile.mkstemp()
output = open(tempfile_name, "w+b")
output.write(text)
output.close()
# Upload the file.
put(tempfile_name, temp_destination)
os.close(te |
pombredanne/anitya | anitya/tests/lib/backends/test_hackage.py | Python | gpl-2.0 | 3,310 | 0.000604 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
anitya tests for the custom backend.
'''
import json
import unittest
import anitya.lib.backends.hackage as backend
import anitya.lib.model as model
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import Modeltests, create_distro, skip_jenkins
BACKEND = 'Hackage'
class HackageBackendtests(Modeltests):
""" Hackage backend tests. """
@skip_jenkins
def setUp(self):
""" Set up the environnment, ran before every tests. """
super(HackageBackendtests, self).setUp()
create_distro(self.session)
self.create_project()
def create_project(self):
""" Create some basic projects to work with. """
project = model.Project(
name='Biobase',
homepage='http://hackage.haskell.org/package/Biobase',
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
project = model.Project(
name='foobar',
homepage='http://hackage.haskell.org/package/foobar',
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
def test_get_version(self):
""" Test the get_version function of the custom backend. """
pid = 1
project = model.Project.get(self.session, pid)
exp = '0.3.1.1'
obs = backend.HackageBackend.get_version(project)
self.assertEqual(obs, exp)
pid = 2
project = model.Project.get(self.session, pid)
self.assertRaises(
AnityaPluginException,
backend.HackageBackend.get_version,
project
)
def test_get_versions(self):
""" Test the get_versions function of the custom backend. """
pid = 1
project = model.Project.get(self.session, pid)
exp = ['0.3.1.1']
obs = backend.HackageBackend.get_ordered_versions(project)
self.assertEqual(obs, exp)
pid = 2
project = model.Project.get(self.session, pid)
| self.assertRaises(
AnityaPluginException,
backend.HackageBackend.get_version,
project
)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase( | HackageBackendtests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
Stibbons/dopplerr | cfgtree/dictxpath.py | Python | mit | 5,244 | 0.001716 | # coding: utf-8
# Standard Libraries
import logging
import re
# Third Party Libraries
from cfgtree import LOGGER_NAME
log = logging.getLogger(LOGGER_NAME)
# tells flake8 to ignore complexity check for this file
# flake8: noqa
def get_node_by_xpath(mapping, xpath, default=None, ignore_errors=False,
handle_list_selector=False):
'''Return the node pointed to by xpath from mapping.
Args:
mapping: nested dictionary.
xpath: string-like selector.
default: default value if the attribute doesn't exist.
ignore_errors: if True, pass silently if the xpath is invalid.
handle_list_selector: allow to support list element selector
Example:
>>> tree = {'level1': {'level2': {'level3': 'bottom'}}}
>>> get_node_by_xpath(tree, 'level1.level2.level3') == 'bottom'
True
'''
if not isinstance(mapping, dict):
if not ignore_errors:
raise KeyError("Mapping is not dictionary: {!r}".format(mapping))
return default
for segment in xpath.split('.'):
if not mapping:
if not ignore_errors:
raise KeyError("Empty mapping, but need to access to '{}'".format(xpath))
return default
if segment not in mapping:
if handle_list_selector and '[' in segment:
re_subselector = re.compile(r"(.*)\[(\d+)\]$")
m = re_subselector.match(segment)
if m:
key = m.group(1)
index = int(m.group(2))
if key not in mapping:
if not ignore_errors:
raise KeyError("Invalid '{}' index selector: '{}' does not match "
"anything. Available keys: {!r}".format(
xpath, key, list(mapping.keys())))
return default
items = mapping[key]
if not isinstance(items, list):
if not ignore_errors:
raise KeyError("Invalid '{}' selector: '{}' is not a list, is: {}"
.format(xpath, key, type(items)))
return default
if l | en(items) <= index:
if not ignore_errors:
raise KeyError("Invalid '{}' selector: item index '{}' of '{}' is "
"outside of the list boundaries. Length is: {}".format(
xpath, index, key, len(items)))
return default
ma | pping = items[index]
continue
elif not ignore_errors:
raise KeyError("Invalid '{}' selector: '{}' doesn't match "
"anything. Available keys: {!r}".format(
xpath, segment, list(mapping.keys())))
return default
mapping = mapping[segment]
return mapping
def set_node_by_xpath(mapping, xpath, value, extend=False, setter_attr=None):
'''Set the node pointed to by xpath from mapping.
Args:
mapping: nested dictionary.
xpath: string-like selector.
value: value to set.
extend: if True, create the nested structure if it doesn't exist,
otherwise, raise an exception.
setter_attr: use a special setter method attribute in mapping, instead of replacing
the node by the new value (note: do not use a property setter attribute)
Example:
>>> tree = {'level1': {'level2': {'level3': 'bottom'}}}
>>> set_node_by_xpath(tree, 'level1.level2.level3', 'bottom')
'''
segments = xpath.split('.')
attrname = segments.pop()
for segment in segments:
if segment not in mapping:
if not extend:
raise KeyError("Invalid '{}' selector: '{}' doesn't match "
"anything.".format(xpath, segment))
mapping[segment] = {}
mapping = mapping[segment]
if setter_attr:
# setter attribute defined, calling this setter
setter = getattr(mapping[attrname], setter_attr)
setter(value)
else:
mapping[attrname] = value
def delete_node_by_xpath(mapping, xpath, ignore_errors=False):
'''Delete the node pointed to by xpath from mapping.
Args:
mapping: nested dictionary.
xpath: string-like selector.
ignore_errors: if True, pass silently if the node doesn't exist,
otherwise, raise an exception.
Example:
>>> tree = {'level1': {'level2': {'level3': 'bottom'}}}
>>> delete_node_by_xpath(tree, 'level1.level2')
>>> tree
{'level1': {}}
'''
segments = xpath.split('.')
attrname = segments.pop()
for segment in segments:
if segment not in mapping:
if ignore_errors:
return
raise KeyError("Invalid '{}' selector: '{}' doesn't match "
"anything.".format(xpath, segment))
mapping = mapping[segment]
return mapping.pop(attrname, None)
|
rebost/django | tests/modeltests/model_forms/models.py | Python | bsd-3-clause | 8,153 | 0.005151 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import os
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __unicode__(self):
return self.name
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self):
import datetime
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save()
def __unicode__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __unicode__(self):
return "%s is %s" % (self.writer, self.age)
from django.contrib.localflavor.us.models import PhoneNumberField
class PhoneNumber(models.Model):
phone = PhoneNumberField()
description = models.CharField(max_length=20)
def __unicode__(self):
return self.phone
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __unicode__(self):
return self.description
try:
# If PIL is available, try testing ImageFields. Checking for the existence
# of Image is enough for CPython, but for PyPy, you need to check for the
# underlying modules If PIL is not available, ImageField tests are omitted.
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, _imaging
except ImportError:
import Image, _imaging
test_images = True
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
| width_field='width', height_field='height',
blank=True, null=True)
width = models | .IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
except ImportError:
test_images = False
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __unicode__(self):
return self.field
class Product(models.Model):
slug = models.SlugField(unique=True)
def __unicode__(self):
return self.slug
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __unicode__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __unicode__(self):
return self.key
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __unicode__(self):
return self.name
class DerivedPost(Post):
pass
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __unicode__(self):
return unicode(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in form (real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
|
MiiRaGe/pyreport | setup.py | Python | mit | 108 | 0.027778 | from setuptools import setup, find_packages
setup(
name='pyreport' | ,
ve | rsion='0.3',
packages=['pyreport']
)
|
wimleers/fileconveyor | fileconveyor/arbitrator.py | Python | unlicense | 59,744 | 0.004067 | import logging
import logging.handlers
import Queue
import os
import stat
import threading
import time
import sys
import sqlite3
from UserList import UserList
import os.path
import signal
FILE_CONVEYOR_PATH = os.path.abspath(os.path.dirname(__file__))
# HACK to make sure that Django-related libraries can be loaded: include dummy
# settings if necessary.
if not 'DJANGO_SETTINGS_MODULE' in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'fileconveyor.django_settings'
from settings import *
from config import *
from persistent_queue import *
from persistent_list import *
from fsmonitor import *
from filter import *
from processors.processor import *
from transporters.transporter import Transporter, ConnectionError
from daemon_thread_runner import *
# Copied from django.utils.functional
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
class AdvancedQueue(UserList):
"""queue that supports peeking and jumping"""
def peek(self):
return s | elf[0]
def jump(self, item):
self.insert(0, item)
def put(self, item):
self.append(item)
def get(self):
return self.pop(0)
def qsize(self) | :
return len(self)
# Define exceptions.
class ArbitratorError(Exception): pass
class ArbitratorInitError(ArbitratorError): pass
class ConfigError(ArbitratorInitError): pass
class ProcessorAvailabilityTestError(ArbitratorInitError): pass
class TransporterAvailabilityTestError(ArbitratorInitError): pass
class ServerConnectionTestError(ArbitratorInitError): pass
class FSMonitorInitError(ArbitratorInitError): pass
class Arbitrator(threading.Thread):
"""docstring for arbitrator"""
DELETE_OLD_FILE = 0xFFFFFFFF
PROCESSED_FOR_ANY_SERVER = None
def __init__(self, configfile="config.xml", restart=False):
threading.Thread.__init__(self, name="ArbitratorThread")
self.lock = threading.Lock()
self.die = False
self.processorchains_running = 0
self.transporters_running = 0
self.last_retry = 0
# Set up logger.
self.logger = logging.getLogger("Arbitrator")
self.logger.setLevel(FILE_LOGGER_LEVEL)
# Handlers.
fileHandler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=5242880, backupCount=5)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(CONSOLE_LOGGER_LEVEL)
# Formatters.
formatter = logging.Formatter("%(asctime)s - %(name)-25s - %(levelname)-8s - %(message)s")
fileHandler.setFormatter(formatter)
consoleHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
self.logger.addHandler(consoleHandler)
if restart:
self.logger.warning("File Conveyor has restarted itself!")
self.logger.warning("File Conveyor is initializing.")
# Load config file.
self.configfile = configfile
self.logger.info("Loading config file.")
self.config = Config("Arbitrator")
self.config_errors = self.config.load(self.configfile)
self.logger.warning("Loaded config file.")
if self.config_errors > 0:
self.logger.error("Cannot continue, please fix the errors in the config file first.")
raise ConfigError("Consult the log file for details.")
# TRICKY: set the "symlinkWithin" setting for "symlink_or_copy"
# transporters First calculate the value for the "symlinkWithin"
# setting.
source_paths = []
for source in self.config.sources.values():
source_paths.append(source["scan_path"])
symlinkWithin = ":".join(source_paths)
# Then set it for every server that uses this transporter.
for name in self.config.servers.keys():
if self.config.servers[name]["transporter"] == "symlink_or_copy":
self.config.servers[name]["settings"]["symlinkWithin"] = symlinkWithin
# Verify that all referenced processors are available.
processors_not_found = 0
for source in self.config.rules.keys():
for rule in self.config.rules[source]:
if not rule["processorChain"] is None:
for processor in rule["processorChain"]:
processor_class = self._import_processor(processor)
if not processor_class:
processors_not_found += 1
if processors_not_found > 0:
raise ProcessorAvailabilityTestError("Consult the log file for details")
# Verify that all referenced transporters are available.
transporters_not_found = 0
for server in self.config.servers.keys():
transporter_name = self.config.servers[server]["transporter"]
transporter_class = self._import_transporter(transporter_name)
if not transporter_class:
transporters_not_found += 1
if transporters_not_found > 0:
raise TransporterAvailabilityTestError("Consult the log file for details")
# Verify that each of the servers works.
successful_server_connections = 0
for server in self.config.servers.keys():
transporter = self.__create_transporter(server)
if transporter:
successful_server_connections += 1
del transporter
failed_server_connections = len(self.config.servers) - successful_server_connections
if failed_server_connections > 0:
self.logger.error("Server connection tests: could not connect with %d servers." % (failed_server_connections))
raise ServerConnectionTestError("Consult the log file for details.")
else:
self.logger.warning("Server connection tests succesful!")
def __setup(self):
self.processor_chain_factory = ProcessorChainFactory("Arbitrator", WORKING_DIR)
# Create transporter (cfr. worker thread) pools for each server.
# Create one initial transporter per pool, possible other transporters
# will be created on-demand.
self.transporters = {}
for server in self.config.servers.keys():
self.transporters[server] = []
self.logger.warning("Setup: created transporter pool for the '%s' server." % (server))
# Collecting all necessary metadata for each rule.
self.rules = []
for source in self.config.sources.values():
# Create a function to prepend the source's scan path to another
# path.
prepend_scan_path = lambda path: os.path.join(source["scan_path"], path)
if self.config.rules.has_key(source["name"]):
for rule in self.config.rules[source["name"]]:
if rule["filterConditions"] is None:
filter = None
else:
if rule["filterConditions"].has_key("paths"):
# Prepend the source's scan path (effectively the
# "root path") for a rule to each of the paths in
# the "paths" condition in the filter.
paths = map(prepend_scan_path, rule["filterConditions"]["paths"].split(":"))
rule["filterConditions"]["paths"] = ":".join(paths)
filter = Filter(rule["filterConditions"])
# Store all the rule metadata.
self.rules.append({
"source" : source["name"],
"label" : rule["label"],
"filter" : filter,
"processorChain" : rule["processorChain"],
"destinations" : rule["destinations"],
"deletionDelay" : rule["deletionDelay"],
})
self.logger.info("Setup: collected all metadata for rule '%s' (source: '%s')." % (rule["label"], source["name"]))
# Initia |
bengarrett/RetroTxt | ext/fonts/woff-to-woff2.py | Python | lgpl-3.0 | 558 | 0 | #!/usr/bi | n/python3
#
# On Windows or a current Linux distro.
# pip3 install fontTools[woff]
#
import os
from fontTools.ttLib import TTFont
directory = '.'
for name in os.listdir(directory):
file = os.path.join(directory, name)
if os.path.isfile(file) and name.endswith(".woff"):
(b, ext) = os.path.splitext(name)
woff2 = b+".woff2"
w2 = os.path.join(directory, woff2)
if os.path.isfile(w2):
| continue
print(file, "=>", w2)
f = TTFont(file)
f.flavor = "woff2"
f.save(woff2)
|
hotchemi/mvns | version.py | Python | apache-2.0 | 42 | 0 | # -*- | coding:utf-8 -*-
VERSION = "0.1.3 | "
|
mkoura/dump2polarion | tests/test_requirement_exporter.py | Python | gpl-2.0 | 2,552 | 0.002351 | # pylint: disable=missing-docstring,redefined-outer-name,no-self-use,protected-access
import copy
import os
from collections import OrderedDict
import pytest
from dump2polarion.exceptions import Dump2PolarionException, NothingToDoException
from dump2polarion.exporters.requirements_exporter import RequirementExport
from tests import conf
REQ_DATA = [
OrderedDict(
(
("title", "req01"),
("approver-ids", "sbulage:approved"),
("assignee", "mkourim"),
("category-ids", "CAT-01"),
("dueDate", "2018-05-30"),
("plannedIn", "PROJ-01"),
("initialEstimate", "1/4h"),
("priority", "medium"),
("severity", "nice_to_have"),
("status", "STAT-01"),
("reqtype", "functional"),
)
),
OrderedDict(
(
("title", "req02"),
("description", "requirement description"),
("assignee-id", "mkourim"),
("initial-estimate", "1/4h"),
)
),
OrderedDict((("id", "PROJ-01"), ("title", "req03"), ("initial-estimate", None))),
OrderedDict((("id", "PROJ-02"),)),
]
@pytest.fixture(scope="module")
def config_cloudtp(config_prop):
cloudtp = copy.deepcopy(config_prop)
cloudtp["polarion-project-id"] = "CLOUDTP"
cloudtp["requirements-document-relative-path"] = "testing/requirements"
cloudtp["requirements_import_properties"] = {"prop1": "val1", "prop2": "val2"}
return cloudtp
class TestRequirement:
def test_export(self, config_cloudtp):
req_exp = RequirementExport(REQ_DATA, config_cloudtp)
complete = req_exp.export()
fname = "requirement_complete.xml"
with open(os.path.join(conf.DATA_PATH, fname), encoding="utf-8") as input_xml:
parsed = input_xml.read()
assert complete == parsed
def test_invalid_lookup(self, config_cloudtp):
new_config = copy.deepcopy(config_cloudtp)
new_config["requirements_import_properties"] = {"lookup-method": "inv"}
req_exp = RequirementExport(REQ_DATA, new_config)
with pytest.raises(Dump2PolarionException) | as excinfo:
req_exp.export()
assert "Invalid value 'inv' for the 'lookup-method' property" in str(excinfo.value)
def test_no_requirements(self, config_cloudtp):
req_exp = RequirementExport([], config_cloudtp)
with pytest.raises(NothingToDoException) | as excinfo:
req_exp.export()
assert "Nothing to export" in str(excinfo.value)
|
zouyapeng/horizon-newtouch | horizon/templatetags/horizon.py | Python | apache-2.0 | 6,643 | 0 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from horizon.contrib import bootstrap_datepicker
from django.conf import settings
from django import template
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon.base import Horizon # noqa
from horizon import conf
register = template.Library()
@register.filter
def has_permissions(user, component):
"""Checks if the given user meets the permissions requirements for
the component.
"""
return user.has_perms(getattr(component, 'permissions', set()))
@register.filter
def has_permissions_on_list(components, user):
return [component for component
in components if has_permissions(user, component)]
@register.inclusion_tag('horizon/_accordion_nav.html', takes_context=True)
def horizon_nav(context):
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
current_panel = context['request'].horizon.get('panel', None)
dashboards = []
for dash in Horizon.get_dashboards():
panel_groups = dash.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
if (callable(panel.nav) and panel.nav(context) and
panel.can_access(context)):
allowed_panels.append(panel)
elif (not callable(panel.nav) and panel.nav and
panel.can_access(context)):
allowed_panels.append(panel)
if allowed_panels:
non_empty_groups.append((group.name, allowed_panels))
if (callable(dash.nav) and dash.nav(context) and
dash.can_access(context)):
dashboards.append((dash, SortedDict(non_empty_groups)))
elif (not callable(dash.nav) and dash.nav and
dash.can_access(context)):
dashboards.append((dash, SortedDict(non_empty_groups)))
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'current_panel': current_panel.slug if current_panel else '',
'request': context['request']}
@register.inclusion_tag('horizon/_nav_list.html', takes_context=True)
def horizon_main_nav(context):
"""Generates top-level dashboard na | vigation entries."""
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
dashboards = []
for dash in Horizon.get_dashboards():
if dash.can_access(context):
if callable(dash.nav) and dash.nav(context):
dashboards.append(dash)
elif | dash.nav:
dashboards.append(dash)
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'request': context['request']}
@register.inclusion_tag('horizon/_subnav_list.html', takes_context=True)
def horizon_dashboard_nav(context):
"""Generates sub-navigation entries for the current dashboard."""
if 'request' not in context:
return {}
dashboard = context['request'].horizon['dashboard']
panel_groups = dashboard.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
if (callable(panel.nav) and panel.nav(context) and
panel.can_access(context)):
allowed_panels.append(panel)
elif (not callable(panel.nav) and panel.nav and
panel.can_access(context)):
allowed_panels.append(panel)
if allowed_panels:
if group.name is None:
non_empty_groups.append((dashboard.name, allowed_panels))
else:
non_empty_groups.append((group.name, allowed_panels))
return {'components': SortedDict(non_empty_groups),
'user': context['request'].user,
'current': context['request'].horizon['panel'].slug,
'request': context['request']}
@register.filter
def quota(val, units=None):
if val == float("inf"):
return _("No Limit")
elif units is not None:
return "%s %s %s" % (val, force_text(units),
force_text(_("Available")))
else:
return "%s %s" % (val, force_text(_("Available")))
@register.filter
def quotainf(val, units=None):
if val == float("inf"):
return _("No Limit")
elif units is not None:
return "%s %s" % (val, units)
else:
return val
class JSTemplateNode(template.Node):
"""Helper node for the ``jstemplate`` template tag."""
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context,):
output = self.nodelist.render(context)
output = output.replace('[[[', '{{{').replace(']]]', '}}}')
output = output.replace('[[', '{{').replace(']]', '}}')
output = output.replace('[%', '{%').replace('%]', '%}')
return output
@register.tag
def jstemplate(parser, token):
"""Replaces ``[[[`` and ``]]]`` with ``{{{`` and ``}}}``,
``[[`` and ``]]`` with ``{{`` and ``}}`` and
``[%`` and ``%]`` with ``{%`` and ``%}`` to avoid conflicts
with Django's template engine when using any of the Mustache-based
templating libraries.
"""
nodelist = parser.parse(('endjstemplate',))
parser.delete_first_token()
return JSTemplateNode(nodelist)
@register.assignment_tag
def load_config():
return conf.HORIZON_CONFIG
@register.assignment_tag
def datepicker_locale():
locale_mapping = getattr(settings, 'DATEPICKER_LOCALES',
bootstrap_datepicker.LOCALE_MAPPING)
return locale_mapping.get(translation.get_language(), 'en')
|
sgabe/Enumerator | enumerator/lib/nmap.py | Python | mit | 3,708 | 0.003776 | #!/usr/bin/env python
"""This module is the first
step in gathering initial
service enumeration data from
a list of hosts. It initializes
the scanning commands and parses
the scan results. The scan results
are then passed to the delegator
module which determines what enumerator
should do next.
@author: Steve Coward (steve<at>sugarstack.io)
@version: 1.0
"""
import sys
import os
import re
import glob
import subprocess
from blinker import signal
from config import Config
import delegator
from .process_manager import ProcessManager
PROCESSES = [{
'command': 'nmap -Pn %(scan_mode)s -sS -sV -oN %(output_dir)s/%(host)s-tcp-standard.txt -oG %(output_dir)s/% | (host)s-tcp-greppable.txt -oX %(output_dir)s/%(host)s-tcp.xml %(host)s',
'normal': '-T4 -p-',
'stealth': '-T2',
}, {
'command': 'nmap -Pn %(scan_mode)s -sU -sV --open -oN %(output_dir)s/%(host)s-udp-standard.txt | -oG %(output_dir)s/%(host)s-udp-greppable.txt -oX %(output_dir)s/%(host)s-udp.xml %(host)s',
'normal': '-T4 --top-ports 100',
'stealth': '-T2 --top-ports 10',
}]
# Refined regex pattern for greppable nmap output.
SERVICE_PATTERN = re.compile(
'\s(\d+)\/([^/]+)?\/([^/]+)?\/([^/]+)?\/([^/]+)?\/([^/]+)?\/([^/]+)?\/')
# Instantiate signal to delegate further service enumeration.
delegate_service_enumeration = signal('delegate_service_enumeration')
delegate_service_enumeration.connect(delegator.receive_service_data)
def parse_results(ip, directory):
"""Find greppable nmap scan output, extract service data.
@param ip: IP Address
@param directory: Directory to search for scan input
"""
# Output structure to store results
results = {
ip: {
'tcp': [],
'udp': [],
},
}
# Find greppable nmap output files
scan_output = glob.glob('%s/*greppable*' % directory)
for output_file in scan_output:
contents = ''
with open(output_file, 'r') as fh:
contents = fh.read()
# Locate service-related output from file contents
services = SERVICE_PATTERN.findall(contents)
for service_entry in services:
try:
port, state, protocol, owner, service, rpc_info, version = service_entry
results[ip][protocol].append({
'port': port,
'state': state,
'owner': owner,
'service': service,
'version': version,
})
except Exception as exception:
pass
# Clean up scan files used for enumerator, standard nmap output files
# can stay.
os.remove(output_file)
return results
def scan(args):
"""Build output folder structure and initiate multiprocessing threads
@param args: tuple containing IP address and output directory
"""
ip, directory = args
config = Config().scan
# Ensure output directory exists; if it doesn't, create it
output_dir = '%s/%s' % (directory, ip)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print '[+] running TCP & UDP scans for host %s' % ip
pm = ProcessManager()
for process in PROCESSES:
pm.start_processes(process.get('command'), params={
'host': ip,
'output_dir': output_dir,
'scan_mode': process.get(config['mode']),
})
# nmap scans have completed at this point, send results to delegation
# system.
delegation_result = delegate_service_enumeration.send(
'enumerator.lib.nmap', scan_results=parse_results(ip, output_dir), directory=output_dir)
if __name__ == '__main__':
scan(sys.argv[1], sys.argv[2])
|
mrquim/mrquimrepo | script.module.exodus/lib/resources/lib/indexers/episodes.py | Python | gpl-2.0 | 67,539 | 0.011845 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleantitle
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
from resources.lib.modules import utils
import os,sys,re,json,zipfile,StringIO,urllib,urllib2,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict()
action = params.get('action')
control.moderator()
class seasons:
def __init__(self):
self.list = []
self.lang = control.apiLanguage()['tvdb']
self.showunaired = control.setting('showunaired') or 'true'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_by_imdb = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s'
self.tvdb_by_query = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s'
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
def get(self, tvshowtitle, year, imdb, tvdb, idx=True, create_directory=True):
if control.window.getProperty('PseudoTVRunning') == 'True':
return episodes().get(tvshowtitle, year, imdb, tvdb)
if idx == True:
self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tvdb, self.lang)
if create_directory == True: self.seasonDirectory(self.list)
return self.list
else:
self.list = self.tvdb_list(tvshowtitle, year, imdb, tvdb, 'en')
return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tvdb, lang, limit=''):
try:
if imdb == '0':
try:
imdb = trakt.SearchTVShow(tvshowtitle, year, full=False)[0]
imdb = imdb.get('show', '0')
imdb = imdb.get('ids', {}).get('imdb', '0')
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
if not imdb: imdb = '0'
except:
imdb = '0'
if tvdb == '0' and not imdb == '0':
url = self.tvdb_by_imdb % imdb
result = client.request(url, timeout='10')
try: tvdb = client.parseDOM(result, 'seriesid')[0]
except: tvdb = '0'
try: name = client.parseDOM(result, 'SeriesName')[0]
except: name = '0'
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
if len(dupe) > 0: tvdb = str(dupe[0])
if tvdb == '': tvdb = '0'
if tvdb == '0':
url = self.tvdb_by_query % (urllib.quote_plus(tvshowtitle))
| years = [str(year), str(int(year)+1), str(int(year)-1)]
tvdb = client.request(url, timeout='10')
tvdb = re.sub(r'[^\x00-\x7F]+', '', tvdb)
tvdb = client.replaceHTMLCodes(tvdb)
tvdb = client.parseDOM(tvdb, 'Series')
tvdb = [(x, client.parseDOM(x, 'SeriesName'), client.parseDOM(x, 'FirstAired')) fo | r x in tvdb]
tvdb = [(x, x[1][0], x[2][0]) for x in tvdb if len(x[1]) > 0 and len(x[2]) > 0]
tvdb = [x for x in tvdb if cleantitle.get(tvshowtitle) == cleantitle.get(x[1])]
tvdb = [x[0][0] for x in tvdb if any(y in x[2] for y in years)][0]
tvdb = client.parseDOM(tvdb, 'seriesid')[0]
if tvdb == '': tvdb = '0'
except:
return
try:
if tvdb == '0': return
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
dupe = client.parseDOM(result, 'SeriesName')[0]
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
if len(dupe) > 0:
tvdb = str(dupe[0]).encode('utf-8')
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
if not lang == 'en':
url = self.tvdb_info_link % (tvdb, lang)
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result2 = zip.read('%s.xml' % lang)
zip.close()
else:
result2 = result
artwork = artwork.split('<Banner>')
artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
artwork = [i for i in artwork if not 'seasonswide' in re.findall('<BannerPath>(.+?)</BannerPath>', i)[0]]
result = result.split('<Episode>')
result2 = result2.split('<Episode>')
item = result[0] ; item2 = result2[0]
episodes = [i for i in result if '<EpisodeNumber>' in i]
episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
locals = [i for i in result2 if '<EpisodeNumber>' in i]
result = '' ; result2 = ''
if limit == '':
episodes = []
elif limit == '-1':
seasons = []
else:
episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
seasons = []
try: poster = client.parseDOM(item, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == ' |
Advance2/ADVANCE-2.0-Terminal | terminal.py | Python | gpl-2.0 | 690 | 0.010145 | """
Advnace 2.0 Terminal system
created by DatOneLefty
DatOneLefty also develops ForceOS
"""
f = open("log.adv2", "w")
def init():
pass
def disp(text):
"""
use: to display text and to log what is displayed
How to: disp(te | xt to display)
"""
f.write("DISPLAY: " + text)
print text
def askq(quest):
"""
use: to ask a question and log it
How to: var_to_set = askq(question to ask)
"""
feedback = raw_input (quest)
f.write("QUE | STION: " + quest + " With answer: " + feedback)
def log(logid, logdisp):
f.write (logid + ": " + logdisp)
def stop():
import shutil
shutil.rmtree("save")
disp("The system will stop NOW")
|
ppolewicz/logfury | src/logfury/v0_1/__init__.py | Python | bsd-3-clause | 400 | 0.0025 | from .meta import AbstractTracePublicCallsMeta, DefaultTraceAbstractMeta, Default | TraceMeta, TraceAllPublicCallsMeta
from .trace_call import trace_ca | ll
from .tuning import limit_trace_arguments, disable_trace
assert AbstractTracePublicCallsMeta
assert DefaultTraceAbstractMeta
assert DefaultTraceMeta
assert TraceAllPublicCallsMeta
assert disable_trace
assert limit_trace_arguments
assert trace_call
|
dslab-epfl/asap | utils/release/findRegressions-simple.py | Python | bsd-2-clause | 4,045 | 0.020766 | #!/usr/bin/env python
import re, string, sys, os, time, math
DEBUG = 0
(tp, exp) = ('compile', 'exec')
def parse(file):
f = open(file, 'r')
d = f.read()
# Cleanup weird stuff
d = re.sub(r',\d+:\d', '', d)
r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)
test = {}
fname = ''
for t in r:
if DEBUG:
print t
if t[0] == 'PASS' or t[0] == 'FAIL' :
tmp = t[2].split('llvm-test/')
if DEBUG:
print tmp
if len(tmp) == 2:
fname = tmp[1].strip('\r\n')
else:
fname = tmp[0].strip('\r\n')
if not test.has_key(fname):
test[fname] = {}
test[fname][t[1] + ' state'] = t[0]
test[fname][t[1] + ' time'] = float('nan')
else :
try:
n = t[0].split('RESULT-')[1]
if DEBUG:
print "n == ", n;
if n == 'compile-success':
test[fname]['compile time'] = float(t[2].split('program')[1].strip('\r\n'))
elif n == 'exec-success':
test[fname]['exec time'] = float(t[2].split('program')[1].strip('\r\n'))
if DEBUG:
print test[fname][string.replace(n, '-success', '')]
else :
# print "ERROR!"
sys.exit(1)
except:
continue
return test
# Diff results and look for regressions.
def diffResults(d_old, d_new):
regressions = {}
passes = {}
removed = ''
for x in ['compile state', 'compile time', 'exec state', 'exec time']:
regressions[x] = ''
passes[x] = ''
for t in sorted(d_old.keys()) :
if d_new.has_key(t):
# Check if the test passed or failed.
for x in ['compile state', 'compile time', 'exec state', 'exec time']:
if not d_old[t].has_key(x) and not d_new[t].has_key(x):
continue
if d_old[t].has_key(x):
if d_new[t].has_key(x):
if d_old[t][x] == 'PASS':
if d_new[t][x] != 'PASS':
regressions[x] += t + "\n"
else:
if d_new[t][x] == 'PASS':
passes[x] += t + "\n"
else :
regressions[x] += t + "\n"
if x == 'compile state' or x == 'exec state':
continue
# For execution time, if there is no result it's a fail.
if not d_old[t].has_key(x) and not d_new[t].has_key(x):
continue
elif not d_n | ew[t].has_key(x):
regressions[x] += t + "\n"
elif not d_old[t].has_key(x):
passes[x] += t + "\n"
if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
continue
elif math.isnan(d_old[t][x]) and not math.isnan(d_new[t][x]):
passes[x] += t + "\n"
elif not math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
regressions[x] += t + ": NaN%\n"
if d_new[t][ | x] > d_old[t][x] and d_old[t][x] > 0.0 and \
(d_new[t][x] - d_old[t][x]) / d_old[t][x] > .05:
regressions[x] += t + ": " + "{0:.1f}".format(100 * (d_new[t][x] - d_old[t][x]) / d_old[t][x]) + "%\n"
else :
removed += t + "\n"
if len(regressions['compile state']) != 0:
print 'REGRESSION: Compilation Failed'
print regressions['compile state']
if len(regressions['exec state']) != 0:
print 'REGRESSION: Execution Failed'
print regressions['exec state']
if len(regressions['compile time']) != 0:
print 'REGRESSION: Compilation Time'
print regressions['compile time']
if len(regressions['exec time']) != 0:
print 'REGRESSION: Execution Time'
print regressions['exec time']
if len(passes['compile state']) != 0:
print 'NEW PASSES: Compilation'
print passes['compile state']
if len(passes['exec state']) != 0:
print 'NEW PASSES: Execution'
print passes['exec state']
if len(removed) != 0:
print 'REMOVED TESTS'
print removed
# Main
if len(sys.argv) < 3 :
print 'Usage:', sys.argv[0], '<old log> <new log>'
sys.exit(-1)
d_old = parse(sys.argv[1])
d_new = parse(sys.argv[2])
diffResults(d_old, d_new)
|
telecombcn-dl/2017-cfis | sessions/dream.py | Python | mit | 2,614 | 0.005738 | from keras.preprocessing.image import load_img, img_to_array
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from keras.applications import vgg16
from keras import backend as K
from keras.layers import Input
def preprocess_image(image_path,img_height,img_width):
img = load_img(image_path, target_size=(img_height, img_width))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg16.preprocess_input(img)
return img
# util function to convert a tensor into a valid image
def deprocess_image(x,img_height,img_width):
if K.image_dim_ordering() == 'th':
x = x.reshape((3, img_height, img_width))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_height, img_width, 3))
# Remove zero-center by mean pixel
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
def continuity_loss(x,img_height,img_width):
a = K.square(x[:, :img_height - 1, :img_width - 1, :] -
x[:, 1:, :img_width - 1, :])
b = K.square(x[:, :img_height - 1, :img_width - 1, :] -
x[:, :img_height - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
def eval_loss_and_grads(x,img_size,f_outputs):
x = x.reshape((1,) + img_size)
o | uts = f_outputs([x])
| loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
def __init__(self,img_size,f_outputs):
self.loss_value = None
self.grad_values = None
self.img_size = img_size
self.f_outputs = f_outputs
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x,self.img_size,
self.f_outputs)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
|
minlexx/pyevemon | esi_client/models/get_markets_structures_structure_id_200_ok.py | Python | gpl-3.0 | 11,924 | 0.001593 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetMarketsStructuresStructureId200Ok(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, duration=None, is_buy_order=None, issued=None, location_id=None, min_volume=None, order_id=None, price=None, range=None, type_id=None, volume_remain=None, volume_total=None):
"""
GetMarketsStructuresStructureId200Ok - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'duration': 'int',
'is_buy_order': 'bool',
'issued': 'datetime',
'location_id': 'int',
'min_volume': 'int',
'order_id': 'int',
'price': 'float',
'range': 'str',
'type_id': 'int',
'volume_remain': 'int',
'volume_total': 'int'
}
self.attribute_map = {
'duration': 'duration',
'is_buy_order': 'is_buy_order',
'issued': 'issued',
'location_id': 'location_id',
'min_volume': 'min_volume',
'order_id': 'order_id',
'price': 'price',
'range': 'range',
'type_id': 'type_id',
'volume_remain': 'volume_remain',
'volume_total': 'volume_total'
}
self._duration = duration
self._is_buy_order = is_buy_order
self._issued = issued
self._location_id = location_id
self._min_volume = min_volume
self._order_id = order_id
self._price = price
self._range = range
self._type_id = type_id
self._volume_remain = volume_remain
self._volume_total = volume_total
@property
def duration(self):
"""
Gets the duration of this GetMarketsStructuresStructureId200Ok.
duration integer
:return: The duration of this GetMarketsStructuresStructureId200Ok.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this GetMarketsStructuresStructureId200Ok.
duration integer
:param duration: The duration of this GetMarketsStructuresStructureId200Ok.
:type: int
"""
if duration is None:
raise ValueError("Invalid value for `duration`, must not be `None`")
self._duration = duration
@property
def is_buy_order(self):
"""
Gets the is_buy_order of this GetMarketsStructuresStructureId200Ok.
is_buy_order boolean
:return: The is_buy_order of this GetMarketsStructuresStructureId200Ok.
:rtype: bool
"""
return self._is_buy_order
@is_buy_order.setter
def is_buy_order(self, is_buy_order):
"""
Sets the is_buy_order of this GetMarketsStructuresStructureId200Ok.
is_buy_order boolean
:param is_buy_order: The is_buy_order of this GetMarketsStructuresStructureId200Ok.
:type: bool
"""
if is_buy_order is None:
raise ValueError("Invalid value for `is_buy_order`, must not be `None`")
self._is_buy_order = is_buy_order
@property
def issued(self):
"""
Gets the issued of this GetMarketsStructuresStructureId200Ok.
issued string
:return: The issued of this GetMarketsStructuresStructureId200Ok.
:rtype: datetime
"""
return self._issued
@issued.setter
def issued(self, issued):
"""
Sets the issued of this GetMarketsStructuresStructureId200Ok.
issued string
:param issued: The issued of this GetMarketsStructuresStructureId200Ok.
:type: datetime
"""
if issued is None:
raise ValueError("Invalid value for `issued`, must not be `None`")
self._issued = issued
@property
def location_id(self):
"""
Gets the location_id of this GetMarketsStructuresStructureId200Ok.
location_id integer
:return: The location_id of this GetMarketsStructuresStructureId200Ok.
:rtype: int
"""
return self._location_id
@location_id.setter
def location_id(self, location_id):
"""
Sets the location_id of this GetMarketsStructuresStructureId200Ok.
lo | cation_id integer
:param location_id: The | location_id of this GetMarketsStructuresStructureId200Ok.
:type: int
"""
if location_id is None:
raise ValueError("Invalid value for `location_id`, must not be `None`")
self._location_id = location_id
@property
def min_volume(self):
"""
Gets the min_volume of this GetMarketsStructuresStructureId200Ok.
min_volume integer
:return: The min_volume of this GetMarketsStructuresStructureId200Ok.
:rtype: int
"""
return self._min_volume
@min_volume.setter
def min_volume(self, min_volume):
"""
Sets the min_volume of this GetMarketsStructuresStructureId200Ok.
min_volume integer
:param min_volume: The min_volume of this GetMarketsStructuresStructureId200Ok.
:type: int
"""
if min_volume is None:
raise ValueError("Invalid value for `min_volume`, must not be `None`")
self._min_volume = min_volume
@property
def order_id(self):
"""
Gets the order_id of this GetMarketsStructuresStructureId200Ok.
order_id integer
:return: The order_id of this GetMarketsStructuresStructureId200Ok.
:rtype: int
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""
Sets the order_id of this GetMarketsStructuresStructureId200Ok.
order_id integer
:param order_id: The order_id of this GetMarketsStructuresStructureId200Ok.
:type: int
"""
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`")
self._order_id = order_id
@property
def price(self):
"""
Gets the price of this GetMarketsStructuresStructureId200Ok.
price number
:return: The price of this GetMarketsStructuresStructureId200Ok.
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""
Sets the price of this GetMarketsStructuresStructureId200Ok.
price number
:param price: The price of this GetMarketsStructuresStructureId200Ok.
:type: float
"""
if price is None:
raise ValueError("Invalid value for `price`, must not be `None`")
self._price = price
@property
def range(self):
"""
Gets the range of this GetMarketsStructuresStructureId200Ok.
range string
:return: The range of this GetMarketsStructuresStructureId200Ok.
:rtype: str
"""
return self._range
@range.setter
def range(self, range):
"""
Sets the range of this GetMarketsStructuresStructureId200Ok.
range string
:param range: The range of this GetMarketsStructuresStructureId200Ok.
:type: str
"""
allowed_values = ["station", "region", "solarsystem", "1", "2", "3", "4", "5", "10", "20", "30", "40"]
if range not in allowed_values:
raise ValueError(
"Invalid value for `range` ({0}), must be one of {1}"
.format(range, allowed_values)
)
self._range = range |
progrium/duplex | python/demo/demo.py | Python | mit | 626 | 0.011182 |
import asyncio
import websockets
imp | ort duplex
rpc = duplex.RPC("json")


@asyncio.coroutine
def echo(ch):
    """Echo service: receive one object on the channel and send it back."""
    message, _meta = yield from ch.recv()
    yield from ch.send(message)
rpc.register("echo", echo)
@asyncio.coroutine
def do_msgbox(ch):
    """Relay service: receive a text payload, then invoke the peer's
    "msgbox" method asynchronously with it.

    FIX: ``async`` became a reserved keyword in Python 3.7, so the original
    ``ch.call(..., async=True)`` is now a SyntaxError.  Passing the flag
    through ``**kwargs`` performs the exact same runtime call while parsing
    on every Python 3 version.
    """
    text, _meta = yield from ch.recv()
    yield from ch.call("msgbox", text, **{"async": True})
rpc.register("doMsgbox", do_msgbox)
@asyncio.coroutine
def server(conn, path):
    # Wrap each incoming websocket connection as an RPC peer and route its
    # messages until the connection closes.
    peer = yield from rpc.accept(conn)
    yield from peer.route()
# Serve the RPC endpoint over websockets on localhost:8001, forever.
start_server = websockets.serve(server, 'localhost', 8001)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
|
praba230890/PYPOWER | pypower/_compat.py | Python | bsd-3-clause | 103 | 0 |
"""
Compatibility helpers for older Python versions.
"""
import sys
PY2 = sys.versio | n_info[0] | == 2
|
Elico-Corp/odoo-addons | website_captcha_nogoogle_crm/__openerp__.py | Python | agpl-3.0 | 517 | 0 | # -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'nam | e': 'Contact Form CAPTCHA',
'version': '8.0.1.0.0',
'category': 'Website',
'depends': [
'website_crm',
'website_captcha_nogoogle',
],
'author': 'Elico Corp',
'license': ' | LGPL-3',
'website': 'https://www.elico-corp.com',
'data': [
'views/website_crm.xml',
],
'installable': True,
'auto_install': False,
}
|
tehamalab/dgs | goals/filters.py | Python | unlicense | 4,056 | 0 | from django import forms
from django.contrib.postgres.forms import SimpleArrayField
import django_filters
from .models import (Plan, Goal, Theme, Sector, Target, Indicator, Component,
Progress, Area, AreaType)
class SimpleIntegerArrayField(SimpleArrayField):
    """A SimpleArrayField of comma-separated integers.

    FIX: the original signature used ``base_field=forms.IntegerField()``,
    which is evaluated once at class-definition time, so every
    SimpleIntegerArrayField instance silently shared the same bound
    IntegerField object (mutable-default-argument pitfall).  A fresh
    IntegerField is now created per instance; callers passing their own
    ``base_field`` are unaffected.
    """
    def __init__(self, base_field=None, delimiter=',',
                 max_length=None, min_length=None, *args, **kwargs):
        if base_field is None:
            base_field = forms.IntegerField()
        super(SimpleIntegerArrayField, self).__init__(
            base_field=base_field, delimiter=delimiter,
            max_length=max_length, min_length=min_length, *args, **kwargs)
class IntegerArrayFilter(django_filters.Filter):
    """Filter whose form field parses comma-separated integers."""
    field_class = SimpleIntegerArrayField
class AreaFilter(django_filters.FilterSet):
    """Filter Areas by exact (case-insensitive) name/type plus code/level."""
    # NOTE(review): `name=`/`lookup_expr=` is the pre-2.0 django-filter
    # spelling (renamed to `field_name` in 2.0) -- consistent file-wide.
    name = django_filters.CharFilter(lookup_expr='iexact')
    type = django_filters.CharFilter(lookup_expr='iexact')
    class Meta:
        model = Area
        fields = ['code', 'level']
class SectorFilter(django_filters.FilterSet):
    """Filter Sectors by name/description and related parent/themes."""
    name = django_filters.CharFilter(lookup_expr='iexact')
    description = django_filters.CharFilter(lookup_expr='icontains')
    class Meta:
        model = Sector
        fields = ['parent', 'themes']
class PlanFilter(django_filters.FilterSet):
    """Filter Plans by exact (case-insensitive) name and code."""
    name = django_filters.CharFilter(lookup_expr='iexact')
    class Meta:
        model = Plan
        fields = ['code']
class ThemeFilter(django_filters.FilterSet):
    """Filter Themes by name/description, plan and the plan's code."""
    name = django_filters.CharFilter(lookup_expr='iexact')
    description = django_filters.CharFilter(lookup_expr='icontains')
    # Follows the FK to filter by the related plan's code.
    plan_code = django_filters.CharFilter(name='plan__code')
    class Meta:
        model = Theme
        fields = ['plan', 'code']
class GoalFilter(ThemeFilter):
    """Same filters as ThemeFilter, applied to the Goal model."""
    class Meta:
        model = Goal
        fields = ['plan', 'code']
class TargetFilter(django_filters.FilterSet):
    """Filter Targets by description, goal and code."""
    description = django_filters.CharFilter(lookup_expr='icontains')
    class Meta:
        model = Target
        fields = ['goal', 'code']
class IndicatorFilter(django_filters.FilterSet):
    """Filter Indicators, including by the goal reached via target."""
    goal = django_filters.ModelChoiceFilter(name='target__goal',
                                            queryset=Goal.objects.all())
    description = django_filters.CharFilter(lookup_expr='icontains')
    data_source = django_filters.CharFilter(lookup_expr='icontains')
    agency = django_filters.CharFilter(lookup_expr='iexact')
    # Matches indicators with at least this many progress records.
    progress_count = django_filters.NumberFilter(lookup_expr='gte')
    # Array containment test against the denormalized sector id list.
    sectors_ids = IntegerArrayFilter(lookup_expr='contains')
    class Meta:
        model = Indicator
        fields = ['plan_id', 'theme', 'target', 'sector', 'code']
class ComponentFilter(django_filters.FilterSet):
    """Filter Components by name/description, related goal and indicators."""
    name = django_filters.CharFilter(lookup_expr='icontains')
    description = django_filters.CharFilter(lookup_expr='icontains')
    goal = django_filters.ModelChoiceFilter(
        name='indicators__target__goal', queryset=Goal.objects.all())
    progress_count = django_filters.NumberFilter(lookup_expr='gte')
    class Meta:
        model = Component
        fields = ['indicators', 'code', 'stats_available']
class ProgressFilter(django_filters.FilterSet):
    """Filter Progress records by component/indicator/target/area and
    range lookups on year, fiscal_year and value."""
    indicator = django_filters.ModelChoiceFilter(
        name='component__indicators', queryset=Indicator.objects.all())
    target = django_filters.ModelChoiceFilter(
        name='component__indicators__target',
        queryset=Target.objects.all())
    # Convenience filters that traverse into the related Area and its type.
    area_code = django_filters.CharFilter(name='area__code')
    area_name = django_filters.CharFilter(name='area__name')
    area_type = django_filters.ModelChoiceFilter(
        name='area__type', queryset=AreaType.objects.all())
    area_type_code = django_filters.CharFilter(name='area__type__code')
    area_type_name = django_filters.CharFilter(name='area__type__name')
    class Meta:
        model = Progress
        fields = {
            'component': ['exact'],
            'area': ['exact'],
            'year': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'fiscal_year': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'value': ['exact', 'lt', 'lte', 'gt', 'gte']
        }
|
harryfb/DST5 | ArcPy Code/Topography.py | Python | apache-2.0 | 1,297 | 0.023901 | # import system modules
import arcpy
from arcpy import env
# Set environment settings.
# FIX: every Windows path below is now a raw string.  In Python 3 a plain
# "C:\Users\..." literal is a SyntaxError because "\U" begins a \Uxxxxxxxx
# unicode escape; raw strings keep the exact same characters and are also
# byte-identical under Python 2.
env.workspace = r"C:\Users\Ewan\Desktop\SFTPDST5\MapFiles"
try:
    # Set the local variables for the XY event layer.
    in_Table = "Topography.csv"
    x_coords = "x"
    y_coords = "y"
    out_Layer = "Topography_Layer"
    saved_Layer = r"c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\Topography.lyr"
    # Set the spatial reference (WGS 1984).
    # NOTE(review): "Systens" looks like a typo for "Systems", but the string
    # is kept as-is since it must match the actual .prj path on disk.
    spRef = r"Coordinate Systems\Geographic Coordinate Systens\World\WGS 1984.prj"
    # Make the XY Event Layer from the CSV coordinates.
    arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords, out_Layer, spRef)
    # Save to a layer file
    arcpy.SaveToLayerFile_management(out_Layer, saved_Layer)
except Exception as err:
    print(err.args[0])
# Set local variables for the point-to-raster conversion.
inFeatures = "Topography.lyr"
valField = "Topography"
outRaster = r"C:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\TopographyR"
assignmentType = "MOST_FREQUENT"
priorityField = ""
cellSize = 0.000005
# Execute PointToRaster
arcpy.PointToRaster_conversion(inFeatures, valField, outRaster, assignmentType, priorityField, cellSize)
## Assign colormap using clr file
arcpy.AddColormap_management(r"c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\TopographyR", "#", r"c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\colormap.clr")
YACOWS/PyNFe | pynfe/entidades/emitente.py | Python | lgpl-3.0 | 1,321 | 0.000757 | from base import Entidade
from pynfe.utils.flags import CODIGO_BRASIL
class Emitente(Entidade):
    """NF-e issuer ("emitente") data holder."""

    # --- Issuer identification ---
    razao_social = ''    # legal/corporate name (required)
    nome_fantasia = ''   # trade name
    cnpj = ''            # CNPJ (required)
    inscricao_estadual = ''  # state registration (required)
    cnae_fiscal = ''         # CNAE fiscal code
    inscricao_municipal = ''  # municipal registration
    inscricao_estadual_subst_tributaria = ''  # state reg. (tax substitute)
    codigo_de_regime_tributario = ''  # tax regime code (required)

    # --- Address ---
    endereco_logradouro = ''   # street (required)
    endereco_numero = ''       # number (required)
    endereco_complemento = ''  # complement
    endereco_bairro = ''       # district (required)
    endereco_cep = ''          # postal code
    endereco_pais = CODIGO_BRASIL  # country (Brazil only)
    endereco_uf = ''           # state/UF (required)
    endereco_municipio = ''    # municipality (required)
    endereco_cod_municipio = ''  # municipality code (optional)
    endereco_telefone = ''     # phone

    # --- Logo image ---
    logotipo = None

    def __str__(self):
        return self.cnpj
|
scode/pants | src/python/pants/backend/jvm/tasks/jvm_compile/scala/zinc_compile.py | Python | apache-2.0 | 12,212 | 0.007943 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import textwrap
from contextlib import closing
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.option.custom_types import dict_option
from pants.util.contextutil import open_zip
from pants.util.dirutil import relativize_paths, safe_open
# Well known metadata file required to register scalac plugins with nsc.
_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
class ZincCompile(JvmCompile):
"""Compile Scala and Java code using Zinc."""
_ZINC_MAIN = 'org.pantsbuild.zinc.Main'
_name = 'zinc'
_supports_concurrent_execution = True
  @staticmethod
  def write_plugin_info(resources_dir, target):
    """Write the scalac-plugin.xml descriptor for `target`.

    :param resources_dir: Base directory for per-target plugin resources.
    :param target: A scalac-plugin target providing `plugin` and `classname`.
    :returns: Tuple of (per-target resource root, descriptor file path).
    """
    root = os.path.join(resources_dir, target.id)
    plugin_info_file = os.path.join(root, _PLUGIN_INFO_FILE)
    with safe_open(plugin_info_file, 'w') as f:
      f.write(textwrap.dedent("""
        <plugin>
          <name>{}</name>
          <classname>{}</classname>
        </plugin>
      """.format(target.plugin, target.classname)).strip())
    return root, plugin_info_file
  @classmethod
  def subsystem_dependencies(cls):
    # Zinc needs the Scala toolchain and a JVM distribution locator.
    return super(ZincCompile, cls).subsystem_dependencies() + (ScalaPlatform, DistributionLocator)
  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    # Default zinc args; the -S prefix passes the flag through to scalac.
    return ('-S-encoding', '-SUTF-8', '-S-g:vars')
  @classmethod
  def get_warning_args_default(cls):
    # Scalac flags enabled when compile warnings are requested.
    return ('-S-deprecation', '-S-unchecked')
  @classmethod
  def get_no_warning_args_default(cls):
    # Scalac flag used to silence compile warnings.
    return ('-S-nowarn',)
  @classmethod
  def register_options(cls, register):
    """Register zinc/scalac plugin options and bootstrap the zinc tool jars."""
    super(ZincCompile, cls).register_options(register)
    register('--plugins', advanced=True, action='append', fingerprint=True,
             help='Use these scalac plugins.')
    register('--plugin-args', advanced=True, type=dict_option, default={}, fingerprint=True,
             help='Map from plugin name to list of arguments for that plugin.')
    register('--name-hashing', advanced=True, action='store_true', default=False, fingerprint=True,
             help='Use zinc name hashing.')
    # The zinc tool itself, shaded except for the interfaces shared with sbt.
    cls.register_jvm_tool(register,
                          'zinc',
                          classpath=[
                            JarDependency('org.pantsbuild', 'zinc', '1.0.8')
                          ],
                          main=cls._ZINC_MAIN,
                          custom_rules=[
                            # The compiler-interface and sbt-interface tool jars carry xsbt and
                            # xsbti interfaces that are used across the shaded tool jar boundary so
                            # we preserve these root packages wholesale along with the core scala
                            # APIs.
                            Shader.exclude_package('scala', recursive=True),
                            Shader.exclude_package('xsbt', recursive=True),
                            Shader.exclude_package('xsbti', recursive=True),
                          ])
    def sbt_jar(name, **kwargs):
      # Helper pinning all sbt artifacts to the same revision.
      return JarDependency(org='com.typesafe.sbt', name=name, rev='0.13.9', **kwargs)
    cls.register_jvm_tool(register,
                          'compiler-interface',
                          classpath=[
                            sbt_jar(name='compiler-interface',
                                    classifier='sources',
                                    # We just want the single compiler-interface jar and not its
                                    # dep on scala-lang
                                    intransitive=True)
                          ])
    cls.register_jvm_tool(register,
                          'sbt-interface',
                          classpath=[
                            sbt_jar(name='sbt-interface',
                                    # We just want the single sbt-interface jar and not its dep
                                    # on scala-lang
                                    intransitive=True)
                          ])
    # By default we expect no plugin-jars classpath_spec is filled in by the user, so we accept an
    # empty classpath.
    cls.register_jvm_tool(register, 'plugin-jars', classpath=[])
  def select(self, target):
    # Zinc compiles targets owning either Java or Scala sources.
    return target.has_sources('.java') or target.has_sources('.scala')
def select_source(self, source_file_path):
return source_file_path.endswith('.java') or source_file_path.endswith('.scala')
  def __init__(self, *args, **kwargs):
    """Set up per-instance plugin-resource dir and lazy plugin-arg cache."""
    super(ZincCompile, self).__init__(*args, **kwargs)
    # A directory independent of any other classpath which can contain per-target
    # plugin resource files.
    self._plugin_info_dir = os.path.join(self.workdir, 'scalac-plugin-info')
    # Computed on first plugin_args() call and memoized thereafter.
    self._lazy_plugin_args = None
  def create_analysis_tools(self):
    # Zinc-specific analysis parser/serializer pair, rooted at the real JDK home.
    return AnalysisTools(DistributionLocator.cached().real_home, ZincAnalysisParser(), ZincAnalysis)
  def zinc_classpath(self):
    """Classpath for invoking zinc, including tools.jar when a JDK is found."""
    # Zinc takes advantage of tools.jar if it's presented in classpath.
    # For example com.sun.tools.javac.Main is used for in process java compilation.
    def locate_tools_jar():
      try:
        return DistributionLocator.cached(jdk=True).find_libs(['tools.jar'])
      except DistributionLocator.Error:
        # Best-effort: zinc still works without a JDK, just slower.
        self.context.log.info('Failed to locate tools.jar. '
                              'Install a JDK to increase performance of Zinc.')
        return []
    return self.tool_classpath('zinc') + locate_tools_jar()
  def compiler_classpath(self):
    # The scala compiler jars, resolved via the ScalaPlatform subsystem.
    return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
  def extra_compile_time_classpath_elements(self):
    # Classpath entries necessary for our compiler plugins.
    return self.plugin_jars()
  def plugin_jars(self):
    """The classpath entries for jars containing code for enabled plugins."""
    if self.get_options().plugins:
      return self.tool_classpath('plugin-jars')
    else:
      return []
  def plugin_args(self):
    # Memoized: computed once per task instance.
    if self._lazy_plugin_args is None:
      self._lazy_plugin_args = self._create_plugin_args()
    return self._lazy_plugin_args
def _create_plugin_args(self):
if not self.get_options().plugins:
return []
plugin_args = self.get_options().plugin_args
active_plugins = self._find_plugins()
ret = []
for name, jar in active_plugins.items():
ret.append('-S-Xplugin:{}'.format(jar))
for arg in plugin_args.get(name, []):
ret.append('-S-P:{}:{}'.format(name, arg))
return ret
def _find_plugins(self):
"""Returns a map from plugin name to plugin jar."""
# Allow multiple flags and also comma-separated values in a single flag.
plugin_names = set([p for val in self.get_options().plugins for p in val.split(',')])
plugins = {}
buildroot = get_buildroot()
for jar in self.plugin_jars():
with open_zip(jar, 'r') as jarfile:
try:
with closing(jarfile.open(_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
plugin_info = ElementTree.parse(plugin_info_file).getroot()
if plugin_info.tag != 'plugin':
raise TaskError(
'File {} in {} is not a valid scalac plugin descriptor'.format(_PLUGIN_INFO_FILE,
|
dalf/searx | searx/engines/sepiasearch.py | Python | agpl-3.0 | 2,928 | 0.000342 | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
SepiaSearch (Videos)
"""
from json import loads
from dateutil import parser, relativedelta
from urllib.parse import urlencode
from datetime import datetime
# about
about = {
"website": 'https://sepiasearch.org',
"wikidata_id": None,
"official_api_documentation": "https://framagit.org/framasoft/peertube/search-index/-/tree/master/server/controllers/api", # NOQA
"use_official_api": True,
"require_api_key": False,
"results": 'JSON',
}
categories = ['videos']
paging = True
time_range_support = True
safesearch = True
supported_languages = [
'en', 'fr', 'ja', 'eu', 'ca', 'cs', 'eo', 'el',
'de', 'it', 'nl', 'es', 'oc', 'gd', 'zh', 'pt',
'sv', 'pl', 'fi', 'ru'
]
base_url = 'https://sepiasearch.org/api/v1/search/videos'
safesearch_table = {
0: 'both',
1: 'false',
2: 'false'
}
time_range_table = {
'day': relativedelta.relativedelta(),
'week': relativedelta.relativedelta(weeks=-1),
'month': relativede | lta.relativedelta(months=-1),
'year': re | lativedelta.relativedelta(years=-1)
}
embedded_url = '<iframe width="540" height="304" src="{url}" frameborder="0" allowfullscreen></iframe>'
def minute_to_hm(minute):
    """Render an integer minute count as "H:MM"; non-ints yield None."""
    if not isinstance(minute, int):
        return None
    hours, minutes = divmod(minute, 60)
    return "%d:%02d" % (hours, minutes)
def request(query, params):
    """Build the SepiaSearch API request URL into params['url']."""
    # 10 results per page; '-match' sorts by descending relevance.
    params['url'] = base_url + '?' + urlencode({
        'search': query,
        'start': (params['pageno'] - 1) * 10,
        'count': 10,
        'sort': '-match',
        'nsfw': safesearch_table[params['safesearch']]
    })
    # Strip any region suffix ('en-US' -> 'en') before matching.
    language = params['language'].split('-')[0]
    if language in supported_languages:
        params['url'] += '&languageOneOf[]=' + language
    if params['time_range'] in time_range_table:
        # NOTE(review): for 'day' the delta is zero, so startDate is today's
        # date -- confirm that is the intended "last day" semantics.
        time = datetime.now().date() + time_range_table[params['time_range']]
        params['url'] += '&startDate=' + time.isoformat()
    return params
def response(resp):
    """Parse the JSON API response into searx video result dicts."""
    results = []
    search_results = loads(resp.text)
    # Defensive: an error payload has no 'data' key.
    if 'data' not in search_results:
        return []
    for result in search_results['data']:
        title = result['name']
        content = result['description']
        thumbnail = result['thumbnailUrl']
        # 'publishedAt' is an ISO timestamp string; dateutil parses it.
        publishedDate = parser.parse(result['publishedAt'])
        embedded = embedded_url.format(url=result.get('embedUrl'))
        author = result.get('account', {}).get('displayName')
        # API duration is in minutes; rendered as "H:MM" (None if absent).
        length = minute_to_hm(result.get('duration'))
        url = result['url']
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'author': author,
                        'length': length,
                        'template': 'videos.html',
                        'publishedDate': publishedDate,
                        'embedded': embedded,
                        'thumbnail': thumbnail})
    return results
|
boisde/Greed_Island | business_logic/order_collector/transwarp/orm.py | Python | mit | 11,968 | 0.003593 | #!/usr/bin/env python
# coding:utf-8
"""
Database operation module. This module is independent with web module.
"""
import time, logging
import db
class Field(object):
    """Base descriptor for one ORM column definition.

    Keyword options: name, ddl, default (value or zero-arg callable),
    comment, nullable, updatable, insertable, unique_key, key, primary_key.
    """
    # Global declaration counter: records the order fields were defined in
    # so the schema generator can emit columns as written.
    _count = 0

    def __init__(self, **kwargs):
        self.name = kwargs.get('name', None)
        self.ddl = kwargs.get('ddl', '')
        self._default = kwargs.get('default', None)
        self.comment = kwargs.get('comment', '')
        self.nullable = kwargs.get('nullable', False)
        self.updatable = kwargs.get('updatable', True)
        self.insertable = kwargs.get('insertable', True)
        self.unique_key = kwargs.get('unique_key', False)
        self.non_unique_key = kwargs.get('key', False)
        self.primary_key = kwargs.get('primary_key', False)
        self._order = Field._count
        Field._count += 1

    @property
    def default(self):
        """The default value; callables are invoked on each access."""
        value = self._default
        if callable(value):
            return value()
        return value

    def __str__(self):
        # Flag letters: N=nullable, U=updatable, I=insertable.
        flags = ''.join(letter for letter, enabled in
                        (('N', self.nullable),
                         ('U', self.updatable),
                         ('I', self.insertable)) if enabled)
        return '<%s:%s,%s,default(%s),%s>' % (
            self.__class__.__name__, self.name, self.ddl, self._default, flags)
class StringField(Field):
    """varchar(255) column, default ''."""
    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'varchar(255)')
        super(StringField, self).__init__(**kw)
class IntegerField(Field):
    """bigint column, default 0."""
    def __init__(self, **kw):
        kw.setdefault('default', 0)
        kw.setdefault('ddl', 'bigint')
        super(IntegerField, self).__init__(**kw)
class FloatField(Field):
    """real column, default 0.0."""
    def __init__(self, **kw):
        kw.setdefault('default', 0.0)
        kw.setdefault('ddl', 'real')
        super(FloatField, self).__init__(**kw)
class BooleanField(Field):
    """bool column, default False."""
    def __init__(self, **kw):
        kw.setdefault('default', False)
        kw.setdefault('ddl', 'bool')
        super(BooleanField, self).__init__(**kw)
class TextField(Field):
    """text column, default ''."""
    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'text')
        super(TextField, self).__init__(**kw)
class BlobField(Field):
    """blob column, default ''."""
    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'blob')
        super(BlobField, self).__init__(**kw)
class VersionField(Field):
    """bigint version column for optimistic locking, default 0."""
    def __init__(self, name=None):
        super(VersionField, self).__init__(name=name, default=0, ddl='bigint')
class DateTimeField(Field):
    """datetime column; no implicit default."""
    def __init__(self, **kw):
        kw.setdefault('ddl', 'datetime')
        super(DateTimeField, self).__init__(**kw)
class DateField(Field):
    """date column; no implicit default."""
    def __init__(self, **kw):
        kw.setdefault('ddl', 'date')
        super(DateField, self).__init__(**kw)
class EnumField(Field):
    """enum column; no implicit default."""
    def __init__(self, **kw):
        kw.setdefault('ddl', 'enum')
        super(EnumField, self).__init__(**kw)
# Hook names a model class may define; missing ones are filled with None.
_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])
def _gen_sql(table_name, mappings):
    """Generate a MySQL CREATE TABLE statement from {attr: Field} mappings.

    NOTE(review): Python-2-only code -- `sorted(..., cmp_function)` and the
    `cmp` builtin were removed in Python 3 (use key=lambda f: f._order).
    """
    pk, unique_keys, keys = None, [], []
    sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name]
    # Emit columns in declaration order (Field._order).
    for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)):
        if not hasattr(f, 'ddl'):
            raise StandardError('no ddl in field "%s".' % f)
        ddl = f.ddl
        nullable = f.nullable
        has_comment = not (f.comment == '')
        has_default = f._default is not None
        # and/or chains emulate conditional expressions (py2-era style).
        left = nullable and ' `%s` %s' % (f.name, ddl) or ' `%s` %s not null' % (f.name, ddl)
        mid = has_default and ' default \'%s\'' % f._default or None
        right = has_comment and ' comment \'%s\',' % f.comment or ','
        line = mid and '%s%s%s' % (left, mid, right) or '%s%s' % (left, right)
        # Primary key wins over default/comment rendering and forces auto_increment.
        if f.primary_key:
            pk = f.name
            line = ' `%s` %s not null auto_increment,' % (f.name, ddl)
        elif f.unique_key:
            unique_keys.append(f.name)
        elif f.non_unique_key:
            keys.append(f.name)
        sql.append(line)
    for uk in unique_keys:
        sql.append(' unique key(`%s`),' % uk)
    for k in keys:
        sql.append(' key(`%s`),' % k)
    sql.append(' primary key(`%s`)' % pk)
    sql.append(')ENGINE=InnoDB DEFAULT CHARSET=utf8;')
    return '\n'.join(sql)
class ModelMetaclass(type):
    """
    Metaclass for model objects.

    Collects Field attributes into __mappings__, records the primary key,
    derives __table__ from the class name and injects a __sql__ generator.
    NOTE(review): uses dict.iteritems/iterkeys -- Python 2 only.
    """
    def __new__(cls, name, bases, attrs):
        # skip base Model class:
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)
        # store all subclasses info:
        if not hasattr(cls, 'subclasses'):
            cls.subclasses = {}
        if not name in cls.subclasses:
            cls.subclasses[name] = name
        else:
            logging.warning('Redefine class: %s', name)
        logging.info('Scan ORMapping %s...', name)
        mappings = dict()
        primary_key = None
        for k, v in attrs.iteritems():
            if isinstance(v, Field):
                # A Field with no explicit name takes its attribute name.
                if not v.name:
                    v.name = k
                logging.debug('Found mapping: %s => %s' % (k, v))
                # check duplicate primary key:
                if v.primary_key:
                    if primary_key:
                        raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
                    if v.updatable:
                        # logging.warning('NOTE: change primary key to non-updatable.')
                        v.updatable = False
                    if v.nullable:
                        # logging.warning('NOTE: change primary key to non-nullable.')
                        v.nullable = False
                    primary_key = v
                mappings[k] = v
        # check exist of primary key:
        if not primary_key:
            raise TypeError('Primary key not defined in class: %s' % name)
        # Remove the Field descriptors so instance attrs go through dict access.
        for k in mappings.iterkeys():
            attrs.pop(k)
        if '__table__' not in attrs:
            attrs['__table__'] = name.lower()
        attrs['__mappings__'] = mappings
        attrs['__primary_key__'] = primary_key
        # Late-bound SQL generator closure over this class's table/mappings.
        attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
        # Ensure every trigger hook exists, defaulting to None (disabled).
        for trigger in _triggers:
            if trigger not in attrs:
                attrs[trigger] = None
        return type.__new__(cls, name, bases, attrs)
class Model(dict):
"""
Base class for ORM.
>>> class User(Model):
... id = IntegerField(primary_key=True)
... name = StringField()
... email = StringField(updatable=False)
... passwd = StringField(default=lambda: '******')
... last_modified = FloatField()
... def pre_insert(self):
... self.last_modified = time.time()
>>> u = User(id=10190, name='Michael', email='orm@db.org')
>>> r = u.insert()
>>> u.email
'orm@db.org'
>>> u.passwd
'******'
>>> u.last_modified > (time.time() - 2)
True
>>> f = User.get(10190)
>>> f.name
u'Michael'
>>> f.email
u'orm@db.org'
>>> f.email = 'changed@db.org'
>>> r = f.update() # change email but email is non-updatable!
>>> len(User.find_all())
1
>>> g = User.get(10190)
>>> g.email
u'orm@db.org'
>>> r = g.mark_deleted()
>>> len(db.select('select * from user where id=10190'))
0
>>> import json
>>> print User().__sql__()
-- generating SQL for user:
create table `user` (
`id` bigint not null,
`name` varchar(255) not null,
`email` varchar(255) not null,
`passwd` varchar(255) not null,
`last_modified` real not null,
primary key(`id`)
);
"""
__metaclass__ = ModelMetaclass
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' ob |
abadger/mondegreen | mondegreen/config.py | Python | gpl-3.0 | 3,476 | 0.00374 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Toshio Kuratomi
# License: GPLv3+
#
# This file is part of Mondegreen.
#
# Mondegreen is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this file. If not, see <http://www.gnu.org/licenses/>.
#
'''
--------------------------------
Base Config and Argument Parsing
--------------------------------
In Mondegreen, configuration and argument parsing are somewhat merged. Most
arguments are saved into a :class:`~configobj.ConfigObj` and accessed from
there.
This module sets up pieces of the configuration that any program using the
mondegreen framework will have access to. Configuration of the adapters, for
instance. Everything in here should be extendable by the individual
Mondegreen program. That way, the different services can add additional
config values and command line switches.
.. codeauthor:: Toshio Kuratomi <toshio@fedoraproject.org>
.. sectionauthor:: Toshio Kuratomi <toshio@fedoraproject.org>
.. versionadded:: 0.1
'''
import ast
from argparse import ArgumentParser
from urllib.parse import urlparse
from validate import Validator, ValidateError
from . import __version__
### TODO: The validators and configspec specific to an adaptor should be
### placed alongside the adaptor rather than here. Need a plugin system
### that we can iterate over to load them into the default set.
# Filter keys accepted in an idonethis read_filter value.
IDONETHIS_FILTERS = frozenset(('team',))


def idt_read_filter_check(value):
    """Validate a read_filter config value: a dict keyed by known filters."""
    parsed = ast.literal_eval(value)
    if not isinstance(parsed, dict):
        raise ValidateError('read_filter must be a dict')
    unknown = frozenset(parsed.keys()) - IDONETHIS_FILTERS
    if unknown:
        raise ValidateError('invalid filter types specified: {0}'.format(
            unknown))
    return parsed
def url_filter_check(value, schemes=('http', 'https'), non_local=True):
    """Validate a URL config value.

    Requires one of the given schemes; when non_local is true the URL
    must also name a remote host.
    """
    parts = urlparse(value)
    if parts.scheme not in schemes:
        raise ValidateError('url not one of the allowed schemes: {0}'.format(
            schemes))
    if non_local and not parts.netloc:
        raise ValidateError('url must specify a remote server')
    return value
# Validator wired with the custom checks defined above.
# NOTE(review): the spec below references `idt_posting_team` and
# `slack_posting_channel` checks that are NOT registered here -- confirm
# they are provided by the individual Mondegreen programs.
validator = Validator({'idt_read_filter': idt_read_filter_check,
                       'url': url_filter_check})
# configspec (configobj format) shared by all Mondegreen programs.
combinedspec = '''
[idonethis]
auth_token=string
posting_team=string|idt_posting_team
read_filter=idt_read_filter
[slack]
webhook=url
posting_channel=string|slack_posting_channel
'''.splitlines()
class BaseArgParser(ArgumentParser):
    """ArgumentParser pre-loaded with switches common to all Mondegreen
    programs (config files, version, posting targets)."""
    def __init__(self, *args, **kwargs):
        super(BaseArgParser, self).__init__(*args, **kwargs)
        # NOTE(review): with action='append', argparse appends into this
        # very default list object, so repeated parse_args() calls on one
        # parser accumulate values -- confirm parsers are single-use.
        self.add_argument('--config-file', '-f', dest='config', action='append',
                          default=list())
        self.add_argument('--version', action='version', version=__version__)
        self.add_argument('--idt-posting-team',
                          dest='idt_posting_team')
        self.add_argument('--slack-posting-channel',
                          dest='slack_posting_channel')
|
Csega/PythonCAD3 | pythoncad_qt.py | Python | gpl-2.0 | 1,067 | 0.005623 | #!/usr/bin/env python
#
# This is only needed for Python v2 but is harmless for Python v3.
#
import PyQt5.sip as sip
sip.setapi('QString', 2)
#
from PyQt5 import QtCore, QtGui, QtWidgets
#
import sys
import os
import sqlite3 as sqlite
#
# this is needed for me to use unpickle objects
#
# Make the Generic package (and its Kernel/Interface subpackages) importable.
genericPath = os.path.join(os.getcwd(), 'Generic')
sys.path.append(genericPath)
sys.path.append(os.path.join(genericPath, 'Kernel'))
sys.path.append(os.path.join(genericPath, 'Interface'))
#
from Interface.cadwin | dow import CadWindowMdi
def getPythonCAD():
app = QtWidgets.QApplication(sys.argv)
# Splash screen
splashPath = os.path.join(os.getcwd(), 'icons', 'splashScreen1.png')
splash_pix = QtGui.QPixmap(splashPath)
splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.W | indowStaysOnTopHint)
splash.setMask(splash_pix.mask())
splash.show()
w = CadWindowMdi()
w.show()
# End of splash screen
splash.finish(w)
return w, app
if __name__ == '__main__':
w, app = getPythonCAD()
sys.exit(app.exec_())
|
masml/masmlblog | Pandas/pandas_masml.py | Python | mit | 7,022 | 0.017374 |
# coding: utf-8
# In[1]:
#Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
#Read csv file
#data_frame = pd.read_csv('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.csv')
#Read excel file
data_frame_1 = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.xlsx', "nyc_1")
data_frame_2 = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.xlsx', "nyc_2")
# In[3]:
#Web Scraping
#Libraries- html5lib, lxml, BeautifulSoup4
import html5lib
from lxml import etree
url = 'https://en.wikipedia.org/wiki/States_and_union_territories_of_India'
df_web = pd.read_html(url, header = 0, flavor='html5lib')
# In[4]:
df_web[2].set_index('Vehicle code')
# In[5]:
df = data_frame_2.set_index('EST')
df
# In[6]:
#We need to remove all the missing data from our dataset.
#So we have 3 options
#1. Remove all the unneccessary elements
#2. Replace the unavailable elements with some base value
#3. Improve upon the previous by interpolation
#Option 1
method_1 = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.xlsx', "nyc_2",
na_values={'Events':['n.a'], 'WindSpeedMPH':[-1]})
'''
There are 3 parameters: pd.read_excel('file_path','sheet_name','na_values')
na values are the missing values in the dataset. Your dataset may have different messy values like 'n.a', 'not available'
(for string types) and negative values for numeric types (not necessarily like penalty score is a correct negative value)
We are specifying all the messy values using a dictionary in which the column name and list of messy values is specified
'''
#Removes all those na_values
method_1.dropna()
# In[7]:
#Option 2
method_2 = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.xlsx', "nyc_2",
na_values={'Events':['n.a'], 'WindSpeedMPH':[-1]})
'''
There are 3 parameters: pd.read_excel('file_path','sheet_name','na_values')
na values are the missing values in the dataset. Your dataset may have different messy values like 'n.a', 'not available'
(for string types) and negative values for numeric types (not necessarily like penalty score is a correct negative value)
We are specifying all the messy values using a dictionary in which the column name and list of messy values is specified
'''
#If we do not want to remove the na values as in the previous step, we can replace the na values with some chosen base value
method_2.fillna({
'WindSpeedMPH':0,
'Events':"Sunny",
'CloudCover':3
})
# In[8]:
'''
If you do not want to specify your own base values and want it to be based on certain criterion
like same as the previous or the next day with an available values, so we need to specify that as well
df.fillna(method="method_name")
You have a lot of options like ffill to carry forward the previous value, bfill (just the opposite) and so on
To view the complete documentation, you can refer to
" http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html"
'''
method_2.fillna(method="ffill", inplace=True)
'''
We specified an extra parameter inplace. What it does is that it performs all the changes on the original dataframe itself which
is by default false.
method_2.fillna(method="ffill")
This would have made no changes in the original dataframe, i.e., method_2 itself.
'''
method_2
# In[9]:
#Option 3
method_3 = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\nyc_weather.xlsx', "nyc_2",
na_values={'Events':['n.a'], 'WindSpeedMPH':[-1]})
method_3.interpolate()
#By default, interpolate implements a linear approach to fill the data.(9th and 10th value changed)
#You can specify the method by introducing the method parameter which can be done as follows:
method_3.interpolate(method="quadratic")
#Note the change in the values due to linear and quadratic approach. You have a lot of different methods which
#can be referred in the provided link above
# In[10]:
df= method_2.groupby('Events')
df.get_group('Snow')
# In[11]:
df= method_2.groupby('Events')
for events, events_df in df:
print(events)
print(events_df)
get_ipython().magic('matplotlib inline')
df.plot()
# In[12]:
#Concating two parts of a dataframe
df1=data_frame_1
df2=data_frame_2
merge_df = pd.merge(df1,df2,on="EST")
merge_df
#It takes redundant columns into consideration too, axis=1 implies vertical concatentation
# In[13]:
concat_frame_1 = pd.read_csv('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\weather2.csv')
concat_frame_2 = pd.read_csv('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\weather3.csv')
#concated_df= pd.concat([concat_frame_1,concat_frame_2],ignore_index=True)
concated_df= pd.concat([concat_frame_1,concat_frame_2],keys=["Set_1","Set_2"])
#By default, concat retains the index of the original data frame, so you can either seperate the groups by providing | a list of keys
#or to have a continuous indexing, just apply ignore_index=True
concated_df
# In[14]:
cross_tab_df = pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\survey.xls')
pd.crosstab([cross_tab_df.Nationality,cross_tab_df.Sex],cross_tab_df.Handedness,margins | =False,normalize="index")
#check the index parameter
# In[15]:
df= pd.read_csv('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\weather3.csv')
print(df)
#Make sure
transform_df_1= df.pivot(index="date", columns="city")
print(transform_df_1)
transform_df_2= df.pivot_table(index="date", columns="city", margins=True, aggfunc='sum')
print(transform_df_2)
df.stack(level=0)
# In[16]:
stack_df= pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\stocks_3_levels.xlsx', header=[0,1,2])
stack_df
stack_df.stack()
#There is a level parameter which is by default set to the innermost level (in this case= 2)
# In[17]:
#Make sure if you are changing value, you may encounter some NaN values since the columns are divided
#and choosing a higher level may not have all the values
stack_df.stack(level=1)
# In[21]:
#Let us look at the general information we can get about a given data set
analytics_df= pd.read_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\survey.xls')
#Shape shows the dimensions of the dataset
print(analytics_df.shape)
#Describe shows the overall statistics of the dataset like mean, standard deviation, etc
print(analytics_df.describe())
#Replace is used to replace the data with some other value of your choice
df_replaced= analytics_df.replace(['Male','Female'],[0,1], inplace=True)
df_replaced= analytics_df.replace(['Left','Right'],[0,1])
'''analytics_df.replace({
'Sex':"[A-Za-z]"
},"lol",regex=True)'''
df_replaced
# In[19]:
final= stack_df.stack()
#Now that we have a final dataset for use, let us write it for future use.
#The write is similar to the read and we can write in various formats like csv,xls,etc.
final.to_csv('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\processed_dataset.csv')
final.to_excel('C:\\Users\\hp\\Desktop\\MAS ML\\datasets\\processed_dataset.xlsx')
|
code-dice/dice | dice/client/__init__.py | Python | gpl-2.0 | 12,056 | 0 | from __future__ import print_function
import argparse
import collections
import io
import json
import logging
import os
# pylint: disable=import-error
import queue
import random
import re
import requests
import sys
import traceback
import threading
import time
from ..core import provider
from ..utils import rnd
from . import window
logger = logging.getLogger('dice')
class _TestThread(threading.Thread):
"""
Thread class for running the main tests.
"""
def __init__(self, exc_queue, app, **kwargs):
threading.Thread.__init__(self, **kwargs)
self.exc_queue = exc_queue
self.app = app
def run(self):
try:
self.app.run_tests()
# pylint: disable=broad-except
except Exception:
self.exc_queue.put(sys.exc_info())
class _TestStat(object):
"""
Class to store the tests and statistics information.
"""
def __init__(self, key, queue_ma | x=100, method='exact'):
self.key = key
self.counter = 0
self.queue_max = queue_max
self.method = method
self.queue = collections.deque([], queue_max)
def match(self, text):
if self.method == 'exact':
return text == self.key
elif self.method == 'regex':
return re.match(self.key + '$', text)
de | f append(self, result):
self.counter += 1
self.queue.append(result)
def extend(self, stat):
for result in stat.queue:
self.append(result)
class DiceApp(object):
"""
Curses-based DICE client application.
"""
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'providers',
nargs='?',
action='store',
help="list of test providers separated by ','. Default to current "
"working directory",
default=os.getcwd(),
)
self.parser.add_argument(
'--server',
action='store',
help='server address',
dest='server',
default=None,
)
self.parser.add_argument(
'--port',
action='store',
help='server port',
dest='port',
default='8067',
)
self.parser.add_argument(
'--username',
action='store',
help='server authentication user name',
dest='username',
)
self.parser.add_argument(
'--password',
action='store',
help='server authentication password',
dest='password',
)
self.parser.add_argument(
'--no-ui',
action='store_false',
help="don't show terminal interactive user interface.",
dest='ui',
default=True,
)
self.args, _ = self.parser.parse_known_args()
try:
self.providers = self._process_providers()
except provider.ProviderError as detail:
exit(detail)
self.stats = {
"skip": {},
"failure": {},
"success": {},
"timeout": {},
"expected_neg": {},
"unexpected_neg": {},
"unexpected_pass": {},
}
self.QUEUE_MAX = 100
self.exiting = False
self.pause = False
self.setting_watch = False
self.show_log = False
self.watching = ''
self.scroll_x = 0
self.scroll_y = 0
self.test_excs = queue.Queue()
self.test_thread = _TestThread(self.test_excs, self)
self.send_queue = []
self.last_send_thread = None
self.last_item = None
self.cur_counter = 'failure'
if self.args.ui:
self.window = window.Window(self)
self.window.stat_panel.set_select_callback(self._update_items)
self.window.stat_panel.add_keypress_listener(
'merge_stat', 'm', self._merge_stat)
self.window.items_panel.set_select_callback(self._update_content)
self.stream = io.StringIO()
self.cur_class = (None, None)
self.cur_item = (None, None)
def _update_items(self, cat_name, item_idx):
self.cur_class = (cat_name, item_idx)
def _update_content(self, cat_name, item_idx):
self.cur_item = (cat_name, item_idx)
def _merge_stat(self, panel):
self.pause = True
cat_name, _ = panel.cur_key
text = self.window.get_input()
match_keys = []
for key in self.stats[cat_name]:
res = re.match(text, key)
if res is not None:
match_keys.append(key)
stat = self.stats[cat_name][text] = _TestStat(text, method='regex')
for key in match_keys:
stat.extend(self.stats[cat_name][key])
del self.stats[cat_name][key]
self.pause = False
def _stat_result(self, item):
"""
Categorizes and keep the count of a result of a test item depends on
the expected failure patterns.
"""
res = item.res
fail_patts = item.fail_patts
key = res.stderr
catalog = None
if res:
if res.exit_status == 'timeout':
catalog = 'timeout'
if self.watching and self.watching in res.stderr:
self.pause = True
if fail_patts:
if res.exit_status == 'success':
catalog = 'unexpected_pass'
elif res.exit_status == 'failure':
found = False
for patt in fail_patts:
if re.search(patt, res.stderr):
catalog = 'expected_neg'
key = patt
found = True
break
if not found:
catalog = 'unexpected_neg'
else:
if res.exit_status == 'success':
catalog = 'success'
elif res.exit_status == 'failure':
catalog = 'failure'
else:
catalog = 'skip'
found = False
for stat in self.stats[catalog].values():
if stat.match(key):
found = True
key = stat.key
break
if not found:
self.stats[catalog][key] = _TestStat(key)
stat = self.stats[catalog][key]
stat.append(res)
def _process_providers(self):
"""
Print a list of available providers if --list-providers is set
or return a dict of specified providers.
"""
providers = {}
if self.args.providers:
for path in self.args.providers.split(','):
prvdr = provider.Provider(path)
providers[prvdr.name] = prvdr
else:
sys.exit('Error: --providers option not specified')
return providers
def _send(self, item_queue):
"""
Serialize a list of test results and send them to remote server.
"""
content = []
for item in item_queue:
content.append(item.serialize())
data = json.dumps(content)
headers = {}
headers['content-type'] = 'application/json'
url = 'http://%s:%s/api/tests/' % (self.args.server, self.args.port)
try:
response = requests.post(
url,
data=data,
headers=headers,
auth=(self.args.username, self.args.password),
)
if response.status_code != 201:
logger.debug('Failed to send result (HTTP%s):',
response.status_code)
if 'DOCTYPE' in response.text:
html_path = 'debug_%s.html' % rnd.regex('[a-z]{4}')
with open(html_path, 'w') as fp:
fp.write(response.text)
logger.debug('Html response saved to %s',
os.path.abspath(html_path))
|
frasern/ADL_LRS | oauth_provider/tests/xauth.py | Python | apache-2.0 | 2,360 | 0.002966 | # -*- coding: utf-8 -*-
import time
import urllib
from urlparse import parse_qs
from oauth_provider.tests.auth import BaseOAuthTestCase, METHOD_URL_QUERY, METHOD_AUTHORIZATION_HEADER, METHOD_POST_REQUEST_BODY
class XAuthTestCase(BaseOAuthTestCase):
def setUp(self):
super(XAuthTestCase, self).setUp()
self.consumer.xauth_allowed = True
self.consumer.save()
def _accesss_token(self, method=METHOD_URL_QUERY):
parameters = {
"oaut | h_consumer_key": self.CONSUMER_KEY,
"oauth_consumer_secret": self.CONSUMER_SECRET,
"oauth_nonce": "12981230918711",
'oauth_signature_method': 'PLAINTEXT',
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, ""),
'oauth_timestamp': str(int(time.time())),
'oauth_version': '1.0',
'x_auth_mode': "client_auth",
'x_auth_password': self.password,
| 'x_auth_username': self.username,
}
if method==METHOD_AUTHORIZATION_HEADER:
header = self._get_http_authorization_header(parameters)
response = self.c.get("/oauth/access_token/", HTTP_AUTHORIZATION=header)
elif method==METHOD_URL_QUERY:
response = self.c.get("/oauth/access_token/", parameters)
elif method==METHOD_POST_REQUEST_BODY:
body = urllib.urlencode(parameters)
response = self.c.post("/oauth/access_token/", body, content_type="application/x-www-form-urlencoded")
else:
raise NotImplementedError
self.assertEqual(response.status_code, 200)
response_params = parse_qs(response.content)
self.ACCESS_TOKEN_KEY = response_params['oauth_token'][0]
self.ACCESS_TOKEN_SECRET = response_params['oauth_token_secret'][0]
def test_xauth(self):
self._access_token(x_auth_mode="client_auth",
x_auth_password=self.password,
x_auth_username=self.username)
assert self.ACCESS_TOKEN_KEY
assert self.ACCESS_TOKEN_SECRET
def test_xauth_using_email(self):
self._access_token(x_auth_mode="client_auth",
x_auth_password=self.password,
x_auth_username=self.email)
assert self.ACCESS_TOKEN_KEY
assert self.ACCESS_TOKEN_SECRET |
brosner/django-sqlalchemy | tests/apps/inventory/models.py | Python | bsd-3-clause | 250 | 0.012 | from django.db import | models
class Tag(models.Model):
tag = models.CharField(max_length=10, primary_key=True)
class Product(models.Model):
name = models.CharField(max_length=18, primary_key=True)
tags = models.ManyToManyFi | eld(Tag)
|
antmicro/distant-bes | distantbes/enums.py | Python | apache-2.0 | 1,058 | 0.006616 | from enum import Enum
EXIT_CODES = [
"SUCCESS",
"BUILD_FAILURE",
"PARSING_FAILURE",
"COMMAND_LINE_ERROR",
"TESTS_FAILED",
"PARTIAL_ANALYSIS_FAILURE",
"NO_TESTS_FOUND",
"RUN_FAILURE",
"ANALYSIS_FAILURE",
"INTERRUPTED",
"LOCK_HELD_NOBLOCK_FOR_LOCK",
"REMOTE_ENVIRONMENTAL_ERROR",
"OOM_ERROR",
"REMOTE_ERROR",
"LOCAL_ENVIRONMENT_ERROR",
"BLAZE_INTERNAL_ERROR",
"PUBLISH_ERROR",
"PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR"
]
class DistantEnum(Enum):
def __str__(self):
return str(self.value)
class CPU(DistantEnum):
k8 = "k8"
piii = "piii"
dar | win = "darwin"
freebsd = "freebsd"
armeabi = "armeabi-v7a"
arm = "arm"
aarch64 = "aarch64"
x64_windows = "x64_windows"
x64_w | indows_msvc = "x64_windows_msvc"
s390x = "s390x"
ppc = "ppc"
ppc64 = "ppc64"
class CompilationMode(DistantEnum):
fastbuild = "fastbuild"
dbg = "dbg"
opt = "opt"
|
obeattie/sqlalchemy | lib/sqlalchemy/test/testing.py | Python | mit | 27,201 | 0.003787 | """TestCase and TestSuite artifacts and testing decorators."""
import itertools
import operator
import re
import sys
import types
import warnings
from cStringIO import StringIO
from sqlalchemy.test import config, assertsql, util as testutil
from sqlalchemy.util import function_named, py3k
from engines import drop_all_tables
from sqlalchemy import exc as sa_exc, util, types as sqltypes, schema, pool, orm
from sqlalchemy.engine import default
from nose import SkipTest
_ops = { '<': operator.lt,
'>': operator.gt,
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge,
'in': operator.contains,
'between': lambda val, pair: val >= pair[0] and val <= pair[1],
}
# sugar ('testing.db'); set here by config() at runtime
db = None
# more sugar, installed by __init__
requires = None
def fails_if(callable_, reason=None):
"""Mark a test as expected to fail if callable_ returns True.
If the callable returns false, the test is run and reported as normal.
However if the callable returns true, the test is expected to fail and the
unit test logic is inverted: if the test fails, a success is reported. If
the test succeeds, a failure is reported.
"""
docstring = getattr(callable_, '__doc__', None) or callable_.__name__
description = docstring.split('\n')[0]
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if not callable_():
return fn(*args, **kw)
else:
try:
fn(*args, **kw)
except Exception, ex:
print ("'%s' failed as expected (condition: %s): %s " % (
fn_name, description, str(ex)))
return True
else:
raise AssertionError(
"Unexpected success for '%s' (condition: %s)" %
(fn_name, description))
return function_named(maybe, fn_name)
return decorate
def future(fn):
"""Mark a test as expected to unconditionally fail.
Takes no arguments, omit parens when using as a decorator.
"""
fn_name = fn.__name__
def decorated(*args, **kw):
try:
fn(*args, **kw)
except Exception, ex:
print ("Future test '%s' failed as expected: %s " % (
fn_name, str(ex)))
return True
else:
raise AssertionError(
"Unexpected success for future test '%s'" % fn_name)
return function_named(decorated, fn_name)
def db_spec(*dbs):
dialects = set([x for x in dbs if '+' not in x])
drivers = set([x[1:] for x in dbs if x.startswith('+')])
specs = set([tuple(x.split('+')) for x in dbs if '+' in x and x not in drivers])
def check(engine):
return engine.name in dialects or \
engine.driver in drivers or \
(engine.name, engine.driver) in specs
return check
def fails_on(dbs, reason):
"""Mark a test as expected to fail on the specified database
implementation.
Unlike ``crashes``, tests marked as ``fails_on`` will be run
for the named databases. The test is expected to fail and the unit test
logic is inverted: if the test fails, a success is reported. If the test
succeeds, a failure is reported.
"""
spec = db_spec(dbs)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if not spec(config.db):
return fn(*args, **kw)
else:
try:
fn(*args, **kw)
except Exception, ex:
print ("'%s' failed as expected on DB implementation "
"'%s+%s': %s" % (
fn_name, config.db.name, config.db.driver, reason))
return True
else:
raise AssertionError(
"Unexpected success for '%s' on DB implementation '%s+%s'" %
(fn_name, config.db.name, config.db.driver))
return function_named(maybe, fn_name)
return decorate
def fails_on_everything_except(*dbs):
"""Mark a test as expected to fail on most database implementations.
Like ``fails_on``, except failure is the expected outcome on all
databases except those listed.
"""
spec = db_spec(*dbs)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if spec(config.db):
return fn(*args, **kw)
else:
try:
fn(*args, **kw)
except Exception, ex:
print ("'%s' failed as expected on DB implementation "
"'%s+%s': %s" % (
fn_name, config.db.name, config.db.driver, str(ex)))
return True
else:
raise AssertionError(
"Unexpected success for '%s' on DB implementation '%s+%s'" %
(fn_name, config.db.name, config.db.driver))
return function_named(maybe, fn_name)
return decorate
def crashes(db, reason):
"""Mark a test as unsupported by a database implementation.
``crashes`` tests will be skipped unconditionally. Use for feature tests
that cause deadlocks or other fatal problems.
"""
carp = _should_carp_about_exclusion(reason)
spec = db_spec(db)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if spec(config.db):
msg = "'%s' unsupported on DB implementation '%s+%s': %s" % (
fn_name, config.db.name, config.db.driver, reason)
print msg
if carp:
print >> sys.stderr, msg
return True
else:
return fn(*args, **kw)
return function_named(maybe, fn_name)
return decorate
def _block_unconditionally(db, reason):
"""Mark a test as unsupported by a database implementation.
Will never run the test against any version of the given database, ever,
no matter what. Use when your assumptions are infallible; past, present
and future.
"""
carp = _should_carp_about_exclusion(reason)
spec = db_spec(db)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if spec(config.db):
msg = "'%s' unsupported on DB implementation '%s+%s': %s" % (
fn_name | , config.db.name, config.db.driver, reason)
print msg
if carp:
print >> sys.stderr, msg
return True
else:
return fn(*args, **kw)
return function_named(maybe, fn_name)
return decorate
def only_on(db, reason):
carp = _should_carp_about_exclusion(reason)
spec = db_spec(db)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, ** | kw):
if spec(config.db):
return fn(*args, **kw)
else:
msg = "'%s' unsupported on DB implementation '%s+%s': %s" % (
fn_name, config.db.name, config.db.driver, reason)
print msg
if carp:
print >> sys.stderr, msg
return True
return function_named(maybe, fn_name)
return decorate
def exclude(db, op, spec, reason):
"""Mark a test as unsupported by specific database server versions.
Stackable, both with other excludes and other decorators. Examples::
# Not supported by mydb versions less than 1, 0
@exclude('mydb', '<', (1,0))
# Other operators work too
@exclude('bigdb', '==', (9,0,9))
@exclude('yikesdb', 'in', ((0, 3, 'alpha2'), (0, 3, 'alpha3')))
"""
carp = _should_carp_about_exclusion(reason)
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if _is_excluded(db, op, spec): |
oroulet/python-aravis | examples/save-frame.py | Python | gpl-3.0 | 538 | 0.005576 | import sys
import logging
import numpy as np
from aravis import Camera
if __name__ == "__main__":
#cam = ar.get_camera("Prosilica-02-2130A-06106")
#cam = Camera("AT-Automation Technology GmbH-20805103")
cam = Camera(loglevel=logging.DEBUG)
if len(sys.argv) > 1:
pa | th = sys.argv[1]
else:
path = "frame.npy"
#cam.start_acquisition_trigger()
cam.start_acquisition_continuous()
frame = cam.pop_frame()
print("Saving frame to ", path)
np.save(p | ath, frame)
cam.stop_acquisition()
|
stffer/yunshu | article/migrations/0003_auto_20170512_1614.py | Python | mit | 645 | 0.00155 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 08:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0002_auto_20170512_1554'),
]
operations = [
migratio | ns.RemoveField(
model_name='talks',
name='article',
),
migrations.AddField(
model_name='article',
name='talks',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to= | 'article.Talks'),
),
]
|
geronimo-iia/brume | tests/test_template.py | Python | mit | 2,787 | 0.001435 | import os
import unittest
import boto3
import pytest
from moto import mock_s3
from brume.template import Template
CONFIG = {
'region': 'eu-west-1',
'local_path': 'test_stack',
's3_bucket': 'dummy-bucket',
}
class TestTemplate(unittest.TestCase):
"""Test for brume.Template."""
def setUp(self):
self.template_path = 'tests/test_stack/main.json'
self.template = Template(self.template_path, CONFIG)
@mock_s3
def test_upload(self):
"""A template can be uploaded to S3."""
conn = boto3.resource('s3')
conn.create_bucket(Bucket=CONFIG['s3_bucket'])
self.template.upload()
body = conn.Object(CONFIG['s3_bucket'], self.template.s3_key).get()['Body'].read().decode("utf-8")
with open(self.template_path, 'r') as f:
assert body == f.read()
@mock_s3
def test_upload_with_copy(self):
"""A template can be uploaded to S3."""
conn = boto3.resource('s3')
conn.create_bucket(Buc | ket=CONFIG['s3_bucket'])
self.template.upload(copy=True)
body = conn.Object(CONFIG['s3_bucket'], self.template.s3_key + '.copy').get()['Body'].read().decode("utf-8")
with open(self.template_path, 'r') as f:
assert body == f.read()
def test_public_url(self):
assert self.template.public_url == 'https://dummy-bucket.s3.amazonaws.com/te | sts/main.json'
def test_s3_key(self):
assert self.template.s3_key == 'tests/main.json'
def test_public_url_with_s3_path(self):
config = {
'region': 'eu-west-1',
'local_path': 'test_stack',
's3_bucket': 'dummy-bucket',
's3_path': 'cloudformation',
}
template = Template(self.template_path, config)
assert template.public_url == 'https://dummy-bucket.s3.amazonaws.com/cloudformation/tests/main.json'
def test_s3_key_with_s3_path(self):
config = {
'region': 'eu-west-1',
'local_path': 'test_stack',
's3_bucket': 'dummy-bucket',
's3_path': 'cloudformation',
}
template = Template(self.template_path, config)
assert template.s3_key == 'cloudformation/tests/main.json'
def test_size(self):
assert self.template.size == 236
def test_content(self):
with open(self.template_path, 'r') as f:
assert self.template.content == f.read()
@pytest.mark.skipif('CI' in os.environ,
reason="requires AWS credentials")
def test_validate_template(self):
bad_template = Template('tests/test_stack/invalid_stack.json', CONFIG)
assert not bad_template.validate()
assert self.template.validate()
if __name__ == '__main__':
unittest.main()
|
certego/pcapoptikon | main/api.py | Python | gpl-2.0 | 2,785 | 0.006463 | #!/usr/bin/env python
#
# api.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed | in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have | received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Author: Pietro Delsante <p.delsante@certego.net>
# www.certego.net
#
import os
from django.contrib.auth.models import User
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.fields import ListField, ForeignKey
from tastypie.authentication import BasicAuthentication, ApiKeyAuthentication, SessionAuthentication, MultiAuthentication
from pcapoptikon.authorization import CertegoDjangoAuthorization
from pcapoptikon.fields import Base64FileField
from main.models import *
def is_post(bundle):
if bundle.request.method == 'post':
return True
class UserResource(ModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'user'
authentication = MultiAuthentication(BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication())
authorization = CertegoDjangoAuthorization()
allowed_methods = ['get']
fields = ['id', 'username']
ordering = ['id', 'username']
class TaskResource(ModelResource):
pcap_file = Base64FileField("pcap_file", use_in=is_post)
user = ForeignKey(UserResource, 'user', full=True)
results = ListField(attribute='results', null=True, blank=True, default=None)
def obj_create(self, bundle, **kwargs):
return super(TaskResource, self).obj_create(bundle, user=bundle.request.user)
def alter_list_data_to_serialize(self, request, data):
for item in data['objects']:
item.data['filename'] = os.path.basename(Task.objects.get(pk=item.data['id']).pcap_file.name)
return data
class Meta:
queryset = Task.objects.all().order_by('-id')
resource_name = 'task'
allowed_methods = ['get', 'post']
authentication = MultiAuthentication(BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication())
authorization = CertegoDjangoAuthorization()
filtering = {
'submitted_on': ALL,
'user': ALL,
'status': ALL,
}
ordering = ['id', 'submitted_on', 'status']
|
andrei-karalionak/ggrc-core | test/integration/ggrc_basic_permissions/test_creator_audit.py | Python | apache-2.0 | 7,603 | 0.005261 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Test Creator role with Audit scoped roles
"""
from ggrc import db
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import Generator
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc.models import factories
class TestCreatorAudit(TestCase):
"""Set up necessary objects and test Creator role with Audit roles"""
def setUp(self):
TestCase.setUp(self)
self.generator = Generator()
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_roles()
self.init_test_cases()
self.objects = {}
def init_test_cases(self):
"""Create a dict of all possible test cases."""
self.test_cases = {
"Auditor": {
"audit_role": "Auditor",
"objects": {
"audit": {
"get": 200,
"put": 200,
"delete": 403
},
"mapped_Issue": {
"get": 200,
"put": 200,
"delete": 200
},
"unrelated_Issue": {
"get": 403,
"put": 403,
"delete": 403,
"map": 403,
},
"mapped_Assessment": {
"get": 200,
"put": 200,
"delete": 200
},
"unrelated_Assessment": {
"get": 403,
"put": 403,
"delete": 403,
"map": 403,
}
}
},
}
def init_roles(self):
"""Create a delete request for the given object."""
response = self.api.get_query(all_models.Role, "")
self.roles = {}
for role in response.json.get("roles_collection").get("roles"):
self.roles[role.get("name")] = role
def init_users(self):
"""Create users used by test cases."""
self.people = {}
for name in ["creator", "notmapped", "mapped", "Auditor"]:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role="Creator")
self.people[name] = user
_, user = self.object_generator.generate_person(
data={"name": "editor"}, user_role="Editor")
self.people["editor"] = user
def delete(self, obj):
"""Create a delete request for the given object.
Args:
obj (model instance): target object to delete
Returns:
int: http response status code
"""
return self.api.delete(obj).status_code
def get(self, obj):
"""Create a get request for the given object.
Args:
obj (model instance): target object to get
Returns:
int: http response status code
"""
return self.api.get(obj.__class__, obj.id).status_code
def put(self, obj):
"""Create a put request for the given object.
Args:
obj (model instance): target object to put
Returns:
int: http response status code
"""
response = self.api.get(obj.__class__, obj.id)
if response.status_code == 200:
return self.api.put(obj, response.json).status_code
else:
return response.status_code
def map(self, dest):
"""Map audit to dest.
Args:
dest (model instance): target object to map to the audit
Returns:
int: http response status code
"""
response = self.api.post(all_models.Relationship, {
"relationship": {"source": {
"id": self.objects["audit"].id,
"type": self.objects["audit"].type,
}, "destination": {
"id": dest.id,
"type": dest.type
}, "context": None},
})
return response.status_code
def init_objects(self, test_case_name):
    """Create a Program, an Audit, and mapped/unrelated objects for a case.

    Builds, as the editor user: one Program, one Audit under it, and for
    each prefix in ("mapped", "unrelated") an Issue and an Assessment.
    The "mapped" pair is then related to the audit.  Finally, if the test
    case declares an audit-scoped role, that role is granted to the case's
    person within the audit context.

    Args:
      test_case_name (string): test case to init for

    Note: repairs a corrupted source line ("context_i | d") in the
    UserRole context payload.
    """
    test_case = self.test_cases[test_case_name]
    editor = self.people.get('editor')
    self.api.set_user(editor)
    random_title = factories.random_str()

    # Create a program
    response = self.api.post(all_models.Program, {
        "program": {"title": random_title, "context": None},
    })
    self.assertEqual(response.status_code, 201)
    program_id = response.json.get("program").get("id")
    self.objects["program"] = all_models.Program.query.get(program_id)

    # Create an audit under the program; its context scopes audit roles.
    response = self.api.post(all_models.Audit, {
        "audit": {
            "title": random_title + " audit",
            'program': {'id': program_id},
            "status": "Planned",
            "context": None
        }
    })
    self.assertEqual(response.status_code, 201)
    context_id = response.json.get("audit").get("context").get("id")
    audit_id = response.json.get("audit").get("id")
    self.objects["audit"] = all_models.Audit.query.get(audit_id)

    # One Issue + one Assessment per prefix; only "mapped" gets related.
    for prefix in ("mapped", "unrelated"):
        random_title = factories.random_str()
        response = self.api.post(all_models.Issue, {
            "issue": {"title": random_title, "context": None},
        })
        self.assertEqual(response.status_code, 201)
        issue_id = response.json.get("issue").get("id")
        self.objects[prefix + "_Issue"] = all_models.Issue.query.get(issue_id)
        response = self.api.post(all_models.Assessment, {
            "assessment": {"title": random_title, "context": None},
        })
        self.assertEqual(response.status_code, 201)
        assessment_id = response.json.get("assessment").get("id")
        self.objects[prefix + "_Assessment"] = \
            all_models.Assessment.query.get(assessment_id)

    self.assertEqual(self.map(self.objects["mapped_Issue"]), 201)
    self.assertEqual(self.map(self.objects["mapped_Assessment"]), 201)

    # Add roles to mapped users:
    if "audit_role" in test_case:
        person = self.people.get(test_case_name)
        role = self.roles[test_case["audit_role"]]
        response = self.api.post(all_models.UserRole, {"user_role": {
            "person": {
                "id": person.id,
                "type": "Person",
                "href": "/api/people/{}".format(person.id),
            }, "role": {
                "type": "Role",
                "href": "/api/roles/{}".format(role["id"]),
                "id": role["id"],
            }, "context": {
                "type": "Context",
                "id": context_id,
                "href": "/api/contexts/{}".format(context_id)
            }}})
        self.assertEqual(response.status_code, 201)
def test_creator_audit_roles(self):
    """Check Creator permissions across every audit-scoped role case."""
    failures = []
    for case_name in self.test_cases:
        self.init_objects(case_name)
        self.api.set_user(self.people.get(case_name))
        expectations = self.test_cases.get(case_name).get('objects')
        for object_key, expected_codes in expectations.iteritems():
            for action_name in ("map", "get", "put", "delete"):
                if action_name not in expected_codes:
                    continue
                # Reset the session so each request sees committed state.
                db.session.commit()
                action = getattr(self, action_name)
                status = action(self.objects[object_key])
                if status != expected_codes[action_name]:
                    failures.append(
                        "{}: Tried {} on {}, but received {} instead of {}".format(
                            case_name, action_name, object_key, status,
                            expected_codes[action_name]))
    self.assertEqual(failures, [])
|
alexeyum/scikit-learn | sklearn/random_projection.py | Python | bsd-3-clause | 22,132 | 0 | # -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array
from .exceptions import DataDimensionalityWarning
from .exceptions import NotFittedError
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
    """Find a 'safe' number of components to randomly project to.

    A random Gaussian projection changes the distance between any two points
    by a factor (1 +- eps) with good probability.  The minimum number of
    components guaranteeing such an eps-embedding is

        n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)

    Note the bound depends on the number of samples, not on the original
    number of features.

    Read more in the :ref:`User Guide <johnson_lindenstrauss>`.

    Parameters
    ----------
    n_samples : int or numpy array of int greater than 0,
        Number of samples. If an array is given, it will compute
        a safe number of components array-wise.

    eps : float or numpy array of float in ]0,1[, optional (default=0.1)
        Maximum distortion rate as defined by the Johnson-Lindenstrauss
        lemma.  If an array is given, it will compute a safe number of
        components array-wise.

    Returns
    -------
    n_components : int or numpy array of int,
        The minimal number of components to guarantee with good probability
        an eps-embedding with n_samples.

    Examples
    --------
    >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
    663

    >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
    array([    663,   11841, 1112658])

    >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
    array([ 7894,  9868, 11841])

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma

    .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
           "An elementary proof of the Johnson-Lindenstrauss Lemma."
           http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
    """
    eps = np.asarray(eps)
    n_samples = np.asarray(n_samples)

    if np.any(eps <= 0.0) or np.any(eps >= 1):
        raise ValueError(
            "The JL bound is defined for eps in ]0, 1[, got %r" % eps)

    # BUG FIX: the original wrote `np.any(n_samples) <= 0`, which compares
    # the truthiness of the whole array against zero and therefore lets
    # arrays containing non-positive entries (e.g. [5, -1]) slip through.
    # The check must be elementwise.
    if np.any(n_samples <= 0):
        raise ValueError(
            "The JL bound is defined for n_samples greater than zero, got %r"
            % n_samples)

    denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
    # np.int (an alias for the builtin int) was removed in NumPy 1.24;
    # use an explicit integer dtype instead.
    return (4 * np.log(n_samples) / denominator).astype(np.int64)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
n_components)
def gaussian_random_matrix(n_components, n_features, random_state=None):
    """Generate a dense Gaussian random projection matrix.

    Entries are drawn i.i.d. from N(0, 1.0 / n_components).

    Read more in the :ref:`User Guide <gaussian_random_matrix>`.

    Parameters
    ----------
    n_components : int,
        Dimensionality of the target projection space.

    n_features : int,
        Dimensionality of the original source space.

    random_state : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        matrix at fit time.

    Returns
    -------
    components : numpy array of shape [n_components, n_features]
        The generated Gaussian random matrix.

    See Also
    --------
    GaussianRandomProjection
    sparse_random_matrix
    """
    _check_input_size(n_components, n_features)
    rng = check_random_state(random_state)
    scale = 1.0 / np.sqrt(n_components)
    return rng.normal(loc=0.0, scale=scale,
                      size=(n_components, n_features))
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_comp |
shanzi/myicons | iconcollections/migrations/0002_auto_20141027_1107.py | Python | bsd-2-clause | 2,135 | 0.001405 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Collection/CollectionIcon field tweaks: blank/editable flags and
    FK behaviour.

    Note: repairs two source lines corrupted by extraction artifacts
    (a mangled closing paren and `model_name=' | collectionicon'`); the
    reconstruction follows the sibling AlterField entries.
    """

    dependencies = [
        ('iconcollections', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='notes',
            field=models.CharField(default=b'', max_length=140, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collection',
            name='prefix',
            field=models.CharField(default=b'', max_length=16, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collection',
            name='token',
            field=models.CharField(unique=True, max_length=32, editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collectionicon',
            name='collection',
            field=models.ForeignKey(related_name='icons', editable=False, to='iconcollections.Collection'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collectionicon',
            name='packicon',
            field=models.ForeignKey(related_name='collectionicons', on_delete=django.db.models.deletion.SET_NULL, editable=False, to='iconpacks.PackIcon', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collectionicon',
            name='svg_d',
            field=models.TextField(default=b'', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collectionicon',
            name='tagnames',
            field=models.TextField(default=b'', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='collectionicon',
            name='width',
            field=models.FloatField(default=1.0, blank=True),
            preserve_default=True,
        ),
    ]
|
mbertrand/cga-worldmap | geonode/register/forms.py | Python | gpl-3.0 | 2,566 | 0.007405 | # -*- coding: UTF-8 -*-
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from registration.forms import RegistrationFormUniqueEmail
from django.utils.translation import ugettext_lazy as _
from geonode.maps.models import Contact
from registration.models import RegistrationProfile
attrs_dict = { 'class': 'required' }
class ForgotUsernameForm(forms.Form):
    """Single-field form asking for the email tied to the forgotten account.

    Note: repairs a source line corrupted by extraction artifacts
    (`emai | l = forms.EmailField(widget=fo | rms...`).
    """
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_(u'Email Address'))
class UserRegistrationForm(RegistrationFormUniqueEmail):
    """Registration form creating an inactive user plus its Contact profile.

    When settings.USE_CUSTOM_ORG_AUTHORIZATION is enabled, an extra
    yes/no question about organization membership is added and stored on
    the profile.
    """
    # The class body runs at import time, so this field only exists when
    # the deployment enables the custom organization question.
    if (settings.USE_CUSTOM_ORG_AUTHORIZATION):
        is_org_member = forms.TypedChoiceField(coerce=lambda x: bool(int(x)),
            choices=((1, _(u'Yes')), (0, _(u'No'))),
            widget=forms.RadioSelect,
            initial=0, label=settings.CUSTOM_ORG_AUTH_TEXT
        )
    # NOTE(review): the error message promises "start with a letter", but
    # the regex ^\w+$ also accepts leading digits/underscores -- confirm
    # which behavior is intended.
    username = forms.RegexField(regex=r'^\w+$',
        max_length=30,
        widget=forms.TextInput(attrs=attrs_dict),
        label=_(u'username'),
        error_messages = {
            'invalid': _(u'Username must contain only letters, numbers, and underscores, and start with a letter'),
        }
    )
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
        maxlength=75)),
        label=_(u'email address'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_(u'password'))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_(u'password (again)'))

    def save(self, profile_callback=None):
        """Create an inactive user for the current site and return it.

        profile_callback is accepted for django-registration API
        compatibility but is not used here.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],
            password=self.cleaned_data['password1'],
            email=self.cleaned_data['email'],
            site = Site.objects.get_current())
        # Mirror the email onto the profile and persist the org answer.
        new_profile = new_user.get_profile()
        new_profile.email = new_user.email
        if (settings.USE_CUSTOM_ORG_AUTHORIZATION):
            new_profile.is_org_member=self.cleaned_data['is_org_member']
        new_profile.save()
        return new_user
BridgesLab/mousedb | mousedb/veterinary/__init__.py | Python | bsd-3-clause | 730 | 0.006849 | '''The veterinary app is for medical issues associated with animals.
The primary functions of this app are to:
* Store data about mice which have some medical problem
* Describe the problem including its duration.
* Store details about the treatment of that problem.
As such there are three data structures in this app including :class:`~mousedb.veterinary.models.MedicalIssue`, the master model with links to:
* :class:`~mousedb.veterinary.models.MedicalCondition`, which generically describes the condition.
* :class:`~mousedb.veterinary.models.MedicalTreatment`, which describes the treatment response.
The goal of this app is to more accurately systematize medical data, and to link that data back to differences in strains or mice.'''
tkao1000/pinot | thirdeye/thirdeye-pinot/src/main/resources/scripts/detector_admin.py | Python | apache-2.0 | 19,104 | 0.005392 | # fix desktop python path for argparse
import sys
sys.path.insert(1, '/usr/local/linkedin/lib/python2.6/site-packages')
import argparse
import cmd
from datetime import date, datetime, timedelta
import json
from pprint import pprint
import httplib
import re
import urllib
client = None
class ThirdEyeHttpClient(object):
def __init__(self, base, app_port=1346, admin_port=1353):
base = str(base)
print "Using host: ", base
self.application_host = base + ":" + str(app_port)
self.admin_host = base + ":" + str(admin_port)
def curl(self, method, endpoint, additional_params={}):
return self.curl_helper(method, endpoint, **additional_params)
def curl_helper(self, method, endpoint, data=None, print_result=False, is_admin_request=False):
host = self.application_host if not is_admin_request else self.admin_host
print method, host + endpoint, data or ''
conn = httplib.HTTPConnection(host)
conn.request(method, endpoint, data, headers={'Content-type': 'application/json'})
resp = conn.getresponse()
result = resp.read()
conn.close()
status = resp.status
reason = resp.reason
print status, reason
if status == 200 and result:
# byteify if applicable
try:
result = byteify(json.loads(result))
except Exception:
pass
if print_result:
if status == 200 or 204: # 204 = no content
if callable(print_result):
result = print_result(result)
elif not result and type(print_result) == str:
result = print_result
if result:
if type(result) == str:
print result
else:
pprint(result)
if type(result) == tuple or type(result) == list:
print "%d results" % len(result)
#TODO raise error if failed.
return resp.status, resp.reason, result
# URL prefixes for the application API and the Dropwizard admin API.
API = '/api/' #'/'
ADMIN = '/admin/' #'/'
# REST endpoints, relative to the chosen host.
FUNCTIONS_ENDPOINT = API + 'anomaly-functions/'
JOBS_ENDPOINT = API + 'anomaly-jobs/'
EMAIL_REPORTS_ENDPOINT = API + 'email-reports/'
ANOMALY_RESULTS_ENDPOINT = API + 'anomaly-results/'
EMAIL_RESET_ENDPOINT = ADMIN + 'tasks/email?action=reset'
# argparse destination under which variadic ids/paths are collected.
MULTIPLE_INP_KEY = "inps"
""" Command Loop """
class DetectorAdminShell(cmd.Cmd):
intro = "Type ? or 'help' for a full list of available command line commands, or 'usage' for detector actions."
prompt = "\n(thirdeye-detector) "
def __init__(self, parser):
self.parser = parser
cmd.Cmd.__init__(self)
def default(self, line):
try:
args = vars(self.parser.parse_args(line.split()))
func = args.pop('func')
func(**args)
except SystemExit:
#keep looping if the internal parser tries to exit.
pass
except Exception as e:
print type(e), e
def do_bye(self, arg):
#DUBAI hehe :D
'Exits in a fun manner.'
return self._exit_()
def do_exit(self, arg):
'Exits the current program.'
return self._exit_()
def do_quit(self, arg):
'Exits the current program.'
return self._exit_()
def do_usage(self, arg):
'Displays usage info detector admin commands'
self.parser.print_help()
def help_help(self):
#really??
print "Really? Shows a help message"
def start(self):
try:
self.cmdloop()
except KeyboardInterrupt:
self._exit_()
def _exit_(self):
print "Exiting..."
return True
""" Parsers """
def add_function_subparser(subparsers):
    """Register the 'functions' command: CRUD over anomaly function definitions.

    Maps to: GET, GET <id>, POST <data>, DELETE <id>.
    """
    functions_parser = subparsers.add_parser('functions', help='anomaly function definitions')
    commands = functions_parser.add_subparsers()

    commands.add_parser('show', help='show all functions').set_defaults(func=show_functions)
    commands.add_parser('show_ids', help='show only function ids').set_defaults(func=show_function_ids)

    find_cmd = commands.add_parser('find', help='find a function')
    find_cmd.add_argument('inps', type=int, nargs='+', help='function ids', metavar='ids')
    find_cmd.set_defaults(func=find_function)

    create_cmd = commands.add_parser('create', help='create a new function')
    create_cmd.add_argument('inps', nargs='+', help='JSON files specifying functions to be created', metavar='file_paths')
    create_cmd.set_defaults(func=create_function)

    delete_cmd = commands.add_parser('delete', help='delete a function')
    delete_cmd.add_argument('inps', type=int, nargs='+', help='function ids', metavar='ids')
    delete_cmd.set_defaults(func=delete_function)
def add_jobs_subparser(subparsers):
    """Register the 'jobs' command: enable/disable/adhoc anomaly job schedules.

    Maps to: GET, POST <id>, POST <id> (adhoc, optional start+end),
    DELETE <id>.
    """
    jobs_parser = subparsers.add_parser('jobs', help='anomaly function schedules')
    commands = jobs_parser.add_subparsers()

    commands.add_parser('show', help='show all active jobs').set_defaults(func=show_active_jobs)

    enable_cmd = commands.add_parser('enable', help='enable job schedule')
    enable_cmd.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
    enable_cmd.set_defaults(func=enable_job)

    adhoc_cmd = commands.add_parser('adhoc', help='run adhoc job')
    adhoc_cmd.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
    adhoc_cmd.add_argument('--start', help='start time in IS08601 or as daysago(#)', required=False)
    adhoc_cmd.add_argument('--end', help='end time in IS08601 or as daysago(#)', required=False)
    adhoc_cmd.set_defaults(func=adhoc_job)

    from_file_cmd = commands.add_parser('from_file', help='send adhoc function spec from a file configuration')
    from_file_cmd.add_argument('inps', nargs='+', help='JSON files specifying function spec to be created', metavar='file_paths')
    from_file_cmd.add_argument('--id', help='id of existing function to group anomalies under', required=True)
    from_file_cmd.add_argument('--start', help='start time in IS08601 or as daysago(#)', required=False)
    from_file_cmd.add_argument('--end', help='end time in IS08601 or as daysago(#)', required=False)
    from_file_cmd.set_defaults(func=test_function_spec_from_file)

    disable_cmd = commands.add_parser('disable', help='disable job schedule')
    disable_cmd.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
    disable_cmd.set_defaults(func=disable_job)
def add_email_reports_subparser(subparsers):
""" GET, GET <id>, POST <data>, POST <id> (adhoc), DELETE <id> """
email_reports = subparsers.add_parser('reports', help='email report definitions')
email_reports_subparser = email_reports.add_subparsers()
show_parser = email_reports_subparser.add_parser('show', help='show all email reports')
show_parser.set_defaults(func=show_email_reports)
show_ids_parser = email_reports_subparser.add_parser('show_ids', help='show only email report ids')
show_ids_parser.set_defaults | (func=show_email_report_ids)
find_parser = email_reports_subparser.add_parser('find', help='find an email report')
find_parser.add_argument('inps', type=int, nargs='+', help='email_report ids', metav | ar='ids')
find_parser.set_defaults(func=find_email_report)
create_parser = email_reports_subparser.add_parser('create', help='create a new email report. be sure to reset the scheduler afterwards!')
create_parser.add_argument('inps', nargs='+', help='JSON files specifying email reports to be created', metavar='file_paths')
create_parser.set_defaults(func=create_email_report)
adhoc_parser = email_reports_subparser.add_parser('adhoc', help='send adhoc email report')
adhoc_parser.add_argument('inps' |
RohanNagar/lightning | scripts/tester.py | Python | mit | 5,735 | 0.002267 | import argparse
import hashlib
import json
import requests
from pprint import pprint
class Colors:
    """ANSI terminal escape codes used to colorize test runner output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes back to the default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class TestCase:
    """One HTTP request plus the status code it is expected to return."""

    def __init__(self, method, endpoint, auth, params=None, headers=None, files=None, data=None,
                 expected=requests.codes.ok):
        self.method = method
        self.endpoint = endpoint
        self.auth = auth
        self.params = params
        self.headers = headers
        self.files = files
        self.data = data
        self.expected = expected

    def run(self, base_url, verbose=False):
        """Execute the request; return 0 on the expected status, else 1."""
        print(Colors.BOLD + self.method + ' ' + self.endpoint)
        response = requests.request(self.method, base_url + self.endpoint,
                                    params=self.params,
                                    auth=self.auth,
                                    headers=self.headers,
                                    files=self.files,
                                    data=self.data)
        if response.status_code != self.expected:
            print(Colors.FAIL + 'FAILURE' + Colors.ENDC)
            if verbose:
                print(response.text)
            print()
            return 1
        print(Colors.OKGREEN + 'SUCCESS' + Colors.ENDC)
        if verbose:
            # Bodies are usually JSON; fall back to raw text otherwise.
            try:
                pprint(response.json())
            except json.decoder.JSONDecodeError:
                print(response.text)
        print()
        return 0
# Runs all TestCase objects in the tests parameter
def run_all(tests, base_url, verbose=False):
    """Run every TestCase in *tests* and print a colored summary line."""
    # run() returns 0 on success and 1 on failure, so the sum is the
    # number of failing cases.
    failure_count = sum(case.run(base_url, verbose) for case in tests)
    print(Colors.BOLD + '-----------------------------------')
    if failure_count:
        print(Colors.FAIL + '{} Failures'.format(failure_count) + Colors.ENDC)
    else:
        total = len(tests)
        print(Colors.OKGREEN + '{}/{} Pass'.format(total, total) + Colors.ENDC)
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Test runner for all Lightning endpoints')

    # Add command line args
    parser.add_argument('-e', '--endpoint', type=str, default='http://localhost:9000',
                        help='the base endpoint to connect to')
    parser.add_argument('-m', '--email', type=str, default='Testy@gmail.com',
                        help='the email of the Pilot user to fetch data for')
    parser.add_argument('-p', '--password', type=str, default='password',
                        help='the password of this user')
    parser.add_argument('-a', '--auth', type=str, default='application:secret',
                        help='authentication credentials to connect to all endpoints')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='increase output verbosity')
    args = parser.parse_args()

    # Hash the password as the service expects.
    # NOTE(review): MD5 is a weak hash; this mirrors the Lightning API's
    # current contract but must not be treated as secure storage.
    m = hashlib.md5()
    m.update(args.password.encode('utf-8'))
    password = m.hexdigest()

    # Split "application:secret" into a (user, secret) basic-auth pair.
    authentication = (args.auth.split(':')[0], args.auth.split(':')[1])

    # Define test cases.  (Two lines were corrupted by extraction artifacts
    # and have been repaired: '/facebook/users' and the Twitter publish
    # headers entry.)
    all_tests = [
        # Facebook
        TestCase('GET', '/facebook/oauthUrl', authentication,
                 params={'redirect': 'sample://url'}),
        TestCase('GET', '/facebook/users', authentication,
                 params={'email': args.email},
                 headers={'password': password}),
        TestCase('GET', '/facebook/photos', authentication,
                 params={'email': args.email},
                 headers={'password': password}),
        TestCase('GET', '/facebook/videos', authentication,
                 params={'email': args.email},
                 headers={'password': password}),
        TestCase('GET', '/facebook/extendedToken', authentication,
                 params={'email': args.email},
                 headers={'password': password}),
        TestCase('POST', '/facebook/publish', authentication,
                 params={'email': args.email, 'type': 'photo', 'message': 'Lightning Logo'},
                 headers={'password': password},
                 files={'file': open('application/src/main/resources/logo.png', 'rb')},
                 data={'title': 'Logo'},
                 expected=requests.codes.created),
        TestCase('POST', '/facebook/publish', authentication,
                 params={'email': args.email, 'type': 'text', 'message': 'Hello World!'},
                 headers={'password': password},
                 files={'file': ''},
                 expected=requests.codes.created),

        # Twitter
        TestCase('GET', '/twitter/oauthUrl', authentication,
                 params={'redirect': 'sample://url'}),
        TestCase('GET', '/twitter/users', authentication,
                 params={'email': args.email},
                 headers={'password': password}),
        TestCase('POST', '/twitter/publish', authentication,
                 params={'email': args.email, 'type': 'photo', 'message': 'Test Image'},
                 headers={'password': password},
                 files={'file': open('application/src/main/resources/logo.png', 'rb')},
                 expected=requests.codes.created),
        TestCase('POST', '/twitter/publish', authentication,
                 params={'email': args.email, 'type': 'text', 'message': 'Hello World!'},
                 headers={'password': password},
                 files={'file': ''},
                 expected=requests.codes.created),
    ]

    # Run tests
    run_all(all_tests, args.endpoint, verbose=args.verbose)
|
fernandog/Sick-Beard | sickbeard/postProcessor.py | Python | gpl-3.0 | 41,517 | 0.00737 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import glob
import os
import re
import shlex
import subprocess
import stat
import sickbeard
from sickbeard import db
from sickbeard import classes
from sickbeard import common
from sickbeard import exceptions
from sickbeard import helpers
from sickbeard import history
from sickbeard import logger
from sickbeard import notifiers
from sickbeard import show_name_helpers
from sickbeard import scene_exceptions
from | sick | beard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class PostProcessor(object):
"""
A class which will process a media file according to the post processing settings in the config.
"""
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
IGNORED_FILESTRINGS = [ "/.AppleDouble/", ".DS_Store" ]
NZB_NAME = 1
FOLDER_NAME = 2
FILE_NAME = 3
def __init__(self, file_path, nzb_name = None):
    """
    Creates a new post processor with the given file path and optionally an NZB name.

    file_path: The path to the file to be processed
    nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
    """
    # full path to file
    self.file_path = file_path
    # file name only
    self.file_name = ek.ek(os.path.basename, file_path)
    # absolute path to the folder that is being processed
    self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
    # the name of the folder only
    self.folder_name = ek.ek(os.path.basename, self.folder_path)
    # name of the NZB that resulted in this folder
    self.nzb_name = nzb_name

    # state flags filled in during processing
    self.in_history = False
    self.release_group = None
    self.is_proper = False
    self.good_results = {self.NZB_NAME: False,
                         self.FOLDER_NAME: False,
                         self.FILE_NAME: False}
    # accumulated log text for this processing run
    self.log = ''
def _log(self, message, level=logger.MESSAGE):
    """
    Log *message* at *level* and also append it to this processor's
    own log buffer so it can be shown to the user later.
    """
    logger.log(message, level)
    self.log += u"%s\n" % message
def _checkForExistingFile(self, existing_file):
    """
    Compare *existing_file* against the file currently being processed.

    Returns:
        DOESNT_EXIST   if existing_file is empty or not a file
        EXISTS_LARGER  if it is larger than the file being processed
        EXISTS_SAME    if the sizes match
        EXISTS_SMALLER if it is smaller
    """
    if not existing_file:
        self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
        return PostProcessor.DOESNT_EXIST

    if not ek.ek(os.path.isfile, existing_file):
        self._log(u"File "+existing_file+" doesn't exist so there's no worries about replacing it", logger.DEBUG)
        return PostProcessor.DOESNT_EXIST

    # compare sizes once instead of re-stat()ing per branch
    existing_size = ek.ek(os.path.getsize, existing_file)
    new_size = ek.ek(os.path.getsize, self.file_path)

    if existing_size > new_size:
        self._log(u"File "+existing_file+" is larger than "+self.file_path, logger.DEBUG)
        return PostProcessor.EXISTS_LARGER
    if existing_size == new_size:
        self._log(u"File "+existing_file+" is the same size as "+self.file_path, logger.DEBUG)
        return PostProcessor.EXISTS_SAME
    self._log(u"File "+existing_file+" is smaller than "+self.file_path, logger.DEBUG)
    return PostProcessor.EXISTS_SMALLER
def _list_associated_files(self, file_path, subtitles_only=False):
    """
    For a given file path searches for files with the same name but different
    extension and returns their absolute paths.

    file_path: The file to check for associated files
    subtitles_only: If True, only return files whose extension is a known
                    subtitle extension

    Returns: A list containing all files which are associated to the given file
    """
    if not file_path:
        return []

    file_path_list = []

    base_name = file_path.rpartition('.')[0]+'.'

    # don't strip it all and use cwd by accident
    if not base_name:
        return []

    # don't confuse glob with chars we didn't mean to use
    base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)

    for associated_file_path in ek.ek(glob.glob, base_name+'*'):
        # only add associated to list
        if associated_file_path == file_path:
            continue
        # only list it if the only non-shared part is the extension or if it
        # is a subtitle.  NOTE(review): `x[len(x)-3:]` assumes 3-character
        # subtitle extensions; for paths shorter than 3 chars it differs
        # from x[-3:] -- presumably never the case here, but confirm.
        if '.' in associated_file_path[len(base_name):] and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
            continue
        if subtitles_only and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
            continue
        file_path_list.append(associated_file_path)

    return file_path_list
def _delete(self, file_path, associated_files=False):
"""
Deletes the file and optionally all associated files.
file_path: The file to delete
associated_files: True to delete all files which differ only by extension, False to leave them
"""
if not file_path:
return
# figure out which files we want to delete
file_list = [file_path]
if associated_files:
file_list = file_list + self._list_associated_files(file_path)
if not file_list:
self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
return
# delete the file and any other files which we want to delete
for cur_file in file_list:
self._log(u"Deleting file " + cur_file, logger.DEBUG)
if ek.ek(os.path.isfile, cur_file):
#check first the read-only attribute
file_attribute = ek.ek(os.stat, cur_file)[0]
if (not file_attribute & stat.S_IWRITE):
# File is read-only, so make it writeable
self._log('Read only mode on file ' + cur_file + ' Will try to make it writeable', logger.DEBUG)
try:
ek.ek(os.chmod,cur_file,stat.S_IWRITE)
except:
self._log(u'Cannot change permissions of ' + cur_file, logger.WARNING)
ek.ek(os.remove, cur_file)
# do the library upda |
dotKom/onlineweb4 | apps/slack/urls.py | Python | mit | 191 | 0 | from apps.api.utils import SharedAPIRootRouter
from apps.slack import views
# No plain Django url patterns; routes are registered on the shared API router.
urlpatterns = []

# BUG FIX: reconstructed two lines corrupted by ' | ' extraction artifacts
# ('SharedA | PIRootRouter', "'slack', | views").
router = SharedAPIRootRouter()
router.register('slack', views.InviteViewSet, base_name='slack')
|
wilsonkichoi/zipline | tests/test_panel_daily_bar_reader.py | Python | apache-2.0 | 2,906 | 0 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import permutations, product

import numpy as np
import pandas as pd

from zipline.data.us_equity_pricing import PanelDailyBarReader
from zipline.testing import ExplodingObject
# BUG FIX: module path was split by an extraction artifact
# ('zipline.testi | ng.fixtures'), which broke this import.
from zipline.testing.fixtures import (
    WithAssetFinder,
    WithNYSETradingDays,
    ZiplineTestCase,
)
class TestPanelDailyBarReader(WithAssetFinder,
                              WithNYSETradingDays,
                              ZiplineTestCase):
    """Tests for PanelDailyBarReader over a synthetic OHLCV pd.Panel."""

    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    # BUG FIX: tz string was corrupted by an extraction artifact ('ut | c').
    END_DATE = pd.Timestamp('2006-02-01', tz='utc')

    @classmethod
    def init_class_fixtures(cls):
        super(TestPanelDailyBarReader, cls).init_class_fixtures()

        finder = cls.asset_finder
        days = cls.trading_days

        items = finder.retrieve_all(finder.sids)
        major_axis = days
        minor_axis = ['open', 'high', 'low', 'close', 'volume']

        # Fill the panel with a distinct value per cell so spot_price lookups
        # can be checked exactly.
        shape = tuple(map(len, [items, major_axis, minor_axis]))
        raw_data = np.arange(shape[0] * shape[1] * shape[2]).reshape(shape)

        cls.panel = pd.Panel(
            raw_data,
            items=items,
            major_axis=major_axis,
            minor_axis=minor_axis,
        )

        cls.reader = PanelDailyBarReader(days, cls.panel)

    def test_spot_price(self):
        """Every (asset, date, field) cell must round-trip through the reader."""
        panel = self.panel
        reader = self.reader

        for asset, date, field in product(*panel.axes):
            self.assertEqual(
                panel.loc[asset, date, field],
                reader.spot_price(asset, date, field),
            )

    def test_duplicate_values(self):
        """Duplicate labels on any axis must be rejected with a clear error."""
        UNIMPORTANT_VALUE = 57
        panel = pd.Panel(
            UNIMPORTANT_VALUE,
            items=['a', 'b', 'b', 'a'],
            major_axis=['c'],
            minor_axis=['d'],
        )
        unused = ExplodingObject()

        axis_names = ['items', 'major_axis', 'minor_axis']

        for axis_order in permutations((0, 1, 2)):
            transposed = panel.transpose(*axis_order)
            with self.assertRaises(ValueError) as e:
                PanelDailyBarReader(unused, transposed)

            expected = (
                "Duplicate entries in Panel.{name}: ['a', 'b'].".format(
                    name=axis_names[axis_order.index(0)],
                )
            )
            self.assertEqual(str(e.exception), expected)
|
wenottingham/ansible | lib/ansible/plugins/connection/ssh.py | Python | gpl-3.0 | 32,355 | 0.00306 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fcntl
import os
import pipes
import pty
import select
import subprocess
import time
from ansible import constants as C
from ansible.compat.six import PY3, text_type, binary_type
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.boolean import boolean
from ansible.utils.path import unfrackpath, makedirs_safe
# Reuse the display instance created by the CLI entry point when available;
# fall back to a fresh Display when imported outside the main process.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
# Cached result of the sshpass availability probe; None means "not checked yet".
SSHPASS_AVAILABLE = None
class Connection(ConnectionBase):
    ''' ssh based connections '''

    # Name under which this plugin is selected (ansible_connection=ssh).
    transport = 'ssh'
    # ssh can feed module payloads over stdin.
    has_pipelining = True
    # Every configured become method except 'runas' is usable over ssh.
    become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
    def __init__(self, *args, **kwargs):
        """Initialize the plugin and remember the target host address."""
        super(Connection, self).__init__(*args, **kwargs)

        self.host = self._play_context.remote_addr

    # The connection is created by running ssh/scp/sftp from the exec_command,
    # put_file, and fetch_file methods, so we don't need to do any connection
    # management here.

    def _connect(self):
        """No-op (see note above); returns self so callers can chain."""
        return self
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
# distutils isn't always available; shutils.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
but for now we do it simple way.
'''
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
more than once so it must be persistent (ie: a list is okay but a
StringIO would not)
:arg explanation: A text string containing explaining why the arguments
were added. It will be displayed with a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: | (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self._play_context.remote_addr)
b_command += b_args
def _build_command(self, binary, *other_args):
'''
Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
a command line as an | array that can be passed to subprocess.Popen.
'''
b_command = []
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
if self._play_context.password:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
if self._play_context.password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
#
# Next, we add [ssh_connection]ssh_args from ansible.cfg.
#
if self._play_context.ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(self._play_context.ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments controlled by configuration file settings
# (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
# a combination thereof.
if not C.HOST_KEY_CHECKING:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
if self._play_context.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self._play_context.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self._play_context.private_key_file
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not self._play_context.password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_pass not set"
)
user = self._play_context.remote_user
if user:
self._add_args(b_command,
(b"-o", b"User=" + to_bytes(self._play_context.remote_user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
self. |
alliance-genome/agr_prototype | indexer/src/files/comment_file.py | Python | mit | 327 | 0 |
class CommentFile:
    """
    Iterator wrapper around a line source that skips comment lines.

    f: the wrapped object; must expose a ``next()`` method returning the
       next line (as this codebase's file readers do).
    commentstring: lines starting with this prefix are skipped.
    """

    def __init__(self, f, commentstring="#"):
        # BUG FIX: the def line was corrupted by a ' | ' extraction artifact.
        self.f = f
        self.commentstring = commentstring

    def next(self):
        """Return the next non-comment line; propagates StopIteration at EOF."""
        line = self.f.next()
        while line.startswith(self.commentstring):
            line = self.f.next()
        return line

    # Python 3 iteration protocol uses __next__; alias it so this class works
    # as an iterator on both Python 2 and Python 3.
    __next__ = next

    def __iter__(self):
        return self
back-to/streamlink | src/streamlink/plugins/ard_mediathek.py | Python | bsd-2-clause | 4,132 | 0.00121 | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream
# Endpoint returning the JSON media description for a given media id.
MEDIA_URL = "http://www.ardmediathek.de/play/media/{0}"
# Player SWF used for HDS manifest verification (pvswf).
SWF_URL = "http://www.ardmediathek.de/ard/static/player/base/flash/PluginFlash.swf"
HDCORE_PARAMETER = "?hdcore=3.3.0"
# Map of numeric _quality codes to human-readable stream names.
QUALITY_MAP = {
    "auto": "auto",
    3: "544p",
    2: "360p",
    # BUG FIX: this entry was corrupted by a ' | ' extraction artifact.
    1: "288p",
    0: "144p"
}

_url_re = re.compile(r"http(s)?://(?:(\w+\.)?ardmediathek.de/tv|mediathek.daserste.de/)")
_media_id_re = re.compile(r"/play/(?:media|config)/(\d+)")
# Schema of the JSON media description fetched from MEDIA_URL.
_media_schema = validate.Schema({
    "_mediaArray": [{
        "_mediaStreamArray": [{
            validate.optional("_server"): validate.text,
            "_stream": validate.any(validate.text, [validate.text]),
            "_quality": validate.any(int, validate.text)
        }]
    }]
})
# Schema of the SMIL playlist referenced by some stream entries.
_smil_schema = validate.Schema(
    validate.union({
        "base": validate.all(
            validate.xml_find("head/meta"),
            validate.get("base"),
            validate.url(scheme="http")
        ),
        "cdn": validate.all(
            validate.xml_find("head/meta"),
            validate.get("cdn")
        ),
        "videos": validate.all(
            validate.xml_findall("body/seq/video"),
            [validate.get("src")]
        )
    })
)
class ard_mediathek(Plugin):
    """Plugin extracting streams from ARD Mediathek / Das Erste pages."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_http_streams(self, info):
        """Yield (name, HTTPStream) pairs for progressive-download entries."""
        name = QUALITY_MAP.get(info["_quality"], "vod")
        urls = info["_stream"]
        if not isinstance(info["_stream"], list):
            urls = [urls]
        for url in urls:
            stream = HTTPStream(self.session, url)
            yield name, stream

    def _get_hds_streams(self, info):
        """Return HDS streams parsed from the f4m manifest."""
        # Needs the hdcore parameter added
        url = info["_stream"] + HDCORE_PARAMETER
        return HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL).items()

    def _get_hls_streams(self, info):
        """Return HLS streams parsed from the variant playlist."""
        return HLSStream.parse_variant_playlist(self.session, info["_stream"]).items()

    def _get_smil_streams(self, info):
        """Yield HDS streams for every video listed in a SMIL playlist."""
        res = self.session.http.get(info["_stream"])
        smil = self.session.http.xml(res, "SMIL config", schema=_smil_schema)

        for video in smil["videos"]:
            url = "{0}/{1}{2}".format(smil["base"], video, HDCORE_PARAMETER)
            streams = HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL, is_akamai=smil["cdn"] == "akamai")
            for stream in streams.items():
                yield stream

    def _get_streams(self):
        res = self.session.http.get(self.url)
        match = _media_id_re.search(res.text)
        if match:
            media_id = match.group(1)
        else:
            return
        self.logger.debug("Found media id: {0}", media_id)

        res = self.session.http.get(MEDIA_URL.format(media_id))
        media = self.session.http.json(res, schema=_media_schema)

        for media in media["_mediaArray"]:
            for stream in media["_mediaStreamArray"]:
                stream_ = stream["_stream"]
                if isinstance(stream_, list):
                    if not stream_:
                        continue
                    stream_ = stream_[0]

                # Pick a parser based on the stream URL.
                if stream_.endswith(".f4m"):
                    parser = self._get_hds_streams
                    parser_name = "HDS"
                elif stream_.endswith(".smil"):
                    parser = self._get_smil_streams
                    parser_name = "SMIL"
                elif stream_.endswith(".m3u8"):
                    parser = self._get_hls_streams
                    parser_name = "HLS"
                elif stream_.startswith("http"):
                    parser = self._get_http_streams
                    parser_name = "HTTP"
                else:
                    # BUG FIX: previously an unrecognised stream URL fell
                    # through with `parser` unset (NameError on the first
                    # entry) or stale from an earlier iteration; skip it.
                    continue

                try:
                    for s in parser(stream):
                        yield s
                except IOError as err:
                    self.logger.error("Failed to extract {0} streams: {1}",
                                      parser_name, err)


__plugin__ = ard_mediathek
SEA000/uw-empathica | empathica/gluon/contrib/login_methods/motp_auth.py | Python | mit | 4,542 | 0.008587 | #!/usr/bin/env python
import time
from hashlib import md5
from gluon.dal import DAL
def motp_auth(db=None, time_offset=60):
    """
    motp allows you to login with a one time password(OTP) generated on a
    motp client; motp clients are available for practically all platforms.
    to know more about OTP visit http://en.wikipedia.org/wiki/One-time_password
    to know more visit http://motp.sourceforge.net

    Written by Madhukar R Pai (madspai@gmail.com)
    License : MIT or GPL v2
    thanks and credits to the web2py community

    db defaults to DAL('sqlite://storage.sqlite'), created lazily on the
    first call instead of at import time.  time_offset is the accepted
    clock skew in seconds (OTPs valid within +/- time_offset).

    to use motp_auth:
    motp_auth.py has to be located in gluon/contrib/login_methods/ folder
    first auth_user has to have 2 extra fields - motp_secret and motp_pin
    for that define auth like shown below:

    ## after auth = Auth(db)
    db.define_table(
        auth.settings.table_user_name,
        Field('first_name', length=128, default=''),
        Field('last_name', length=128, default=''),
        Field('email', length=128, default='', unique=True), # required
        Field('password', 'password', length=512, # required
              readable=False, label='Password'),
        Field('motp_secret', length=512, default='',
              label='MOTP Seceret'),
        Field('motp_pin', length=128, default='',
              label='MOTP PIN'),
        Field('registration_key', length=512, # required
              writable=False, readable=False, default=''),
        Field('reset_password_key', length=512, # required
              writable=False, readable=False, default=''),
        Field('registration_id', length=512, # required
              writable=False, readable=False, default=''))

    ##validators
    custom_auth_table = db[auth.settings.table_user_name] # get the custom_auth_table
    custom_auth_table.first_name.requires = \
        IS_NOT_EMPTY(error_message=auth.messages.is_empty)
    custom_auth_table.last_name.requires = \
        IS_NOT_EMPTY(error_message=auth.messages.is_empty)
    custom_auth_table.password.requires = CRYPT()
    custom_auth_table.email.requires = [
        IS_EMAIL(error_message=auth.messages.invalid_email),
        IS_NOT_IN_DB(db, custom_auth_table.email)]
    auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
    ## before auth.define_tables()

    ##after that:
    from gluon.contrib.login_methods.motp_auth import motp_auth
    auth.settings.login_methods.append(motp_auth(db=db))

    ##Instructions for using MOTP
    - after configuring motp for web2py, Install a MOTP client on your phone
      (android, IOS, java, windows phone, etc)
    - initialize the motp client (to reset a motp secret type in #**#),
      During user creation enter the secret generated during initialization
      into the motp_secret field in auth_user and similarly enter a
      pre-decided pin into the motp_pin
    - done.. to login, just generate a fresh OTP by typing in the pin and
      use the OTP as password

    ###To Dos###
    - both motp_secret and pin are stored in plain text! need to have some
      way of encrypting
    - web2py stores the password in db on successful login (should not happen)
    - maybe some utility or page to check the otp would be useful
    - as of now user field is hardcoded to email. Some way of selecting user
      table and user field.
    """
    if db is None:
        # Lazily create the default connection so importing this module does
        # not open a database as a side effect (the old def-time default did).
        db = DAL('sqlite://storage.sqlite')

    def verify_otp(otp, pin, secret, offset=60):
        """Check otp against the mOTP hashes for the +/- offset second window."""
        epoch_time = int(time.time())
        # mOTP works on 10-second steps: drop the last digit of the epoch.
        time_start = int(str(epoch_time - offset)[:-1])
        time_end = int(str(epoch_time + offset)[:-1])
        for t in range(time_start - 1, time_end + 1):
            to_hash = str(t) + secret + pin
            # BUG FIX: md5() requires bytes on Python 3; also renamed the
            # result so it no longer shadows the builtin hash().
            token = md5(to_hash.encode('utf-8')).hexdigest()[:6]
            if otp == token:
                return True
        return False

    def motp_auth_aux(email, password, db=db, offset=time_offset):
        """Return True when password is a currently valid OTP for email."""
        if db:
            user_data = db(db.auth_user.email == email).select().first()
            if user_data:
                if user_data['motp_secret'] and user_data['motp_pin']:
                    return verify_otp(password,
                                      user_data['motp_pin'],
                                      user_data['motp_secret'],
                                      offset=offset)
                return False
            return False
        return False

    return motp_auth_aux
|
Yarr/Yarr | localdb/lib/localdb-tools/modules/db_logging.py | Python | gpl-2.0 | 3,337 | 0.011387 | #!/usr/bin/env python3
#################################
# Author: Arisa Kubota
# Email: arisa.kubota at cern.ch
# Date: July 2020
# Project: Local Database for YARR
#################################
import os
from copy import copy
from logging import getLogger, getLoggerClass, Formatter, FileHandler, StreamHandler, addLevelName, DEBUG, INFO, WARNING, ERROR, CRITICAL
import coloredlogs
from datetime import datetime
# Default verbosity for the Local DB logger; switch the lines when debugging.
_level = INFO
#_level = DEBUG

# Shared module-level logger used by all Local DB tools.
logger = getLogger("Log")
logger.setLevel(_level)

# Fixed-width level tags inserted in front of every message.
LevelNames = {
    'DEBUG'   : "[ debug  ]", # white
    # BUG FIX: key was corrupted by a ' | ' extraction artifact ('IN | FO');
    # comment corrected: 32 below is green, not cyan.
    'INFO'    : "[ info   ]", # green
    'WARNING' : "[warning ]", # yellow
    'ERROR'   : "[ error  ]", # red
    'CRITICAL': "[critical]", # white on red bg
}

# ANSI SGR color codes matching the levels above.
LevelColors = {
    'DEBUG'   : 37, # white
    'INFO'    : 32, # green
    'WARNING' : 33, # yellow
    'ERROR'   : 31, # red
    'CRITICAL': 41, # white on red bg
}
class ColoredFormatter(Formatter):
    """Console formatter prefixing records with a colorized '[ level ][ Local DB ]:' tag."""

    def __init__(self, patern):
        # BUG FIX: the call was corrupted by a ' | ' extraction artifact
        # ('Formatter._ | _init__').
        Formatter.__init__(self, patern, datefmt="%H:%M:%S")

    def format(self, record):
        # Work on a copy so the shared record is not mutated for other handlers.
        rec = copy(record)
        color_code = LevelColors.get(rec.levelname, 37)
        tag = LevelNames.get(rec.levelname, "[unknown ]")
        rec.levelname = "\033[{0}m{1}[ Local DB ]:\033[0m".format(color_code, tag)
        return Formatter.format(self, rec)
class LogFileFormatter(Formatter):
    """File formatter: same '[ level ][ Local DB ]:' tag, without ANSI colors."""

    def __init__(self, patern):
        Formatter.__init__(self, patern)

    def format(self, record):
        # Work on a copy so the shared record is not mutated for other handlers.
        rec = copy(record)
        tag = LevelNames.get(rec.levelname, "[unknown ]")
        rec.levelname = "{0}[ Local DB ]:".format(tag)
        return Formatter.format(self, rec)
def setLog(level=_level):
    """Attach a colorized console handler to the module logger."""
    console_handler = StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(
        ColoredFormatter("[%(asctime)s:%(msecs)-3d]%(levelname)s %(message)s"))
    logger.addHandler(console_handler)
    logger.setLevel(level)
    logger.debug('Set log')
def setLogFile(filename='', level=_level):
    """
    Attach a file handler to the module logger.

    filename: target log file; when empty, ~/.yarr/localdb/log/log is used
              and rotated (log-old-0 ... log-old-10) once it exceeds ~1 MB.
    level: logging level for the handler and the logger.
    """
    if filename == '':
        home = os.environ['HOME']
        dirname = '{0}/.yarr/localdb/log/'.format(home)
        logfile = '{}/log'.format(dirname)
        if os.path.isfile(logfile):
            size = os.path.getsize(logfile)
            if size / 1000. > 1000:  # greater than 1MB
                # BUG FIX: the old code renamed the live log to log-old-0
                # *before* shifting, so it was immediately shifted again to
                # log-old-1, and the freshly created log-old-10 was deleted.
                # Correct order: drop the oldest, shift up, then archive.
                oldest = '{0}/log-old-{1}'.format(dirname, 10)
                if os.path.isfile(oldest):
                    os.remove(oldest)
                for i in reversed(range(10)):
                    src = '{0}/log-old-{1}'.format(dirname, i)
                    if os.path.isfile(src):
                        os.rename(src, '{0}/log-old-{1}'.format(dirname, i + 1))
                os.rename(logfile, '{0}/log-old-{1}'.format(dirname, 0))
        filename = logfile
    dir_path = os.path.dirname(os.path.abspath(filename))
    os.makedirs(dir_path, exist_ok=True)
    handler = FileHandler(filename)
    handler.setLevel(level)
    # BUG FIX: use the plain-text formatter for files; ColoredFormatter wrote
    # ANSI escape sequences into the log file (LogFileFormatter exists for
    # exactly this purpose but was never used).
    formatter = LogFileFormatter("[%(asctime)s:%(msecs)-3d]%(levelname)s %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
    logger.debug('Set log file: {}'.format(filename))
|
txomon/vdsm | vdsm/storage/hsm.py | Python | gpl-2.0 | 138,543 | 0.000079 | #
# Copyright 2009-2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
This is the Host Storage Manager module.
"""
import os
import threading
import logging
import glob
from fnmatch import fnmatch
from itertools import imap
from collections import defaultdict
from functools import partial, wraps
import errno
import time
import signal
import types
import math
import numbers
import stat
from vdsm.config import config
import sp
from spbackends import MAX_POOL_DESCRIPTION_SIZE, MAX_DOMAINS
from spbackends import StoragePoolDiskBackend
from spbackends import StoragePoolMemoryBackend
import monitor
import sd
import blockSD
import nfsSD
import glusterSD
import localFsSD
import lvm
import fileUtils
import multipath
import outOfProcess as oop
from sdc import sdCache
import image
import volume
import iscsi
import misc
from misc import deprecated
import taskManager
import clusterlock
import storage_exception as se
from threadLocal import vars
from vdsm import constants
from storageConstants import STORAGE
import resourceManager as rm
from resourceFactories import IMAGE_NAMESPACE
import devicemapper
import logUtils
import mount
import dispatcher
import supervdsm
import storageServer
from vdsm import utils
from vdsm import qemuimg
# Common dictionary keys used in request/response structures.
GUID = "guid"
NAME = "name"
UUID = "uuid"
TYPE = "type"
INITIALIZED = "initialized"
CAPACITY = "capacity"
PATHLIST = "pathlist"
# Default logging decorator applied to @public API entry points.
logged = partial(
    logUtils.logcall, "dispatcher", "Run and protect: %s",
    resPattern="Run and protect: %(name)s, Return response: %(result)s")
rmanager = rm.ResourceManager.getInstance()
# FIXME: moved from spm.py but this should be somewhere else
SECTOR_SIZE = 512
STORAGE_CONNECTION_DIR = os.path.join(constants.P_VDSM_LIB, "connections/")
# NOTE(review): presumably seconds — confirm at the call site.
QEMU_READABLE_TIMEOUT = 30
HSM_DOM_MON_LOCK = "HsmDomainMonitorLock"
def public(f=None, **kwargs):
    """
    Decorator exporting a function through the dispatcher, wrapped with a
    logging decorator (``kwargs['logger']``, defaulting to ``logged()``).
    Supports both bare ``@public`` and parametrized ``@public(...)`` usage.
    """
    if f is None:
        # Parametrized form: return a decorator bound to the given kwargs.
        return partial(public, **kwargs)

    log_wrapper = kwargs.get("logger", logged())
    return dispatcher.exported(wraps(f)(log_wrapper(f)))
# Connection Management API compatibility code
# Remove when deprecating dis\connectStorageServer
# Maps numeric storage domain type ids to connection type names used by the
# connection management layer.
CON_TYPE_ID_2_CON_TYPE = {
    sd.LOCALFS_DOMAIN: 'localfs',
    sd.NFS_DOMAIN: 'nfs',
    sd.ISCSI_DOMAIN: 'iscsi',
    # FCP domain shouldn't even be on the list but VDSM use to just
    # accept this type as iscsi so we are stuck with it
    sd.FCP_DOMAIN: 'iscsi',
    sd.POSIXFS_DOMAIN: 'posixfs',
    sd.GLUSTERFS_DOMAIN: 'glusterfs'}
def _updateIfaceNameIfNeeded(iface, netIfaceName):
if iface.netIfaceName is None:
iface.netIfaceName = netIfaceName
iface.update()
return True
return False
def _resolveIscsiIface(ifaceName, initiatorName, netIfaceName):
    """
    Return the iscsi interface object named ifaceName, creating and
    persisting it when it does not exist yet; with a falsy ifaceName the
    'default' interface is returned.  Raises se.iSCSIifaceError when an
    existing iface is already bound to a different network interface than
    the one requested.
    """
    if not ifaceName:
        return iscsi.IscsiInterface('default')
    for iface in iscsi.iterateIscsiInterfaces():
        if iface.name != ifaceName:
            continue
        if netIfaceName is not None:
            # Fill in net_ifacename on an existing iface when unset; if it is
            # set and differs from what engine sent, refuse to reconfigure.
            if (not _updateIfaceNameIfNeeded(iface, netIfaceName) and
                    netIfaceName != iface.netIfaceName):
                logging.error('iSCSI netIfaceName coming from engine [%s] '
                              'is different from iface.net_ifacename '
                              'present on the system [%s]. Aborting iscsi '
                              'iface [%s] configuration.' %
                              (netIfaceName, iface.netIfaceName, iface.name))
                raise se.iSCSIifaceError()
        return iface
    # No existing iface with that name: create and persist a new one.
    iface = iscsi.IscsiInterface(ifaceName, initiatorName=initiatorName,
                                 netIfaceName=netIfaceName)
    iface.create()
    return iface
def _connectionDict2ConnectionInfo(conTypeId, conDict):
    """
    Translate a legacy connect/disconnectStorageServer connection dictionary
    into a storageServer.ConnectionInfo for the given domain type id.

    Raises se.InvalidParameterException for malformed numeric options and
    se.StorageServerActionError for unknown connection types.
    """
    def getIntParam(optDict, key, default):
        res = optDict.get(key, default)
        if res is None:
            return res

        try:
            return int(res)
        except ValueError:
            raise se.InvalidParameterException(key, res)

    # FIXME: Remove when nfs_mount_options is no longer supported. This is
    # in the compatibility layer so that the NFSConnection class stays clean.
    # Engine options have precendence, so use deprecated nfs_mount_options
    # only if engine passed nothing (indicated by default params of 'None').
    def tryDeprecatedNfsParams(conDict):
        if (conDict.get('protocol_version', None),
                conDict.get('retrans', None),
                conDict.get('timeout', None)) == (None, None, None):
            # BUG FIX: identifier was corrupted by a ' | ' extraction
            # artifact ('conf_optio | ns').
            conf_options = config.get(
                'irs', 'nfs_mount_options').replace(' ', '')
            if (frozenset(conf_options.split(',')) !=
                    frozenset(storageServer.NFSConnection.DEFAULT_OPTIONS)):
                logging.warning("Using deprecated nfs_mount_options from"
                                " vdsm.conf to mount %s: %s",
                                conDict.get('connection', '(unknown)'),
                                conf_options)
                return storageServer.PosixFsConnectionParameters(
                    conDict.get('connection', None), 'nfs', conf_options)
        return None

    typeName = CON_TYPE_ID_2_CON_TYPE[conTypeId]
    if typeName == 'localfs':
        params = storageServer.LocaFsConnectionParameters(
            conDict.get('connection', None))
    elif typeName == 'nfs':
        params = tryDeprecatedNfsParams(conDict)
        if params is not None:
            # Hack to support vdsm.conf nfs_mount_options
            typeName = 'posixfs'
        else:
            version = conDict.get('protocol_version', "3")
            version = str(version)
            if version == "auto":
                version = None
            params = storageServer.NfsConnectionParameters(
                conDict.get('connection', None),
                getIntParam(conDict, 'retrans', None),
                getIntParam(conDict, 'timeout', None),
                version,
                conDict.get('mnt_options', None))
    elif typeName == 'posixfs':
        params = storageServer.PosixFsConnectionParameters(
            conDict.get('connection', None),
            conDict.get('vfs_type', None),
            conDict.get('mnt_options', None))
    elif typeName == 'glusterfs':
        params = storageServer.GlusterFsConnectionParameters(
            conDict.get('connection', None),
            conDict.get('vfs_type', None),
            conDict.get('mnt_options', None))
    elif typeName == 'iscsi':
        portal = iscsi.IscsiPortal(
            conDict.get('connection', None),
            int(conDict.get('port', None)))
        tpgt = int(conDict.get('tpgt', iscsi.DEFAULT_TPGT))
        target = iscsi.IscsiTarget(portal, tpgt, conDict.get('iqn', None))
        iface = _resolveIscsiIface(conDict.get('ifaceName', None),
                                   conDict.get('initiatorName', None),
                                   conDict.get('netIfaceName', None))
        cred = None
        username = conDict.get('user', None)
        password = conDict.get('password', None)
        if username or password:
            cred = iscsi.ChapCredentials(username, password)
        params = storageServer.IscsiConnectionParameters(target, iface, cred)
    else:
        raise se.StorageServerActionError()
    return storageServer.ConnectionInfo(typeName, params)
class HSM(object):
"""
This is the HSM class. It controls all the stuff relate to the |
wangshiphys/HamiltonianPy | HamiltonianPy/quantumoperator/factory.py | Python | gpl-3.0 | 15,843 | 0.000126 | """
This module provides functions that generate commonly used Hamiltonian terms.
"""
__all__ = [
    "Annihilator",
    "Creator",
    "CPFactory",
    "HoppingFactory",
    "PairingFactory",
    "HubbardFactory",
    # BUG FIX: name was corrupted by a ' | ' extraction artifact
    # ('Coulo | mbFactory'); restore the real exported name.
    "CoulombFactory",
    "HeisenbergFactory",
    "IsingFactory",
    "TwoSpinTermFactory",
]
# NOTE(review): repaired module path split by a ' | ' extraction artifact
# ('quantumope | rator').
from HamiltonianPy.quantumoperator.constant import ANNIHILATION, CREATION, \
    SPIN_DOWN, SPIN_UP
from HamiltonianPy.quantumoperator.particlesystem import AoC, ParticleTerm
from HamiltonianPy.quantumoperator.spinsystem import *
def Creator(site, spin=0, orbit=0):
    """
    Construct the creation operator $c_i^{\\dagger}$ on the given
    single-particle state.

    Parameters
    ----------
    site : list, tuple or 1D np.ndarray
        Coordinates of the localized single-particle state; a 1D array of
        length 1, 2 or 3.
    spin : int, optional
        Spin index of the single-particle state.
        Default: 0.
    orbit : int, optional
        Orbit index of the single-particle state.
        Default: 0.

    Returns
    -------
    operator : AoC
        The creation operator on the specified state.

    Examples
    --------
    >>> from HamiltonianPy.quantumoperator import Creator
    >>> Creator((0, 0), spin=1)
    AoC(otype=CREATION, site=(0, 0), spin=1, orbit=0)
    """
    return AoC(CREATION, site=site, spin=spin, orbit=orbit)
def Annihilator(site, spin=0, orbit=0):
    """
    Construct the annihilation operator $c_i$ on the given single-particle
    state.

    Parameters
    ----------
    site : list, tuple or 1D np.ndarray
        Coordinates of the localized single-particle state; a 1D array of
        length 1, 2 or 3.
    spin : int, optional
        Spin index of the single-particle state.
        Default: 0.
    orbit : int, optional
        Orbit index of the single-particle state.
        Default: 0.

    Returns
    -------
    operator : AoC
        The annihilation operator on the specified state.

    Examples
    --------
    >>> from HamiltonianPy.quantumoperator import Annihilator
    >>> Annihilator((0, 0), spin=0)
    AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
    """
    return AoC(ANNIHILATION, site=site, spin=spin, orbit=orbit)
def CPFactory(site, *, spin=0, orbit=0, coeff=1.0):
    """
    Build a chemical potential term: '$\\mu c_i^{\\dagger} c_i$'.

    Parameters
    ----------
    site : list, tuple or 1D np.ndarray
        Coordinates of the localized single-particle state; a 1D array of
        length 1, 2 or 3.
    spin : int, optional, keyword-only
        Spin index of the single-particle state.
        Default: 0.
    orbit : int, optional, keyword-only
        Orbit index of the single-particle state.
        Default: 0.
    coeff : int or float, optional, keyword-only
        Coefficient of this term.
        Default: 1.0.

    Returns
    -------
    term : ParticleTerm
        The corresponding chemical potential term.

    Examples
    --------
    >>> from HamiltonianPy.quantumoperator import CPFactory
    >>> term = CPFactory((0, 0))
    >>> print(term)
    The coefficient of this term: 1.0
    The component operators:
        AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
        AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
    """
    creation = AoC(CREATION, site=site, spin=spin, orbit=orbit)
    annihilation = AoC(ANNIHILATION, site=site, spin=spin, orbit=orbit)
    return ParticleTerm(
        (creation, annihilation), coeff=coeff, classification="number"
    )
def HoppingFactory(
        site0, site1, *, spin0=0, spin1=None, orbit0=0, orbit1=None, coeff=1.0
):
    """
    Build a hopping term $t c_i^{\\dagger} c_j$ between two states.

    Parameters suffixed with '0' describe the creation operator, those
    suffixed with '1' the annihilation operator.

    Parameters
    ----------
    site0, site1 : list, tuple or 1D np.ndarray
        Coordinates of the two localized single-particle states;
        1D arrays of length 1, 2 or 3.
    spin0, spin1 : int, optional, keyword-only
        Spin indices; `spin0` defaults to 0 and `spin1` defaults to
        None, which means "same as `spin0`".
    orbit0, orbit1 : int, optional, keyword-only
        Orbit indices; `orbit0` defaults to 0 and `orbit1` defaults to
        None, which means "same as `orbit0`".
    coeff : int, float or complex, optional, keyword-only
        Coefficient of this term (default 1.0).

    Returns
    -------
    term : ParticleTerm
        The corresponding hopping term.

    Examples
    --------
    >>> from HamiltonianPy.quantumoperator import HoppingFactory
    >>> term = HoppingFactory(site0=(0, 0), site1=(1, 1), spin0=1)
    >>> print(term)
    The coefficient of this term: 1.0
    The component operators:
        AoC(otype=CREATION, site=(0, 0), spin=1, orbit=0)
        AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
    >>> term = HoppingFactory(site0=(0, 0), site1=(1, 1), spin0=0, spin1=1)
    >>> print(term)
    The coefficient of this term: 1.0
    The component operators:
        AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
        AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
    """
    spin1 = spin0 if spin1 is None else spin1
    orbit1 = orbit0 if orbit1 is None else orbit1
    creation = AoC(CREATION, site=site0, spin=spin0, orbit=orbit0)
    annihilation = AoC(ANNIHILATION, site=site1, spin=spin1, orbit=orbit1)
    # A "hop" between identical states is really a number operator.
    if creation.state == annihilation.state:
        kind = "number"
    else:
        kind = "hopping"
    return ParticleTerm((creation, annihilation), coeff=coeff,
                        classification=kind)
def PairingFactory(
        site0, site1, *, spin0=0, spin1=0, orbit0=0, orbit1=0,
        coeff=1.0, which="h"
):
    """
    Build a pairing term: $p c_i^{\\dagger} c_j^{\\dagger}$ (particle
    pairing) or $p c_i c_j$ (hole pairing).

    Parameters suffixed with '0' describe the first operator, those
    suffixed with '1' the second.

    Parameters
    ----------
    site0, site1 : list, tuple or 1D np.ndarray
        Coordinates of the two localized single-particle states;
        1D arrays of length 1, 2 or 3.
    spin0, spin1 : int, optional, keyword-only
        Spin indices (default 0).
    orbit0, orbit1 : int, optional, keyword-only
        Orbit indices (default 0).
    coeff : int, float or complex, optional, keyword-only
        Coefficient of this term (default 1.0).
    which : str, optional, keyword-only
        Kind of pairing: "h"/"hole" for hole-pairing,
        "p"/"particle" for particle-pairing. Default: "h".

    Returns
    -------
    term : ParticleTerm
        The corresponding pairing term.

    Examples
    --------
    >>> from HamiltonianPy.quantumoperator import PairingFactory
    >>> term = PairingFactory((0, 0), (1, 1), spin0=0, spin1=1, which="h")
    >>> print(term)
    The coefficient of this term: 1.0
    The component operators:
        AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
        AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
    >>> term = PairingFactory((0, 0), (1, 1), spin0=0, spin1=1, which="p")
    >>> print(term)
    The coefficient of this term: 1.0
    The component operators:
        AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
        AoC(otype=CREATION, site=(1, 1), spin=1, orbit=0)
    """
    assert which in ("h", "hole", "p", "particle")
    # Hole pairing pairs two annihilators, particle pairing two creators.
    if which in ("h", "hole"):
        otype = ANNIHILATION
    else:
        otype = CREATION
    first = AoC(otype, site=site0, spin=spin0, orbit=orbit0)
    second = AoC(otype, site=site1, spin=spin1, orbit=orbit1)
    return ParticleTerm((first, second), coeff=coeff)
def HubbardFactory(site, *, orbit=0, coeff=1.0):
"""
Generate Hubbard term: '$U n_{i\\uparrow} n_{i\\downarrow}$'.
This function is valid only for spin-1/2 system.
Parameters
----------
site : list, tuple or 1D np.ndarray
The c |
timsnyder/bokeh | examples/custom/font-awesome/named_icon.py | Python | bsd-3-clause | 7,263 | 0.008674 | from bokeh.core.enums import enumeration
NamedIcon = enumeration(*[
"adjust", "adn", "align-center", "align-justify", "align-left", "align-right", "ambulance",
"anchor", "android", "angellist", "angle-double-down", "angle-double-left", "angle-double-right",
"angle-double-up", "angle-down", "angle-left", "angle-right", "angle-up", "apple", "archive",
"area-chart", "arrow-circle-down", "arrow-circle-left", "arrow-circle-o-down", "arrow-circle-o-left",
"arrow-circle-o-right", "arrow-circle-o-up", "arrow-circle-right", "arrow-circle-up", "arrow-down",
"arrow-left", "arrow-right", "arrow-up", "arrows", "arrows-alt", "arrows-h", "arrows-v", "asterisk",
"at", "automobile", "backward", "ban", "bank", "bar-chart", "bar-chart-o", "barcode", "bars", "beer",
"behance", "behance-square", "bell", "bell-o", "bell-slash", "bell-slash-o", "bicycle", "binoculars",
"birthday-cake", "bitbucket", "bitbucket-square", "bitcoin", "bold", "bolt", "bomb", "book", "bookmark",
"bookmark-o", "briefcase", "btc", "bug", "building", "building-o", "bullhorn", "bullseye", "bus", "cab",
"calculator", "calendar", "calendar-o", "camera", "camera-retro", "car", "caret-down", "caret-left",
"caret-right", "caret-square-o-down", "caret-square-o-left", "caret-square-o-right", "caret-square-o-up",
"caret-up", "cc", "cc-amex", "cc-discover", "cc-mastercard", "cc-paypal", "cc-stripe", "cc-visa", "certificate",
"chain", "chain-broken", "check", "check-circle", "check-circle-o", "check-square", "check-square-o",
"chevron-circle-down", "chevron-circle-left", "chevron-circle-right", "chevron-circle-up", "chevron-down",
"chevron-left", "chevron-right", "chevron-up", "child", "circle", "circle-o", "circle-o-notch", "circle-thin",
"clipboard", "clock-o", "close", "cloud", "cloud-download", "cloud-upload", "cny", "code", "code-fork",
"codepen", "coffee", "cog", "cogs", "columns", "comment", "comment-o", "comments", "comments-o", "compass",
"compress", "copy", "copyright", "credit-card", "crop", "crosshairs", "css3", "cube", "cubes", "cut", "cutlery",
"dashboard", "database", "dedent", "delicious", "desktop", "deviantart", "digg", "dollar", "dot-circle-o",
"download", "dribbble", "dropbox", "drupal", "edit", "eject", "ellipsis-h", "ellipsis-v", "empire", "envelope",
"envelope-o", "envelope-square", "eraser", "eur", "euro", "exchange", "exclamation", "exclamation-circle",
"exclamation-triangle", "expand", "external-link", "external-link-square", "eye", "eye-slash", "eyedropper",
"facebook", "facebook-square", "fast-backward", "fast-forward", "fax", "female", "fighter-jet", "file",
"file-archive-o", "file-audio-o", "file-code-o", "file-excel-o", "file-image-o", "file-movie-o", "file-o",
"file-pdf-o", "file-photo-o", "file-picture-o", "file-powerpoint-o", "file-sound-o", "file-text", "file-text-o",
"file-video-o", "file-word-o", "file-zip-o", "files-o", "film", "filter", "fire", "fire-extinguisher", "flag",
"flag-checkered", "flag-o", "flash", "flask", "flickr", "floppy-o", "folder", "folder-o", "folder-open",
"folder-open-o", "font", "forward", "foursquare", "frown-o", "futbol-o", "gamepad", "gavel", "gbp", "ge",
"gear", "gears", "gift", "git", "git-square", "github", "github-alt", "github-square", "gittip", "glass",
"globe", "google", "google-plus", "google-plus-square", "google-wallet", "graduation-cap", "group",
"h-square", "hacker-news", "hand-o-down", "hand-o-left", "hand-o-right", "hand-o-up", "hdd-o", "header",
"headphones", "heart", "heart-o", "history", "home", "hospital-o", "html5", "ils", "image", "inbox", "indent",
"info", "info-circle", "inr", "instagram", "institution", "ioxhost", "italic", "joomla", "jpy", "jsfiddle",
"key", "keyboard-o", "krw", "language", "laptop", "lastfm", "lastfm-square", "leaf", "legal", "lemon-o",
"level-down", "level-up", "life-bouy", "life-buoy", "life-ring", "life-saver", "lightbulb-o", "line-chart",
"link", "linkedin", "linkedin-square", "linux", "list", "list-alt", "list-ol", "list-ul", "location-arrow",
"lock", "long-arrow-down", "long-arrow-left", "long-arrow-right", "long-arrow-up", "magic", "magnet", "mail-forward",
"mail-reply", "mail-reply-all", "male", "map-marker", "maxcdn", "meanpath", "medkit", "meh-o", "microphone",
"microphone-slash", "minus", "minus-circle", "minus-square", "minus-square-o", "mobile", "mobile-phone", "money",
"moon-o", "mortar-board", "music", "navicon", "newspaper-o", "openid", "outdent", "pagelines", "paint-brush",
"paper-plane", "paper-plane-o", "paperclip", "paragraph", "paste", "pause", "paw", "paypal", "pencil", "pencil-square",
"pencil-square-o", "phone", "phone-square", "photo", "picture-o", "pie-chart", "pied-piper", "pied-piper-alt",
"pinterest", "pinterest-square", "plane", "play", "play-circle", "play-circle-o", "plug", "plus", "plus-circle",
"plus-square", "plus-square-o", "power-off", "print", "puzzle-piece", "qq", "qrcode", "question", "question-circle",
"quote-left", "quote-right", "ra", "random", "rebel", "recycle", "reddit", "reddit-square", "refresh", "remove",
"renren", "reorder", "repeat", "reply", "reply-all", "retweet", "rmb", "road", "rocket", "rotate-left", "rotate-right",
"rouble", "rss", "rss-square", "rub", "ruble", "rupee", "save", "scissors", "search", "search-minus", "search-plus",
"send", "send-o", "share", "share-alt", "share-alt-square", "share-square", "share-square-o", "shekel", "sheqel",
"shield", "shopping-cart", "sign-in", "sign-out", "signal", "sitemap", "skype", "slack", "sliders", "slideshare", "smile-o",
"soccer-ball-o", "sort", "sort-alpha-asc", "sort-alpha-desc", "sort-amount-asc", "sort-amount-desc", "sort-asc",
"sort-desc", "sort-down", "sort-numeric-asc", "sort-numeric-desc", "sort-up", "soundcloud", "space-shuttle", "spinner",
"spoon", "spotify", "square", "square-o", "stack-exchange", "stack-overflow", "star", "star-half", "star-half-empty",
"star-half-full", "star-half-o", "star-o", "steam", "steam-square", "step-backward", "step-forward", "stethoscope",
"stop", "strikethrough", "stumbleupon", "stumbleupon-circle", "subscript", "suitcase", "sun-o", "superscript",
"support", "table", "tablet", "tachometer", "tag", "tags", " | tasks", "taxi", "tencent-weibo", "terminal", "text-height",
"text-width", "th", "th-large", "th-list", "thumb-tack", "thum | bs-down", "thumbs-o-down", "thumbs-o-up", "thumbs-up",
"ticket", "times", "times-circle", "times-circle-o", "tint", "toggle-down", "toggle-left", "toggle-off", "toggle-on",
"toggle-right", "toggle-up", "trash", "trash-o", "tree", "trello", "trophy", "truck", "try", "tty", "tumblr",
"tumblr-square", "turkish-lira", "twitch", "twitter", "twitter-square", "umbrella", "underline", "undo", "university",
"unlink", "unlock", "unlock-alt", "unsorted", "upload", "usd", "user", "user-md", "users", "video-camera", "vimeo-square",
"vine", "vk", "volume-down", "volume-off", "volume-up", "warning", "wechat", "weibo", "weixin", "wheelchair", "wifi",
"windows", "won", "wordpress", "wrench", "xing", "xing-square", "yahoo", "yelp", "yen", "youtube", "youtube-play",
"youtube-square",
])
|
css-umsetzung/three.js | utils/exporters/blender/2.60/scripts/addons/io_mesh_threejs/__init__.py | Python | mit | 15,481 | 0.018022 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# ################################################################
# Init
# ################################################################
# Add-on metadata dictionary read by Blender's add-on manager (bl_info is a
# Blender convention). "blender" is the minimum supported Blender version,
# "api" the minimum Python API revision.
bl_info = {
    "name": "three.js format",
    "author": "mrdoob, kikko, alteredq, remoe, pxf",
    "version": (1, 2, 0),
    "blender": (2, 6, 0),
    "api": 35622,
    "location": "File > Import-Export",
    "description": "Import-Export three.js meshes",
    "warning": "",
    "wiki_url": "https://github.com/mrdoob/three.js/tree/master/utils/exporters/blender",
    "tracker_url": "https://github.com/mrdoob/three.js/issues",
    "category": "Import-Export"}
# To support reload properly, try to access a package var,
# if it's there, reload everything
import bpy
if "bpy" in locals():
import imp
if "export_threejs" in locals():
imp.reload(export_threejs)
if "import_threejs" in locals():
imp.reload(import_threejs)
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
# ################################################################
# Custom properties
# ################################################################
# Register per-object and per-material custom properties; they are edited in
# the panels below and read by the exporter.
bpy.types.Object.THREE_castsShadow = bpy.props.BoolProperty()
bpy.types.Object.THREE_meshCollider = bpy.props.BoolProperty()
bpy.types.Object.THREE_exportGeometry = bpy.props.BoolProperty(default = True)

# Enum items are (identifier, name, description) triples.
THREE_trigger_types = [("None", "None", "None"), ("Small", "Small", "Small"), ("Large", "Large", "Large")]
bpy.types.Object.THREE_triggerType = EnumProperty(name = "Trigger type", description = "Trigger type", items = THREE_trigger_types, default = "None")

bpy.types.Material.THREE_useVertexColors = bpy.props.BoolProperty()

# Maps to the three.js material classes (MeshBasicMaterial, etc.).
THREE_material_types = [("Basic", "Basic", "Basic"), ("Phong", "Phong", "Phong"), ("Lambert", "Lambert", "Lambert")]
bpy.types.Material.THREE_materialType = EnumProperty(name = "Material type", description = "Material type", items = THREE_material_types, default = "Lambert")
class OBJECT_PT_hello( bpy.types.Panel ):
    """Object-properties panel exposing the THREE.js export options."""

    bl_label = "THREE"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "object"

    def draw(self, context):
        """Draw one row per THREE_* custom property of the active object."""
        obj = context.object
        layout = self.layout

        layout.row().label(text="Selected object: " + obj.name )
        layout.row().prop( obj, "THREE_exportGeometry", text="Export geometry" )
        layout.row().prop( obj, "THREE_castsShadow", text="Casts shadow" )
        layout.row().prop( obj, "THREE_meshCollider", text="Mesh collider" )
        layout.row().prop( obj, "THREE_triggerType", text="Trigger type" )
class MATERIAL_PT_hello( bpy.types.Panel ):
    """Material-properties panel exposing the THREE.js material options."""

    bl_label = "THREE"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"

    def draw(self, context):
        """Draw one row per THREE_* custom property of the active material."""
        mat = context.material
        layout = self.layout

        layout.row().label(text="Selected material: " + mat.name )
        layout.row().prop( mat, "THREE_materialType", text="Material type" )
        layout.row().prop( mat, "THREE_useVertexColors", text="Use vertex colors" )
# ################################################################
# Importer
# ################################################################
class ImportTHREEJS(bpy.types.Operator, ImportHelper):
    '''Load a Three.js ASCII JSON model'''
    bl_idname = "import.threejs"
    bl_label = "Import Three.js"

    # Restrict the file browser to .js files.
    filename_ext = ".js"
    filter_glob = StringProperty(default="*.js", options={'HIDDEN'})

    # User-visible import options, drawn in draw() below.
    option_flip_yz = BoolProperty(name="Flip YZ", description="Flip YZ", default=True)
    recalculate_normals = BoolProperty(name="Recalculate normals", description="Recalculate vertex normals", default=True)
    option_worker = BoolProperty(name="Worker", description="Old format using workers", default=False)

    def execute(self, context):
        """Delegate the actual parsing/mesh creation to import_threejs.load().

        NOTE(review): ``**self.properties`` forwards all operator properties
        as keyword arguments — presumably load() accepts them all; confirm
        against import_threejs.load's signature.
        """
        import io_mesh_threejs.import_threejs
        return io_mesh_threejs.import_threejs.load(self, context, **self.properties)

    def draw(self, context):
        """Lay out the import options in the file-browser sidebar."""
        layout = self.layout

        row = layout.row()
        row.prop(self.properties, "option_flip_yz")

        row = layout.row()
        row.prop(self.properties, "recalculate_normals")

        row = layout.row()
        row.prop(self.properties, "option_worker")
# ################################################################
# Exporter - settings
# ################################################################
SETTINGS_FILE_EXPORT = "threejs_settings_export.js"
import os
import json
def file_exists(filename):
    """Return True if file exists and accessible for reading.

    Should be safer than just testing for existence due to links and
    permissions magic on Unix filesystems.

    @rtype: boolean
    """
    try:
        # ``with`` guarantees the handle is closed even if close()
        # would otherwise be skipped; behavior is unchanged.
        with open(filename, 'r'):
            pass
        return True
    except IOError:
        return False
def get_settings_fullpath():
    """Return the full path of the export-settings JSON file inside Blender's temp dir."""
    return os.path.join(bpy.app.tempdir, SETTINGS_FILE_EXPORT)
def save_settings_export(properties):
    """Persist the exporter UI options to a JSON file in Blender's temp dir.

    The counterpart restore_settings_export() reads this file back so the
    export dialog remembers its last-used options.
    """
    settings = {
        "option_export_scene" : properties.option_export_scene,
        "option_embed_meshes" : properties.option_embed_meshes,
        "option_url_base_html" : properties.option_url_base_html,
        "option_copy_textures" : properties.option_copy_textures,

        "option_lights" : properties.option_lights,
        "option_cameras" : properties.option_cameras,

        "option_animation" : properties.option_animation,
        "option_frame_step" : properties.option_frame_step,
        "option_all_meshes" : properties.option_all_meshes,

        "option_flip_yz" : properties.option_flip_yz,

        "option_materials" : properties.option_materials,
        "option_normals" : properties.option_normals,
        "option_colors" : properties.option_colors,
        "option_uv_coords" : properties.option_uv_coords,
        "option_faces" : properties.option_faces,
        "option_vertices" : properties.option_vertices,

        "option_vertices_truncate" : properties.option_vertices_truncate,
        "option_scale" : properties.option_scale,

        "align_model" : properties.align_model
    }

    fname = get_settings_fullpath()
    # Bug fix: the original opened the file and never closed it; ``with``
    # guarantees the data is flushed and the handle released even if
    # json.dump raises.
    with open(fname, "w") as f:
        json.dump(settings, f)
def restore_settings_export(properties):
settings = {}
fname = get_settings_fullpath()
if file_exists(fname):
f = open(fname, "r")
settings = json.load(f)
properties.option_vertices = settings.get("option_vertices", True)
properties.option_vertices_truncate = settings.get("option_vertices_truncate", False)
properties.option_faces = settings.get("option_faces", True)
properties.option_normals = settings.get("option_normals", True)
properties.option_colors = settings.get("option_colors", True)
properties.option_uv_coords = settings.get("option_uv_coords", True)
properties.option_materials = settings.get("option_materials", True)
properties.align_model = settings.get("align_model", "None")
properties.option_scale = settings.get("option_scale", 1.0)
properties.option_flip_yz = settings.get("option_flip_yz", True)
properties.option_export_scene = settings.get("option_export_scene", False)
properties.option_embed_meshes = settings.get("option_embed_meshes", True)
properties.option_ |
MediaKraken/mkarchive | main_download.py | Python | gpl-2.0 | 3,059 | 0.002615 | '''
Copyright (C) 2017 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import subprocess
import time
import pika
from common import common_global
from common import common_logging_elasticsearch
from common import common_network
# start logging
common_global.es_inst = co | mmon_logging_elasticsearch.CommonElasticsearch('main_download')
def on_message(channel, method_frame, header_frame, body):
    """
    Process a single message fetched from the 'mkdownload' queue.

    Dispatches on the message's 'Type' field: 'youtube' runs youtube-dl
    (with a persistent archive file so items are not re-downloaded),
    'image' fetches a URL to a local path. The message is acked only
    after it has been processed.
    """
    if body is None:
        # basic_get returned an empty poll: method_frame is None too, so
        # there is nothing to process and nothing to ack. The original
        # dereferenced method_frame.delivery_tag here, raising an
        # AttributeError that the caller's bare except silently swallowed.
        return
    common_global.es_inst.com_elastic_index('info', {'msg body': body})
    json_message = json.loads(body)
    if json_message['Type'] == 'youtube':
        # --download-archive skips anything already recorded in archive.txt
        dl_pid = subprocess.Popen(['youtube-dl', '-i', '--download-archive',
                                   '/mediakraken/archive.txt', json_message['Data']],
                                  shell=False)
        dl_pid.wait()
    if json_message['Type'] == 'image':
        common_network.mk_network_fetch_from_url(json_message['URL'],
                                                 json_message['Local'])
    channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# fire off wait for it script to allow rabbitmq connection
# (blocks until the mkrabbitmq host accepts connections on 5672)
wait_pid = subprocess.Popen(['/mediakraken/wait-for-it-ash.sh', '-h',
                             'mkrabbitmq', '-p', ' 5672'], shell=False)
wait_pid.wait()

# pika rabbitmq connection
parameters = pika.ConnectionParameters('mkrabbitmq',
                                       credentials=pika.PlainCredentials('guest', 'guest'))
connection = pika.BlockingConnection(parameters)

# setup channels and queue (durable so messages survive broker restarts)
channel = connection.channel()
exchange = channel.exchange_declare(exchange="mkque_download_ex", exchange_type="direct",
                                    durable=True)
queue = channel.queue_declare(queue='mkdownload', durable=True)
channel.queue_bind(exchange="mkque_download_ex", queue='mkdownload')

# only fetch one message at a time per consumer
channel.basic_qos(prefetch_count=1)

# poll the queue once a second forever; on_message ignores empty polls
while True:
    time.sleep(1)
    # grab message from rabbitmq if available
    try:  # since can get connection drops
        # NOTE(review): the bare except below also hides programming errors
        # (e.g. KeyError from a malformed message) — consider narrowing to
        # pika.exceptions.AMQPError.
        method_frame, header_frame, body = channel.basic_get(
            queue='mkdownload', no_ack=False)
        on_message(channel, method_frame, header_frame, body)
    except:
        pass

# close the pika connection
# NOTE(review): unreachable (the loop above never exits), and
# BlockingConnection's shutdown method is close(), not cancel() — confirm
# against the pika version in use.
connection.cancel()
facebook/mcrouter | mcrouter/test/test_mcrouter_to_mcrouter_tko.py | Python | mit | 1,294 | 0.002318 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import time
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterToMcrouterTko(McrouterTestCase):
    """Verify TKO state is tracked by the underlying mcrouter, not the front one."""

    config = './mcrouter/test/test_mcrouter_to_mcrouter_tko.json'
    extra_args = ['--timeouts-until-tko', '1', '--group-remote-errors']

    def setUp(self):
        # The background mcrouter is the one expected to mark servers TKO.
        self.underlying_mcr = self.add_mcrouter(
            self.config, extra_args=self.extra_args, bg_mcrouter=True)

    def get_mcrouter(self):
        """Return a front mcrouter that routes through the background one."""
        return self.add_mcrouter(self.config, extra_args=self.extra_args)

    def test_underlying_tko(self):
        front = self.get_mcrouter()
        self.assertFalse(front.delete("key"))

        # Poll (up to ~10s) until the delete has reached the underlying router.
        for _ in range(10):
            if self.underlying_mcr.stats()['cmd_delete_count'] == 1:
                break
            time.sleep(1)

        stats = self.underlying_mcr.stats("suspect_servers")
        print(stats)
        self.assertEqual(1, len(stats))
        self.assertTrue(re.match("status:(tko|down)", list(stats.values())[0]))

        stats = front.stats("suspect_servers")
        self.assertEqual(0, len(stats))
|
diogo149/treeano | treeano/sandbox/nodes/interval_relu.py | Python | apache-2.0 | 1,043 | 0 | """
relu where each channel has a different leak rate
"""
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("interval_relu")
class IntervalReLUNode(treeano.NodeImpl):
    """Leaky rectifier whose leak coefficient varies linearly per channel."""

    hyperparameter_names = ("leak_min",
                            "leak_max")

    def compute_output(self, network, in_vw):
        lo = network.find_hyperparameter(["leak_min"], 0)
        hi = network.find_hyperparameter(["leak_max"], 1)
        num_channels = in_vw.shape[1]
        # One leak per channel, evenly spaced over [leak_min, leak_max].
        leaks = np.linspace(lo, hi, num_channels).astype(fX)
        # Broadcast the per-channel coefficients along all other axes.
        broadcast_pattern = [0 if axis == 1 else "x"
                             for axis in range(in_vw.ndim)]
        leak_var = T.constant(leaks).dimshuffle(*broadcast_pattern)
        rectified = treeano.utils.rectify(in_vw.variable,
                                          negative_coefficient=leak_var)
        network.create_vw(
            "default",
            variable=rectified,
            shape=in_vw.shape,
            tags={"output"},
        )
|
movicha/dcos | packages/bootstrap/extra/dcos_internal_utils/utils/__init__.py | Python | apache-2.0 | 1,151 | 0 | import fcntl
import logging
import os
log = logging.getLogger(__name__)
def read_file_line(filename):
    """Return the contents of *filename* with surrounding whitespace stripped."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents.strip()
class Directory:
    """Context manager that holds a read-only fd for a directory.

    The fd exists so the directory can be flock()ed via lock(); opening a
    directory O_RDONLY is the standard way to get a lockable fd for it.
    """

    def __init__(self, path):
        self.path = path

    def __enter__(self):
        # Logged before and after so hangs on os.open are visible in the log.
        log.info('Opening {}'.format(self.path))
        self.fd = os.open(self.path, os.O_RDONLY)
        log.info('Opened {} with fd {}'.format(self.path, self.fd))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        log.info('Closing {} with fd {}'.format(self.path, self.fd))
        os.close(self.fd)

    def lock(self):
        # Exclusive lock; use as ``with Directory(p) as d, d.lock(): ...``
        return Flock(self.fd, fcntl.LOCK_EX)
class Flock:
    """Context manager wrapping fcntl.flock on an already-open fd."""

    def __init__(self, fd, op):
        # op is an fcntl.LOCK_* constant (e.g. LOCK_EX)
        (self.fd, self.op) = (fd, op)

    def __enter__(self):
        log.info('Locking fd {}'.format(self.fd))
        # If the fcntl() fails, an IOError is raised.
        # Blocks until the lock is granted (no LOCK_NB here).
        fcntl.flock(self.fd, self.op)
        log.info('Locked fd {}'.format(self.fd))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always unlock, even when the body raised.
        fcntl.flock(self.fd, fcntl.LOCK_UN)
        log.info('Unlocked fd {}'.format(self.fd))
|
YuukanOO/beard | tests/test_pos.py | Python | mit | 454 | 0.006608 | from beard import pos
import base_test
import unittest
class TestPosModule(base_test.RequireTokens):
    """Tests for beard.pos built on the shared token fixture."""

    def test_create_from_tokens(self):
        result = pos.create_from_tokens(self.tokens_01)
        words = result.get('words', {})
        parts = result.get('parts_of_speech', {})
        # Both mappings must be present and non-empty.
        self.assertTrue(words)
        self.assertTrue(parts)
        self.assertEqual(len(words), 14)
        # Don't forget the None Pos (start of paragraph)
        self.assertEqual(len(parts), 7)
|
kamatama41/luigi-bigquery | luigi_bigquery/tests/test_helper.py | Python | apache-2.0 | 3,217 | 0.002176 | from luigi_bigquery import ResultProxy
import os
import shutil
import tempfile
class MockClient(object):
    """In-memory stand-in for the BigQuery client used by the test suite.

    Backed by plain lists of resource dicts: datasets, tables and jobs,
    each shaped like the corresponding BigQuery REST resources.
    """

    def __init__(self, datasets, tables, jobs):
        self._datasets = datasets
        self._tables = tables
        self._jobs = jobs

    def create_dataset(self, dataset_id, friendly_name=None, description=None, access=None):
        """Build a dataset resource, record it and return it."""
        # Bug fix: _dataset_resource is a method, but the original called it
        # as a bare name (NameError at runtime).
        dataset_data = self._dataset_resource(dataset_id, friendly_name, description, access)
        self._datasets.append(dataset_data)
        return dataset_data

    def get_datasets(self):
        """Return all dataset resources."""
        return self._datasets

    def check_dataset(self, dataset_id):
        """Return True if a dataset with the given id exists."""
        return dataset_id in [ds['datasetReference']['datasetId'] for ds in self.get_datasets()]

    def get_table(self, dataset_id, table_id):
        """Return the table resource for dataset_id/table_id, or {}."""
        for table in self._tables:
            ref = table['tableReference']
            if ref['datasetId'] == dataset_id and ref['tableId'] == table_id:
                return table
        return {}

    def delete_table(self, dataset_id, table_id):
        """No-op in the mock."""
        pass

    def check_job(self, job_id):
        """Return (complete, total_rows) for the given job id."""
        job = self._job(job_id)
        return (job.get('job_complete', False), int(job.get('total_rows', 0)))

    def get_query_schema(self, job_id):
        """Return the schema recorded for the given job."""
        job = self._job(job_id)
        return job['schema']

    def get_query_rows(self, job_id):
        """Return the result rows recorded for the given job."""
        job = self._job(job_id)
        return job['rows']

    def query(self, query):
        """Pretend to submit *query*; always returns the first fixture job."""
        return (self._jobs[0]['job_id'], None)

    def _job(self, job_id):
        # Linear scan of the job fixtures; {} when not found.
        for job in self._jobs:
            if job['job_id'] == job_id:
                return job
        return {}

    def _dataset_resource(self, dataset_id, friendly_name=None, description=None, access=None):
        """Build a BigQuery-style dataset resource dict; optional keys only when given."""
        data = {
            "datasetReference": {
                "datasetId": dataset_id,
                "projectId": 'test-project-id'
            }
        }

        if friendly_name:
            data["friendlyName"] = friendly_name
        if description:
            data["description"] = description
        if access:
            data["access"] = access

        return data
class MockGCSClient(object):
    """Fake GCS client backed by a list of object-resource dicts."""

    def __init__(self, objects):
        self._objects = objects

    def get_file(self, bucket_name, path):
        """Return the first object matching bucket+name, or {} when absent."""
        matches = (obj for obj in self._objects
                   if obj['bucket'] == bucket_name and obj['name'] == path)
        return next(matches, {})

    def check_file(self, bucket_name, path):
        """Return True when an object exists at bucket+name."""
        return bool(self.get_file(bucket_name, path))
class TestConfig(object):
    """Bundles mock BigQuery/GCS fixtures plus a scratch directory for tests."""

    def __init__(self, datasets=None, tables=None, jobs=None, objects=None):
        # Bug fix: the original used mutable default arguments ([]), which
        # are shared across every instance created without explicit
        # arguments; use None sentinels so each instance owns its lists.
        self.datasets = [] if datasets is None else datasets
        self.tables = [] if tables is None else tables
        self.objects = [] if objects is None else objects
        self._jobs = [] if jobs is None else jobs
        self.tmp_dir = None

    def setUp(self):
        """Create the scratch directory (idempotent)."""
        if not self.tmp_dir:
            self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove the scratch directory and reset state."""
        if self.tmp_dir:
            shutil.rmtree(self.tmp_dir)
            self.tmp_dir = None

    def get_tmp_path(self, filename):
        """Return *filename* joined under the scratch directory."""
        return os.path.join(self.tmp_dir, filename)

    def get_client(self):
        """Return a MockClient over the configured fixtures."""
        return MockClient(datasets=self.datasets, tables=self.tables, jobs=self._jobs)

    def get_gcs_client(self):
        """Return a MockGCSClient over the configured objects."""
        return MockGCSClient(objects=self.objects)
|
skitzycat/beedraw | beesessionstate.py | Python | gpl-2.0 | 21,483 | 0.045012 | # Beedraw/Hive network capable client and server allowing collaboration on a single image
# Copyright (C) 2009 Thomas Becker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import PyQt4.QtCore as qtcore
import PyQt4.QtGui as qtgui
from sketchlog import SketchLogWriter
from beeapp import BeeApp
from beetypes im | port *
from beeeventstack import *
from beelayer import BeeLayerState
from beeutil import getTimeString
from Queue import Queue
import os
from datetime import datetime
class BeeSessionState:
""" Represents the state of a current drawing with all layers and the current composition of them to be displayed on the screen
"""
	def __init__(self,master,width,height,type):
		""" Set up a fresh session: document geometry, layer bookkeeping,
		locks, command queues and (optionally) an automatic session log. """
		# save passed values
		self.networkhistorysize=0
		self.docwidth=width
		self.docheight=height
		self.type=type
		self.master=master

		self.remoteidlock=qtcore.QReadWriteLock()
		self.remoteid=0

		self.historysize=20

		# floating layers get negative keys, counting down
		self.nextfloatinglayerkey=-1
		self.nextfloatinglayerkeylock=qtcore.QReadWriteLock()

		self.layers=[]
		# mutex for messing with the list of layer: adding, removing or rearranging
		self.layerslistlock=qtcore.QReadWriteLock()

		# never have a local clip path
		self.clippath=None

		# set unique ID
		self.id=master.getNextWindowId()

		# initialize values
		self.backdropcolor=0xFFFFFFFF
		self.remotecommandstacks={}
		self.curlayerkey=None
		self.curlayerkeymutex=qtcore.QMutex()

		self.docsizelock=qtcore.QReadWriteLock()

		self.nextlayerkey=1
		self.nextlayerkeymutex=qtcore.QMutex()

		# Queue(0) means unbounded queues
		self.remotecommandqueue=Queue(0)
		self.remoteoutputqueue=Queue(0)

		self.remotedrawingthread=None

		# register state so the master can get back to here
		master.registerWindow(self)

		# start log if autolog is enabled
		self.log=None
		if self.master.getConfigOption("autolog"):
			# don't do this for animations, there's already a log of it if there's an animation
			if type!=WindowTypes.animation:
				self.startLog()

		self.curtool=None
	def setRemoteId(self,id):
		""" Record the client id assigned by the server (under write lock). """
		lock=qtcore.QWriteLocker(self.remoteidlock)
		self.remoteid=id
def ownedByNobody(self,owner):
if self.type==WindowTypes.networkclient or self.type==WindowTypes.standaloneserver or self.type==WindowTypes.integratedserver:
if owner==0:
return True
return False
	def localLayer(self,layerkey):
		""" return True if the key passed is for a layer that is local, False otherwise """
		layer=self.getLayerForKey(layerkey)
		# hold the layer's properties read lock while inspecting its owner
		# NOTE(review): getLayerForKey may return None for an unknown key,
		# which would raise AttributeError here — confirm callers only pass
		# valid keys.
		proplock=qtcore.QReadLocker(layer.propertieslock)
		return self.ownedByMe(layer.owner)
	def ownedByMe(self,owner):
		""" return True if the layer is under the control of this state keeper or False if it's under the control of something else (ie an animation process or another network client or unowned by anyone in a network session)
		"""
		lock=qtcore.QReadLocker(self.remoteidlock)
		# in a network session only an exact id match counts ...
		if self.type==WindowTypes.networkclient or self.type==WindowTypes.standaloneserver or self.type==WindowTypes.integratedserver:
			if owner==self.remoteid:
				return True
		# ... otherwise 'unowned' (0) also counts as mine
		elif owner==0 or owner==self.remoteid:
			return True
		return False
	def deleteLayerHistory(self,oldowner,layerkey):
		""" Drop undo history referring to a layer when its ownership changes. """
		if self.ownedByMe(oldowner):
			# NOTE(review): self.localcommandstack is not assigned in the
			# visible __init__ — presumably set by a subclass; confirm.
			self.localcommandstack.cleanLocalLayerHistory()
		elif oldowner in self.remotecommandstacks:
			self.remotecommandstacks[oldowner].cleanRemoteLayerHistory(layerkey)
	def addGiveUpLayerToQueue(self,key,id=0,source=ThreadTypes.user):
		""" Queue a network command relinquishing ownership of layer *key*. """
		self.queueCommand((DrawingCommandTypes.networkcontrol,NetworkControlCommandTypes.giveuplayer,id,key),source)
	def addChangeLayerOwnerToQueue(self,key,owner,source=ThreadTypes.user):
		""" Queue a network command transferring layer *key* to *owner*. """
		self.queueCommand((DrawingCommandTypes.networkcontrol,NetworkControlCommandTypes.layerowner,owner,key),source)
def addRequestLayerToQueue(self,key,source=ThreadTypes.user):
self.queueCommand((DrawingCommandTypes.networkcontrol,NetworkControlCommandTypes.requestlayer,0,key),source)
def addRemoveLayerRequestToQueue(self,key,source=ThreadTypes.user):
self.queueCommand((DrawingCommandTypes.alllayer,AllLayerCommandTypes.deletelayer,key),source)
def addExitEventToQueue(self,source=ThreadTypes.user):
self.queueCommand((DrawingCommandTypes.quit,),source)
def removeLayer(self, layer, history=True, listlock=None):
    """Remove a layer object from the layer stack.

    Returns (layer, index) on success, or (None, None) when *layer* is
    falsy or not in the stack.  When *history* is true the removal is
    recorded as a DelLayerCommand so it can be undone.  A caller that
    already holds the list lock passes it as *listlock*.
    """
    if not layer:
        return (None, None)
    if not listlock:
        listlock = qtcore.QWriteLocker(self.layerslistlock)
    if layer not in self.layers:
        return (None, None)
    index = self.layers.index(layer)
    if history:
        self.addCommandToHistory(DelLayerCommand(layer, index))
    self.layers.pop(index)
    self.requestLayerListRefresh(listlock)
    self.reCompositeImage()
    return (layer, index)
def removeLayerByKey(self, key, history=True, lock=None):
    """Remove the layer whose key equals *key*.

    Keys are unique, so at most one layer is removed.  *history* is
    True when the removal should be recorded in the undo/redo history
    (False while replaying an undo/redo command, so it isn't re-added).
    """
    print_debug("calling removeLayerByKey for %d" % key)
    # hold both the list lock and the current-layer mutex so nothing
    # else touches the stack mid-removal
    if not lock:
        lock = qtcore.QWriteLocker(self.layerslistlock)
    curkey_guard = qtcore.QMutexLocker(self.curlayerkeymutex)
    target = self.getLayerForKey(key, lock)
    if not target:
        return None
    return self.removeLayer(target, history, lock)
def addLayerDownToQueue(self, key, source=ThreadTypes.user):
    """Queue a command moving layer *key* one step down in the stack."""
    cmd = (DrawingCommandTypes.alllayer, AllLayerCommandTypes.layerdown, key)
    self.queueCommand(cmd, source)
def layerDown(self, key, history=True):
    """Move the layer with key *key* one step down in the stacking order.

    Fixed to mirror layerUp(): the list lock is acquired *before* the
    index lookup (previously the index was fetched unlocked and could
    go stale), and an unknown key is now a no-op instead of comparing
    None against 0.
    """
    lock = qtcore.QWriteLocker(self.layerslistlock)
    index = self.getLayerIndexForKey(key)
    if index is None:
        # key not found; nothing to move (layerUp already guards this)
        return
    if index > 0:
        self.layers[index], self.layers[index - 1] = self.layers[index - 1], self.layers[index]
    lock.unlock()
    self.reCompositeImage()
    self.requestLayerListRefresh()
    # if we are only running locally add command to local history
    # otherwise do nothing
    # layer movement operations can't be undone with an undo command
    # when in a network session
    if self.type == WindowTypes.singleuser and history:
        self.addCommandToHistory(LayerDownCommand(key))
def addLayerUpToQueue(self, key, source=ThreadTypes.user):
    """Queue a command moving layer *key* one step up in the stack."""
    cmd = (DrawingCommandTypes.alllayer, AllLayerCommandTypes.layerup, key)
    self.queueCommand(cmd, source)
def layerUp(self, key, history=True):
    """Move the layer with key *key* one step up in the stacking order."""
    guard = qtcore.QWriteLocker(self.layerslistlock)
    index = self.getLayerIndexForKey(key)
    if index is None:
        return
    top = len(self.layers) - 1
    if index < top:
        self.layers[index + 1], self.layers[index] = self.layers[index], self.layers[index + 1]
    guard.unlock()
    self.requestLayerListRefresh()
    self.reCompositeImage()
    # layer moves are only undoable when running single user; in a
    # network session they can't be undone locally
    if history and self.type == WindowTypes.singleuser:
        self.addCommandToHistory(LayerUpCommand(key))
def reCompositeImage(self, dirtyrect=None):
    """Recomposite hook; this base state keeper has nothing to redraw.

    Subclasses that actually display the image override this.  It
    exists here only so callers can invoke it unconditionally.
    """
    pass

def getClipPathCopy(self):
    """Return a copy of the current clip path; the base class has none."""
    return None
def addPenEnterToQueue(self, layerkey=None, source=ThreadTypes.user):
    """Queue a pen-enter event for the given layer."""
    cmd = (DrawingCommandTypes.layer, LayerCommandTypes.penenter, layerkey)
    self.queueCommand(cmd, source)

def addPenLeaveToQueue(self, layerkey=None, source=ThreadTypes.user):
    """Queue a pen-leave event for the given layer."""
    cmd = (DrawingCommandTypes.layer, LayerCommandTypes.penleave, layerkey)
    self.queueCommand(cmd, source)

def addPenDownToQueue(self, x, y, pressure, layerkey, tool,
                      source=ThreadTypes.user, modkeys=qtcore.Qt.NoModifier):
    """Queue a pen-down event at (x, y) with the given pressure and tool.

    *modkeys* is accepted for interface compatibility but is not part
    of the queued command.
    """
    cmd = (DrawingCommandTypes.layer, LayerCommandTypes.pendown,
           layerkey, x, y, pressure, tool)
    self.queueCommand(cmd, source)
def addPenMotionToQueue(s |
exaile/exaile | xl/migrations/database/from1to2.py | Python | gpl-2.0 | 2,024 | 0.000988 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gio
def migrate(db, pdata, oldversion, newversion):
    """Upgrade an Exaile track database from format 1 to format 2.

    Old entries stored locations as "file://<path>"; each track's
    __loc tag is rewritten as a GIO URI, the serialized library
    locations are normalized the same way, then the new version number
    is stamped and *pdata* is flushed via sync().
    """
    for k in (x for x in pdata.keys() if x.startswith("tracks-")):
        p = pdata[k]
        tags = p[0]
        try:
            loc = tags['__loc']
        except KeyError:
            continue
        # only old-style "file://" locations need rewriting
        if not loc or not loc.startswith("file://"):
            continue
        # strip the "file://" prefix; presumably the remainder is a raw
        # local path that Gio normalizes into an escaped URI -- TODO confirm
        loc = loc[7:]
        gloc = Gio.File.new_for_uri(loc)
        uri = gloc.get_uri()
        tags['__loc'] = uri
        pdata[k] = (tags, p[1], p[2])
    if '_serial_libraries' in pdata:
        libs = pdata['_serial_libraries']
        for l in libs:
            l['location'] = Gio.File.new_for_uri(l['location']).get_uri()
        pdata['_serial_libraries'] = libs
    pdata['_dbversion'] = newversion
    pdata.sync()
|
ActiveState/code | recipes/Python/416087_Persistent_environment_variables/recipe-416087.py | Python | mit | 1,724 | 0.012761 | from _winreg import *
import os, sys, win32gui, win32con
def queryValue(key, name):
    """Return just the data of registry value *name* under *key*, discarding its type id."""
    data, value_type = QueryValueEx(key, name)
    return data
def show(key):
    # Print every value under *key* as "name=value" (Python 2 print).
    # EnumValue raises WindowsError (an EnvironmentError) once the index
    # runs past the last value, which terminates the loop; 1024 is just
    # a generous upper bound.
    for i in range(1024):
        try:
            n,v,t = EnumValue(key, i)
            print '%s=%s' % (n, v)
        except EnvironmentError:
            break
def main():
    # Show or modify persistent (machine-wide) environment variables.
    # No argv: dump the Environment registry key.  One "name=value"
    # argument: create/update the value (PATH gets the new value
    # appended after ';'), and "name=" (empty value) deletes it.
    try:
        path = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
        reg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        key = OpenKey(reg, path, 0, KEY_ALL_ACCESS)
        if len(sys.argv) == 1:
            show(key)
        else:
            name, value = sys.argv[1].split('=')
            if name.upper() == 'PATH':
                # append rather than replace for PATH
                value = queryValue(key, name) + ';' + value
            if value:
                # REG_EXPAND_SZ so values may reference other variables
                SetValueEx(key, name, 0, REG_EXPAND_SZ, value)
            else:
                DeleteValue(key, name)
            # broadcast so newly started programs pick up the change
            # (already-running command windows are unaffected)
            win32gui.SendMessage(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
    except Exception, e:
        print e
    # NOTE(review): if ConnectRegistry/OpenKey raised above, 'key' and
    # 'reg' are unbound here and these calls raise NameError -- confirm
    # and consider try/finally
    CloseKey(key)
    CloseKey(reg)
if __name__=='__main__':
    # usage text printed when the arguments don't look like "name=value"
    usage = \
"""
Usage:
Show all environment vsarisbles - enver
Add/Modify/Delete environment variable - enver <name>=[value]
If <name> is PATH enver will append the value prefixed with ;
If there is no value enver will delete the <name> environment variable
Note that the current command window will not be affected,
only new command windows.
"""
    argc = len(sys.argv)
    # valid invocations: no args (dump) or one "name=value" argument
    if argc > 2 or (argc == 2 and sys.argv[1].find('=') == -1):
        print usage
        sys.exit()
    main()
|
KristianOellegaard/python-cloudfoundry | cloudfoundry/apps.py | Python | mit | 1,241 | 0.003223 |
class CloudFoundryApp(object):
    """A single Cloud Foundry application as reported by the API.

    Class-level attributes double as documented defaults; instances
    shadow them with whatever the API response (or caller) supplied.
    """

    environment_variables = []   # env vars as returned by the API
    instances = 0                # requested instance count
    meta = {}                    # opaque metadata blob
    created = 0                  # creation timestamp
    debug = None                 # debug mode flag/port
    version = 0                  # app version counter
    running_instances = 0        # instances actually running
    services = []                # bound service names
    state = ""                   # e.g. "STARTED" / "STOPPED"
    uris = []                    # routes mapped to the app

    def __init__(self, name, env=None, instances=None, meta=None, created=None, debug=None, version=None,
                 runningInstances=None, services=None, state=None, uris=None, staging=None, resources=None,
                 interface=None):
        # staging/resources are accepted (the API sends them) but unused
        self._name = name
        self.environment_variables = env
        self.instances = instances
        self.meta = meta
        self.created = created
        self.debug = debug
        self.version = version
        self.running_instances = runningInstances
        self.services = services
        self.state = state
        self.uris = uris
        self.interface = interface

    @property
    def name(self):
        """Read-only application name."""
        return self._name

    @staticmethod
    def from_dict(dict, interface=None):
        """Build an app from an API response dict (keys map to __init__ kwargs).

        NOTE: the parameter shadows the builtin ``dict``; the name is
        kept for backward compatibility with keyword callers.
        """
        return CloudFoundryApp(interface=interface, **dict)

    def delete(self):
        """Delete this app through the interface it was created with.

        Raises:
            Exception: if no interface was provided at construction.
        """
        if not self.interface:
            raise Exception("Tried to delete app %s without providing an interface for doing so" % self.name)
        self.interface.delete_app(self.name)
Yellowen/Owrang | patches/may_2013/p02_update_valuation_rate.py | Python | agpl-3.0 | 1,359 | 0.031641 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
def execute():
    """Recompute valuation_rate on recent purchase documents and repost stock.

    For submitted Purchase Receipts / Purchase Invoices modified since
    2013-05-09, re-runs update_valuation_rate, writes the per-item
    rates back to the child tables (and, for receipts, the matching
    Stock Ledger Entry incoming_rate), then reposts stock entries for
    every touched item/warehouse pair.
    """
    from stock.stock_ledger import update_entries_after
    item_warehouse = []
    # doctype -> child-table fieldname holding the item rows
    doctypes = {"Purchase Receipt": "purchase_receipt_details", "Purchase Invoice": "entries"}
    for dt in doctypes:
        for d in webnotes.conn.sql("""select name from `tab%s`
            where modified >= '2013-05-09' and docstatus=1""" % dt):
            rec = webnotes.get_obj(dt, d[0])
            rec.update_valuation_rate(doctypes[dt])
            for item in rec.doclist.get({"parentfield": doctypes[dt]}):
                webnotes.conn.sql("""update `tab%s Item` set valuation_rate = %s
                    where name = %s""" % (dt, '%s', '%s'), tuple([item.valuation_rate, item.name]))
                if dt == "Purchase Receipt":
                    webnotes.conn.sql("""update `tabStock Ledger Entry` set incoming_rate = %s
                        where voucher_detail_no = %s""", (item.valuation_rate, item.name))
                if [item.item_code, item.warehouse] not in item_warehouse:
                    item_warehouse.append([item.item_code, item.warehouse])
    for d in item_warehouse:
        try:
            update_entries_after({"item_code": d[0], "warehouse": d[1],
                "posting_date": "2013-01-01", "posting_time": "00:05:00"})
            webnotes.conn.commit()
        except Exception:
            # best effort: reposting one item/warehouse pair may fail;
            # keep going with the rest (was a bare except, which also
            # swallowed KeyboardInterrupt/SystemExit)
            pass
kfox1111/apps-catalog-ui | app_catalog/panel.py | Python | apache-2.0 | 734 | 0 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class AppCatalog(horizon.Panel):
    """Horizon dashboard panel listing catalog applications."""
    name = _("Applications")  # translated label shown in the dashboard
    slug = "app_catalog"      # URL fragment / panel identifier
|
ianunruh/hvac | tests/unit_tests/api/secrets_engines/test_aws.py | Python | apache-2.0 | 3,608 | 0.000554 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from unittest import TestCase
import requests_mock
from parameterized import parameterized, param
from hvac.adapters import JSONAdapter
from hvac.api.secrets_engines import Aws
from hvac.api.secrets_engines.aws import DEFAULT_MOUNT_POINT
from hvac.exceptions import ParamValidationError
class TestAws(TestCase):
    """Unit tests for the AWS secrets-engine API class (HTTP is mocked)."""

    @parameterized.expand(
        [
            param(
                "success",
            ),
        ]
    )
    def test_rotate_root_iam_credentials(
        self, test_label, mount_point=DEFAULT_MOUNT_POINT
    ):
        """rotate-root POST returns the backend's JSON response unchanged."""
        expected_status_code = 200
        mock_response = {"data": {"access_key": "AKIA..."}}
        aws = Aws(adapter=JSONAdapter())
        mock_url = "http://localhost:8200/v1/{mount_point}/config/rotate-root".format(
            mount_point=mount_point,
        )
        logging.debug("Mocking URL: %s" % mock_url)
        with requests_mock.mock() as requests_mocker:
            requests_mocker.register_uri(
                method="POST",
                url=mock_url,
                status_code=expected_status_code,
                json=mock_response,
            )
            rotate_root_response = aws.rotate_root_iam_credentials(
                mount_point=mount_point,
            )
            logging.debug("rotate_root_response: %s" % rotate_root_response)
            self.assertEqual(
                first=mock_response,
                second=rotate_root_response,
            )

    @parameterized.expand(
        [
            param(
                "success",
            ),
            param(
                "invalid endpoint",
                endpoint="cats",
                raises=ParamValidationError,
                exception_msg="cats",
            ),
        ]
    )
    def test_generate_credentials(
        self,
        test_label,
        role_name="hvac-test-role",
        mount_point=DEFAULT_MOUNT_POINT,
        endpoint="creds",
        raises=None,
        exception_msg="",
    ):
        """creds GET returns credentials; a bad endpoint raises before any request."""
        expected_status_code = 200
        mock_response = {
            "data": {
                "access_key": "AKIA...",
                "secret_key": "xlCs...",
                "security_token": None,
            }
        }
        mock_url = "http://localhost:8200/v1/{mount_point}/creds/{role_name}".format(
            mount_point=mount_point,
            role_name=role_name,
        )
        logging.debug("Mocking URL: %s" % mock_url)
        aws = Aws(adapter=JSONAdapter())
        with requests_mock.mock() as requests_mocker:
            requests_mocker.register_uri(
                method="GET",
                url=mock_url,
                status_code=expected_status_code,
                json=mock_response,
            )
            if raises:
                with self.assertRaises(raises) as cm:
                    aws.generate_credentials(
                        name=role_name,
                        endpoint=endpoint,
                        mount_point=mount_point,
                    )
                self.assertIn(
                    member=exception_msg,
                    container=str(cm.exception),
                )
            else:
                gen_creds_response = aws.generate_credentials(
                    name=role_name,
                    endpoint=endpoint,
                    mount_point=mount_point,
                )
                logging.debug("gen_creds_response: %s" % gen_creds_response)
                self.assertEqual(
                    first=mock_response,
                    second=gen_creds_response,
                )
|
dev1972/Satellitecoin | qa/rpc-tests/util.py | Python | mit | 12,392 | 0.008554 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The STLL developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for node *n*, offset by PID so parallel runs don't collide."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset

def rpc_port(n):
    """RPC port for node *n* (always 1000 above the node's p2p port)."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure the json library in use does not lose precision on BTC values."""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """Block until every connection reports the same block count."""
    while True:
        counts = [conn.getblockcount() for conn in rpc_connections]
        first = counts[0]
        if all(count == first for count in counts):
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """Block until every node's mempool holds exactly the same set of txids."""
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        others = rpc_connections[1:]
        matching = sum(1 for conn in others
                       if set(conn.getrawmempool()) == reference)
        if matching == len(others):
            break
        time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create <dirname>/node<n> with a regtest stll.conf and return the path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1\n",
        "rpcuser=rt\n",
        "rpcpassword=rt\n",
        "port=" + str(p2p_port(n)) + "\n",
        "rpcport=" + str(rpc_port(n)) + "\n",
    ]
    with open(os.path.join(datadir, "stll.conf"), 'w') as f:
        f.writelines(conf_lines)
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    stlld and stll-cli must be in search path.
    """
    # build the cache only once; later runs just copy it into test_dir
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run stlld:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "stlld"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # nodes 1-3 connect to node 0 to form a star topology
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface is up
            subprocess.check_call([ os.getenv("BITCOINCLI", "stll-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                # credentials match those written by initialize_datadir
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        # remove per-run files so the cached datadirs are reusable
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in stll.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcpo | rt:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a stlld and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    args = [ os.getenv("BITCOIND", "stlld"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    # keep the process handle so stop_node/wait_bitcoinds can reap it
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    # -rpcwait makes the CLI block until the daemon's RPC interface is
    # up, so the proxy created below can connect immediately
    subprocess.check_call([ os.getenv("BITCOINCLI", "stll-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    # credentials match those written by initialize_datadir
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple stllds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost)
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node n_node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i*, wait for its process to exit, then forget the handle."""
    node.stop()
    process = bitcoind_processes[i]
    process.wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Ask every node to stop, then empty the list.

    Emptying the list drops the proxy references, which closes the RPC
    connections as a side effect.
    """
    for node in nodes:
        node.stop()
    nodes[:] = []
def set_node_times(nodes, t):
    """Set the same mock time *t* on every node."""
    for rpc in nodes:
        rpc.setmocktime(t)
def wait_bitcoinds():
    """Wait for every tracked stlld process to exit cleanly, then drop the handles."""
    for process in bitcoind_processes.values():
        process.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-way connect *from_connection* to local node *node_num*.

    Polls getpeerinfo until the version handshake has completed on every
    peer, to avoid race conditions with transaction relaying.
    """
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    handshake_pending = True
    while handshake_pending:
        peers = from_connection.getpeerinfo()
        handshake_pending = any(peer['version'] == 0 for peer in peers)
        if handshake_pending:
            time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* to each other in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
|
ewbankkit/cloud-custodian | tools/c7n_azure/c7n_azure/resources/vm.py | Python | apache-2.0 | 6,309 | 0.00111 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.filters.core import ValueFilter, type_schema
from c7n.filters.related import RelatedResourceFilter
@resources.register('vm')
class VirtualMachine(ArmResourceManager):
"""Virtual Machine Resource
:example:
Stop all running VMs
.. code-block:: yaml
policies:
- name: stop-running-vms
resource: azure.vm
filters:
- type: instance-view
key: statuses[].code
op: in
value_type: swap
value: PowerState/running
actions:
- type: stop
:example:
Start all VMs
.. code-block:: yaml
policies:
- name: start-vms
resource: azure.vm
actions:
- type: start
:example:
Restart all VMs
.. code-block:: yaml
policies:
- name: start-vms
resource: azure.vm
actions:
- type: restart
:example:
Delete specific VM by name
.. code-block:: yaml
policies:
- name: stop-running-vms
resource: azure.vm
filters:
- type: value
key: name
op: eq
value_type: normalize
value: fake_vm_name
actions:
- type: delete
:example:
Find all VMs with a Public IP address
.. code-block:: yaml
policies:
- name: vms-with-public-ip
resource: azure.vm
filters:
- type: network-interface
key: 'properties.ipConfigurations[].properties.publicIPAddress.id'
value: not-null
:example:
This policy will find all VMs that have Percentage CPU usage >= 75% over the last 72 hours
.. code-block:: yaml
policies:
- name: busy-vms
resource: azure.vm
filters:
- type: metric
metric: Percentage CPU
op: ge
aggregation: average
threshold: 75
timeframe: 72
:example:
This policy will find all VMs that have Percentage CPU usage <= 1% over the last 72 hours,
mark for deletion in 7 days
.. code-block:: yaml
policies:
- name: delete-unused-vms
resource: azure.vm
filters:
- type: metric
metric: Percentage CPU
op: le
aggregation: average
threshold: 1
timeframe: 72
actions:
- type: mark-for-op
op: delete
days: 7
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Compute']
service = 'azure.mgmt.compute'
client = 'ComputeManagementClient'
enum_spec = ('virtual_machines', 'list_all', None)
diagnostic_settings_enabled = False
default_report_fields = (
'name',
'location',
'resourceGroup',
'properties.hardwareProfile.vmSize',
)
resource_type = 'Microsoft.Compute/virtualMachines'
@VirtualMachine.filter_registry.register('instance-view')
class InstanceViewFilter(ValueFilter):
schema = type_schema('instance-view', rinherit=ValueFilter.schema)
schema_alias = True
def __call__(self, i):
if 'instanceView' not in i:
client = self.manager.get_client()
instance = (
client.virtual_machines
.get(i['resourceGroup'], i['name'], expand='instanceview')
.instance_view
)
i['instanceView'] = instance.serialize()
| return super(InstanceViewFilter, self).__call__(i['instanceView'])
@VirtualMachine.filter_registry.register('network-interface')
class NetworkInterfaceFilter(RelatedResourceFilter):
schema = type_schema('network-interface', rinherit=ValueFilter.sch | ema)
RelatedResource = "c7n_azure.resources.network_interface.NetworkInterface"
RelatedIdsExpression = "properties.networkProfile.networkInterfaces[0].id"
@VirtualMachine.action_registry.register('poweroff')
class VmPowerOffAction(AzureBaseAction):
schema = type_schema('poweroff')
def _prepare_processing(self,):
self.client = self.manager.get_client()
def _process_resource(self, resource):
self.client.virtual_machines.power_off(resource['resourceGroup'], resource['name'])
@VirtualMachine.action_registry.register('stop')
class VmStopAction(AzureBaseAction):
schema = type_schema('stop')
def _prepare_processing(self,):
self.client = self.manager.get_client()
def _process_resource(self, resource):
self.client.virtual_machines.deallocate(resource['resourceGroup'], resource['name'])
@VirtualMachine.action_registry.register('start')
class VmStartAction(AzureBaseAction):
schema = type_schema('start')
def _prepare_processing(self,):
self.client = self.manager.get_client()
def _process_resource(self, resource):
self.client.virtual_machines.start(resource['resourceGroup'], resource['name'])
@VirtualMachine.action_registry.register('restart')
class VmRestartAction(AzureBaseAction):
schema = type_schema('restart')
def _prepare_processing(self,):
self.client = self.manager.get_client()
def _process_resource(self, resource):
self.client.virtual_machines.restart(resource['resourceGroup'], resource['name'])
|
ptpt/taoblog | taoblog/views/api.py | Python | mit | 7,738 | 0.000258 | from flask import (Blueprint, current_app as app,
g, request, make_response,
jsonify, session)
from ..models import ModelError, Session
from ..models.post import Post, Draft, PostOperator
from .helpers import (require_int, JumpDirectly)
api_bp = Blueprint('api', __name__)
post_op = PostOperator(Session())
def get_plain_dict(post, meta):
post_dict = post.as_dict(meta)
if 'created_at' in post_dict:
post_dict['created_at'] = post_dict['created_at'] and \
unicode(post_dict['created_at'])
if 'updated_at' in post_dict:
post_dict['updated_at'] = post_dict['updated_at'] and \
unicode(post_dict['updated_at'])
return post_dict
def jsonify_error(message, status):
return make_response(
jsonify({'stat': 'fail',
'message': message}),
status)
def jsonify_posts(posts, meta=False, **kwargs):
response = kwargs
response.update({'posts': [get_plain_dict(post, meta) for post in posts],
'total_posts': len(posts)})
return jsonify({'stat': 'ok',
'response': response})
def get_status_code(status_string):
""" Parse status string like public+private into a list of status code. """
statuses = status_string.lower().split('+')
status_code = []
for status in set(statuses):
if status == 'public':
status_code.append(Post.STATUS_PUBLIC)
elif status == 'private':
status_code.append(Post.STATUS_PRIVATE)
if not g.is_admin:
raise JumpDirectly(jsonify_error('admin required', 403))
elif status == 'trash':
status_code.append(Post.STATUS_TRASH)
if not g.is_admin:
raise JumpDirectly(jsonify_error('admin required', 403))
else:
raise JumpDirectly(jsonify_error('invalid status', 400))
if len(status_code) == 0:
raise JumpDirectly(jsonify_error('status not found', 400))
return status_code
@api_bp.route('/drafts/', methods=['DELETE'])
def delete_drafts():
if not g.is_admin:
return jsonify_error('admin required', 403)
ids = [require_int(id, JumpDirectly(jsonify_error('invalid draft id', 400)))
for id in request.args.get('bulk', '').split(',') if id.strip()]
if len(ids) > 0:
deleted_rows = post_op.session.query(Draft).\
filter(Draft.id.in_(ids)).\
delete(synchronize_session='fetch')
post_op.session.commit()
else:
deleted_rows = 0
return jsonify({'stat': 'ok', 'response': {'total_drafts': deleted_rows}})
@api_bp.route('/posts/', methods=['DELETE'])
def delete_posts():
"""
Delete posts from server.
* admin required
* optional arguments: bulk, status
bulk is a list of post IDs separated by comma.
"""
if not g.is_admin:
return jsonify_error('admin required', 403)
posts = []
# get posts from post IDs
ids = [require_int(id, JumpDirectly(jsonify_error('invalid post id', 400)))
for id in request.args.get('bulk', '').split(',') if id.strip()]
if len(ids) > 0:
posts = post_op.session.query(Post).filter(Post.id.in_(ids)).all()
# get all posts in specified status
status = request.args.get('status')
if status:
status_code = get_status_code(status)
posts += post_op.session.query(Post).\
filter(Post.status.in_(status_code)).all()
# delete all of them
if len(posts) > 0:
post_op.delete_posts(posts)
return jsonify({'stat': 'ok', 'response': {'total_posts': len(posts)}})
@api_bp.route('/posts/', methods=['POST'])
def create_post():
"""
Create a post. Return the post.
* admin required
* required post data: title, slug
* optional post data: text, tags, private
"""
if not g.is_admin:
return jsonify_error('admin required', 403)
title = request.form.get('title')
if not title:
return jsonify_error('title required', 400)
slug = request.form.get('slug')
if not slug:
return jsonify_error('slug required', 400)
post = post_op.get_post_by_permalink(slug)
if post is not None:
return jsonify_error('slug is not unique', 400)
private = bool(request.form.get('private', False))
text = request.form.get('text')
tags = request.form.get('tags')
author_id = session['uid']
if tags:
tags = tags.split()
try:
post = Post(title=title, text=text,
slug=slug, author_id=author_id)
if private:
post.status = Post.STATUS_PRIVATE
post_op.create_post(post)
if tags:
post.set_tags(tags)
except ModelError as err:
return jsonify_error(err.message, 400)
return jsonify_posts([post])
@api_bp.route( | '/posts/')
def get_posts():
"""
Get posts.
* admin required
* arguments: offset, limit, status, meta, sort, asc, tags, id
"""
offset = require_int(request.args.get('offset', 0),
JumpDirectly(jsonify_error('invalid offset', 400)))
limit = require_int(
request.args.get('limit', app.config['POST_API_PERPAGE']),
JumpDirectly(jsonify_error('invalid limit', 400)))
stat | us = request.args.get('status', 'public+private').lower()
# admin may be required in this function
status_code = get_status_code(status)
meta = 'meta' in request.args
sort = request.args.get('sort', 'created_at')
# if asc argument is found, do asc sort
asc = 'asc' in request.args
tags = request.args.get('tags')
id = request.args.get('id')
if id is None:
# get multi posts
try:
posts, more = post_op.query_posts(
status=status_code, offset=offset, limit=limit,
tags=tags and tags.split('+'),
date=None, sort=sort, asc=asc)
except ModelError as err:
return jsonify_error(err.message, 400)
else:
# get single post when post id is specified
more = False
id = require_int(id, JumpDirectly(jsonify_error('invalid id', 400)))
post = post_op.get_post(id)
if post is None:
return jsonify_error('post not found', 404)
else:
if not g.is_admin and (post.status in (Post.STATUS_PRIVATE,
Post.STATUS_TRASH)):
return jsonify_error('admin required', 403)
posts = [post]
return jsonify_posts(posts, meta, more=more)
def set_status(status):
"""
Set post status (publish, hide, or trash post)
* admin required
* required arguments: id, or id list
"""
if not g.is_admin:
return jsonify_error('admin required', 403)
if status not in [Post.STATUS_TRASH, Post.STATUS_PUBLIC, Post.STATUS_PRIVATE]:
return jsonify_error('invalid status', 400)
id_param = request.form.get('id')
if id_param is None:
return jsonify_error('invalid id parameter', 400)
id_list = [require_int(id, JumpDirectly(jsonify_error('invalid id', 400)))
for id in id_param.split(',')]
posts = post_op.session.query(Post).filter(Post.id.in_(id_list)).all()
for post in posts:
post.status = status
post_op.session.commit()
return jsonify_posts(posts, meta=True)
@api_bp.route('/posts/trash', methods=['POST'])
def trash_posts():
return set_status(Post.STATUS_TRASH)
@api_bp.route('/posts/publish', methods=['POST'])
def publish_posts():
return set_status(Post.STATUS_PUBLIC)
@api_bp.route('/posts/hide', methods=['POST'])
def hide_posts():
return set_status(Post.STATUS_PRIVATE)
|
FaradayRF/Faraday-Software | faraday/deviceconfiguration.py | Python | gpl-3.0 | 26,804 | 0.003582 | #-------------------------------------------------------------------------------
# Name: /faraday/deviceconfiguration.py
# Purpose: Configure the Faraday radio by manipulating relevant INI files
# and providing a Flask server to kick off programming with via
# proxy.
#
# Author: Brent Salmi / Bryce Salmi
#
# Created: 7/2/2017
# Licence: GPLv3
#-------------------------------------------------------------------------------
import time
import os
import sys
import json
import ConfigParser
import base64
import argparse
import requests
from flask import Flask
from flask import request
from faraday.proxyio import faradaybasicproxyio
from faraday.proxyio import faradaycommands
from faraday.proxyio import deviceconfig
from classes import helper
# Global Filenames
# The *.sample.ini files ship with the package and act as templates ("truth")
# from which the live INI files are initialized.
configTruthFile = "deviceconfiguration.sample.ini"
configFile = "deviceconfiguration.ini"
faradayTruthFile = "faraday_config.sample.ini"
faradayFile = "faraday_config.ini"
# Start logging after importing modules
faradayHelper = helper.Helper("DeviceConfiguration")
logger = faradayHelper.getLogger()
# Create configuration paths
# faradayHelper.path is the per-user Faraday configuration directory.
deviceConfigPath = os.path.join(faradayHelper.path, configFile)
faradayConfigPath = os.path.join(faradayHelper.path, faradayFile)
# Module-level parser for deviceconfiguration.ini (Python 2 ConfigParser API).
deviceConfigurationConfig = ConfigParser.RawConfigParser()
deviceConfigurationConfig.read(deviceConfigPath)
# Command line input
parser = argparse.ArgumentParser(description='Device Configuration application provides a Flask server to program Faraday radios via an API')
# Application lifecycle / display options
parser.add_argument('--init-config', dest='init', action='store_true', help='Initialize Device Configuration configuration file')
parser.add_argument('--init-faraday-config', dest='initfaraday', action='store_true', help='Initialize Faraday configuration file')
parser.add_argument('--start', action='store_true', help='Start Device Configuration server')
parser.add_argument('--faradayconfig', action='store_true', help='Display Faraday configuration file contents')
# Faraday Configuration
# Identity and LED behaviour
parser.add_argument('--callsign', help='Set Faraday radio callsign')
parser.add_argument('--nodeid', type=int, help='Set Faraday radio nodeid', default=1)
parser.add_argument('--redledtxon', action='store_true', help='Set Faraday radio RED LED during RF transmissions ON')
parser.add_argument('--redledtxoff', action='store_true', help='Set Faraday radio RED LED during RF transmissions OFF')
parser.add_argument('--greenledrxon', action='store_true', help='Set Faraday radio GREEN LED during RF reception ON')
parser.add_argument('--greenledrxoff', action='store_true', help='Set Faraday radio GREEN LED during RF reception OFF')
parser.add_argument('--unitconfigured', action='store_true', help='Set Faraday radio configured bit ON')
parser.add_argument('--unitunconfigured', action='store_true', help='Set Faraday radio configured bit OFF')
# GPIO port bit manipulation (ports 3, 4, 5)
parser.add_argument('--gpiop3on', type=int, help='Set Faraday radio GPIO port 3 bits on, specify bit to turn ON')
parser.add_argument('--gpiop3off', type=int, help='Set Faraday radio GPIO port 3 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop3clear', action='store_true', help='Reset Faraday radio GPIO port 3 bits to OFF')
parser.add_argument('--gpiop4on', type=int, help='Set Faraday radio GPIO port 4 bits on, specify bit to turn ON')
parser.add_argument('--gpiop4off', type=int, help='Set Faraday radio GPIO port 4 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop4clear', action='store_true', help='Reset Faraday radio GPIO port 4 bits to OFF')
parser.add_argument('--gpiop5on', type=int, help='Set Faraday radio GPIO port 5 bits on, specify bit to turn ON')
parser.add_argument('--gpiop5off', type=int, help='Set Faraday radio GPIO port 5 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop5clear', action='store_true', help='Reset Faraday radio GPIO port 5 bits to OFF')
parser.add_argument('--gpiop5', type=int, help='Set Faraday radio fgpio_p5')
# RF defaults and GPS location defaults
parser.add_argument('--bootfrequency', type=float, help='Set Faraday radio boot frequency', default=914.5)
parser.add_argument('--bootrfpower', type=int, help='Set Faraday radio boot RF power', default=20)
parser.add_argument('--latitude', type=float, help='Set Faraday radio default latitude. Format \"ddmm.mmmm\"')
parser.add_argument('--longitude', type=float, help='Set Faraday radio default longitude. Format \"dddmm.mmmm\"')
parser.add_argument('--latitudedir', help='Set Faraday radio default latitude direction (N/S)')
parser.add_argument('--longitudedir', help='Set Faraday radio default longitude direction (E/W)')
parser.add_argument('--altitude', type=float, help='Set Faraday radio default altitude in meters. Maximum of 17999.99 Meters')
# Purposely do not allow editing of GPS altitude units
# GPS power/use and telemetry toggles
parser.add_argument('--gpsbooton', action='store_true', help='Set Faraday radio GPS boot power ON')
parser.add_argument('--gpsbootoff', action='store_true', help='Set Faraday radio GPS boot power OFF')
parser.add_argument('--gpsenabled', action='store_true', help='Set Faraday radio GPS use ON')
parser.add_argument('--gpsdisabled', action='store_true', help='Set Faraday radio GPS use OFF')
parser.add_argument('--uarttelemetryenabled', action='store_true', help='Set Faraday radio UART Telemetry ON')
parser.add_argument('--uarttelemetrydisabled', action='store_true', help='Set Faraday radio UART Telemetry OFF')
parser.add_argument('--rftelemetryenabled', action='store_true', help='Set Faraday radio RF Telemetry ON')
parser.add_argument('--rftelemetrydisabled', action='store_true', help='Set Faraday radio RF Telemetry OFF')
parser.add_argument('--uartinterval', type=int, help='Set Faraday radio UART telemetry interval in seconds', default=5)
parser.add_argument('--rfinterval', type=int, help='Set Faraday radio RF telemetry interval in seconds', default=3)
# Parse the arguments
args = parser.parse_args()
def proxyConfig(host, port):
    """Fetch the Proxy server's configuration via its /config endpoint.

    :param host: Proxy hostname or IP address
    :param port: Proxy TCP port
    :return: dict parsed from the JSON response body
    """
    response = requests.get("http://{0}:{1}/config".format(host, port))
    return response.json()
def initializeDeviceConfigurationConfig():
    '''
    Initialize device configuration configuration file from deviceconfiguration.sample.ini

    Copies the bundled sample INI into the user's Faraday configuration
    directory and then terminates the process.

    :return: None, exits program with status 0
    '''
    faradayHelper.initializeConfig(configTruthFile, configFile)
    sys.exit(0)
def initializeFaradayConfig():
    '''
    Initialize Faraday radio configuration file from faraday_config.sample.ini

    Copies the bundled sample INI into the user's Faraday configuration
    directory and then terminates the process.

    :return: None, exits program with status 0
    '''
    faradayHelper.initializeConfig(faradayTruthFile, faradayFile)
    sys.exit(0)
def programFaraday(deviceConfigurationConfigPath):
    '''
    Programs Faraday by generating a HTTP POST query that Proxy uses to send data to the CC430 FLASH memory.

    :param deviceConfigurationConfigPath: Path to deviceconfiguration.ini file
    :return: None
    '''
    config = ConfigParser.RawConfigParser()
    # Bug fix: read the path the caller supplied. Previously the parameter was
    # silently ignored and the module-level deviceConfigPath global was read
    # instead (the two are the same for current callers).
    config.read(deviceConfigurationConfigPath)

    # Variables
    local_device_callsign = config.get("DEVICES", "CALLSIGN")
    local_device_node_id = config.get("DEVICES", "NODEID")
    local_device_callsign = str(local_device_callsign).upper()

    hostname = config.get("PROXY", "HOST")
    port = config.get("PROXY", "PORT")
    cmdPort = config.get("PROXY", "CMDPORT")

    # Send POST data to Proxy to configure unit
    r = None
    try:
        r = requests.post('http://{0}:{1}'.format(hostname, port),
                          params={'callsign': str(local_device_callsign), 'nodeid': int(local_device_node_id), 'port': cmdPort})
        logger.info(r.url)
        logger.info("Sent Programming Request")

    except requests.exceptions.RequestException as e:
        # Some error occurred
        logger.error(e)
        # Bug fix: r was unbound (NameError) here whenever requests.post itself
        # raised; only log the response body if a response object exists.
        if r is not None:
            logger.error(r.text)
def displayConfig(faradayConfigPath):
'''
Prints out the Faraday Configuration file
:param faradayConfigPath: path to faraday configuration file
:return: None
'''
with open(faradayConfigPath, 'r') as configFile:
print configFile.read()
sys.exit(0)
def eightBitListToInt(list):
'''
Turn an eight bit list of integers into an integer
:param list: list to convert to an integer
:return: integer
'''
if len(list) == 8 |
marrow/wsgi.objects | tests/test_adapters/test_content_adapters.py | Python | mit | 2,463 | 0.007714 | # encoding: utf-8
from __future__ import unicode_literals, division, print_function, absolute_import
try: # This to handle Python 2.6 which is missing a lot.
from unittest2 import TestCase
except ImportError:
from unittest import TestCase
fr | om marrow.wsgi.objects.adapters.content import ContentType, ContentEncoding
from helpers import MockObject
class TestContentType(TestCase):
    """Exercise the ContentType adapter against a MockObject environ."""

    class Mock(MockObject):
        mime = ContentType('CONTENT_TYPE', None)

    def setUp(self):
        self.inst = self.Mock()

    def test_empty(self):
        # No CONTENT_TYPE in the environ and no default -> None.
        self.assertEqual(None, self.inst.mime)

    def test_default(self):
        class Mock(MockObject):
            mime = ContentType('CONTENT_TYPE', b"text/html")

        inst = Mock()
        self.assertEqual(b"text/html", inst.mime)

    def test_assignment(self):
        # Fixed: removed leftover debugging output
        # (__import__('pprint').pprint(self.inst)) and the deprecated
        # assertEquals alias.
        self.assertEqual(None, self.inst.mime)

        self.inst.mime = "text/html"
        self.assertEqual(b"text/html", self.inst.mime)

    def test_assignment_with_charset(self):
        # Assigning a new mime type must preserve an existing charset param.
        self.inst['CONTENT_TYPE'] = b'text/html; charset=utf8'
        self.assertEqual(b"text/html", self.inst.mime)

        self.inst.mime = "text/plain"
        self.assertEqual(b'text/plain; charset=utf8', self.inst['CONTENT_TYPE'])
class TestContentEncoding(TestCase):
    """Exercise the ContentEncoding adapter against a MockObject environ."""

    class Mock(MockObject):
        encoding = ContentEncoding('CONTENT_TYPE')

    def setUp(self):
        self.inst = self.Mock()

    def test_default(self):
        # Fixed: use assertEqual throughout; assertEquals is a deprecated
        # alias in unittest/unittest2.
        self.assertEqual("utf8", self.inst.encoding)

    def test_default_with_mime(self):
        # Once a bare CONTENT_TYPE exists, no charset means no encoding.
        self.inst['CONTENT_TYPE'] = b"text/html"
        self.assertEqual(None, self.inst.encoding)

    def test_assignment(self):
        self.inst.encoding = "latin1"
        self.assertEqual(b'; charset=latin1', self.inst['CONTENT_TYPE'])

        self.inst['CONTENT_TYPE'] = b"text/html"
        self.inst.encoding = "latin1"
        self.assertEqual(b'text/html; charset=latin1', self.inst['CONTENT_TYPE'])

    def test_removal(self):
        self.inst.encoding = "latin1"
        self.assertEqual(b'; charset=latin1', self.inst['CONTENT_TYPE'])

        self.inst.encoding = None
        self.assertEqual(b'', self.inst['CONTENT_TYPE'])

        self.inst.encoding = "latin1"
        del self.inst.encoding
        self.assertEqual(b'', self.inst['CONTENT_TYPE'])
|
scheib/chromium | third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py | Python | bsd-3-clause | 10,076 | 0.000595 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""TestResultSink uploads test results and artifacts to ResultDB via ResultSink.
ResultSink is a micro service that simplifies integration between ResultDB
and domain-specific test frameworks. It runs a given test framework and uploads
all the generated test results and artifacts to ResultDB in a progressive way.
- APIs: https://godoc.org/go.chromium.org/luci/resultdb/proto/sink/v1
TestResultSink implements methods for uploading test results and artifacts
via ResultSink, and is activated only if LUCI_CONTEXT is present with ResultSink
section.
"""
import json
import logging
import requests
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.web_tests.models.typ_types import ResultType
# Module-level logger; currently unused but kept for callers/debugging.
_log = logging.getLogger(__name__)

# A map from the enum values of typ.ResultType to ResultSink.Status.
# The enum values of ResultSink.Status can be found at
# https://godoc.org/go.chromium.org/luci/resultdb/proto/sink/v1#pkg-variables.
_result_type_to_sink_status = {
    ResultType.Pass:
    'PASS',
    ResultType.Failure:
    'FAIL',
    # timeout is just a special case of a reason to abort a test result.
    ResultType.Timeout:
    'ABORT',
    # 'Aborted' is a web_tests-specific type given on TestResults with a device
    # failure.
    'Aborted':
    'ABORT',
    ResultType.Crash:
    'CRASH',
    ResultType.Skip:
    'SKIP',
}
class TestResultSinkClosed(Exception):
    """Raised when sink() is called on a TestResultSink that was already closed."""
def CreateTestResultSink(port):
    """Instantiate a TestResultSink if LUCI_CONTEXT declares a ResultSink.

    Args:
        port: A blinkpy.web_tests.port.Port object
    Returns:
        A TestResultSink when the LUCI_CONTEXT environment variable points at
        a context file containing a result_sink section; None otherwise.
    """
    ctx_path = port.host.environ.get('LUCI_CONTEXT')
    if ctx_path is None:
        return None

    with port.host.filesystem.open_text_file_for_reading(ctx_path) as ctx_file:
        luci_context = json.load(ctx_file)
    sink_section = luci_context.get('result_sink')
    if sink_section is None:
        return None
    return TestResultSink(port, sink_section)
class TestResultSink(object):
"""A class for uploading test results and artifacts via ResultSink."""
def __init__(self, port, sink_ctx):
self._port = port
self.is_closed = False
self._sink_ctx = sink_ctx
self._url = (
'http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' %
self._sink_ctx['address'])
self._session = requests.Session()
sink_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'ResultSink %s' % self._sink_ctx['auth_token'],
}
self._session.headers.update(sink_headers)
def _send(self, data):
self._session.post(self._url, data=json.dumps(data)).raise_for_status()
def _status(self, result):
"""Returns the TestStatus enum value corresponding to the result type.
Args:
result: The TestResult object to find the status of.
Returns:
The corresponding enum value.
"""
status = _result_type_to_sink_status.get(
'Aborted' if result.device_failed else result.type)
assert status is not None, 'unsupported result.type %r' % result.type
return status
def _tags(self, result, expectations):
"""Returns a list of tags that should be added into a given test result.
Args:
result: The TestResult object to generate Tags for.
expectations: A test_expectations.TestExpectations object to pull
expectation data from.
Returns:
A list of {'key': 'tag-name', 'value': 'tag-value'} dicts.
"""
# the message structure of the dict can be found at
# https://chromium.googlesource.com/infra/luci/luci-go/+/master/resultdb/proto/type/common.proto#56
pair = lambda k, v: {'key': k, 'value': v}
tags = [
pair('test_name', result.test_name),
pair('web_tests_device_failed', str(result.device_failed)),
pair('web_tests_result_type', result.type),
pair('web_tests_flag_specific_config_name',
self._port.flag_specific_config_name() or ''),
pair('web_tests_base_timeout',
str(self._port.timeout_ms() / 1000)),
]
for used_file in self._port.used_expectations_files():
tags.append(
pair('web_tests_used_expectations_file',
self._port.relative_test_filename(used_file)))
if expectations:
expectation_tags = expectations.system_condition_tags
test_expectation = expectations.get_expectations(result.test_name)
raw_expected_results = test_expectation.raw_results
for expectation in raw_expected_results:
tags.append(pair('raw_typ_expectation', expectation))
for tag in expectation_tags:
tags.append(pair('typ_tag', tag))
return tags
def _artifacts(self, result):
"""Returns a dict of artifacts with the absolute file paths.
Args:
result: The TestResult object to look for the artifacts of.
summaries: A list of strings to be included in the summary html.
Returns:
A list of artifact HTML tags to be added into the summary html
A dict of artifacts, where the key is the artifact ID and
the value is a dict with the absolute file path.
"""
ret = {}
summaries = []
base_dir = self._port.results_directory()
for | name, paths in result.artifacts.artifacts.items():
for p in paths:
art_id = name
i = 1
while art_id in ret:
art_id = '%s-%d' % (name, i)
i += 1
ret[art_id] = {
'filePath': s | elf._port.host.filesystem.join(base_dir, p),
}
# Web tests generate the same artifact names for text-diff(s)
# and image diff(s).
# - {actual,expected}_text, {text,pretty_text}_diff
# - {actual,expected}_image, {image,pretty_image}_diff
# - reference_file_{mismatch,match}
#
# Milo recognizes the names and auto generates a summary html
# to render them with <text-diff-artifact> or
# <img-diff-artifact>.
#
# command, stderr and crash_log are artifact names that are
# not included in the auto-generated summary. This uses
# <text-artifact> to render them in the summary_html section
# of each test.
if name in ['command', 'stderr', 'crash_log']:
summaries.append(
'<h3>%s</h3>'
'<p><text-artifact artifact-id="%s" /></p>' %
(art_id, art_id))
# Sort summaries to display "command" at the top of the summary.
return sorted(summaries), ret
def sink(self, expected, result, expectations):
"""Reports the test result to ResultSink.
Args:
expected: True if the test was expected to fail and actually failed.
False, otherwise.
result: The TestResult object to report.
expectations: A test_expectations.TestExpectations object to pull
expectation data from.
Exceptions:
requests.exceptions.ConnectionError, if there was a network
connection error.
requests.exceptions.HTTPError, if ResultSink responded an error
for the request.
ResultSinkClosed, if sink.close() was called prior to sink().
"""
if self.is_closed:
raise TestResultSinkClose |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.