| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| rackerlabs/horizon | openstack_dashboard/dashboards/project/networks/views.py | Python | apache-2.0 | 5,149 | 0.000971 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.forms import UpdateNetwork
from openstack_dashboard.dashboards.project.networks.ports.tables \
import PortsTable
from openstack_dashboard.dashboards.project.networks.subnets.tables \
import SubnetsTable
from openstack_dashboard.dashboards.project.networks.tables \
import NetworksTable
from openstack_dashboard.dashboards.project.networks.workflows \
import CreateNetwork
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = NetworksTable
template_name = 'project/networks/index.html'
def get_data(self):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(self.request,
tenant_id)
except:
networks = []
msg = _('Network list can not be retrieved.')
exceptions.handle(self.request, msg)
for n in networks:
n.set_id_as_name_if_empty()
return networks
class CreateView(workflows.WorkflowView):
workflow_class = CreateNetwork
def get_initial(self):
pass
class UpdateView(forms.ModalFormView):
form_class = UpdateNetwork
template_name = 'project/networks/update.html'
context_object_name = 'network'
success_url = reverse_lazy("horizon:project:networks:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
except:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (SubnetsTable, PortsTable)
template_name = 'project/networks/detail.html'
failure_url = reverse_lazy('horizon:project:networks:index')
def get_subnets_data(self):
try:
network = self._get_data()
            subnets = api.neutron.subnet_list(self.request,
                                              network_id=network.id)
except:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
for s in subnets:
s.set_id_as_name_if_empty()
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except:
            ports = []
            msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
def _get_data(self):
if not hasattr(self, "_network"):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
self._network = network
return self._network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["network"] = self._get_data()
return context
|
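The Horizon record above only shows the view classes; for orientation, here is a hedged sketch of how such class-based views are usually wired up in the dashboard's urls.py. The import path, URL names, and patterns are assumptions based on the file's location, not part of the dataset row.

```python
# Illustrative sketch only (Django 1.x era url() routing); names and patterns are assumed.
from django.conf.urls import url

from openstack_dashboard.dashboards.project.networks import views

urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<network_id>[^/]+)/update$', views.UpdateView.as_view(), name='update'),
    url(r'^(?P<network_id>[^/]+)/detail$', views.DetailView.as_view(), name='detail'),
]
```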
| ekaputra07/wpcdesk | wpcdesk/comment_editor.py | Python | gpl-3.0 | 4,371 | 0.004576 |
# -*- coding: utf-8 -*-
# wpcdesk - WordPress Comment Desktop
# Copyright (C) 2012 Eka Putra - ekaputra@balitechy.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui, QtCore
from gui.comment_window import Ui_CommentWindow
from wpcdesk_threads import EditCommentThread, DeleteCommentThread
class CommentEditor(QtGui.QDialog):
def __init__(self, parent=None, data=None):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_CommentWindow()
self.ui.setupUi(self)
self.ui.progressBar.hide()
self.set_validator()
self.parent = parent
self.data = data
self.fill_form(self.data)
QtCore.QObject.connect(self.ui.btn_save, QtCore.SIGNAL("clicked()"), self.saveComment)
QtCore.QObject.connect(self.ui.btn_delete, QtCore.SIGNAL("clicked()"), self.deleteComment)
self.edit_comment_thread = EditCommentThread()
self.edit_comment_thread.is_loading.connect(self.loading)
self.edit_comment_thread.is_success.connect(self.edit_status)
self.delete_comment_thread = DeleteCommentThread(self.data)
self.delete_comment_thread.is_loading.connect(self.loading)
self.delete_comment_thread.is_success.connect(self.delete_status)
def set_validator(self):
# Email Validator
email_pattern = QtCore.QRegExp( r"^([a-zA-Z0-9_\.\-\+])+\@(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,4})+$" )
email_validator = QtGui.QRegExpValidator(email_pattern , self )
self.ui.edit_email.setValidator(email_validator)
def fill_form(self, data):
self.comment_id = data['comment_id']
self.ui.lbl_post.setText(data['comment_post'])
        self.ui.lbl_date.setText(data['comment_date'])
self.ui.edit_name.setText(data['comment_author'])
self.ui.edit_email.setText(data['comment_email'])
self.ui.edit_comment.setText(data['comment_content'])
if data['comment_status'] == 'Approved':
            self.ui.cb_status.setChecked(True)
else:
self.ui.cb_status.setChecked(False)
def saveComment(self):
data = {}
if self.ui.cb_status.isChecked():
data['status'] = 'approve'
else:
data['status'] = 'hold'
data['content'] = str(self.ui.edit_comment.toPlainText())
data['author'] = str(self.ui.edit_name.text())
data['author_email'] = str(self.ui.edit_email.text())
self.edit_comment_thread.set_comment_id(int(self.data['comment_id']))
self.edit_comment_thread.set_data(data)
self.edit_comment_thread.start()
def deleteComment(self):
        answer = QtGui.QMessageBox.question(self, 'Confirmation','Are you sure you want to delete this comment?', QtGui.QMessageBox.Yes|QtGui.QMessageBox.Cancel)
if answer == QtGui.QMessageBox.Yes:
self.delete_comment_thread.start()
else:
return
def loading(self, is_loading):
if is_loading:
self.ui.progressBar.show()
else:
self.ui.progressBar.hide()
def edit_status(self, status):
if status:
self.parent.loadComments()
            QtGui.QMessageBox.information(self, 'Comment updated!','Comment successfully updated.', QtGui.QMessageBox.Ok)
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to update comment.', QtGui.QMessageBox.Ok)
def delete_status(self, status):
if status:
self.parent.loadComments()
            QtGui.QMessageBox.information(self, 'Comment Deleted','Comment successfully deleted.', QtGui.QMessageBox.Ok)
self.close()
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to delete comment.', QtGui.QMessageBox.Ok)
|
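For context, a hedged sketch of how the editor above might be opened; the data keys mirror what fill_form() reads, but the parent window and the values are hypothetical.

```python
# Hypothetical usage sketch; 'main_window' stands in for a parent widget exposing loadComments().
data = {
    'comment_id': 1,
    'comment_post': 'Hello World',
    'comment_date': '2012-01-01 10:00:00',
    'comment_author': 'Eka Putra',
    'comment_email': 'eka@example.com',
    'comment_content': 'Nice post!',
    'comment_status': 'Approved',
}
editor = CommentEditor(parent=main_window, data=data)
editor.exec_()
```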
| fukatani/CW_gui | examples/mnist/get_mnist_prediction.py | Python | bsd-3-clause | 87 | 0 |
import chainer
def main():
    return chainer.datasets.get_mnist(withlabel=False)[0]
|
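A hedged usage note for the snippet above: chainer.datasets.get_mnist(withlabel=False) returns a (train, test) pair of arrays, so main() yields the flattened training images.

```python
# Illustrative check only; assumes Chainer is installed and can download MNIST.
images = main()
print(images.shape)   # expected: (60000, 784)
print(images.dtype)   # float32 (values scaled to [0, 1] by default)
```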
| coen-hyde/dotfiles | libs/eb/scli/constants.py | Python | mit | 21,046 | 0.013779 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from lib.utility.basetype import ValuedEnum
from lib.utility.basetype import OrderedEnum
EbCliVersion = 'v2.6.0'
class Key(object):
Default = 'default'
Options = 'options'
#----------------------------------------------
# Parameters
#----------------------------------------------
# Standard name of parameters used in Elastic Beanstalk Command Line Interface
ParameterName = ValuedEnum({
u'Command' : 0,
u'SubCommand' : 1,
u'AwsAccessKeyId' : 11,
u'AwsSecretAccessKey' : 12,
u'AwsCredentialFile' : 13,
u'Region' : 21,
u'OriginalRegion' : 22,
u'ServiceEndpoint' : 31,
u'DevToolsEndpoint' : 41,
u'ApplicationName': 101,
u'OriginalApplicationName': 102,
u'ApplicationVersionName':111,
u'EnvironmentName':121,
u'EnvironmentId':122,
u'EnvironmentTier':150,
u'SolutionStack' : 201,
u'OriginalSolutionStack' : 202,
u'EnvironmentType' : 211,
u'Branches': 301,
u'CurrentBranch': 302,
u'BranchMapping': 303,
u'DefaultEnvironmentName': 351,
u'OptionSettingFile' : 501,
u'ConfigFileExtra' : 511,
u'RdsEnabled': 601,
u'RdsEndpoint': 602,
u'RdsSnippetUrl': 603,
u'RdsSourceSnapshotName': 606,
u'RdsEngine': 611,
u'RdsEngineVersion': 612,
u'RdsInstanceClass': 613,
u'RdsMultiAZ': 614,
u'RdsLicenseModel': 615,
u'RdsAllocatedStorage': 616,
u'RdsInstanceName': 621,
u'RdsMasterUsername': 622,
u'RdsMasterPassword': 623,
u'RdsDbName' : 631,
u'RdsDeletionPolicy': 651,
u'InstanceProfileName': 701,
u'ServiceConnectionTimeout' : 1001,
u'ServiceRetryThreshold' : 1011,
u'Force' : 1021,
u'Verbose' : 1051,
u'WaitForFinishTimeout': 1101,
u'WaitForUpdateTimeout': 1102,
u'PollDelay' : 1201,
u'CreateEnvironmentRequestID' : 2001,
u'TerminateEnvironmentRequestID' : 2002,
u'UpdateEnvironmentRequestID' : 2003,
u'RequestEnvInfoRequestID' : 2004,
u'AvailableSolutionStacks': 2101,
})
# Source of parameter value
ParameterSource = ValuedEnum({
u'CliArgument' : 0,
u'Terminal' : 1,
u'ConfigFile' : 2,
u'OsEnvironment' : 3,
u'OperationOutput' : 4,
u'Default' : 10,
})
#----------------------------------------------
# Command
#----------------------------------------------
CommandType = OrderedEnum([
u'INIT',
u'BRANCH',
u'START',
u'STATUS',
u'UPDATE',
u'STOP',
u'DELETE',
u'LOGS',
u'EVENTS',
u'PUSH',
])
SubCommandType = OrderedEnum([
# LOGS command
u'TAIL',
u'OPEN'
])
CommandCombination = {
CommandType.LOGS : {
Key.Default : SubCommandType.TAIL,
Key.Options : [
SubCommandType.TAIL,
]
},
}
#----------------------------------------------
# Terminal
#----------------------------------------------
class TerminalConstant(object):
Y = u'Y'
Yes = u'Yes'
N = u'N'
No = u'No'
TRUE = u'True'
FALSE = u'False'
RdsSnapshotListNumber = 5
IamProfileListNumber = 6
#----------------------------------------------
# Services
#----------------------------------------------
ServiceRegion = OrderedEnum([
u'UsEast1',
u'UsWest1',
u'UsWest2',
u'EuWest1',
u'ApNortheast1',
u'ApSoutheast1',
u'ApSoutheast2',
u'SaEast1',
])
AvailableServiceRegion = [
ServiceRegion.UsEast1,
ServiceRegion.UsWest2,
ServiceRegion.UsWest1,
ServiceRegion.EuWest1,
ServiceRegion.ApSoutheast1,
ServiceRegion.ApNortheast1,
ServiceRegion.ApSoutheast2,
ServiceRegion.SaEast1,
]
ServiceRegionName = {
ServiceRegion.ApNortheast1 : u'Asia Pacific (Tokyo)',
ServiceRegion.ApSoutheast1 : u'Asia Pacific (Singapore)',
ServiceRegion.ApSoutheast2 : u'Asia Pacific (Sydney)',
ServiceRegion.EuWest1: u'EU West (Ireland)',
ServiceRegion.SaEast1: u'South America (Sao Paulo)',
ServiceRegion.UsEast1 : u'US East (Virginia)',
    ServiceRegion.UsWest1 : u'US West (North California)',
    ServiceRegion.UsWest2 : u'US West (Oregon)',
}
ServiceRegionId = {
    ServiceRegion.ApNortheast1 : u'ap-northeast-1',
ServiceRegion.ApSoutheast1 : u'ap-southeast-1',
ServiceRegion.ApSoutheast2 : u'ap-southeast-2',
ServiceRegion.EuWest1: u'eu-west-1',
ServiceRegion.SaEast1: u'sa-east-1',
ServiceRegion.UsEast1 : u'us-east-1',
ServiceRegion.UsWest1 : u'us-west-1',
ServiceRegion.UsWest2 : u'us-west-2',
}
ServiceEndpoint = {
ServiceRegion.ApNortheast1 : u'https://elasticbeanstalk.ap-northeast-1.amazonaws.com',
ServiceRegion.ApSoutheast1 : u'https://elasticbeanstalk.ap-southeast-1.amazonaws.com',
ServiceRegion.ApSoutheast2 : u'https://elasticbeanstalk.ap-southeast-2.amazonaws.com',
ServiceRegion.EuWest1: u'https://elasticbeanstalk.eu-west-1.amazonaws.com',
ServiceRegion.SaEast1: u'https://elasticbeanstalk.sa-east-1.amazonaws.com',
ServiceRegion.UsEast1 : u'https://elasticbeanstalk.us-east-1.amazonaws.com',
ServiceRegion.UsWest1 : u'https://elasticbeanstalk.us-west-1.amazonaws.com',
ServiceRegion.UsWest2 : u'https://elasticbeanstalk.us-west-2.amazonaws.com',
}
SnippetBucket = {
ServiceRegion.ApNortheast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-northeast-1/eb_snippets',
ServiceRegion.ApSoutheast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-southeast-1/eb_snippets',
ServiceRegion.ApSoutheast2 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-southeast-2/eb_snippets',
ServiceRegion.EuWest1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-eu-west-1/eb_snippets',
ServiceRegion.SaEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-sa-east-1/eb_snippets',
ServiceRegion.UsEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-east-1/eb_snippets',
ServiceRegion.UsWest1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-west-1/eb_snippets',
ServiceRegion.UsWest2 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-west-2/eb_snippets',
}
PolicyBucket = {
ServiceRegion.ApNortheast1 : u'https://elasticbeanstalk-env-resources-ap-northeast-1.s3.amazonaws.com/eb_policies',
ServiceRegion.ApSoutheast1 : u'https://elasticbeanstalk-env-resources-ap-southeast-1.s3.amazonaws.com/eb_policies',
ServiceRegion.ApSoutheast2 : u'https://elasticbeanstalk-env-resources-ap-southeast-2.s3.amazonaws.com/eb_policies',
ServiceRegion.EuWest1 : u'https://elasticbeanstalk-env-resources-eu-west-1.s3.amazonaws.com/eb_policies',
ServiceRegion.SaEast1 : u'https://elasticbeanstalk-env-resources-sa-east-1.s3.amazonaws.com/eb_policies',
ServiceRegion.UsEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-east-1/eb_policies',
ServiceRegion.UsWest1 : u'https://elasticbeanstalk-env-resources-us-west-1.s3.amazonaws.com/eb_policies',
ServiceRegion.UsWest2 : u'https://elasticbeanstalk-env-resources-us-west-2.s3.amazonaws.com/eb_policies',
}
DevToolsEndpoint = {
ServiceRegion.ApNor
|
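The constants record above is truncated partway through DevToolsEndpoint, but the region dictionaries shown are complete; a hedged lookup sketch using only names defined above:

```python
# Illustrative sketch only, reusing the dictionaries defined in the (truncated) file above.
region = ServiceRegion.UsWest2
print(ServiceRegionName[region])   # u'US West (Oregon)'
print(ServiceRegionId[region])     # u'us-west-2'
print(ServiceEndpoint[region])     # u'https://elasticbeanstalk.us-west-2.amazonaws.com'
```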
| helldorado/ansible | lib/ansible/modules/cloud/azure/azure_rm_servicebus.py | Python | gpl-3.0 | 6,397 | 0.003126 |
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_servicebus
version_added: "2.8"
short_description: Manage Azure Service Bus.
description:
    - Create, update or delete Azure Service Bus namespaces.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the servicebus namespace
required: true
state:
        description:
            - Assert the state of the servicebus namespace. Use 'present' to create or update and
              'absent' to delete.
default: present
choices:
- absent
- present
location:
description:
- Namespace location.
sku:
description:
- Namespace sku.
choices:
- standard
- basic
- premium
default:
standard
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a namespace
azure_rm_servicebus:
name: deadbeef
location: eastus
'''
RETURN = '''
id:
description: Current state of the service bus.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
from ansible.module_utils._text import to_native
from datetime import datetime, timedelta
class AzureRMServiceBus(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
location=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
sku=dict(type='str', choices=['basic', 'standard', 'premium'], default='standard')
)
self.resource_group = None
self.name = None
self.state = None
self.sku = None
self.location = None
self.results = dict(
changed=False,
id=None
)
super(AzureRMServiceBus, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
changed = False
if not self.location:
resource_group = self.get_resource_group(self.resource_group)
self.location = resource_group.location
original = self.get()
if self.state == 'present' and not original:
self.check_name()
changed = True
if not self.check_mode:
original = self.create()
elif self.state == 'absent' and original:
changed = True
original = None
if not self.check_mode:
self.delete()
self.results['deleted'] = True
if original:
self.results = self.to_dict(original)
self.results['changed'] = changed
return self.results
def check_name(self):
try:
check_name = self.servicebus_client.namespaces.check_name_availability_method(self.name)
if not check_name or not check_name.name_available:
self.fail("Error creating namespace {0} - {1}".format(self.name, check_name.message or str(check_name)))
except Exception as exc:
self.fail("Error creating namespace {0} - {1}".format(self.name, exc.message or str(exc)))
def create(self):
        self.log('Cannot find namespace, creating one')
try:
sku = self.servicebus_models.SBSku(name=str.capitalize(self.sku))
poller = self.servicebus_client.namespaces.create_or_update(self.resource_group,
self.name,
self.servicebus_models.SBNamespace(location=self.location,
sku=sku))
ns = self.get_poller_result(poller)
except Exception as exc:
self.fail('Error creating namespace {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
return ns
def delete(self):
try:
self.servicebus_client.namespaces.delete(self.resource_group, self.name)
return True
except Exception as exc:
self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
def get(self):
try:
return self.servicebus_client.namespaces.get(self.resource_group, self.name)
except Exception:
return None
def to_dict(self, instance):
result = dict()
attribute_map = self.servicebus_models.SBNamespace._attribute_map
for attribute in attribute_map.keys():
value = getattr(instance, attribute)
if not value:
continue
if isinstance(value, self.servicebus_models.SBSku):
result[attribute] = value.name.lower()
elif isinstance(value, datetime):
result[attribute] = str(value)
elif isinstance(value, str):
result[attribute] = to_native(value)
elif attribute == 'max_size_in_megabytes':
result['max_size_in_mb'] = value
else:
result[attribute] = value
return result
def is_valid_timedelta(value):
if value == timedelta(10675199, 10085, 477581):
return None
return value
def main():
AzureRMServiceBus()
if __name__ == '__main__':
main()
|
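One detail in the module above worth a hedged illustration: is_valid_timedelta() filters out the sentinel duration Azure reports for unset values, which corresponds to .NET's TimeSpan.MaxValue.

```python
# Minimal sketch of the sentinel check performed by is_valid_timedelta() above.
from datetime import timedelta

azure_unset = timedelta(10675199, 10085, 477581)   # .NET TimeSpan.MaxValue
assert is_valid_timedelta(azure_unset) is None
assert is_valid_timedelta(timedelta(minutes=5)) == timedelta(minutes=5)
```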
| dsanders11/django-newsletter | docs/conf.py | Python | agpl-3.0 | 9,244 | 0.006274 |
#
# django-newsletter documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 13 13:53:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pkg_resources import get_distribution
# Determine whether rendering on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# Django bogus settings for autodoc
import django
from django.conf import settings
settings.configure(
SECRET_KEY='bogus', SITE_ID=1,
INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.sites',
'sorl.thumbnail',
'newsletter'
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
)
django.setup()
from django.core.management import call_command
call_command('migrate', interactive=False)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-newsletter'
copyright = '2013, Mathijs de Bruin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = get_distribution('django-newsletter').version
# for example take major/minor
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['newsletter']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-newsletterdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-newsletter.tex', 'django-newsletter Documentation',
'Mathijs de Bruin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-newsletter', 'django-newsletter Documentation',
['Mathijs de Bruin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = Fals
|
| sudkannan/xen-hv | tools/python/xen/xend/XendBootloader.py | Python | gpl-2.0 | 7,323 | 0.004779 |
#
# XendBootloader.py - Framework to run a boot loader for picking the kernel
#
# Copyright 2005-2006 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os, select, errno, stat, signal, tty
import random
import shlex
from xen.xend import sxp
from xen.util import mkdir, oshelp
from XendLogging import log
from XendError import VmError
import pty, termios, fcntl
from xen.lowlevel import ptsname
def bootloader(blexec, disk, dom, quiet = False, blargs = '', kernel = '',
ramdisk = '', kernel_args = ''):
"""Run the boot loader executable on the given disk and return a
config image.
@param blexec Binary to use as the boot loader
@param disk Disk to run the boot loader on.
@param dom DomainInfo representing the domain being booted.
@param quiet Run in non-interactive mode, just booting the default.
@param blargs Arguments to pass to the bootloader."""
if not os.access(blexec, os.X_OK):
msg = "Bootloader isn't executable"
log.error(msg)
raise VmError(msg)
if not os.access(disk, os.R_OK):
msg = "Disk isn't accessible"
log.error(msg)
raise VmError(msg)
if os.uname()[0] == "NetBSD" and disk.startswith('/dev/'):
disk = disk.replace("/dev/", "/dev/r")
mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU)
while True:
fifo = "/var/run/xend/boot/xenbl.%s" %(random.randint(0, 32000),)
try:
os.mkfifo(fifo, 0600)
except OSError, e:
if (e.errno != errno.EEXIST):
raise
break
# We need to present the bootloader's tty as a pty slave that xenconsole
# can access. Since the bootloader itself needs a pty slave,
# we end up with a connection like this:
#
# xenconsole -- (slave pty1 master) <-> (master pty2 slave) -- bootloader
#
# where we copy characters between the two master fds, as well as
# listening on the bootloader's fifo for the results.
(m1, s1) = pty.openpty()
# On Solaris, the pty master side will get cranky if we try
# to write to it while there is no slave. To work around this,
# keep the slave descriptor open until we're done. Set it
# to raw terminal parameters, otherwise it will echo back
# characters, which will confuse the I/O loop below.
# Furthermore, a raw master pty device has no terminal
# semantics on Solaris, so don't try to set any attributes
# for it.
if os.uname()[0] != 'SunOS' and os.uname()[0] != 'NetBSD':
tty.setraw(m1)
os.close(s1)
else:
tty.setraw(s1)
fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY)
slavename = ptsname.ptsname(m1)
dom.storeDom("console/tty", slavename)
# Release the domain lock here, because we definitely don't want
# a stuck bootloader to deny service to other xend clients.
from xen.xend import XendDomain
domains = XendDomain.instance()
domains.domains_lock.release()
(child, m2) = pty.fork()
if (not child):
args = [ blexec ]
if kernel:
args.append("--kernel=%s" % kernel)
if ramdisk:
args.append("--ramdisk=%s" % ramdisk)
if kernel_args:
args.append("--args=%s" % kernel_args)
if quiet:
args.append("-q")
args.append("--output=%s" % fifo)
if blargs:
args.extend(shlex.split(blargs))
args.append(disk)
try:
log.debug("Launching bo
|
otloader as %s." % str(args))
env = os.environ.copy()
env['TERM'] = 'vt100'
oshelp.close_fds()
os.execvpe(args[0], args, env)
except OSError, e:
print e
pass
os._exit(1)
    # record that this domain is bootloading
dom.bootloader_pid = child
# On Solaris, the master pty side does not have terminal semantics,
# so don't try to set any attributes, as it will fail.
if os.uname()[0] != 'SunOS':
tty.setraw(m2);
fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY);
while True:
try:
r = os.open(fifo, os.O_RDONLY)
except OSError, e:
if e.errno == errno.EINTR:
continue
break
fcntl.fcntl(r, fcntl.F_SETFL, os.O_NDELAY);
ret = ""
inbuf=""; outbuf="";
# filedescriptors:
# r - input from the bootloader (bootstring output)
# m1 - input/output from/to xenconsole
# m2 - input/output from/to pty that controls the bootloader
# The filedescriptors are NDELAY, so it's ok to try to read
# bigger chunks than may be available, to keep e.g. curses
# screen redraws in the bootloader efficient. m1 is the side that
# gets xenconsole input, which will be keystrokes, so a small number
# is sufficient. m2 is pygrub output, which will be curses screen
# updates, so a larger number (1024) is appropriate there.
#
# For writeable descriptors, only include them in the set for select
# if there is actual data to write, otherwise this would loop too fast,
# eating up CPU time.
while True:
wsel = []
if len(outbuf) != 0:
wsel = wsel + [m1]
if len(inbuf) != 0:
wsel = wsel + [m2]
sel = select.select([r, m1, m2], wsel, [])
try:
if m1 in sel[0]:
s = os.read(m1, 16)
inbuf += s
if m2 in sel[1]:
n = os.write(m2, inbuf)
inbuf = inbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
try:
if m2 in sel[0]:
s = os.read(m2, 1024)
outbuf += s
if m1 in sel[1]:
n = os.write(m1, outbuf)
outbuf = outbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
if r in sel[0]:
s = os.read(r, 128)
ret = ret + s
if len(s) == 0:
break
del inbuf
del outbuf
os.waitpid(child, 0)
os.close(r)
os.close(m2)
os.close(m1)
if os.uname()[0] == 'SunOS' or os.uname()[0] == 'NetBSD':
os.close(s1)
os.unlink(fifo)
# Re-acquire the lock to cover the changes we're about to make
# when we return to domain creation.
domains.domains_lock.acquire()
if dom.bootloader_pid is None:
msg = "Domain was died while the bootloader was running."
log.error(msg)
raise VmError, msg
dom.bootloader_pid = None
if len(ret) == 0:
msg = "Boot loader didn't return any data!"
log.error(msg)
raise VmError, msg
pin = sxp.Parser()
pin.input(ret)
pin.input_eof()
blcfg = pin.val
return blcfg
def bootloader_tidy(dom):
if hasattr(dom, "bootloader_pid") and dom.bootloader_pid is not None:
pid = dom.bootloader_pid
dom.bootloader_pid = None
os.kill(pid, signal.SIGKILL)
|
| bwmichael/jccc-cis142-python | old/check-quadrant.py | Python | apache-2.0 | 3,124 | 0.003201 |
# Brandon Michael
# cis142
# checkForQuadrant.py
# Goal: This program will keep asking for input values to check for the quadrant position,
#       origin, x-axis and y-axis positions
# Notes: I used a while loop to make testing values easier and I used the input x,y
# Display program instructions
print("###################################################")
print("Quadrant Finder 1.0")
print("Enter the x and y coordinates to find the quadrant!")
print("Type [exit] to quit the program")
print("###################################################")
# Setup the x and y variables
xValue = None
yValue = None
# Setup a loop that breaks when you type exit
while True:
# Get the input values in a X,Y format
inputCoordinates = input("Type in coordinates [x,y]: ")
# Check if exit was typed, if so then exit the loop and end
if inputCoordinates == "exit":
break # stops the loop
# We want to make sure we can only strip out 2 input values
# and make sure there is a comma separating them
elif len(inputCoordinates.strip().split(',')) == 2 and inputCoordinates.count(',') == 1:
# Loop over the two numbers that are stripped out by the comma value
for coordinate in inputCoordinates.strip().split(','):
# This checks to see if we have set a value for x
# If it is still set to None then the first value is going to be xValue
if xValue is None:
xValue = int(coordinate)
# Since we are checking the xValue we can assume when the loop comes back
# a second time we can set it to yValue
else:
yValue = int(coordinate)
# If its a 0,0 value then its the Origin
if xValue == 0 and yValue == 0:
print("Origin")
else:
            # If x = 0 and y is greater or less than 0, it's on the Y axis
            if xValue == 0 and (yValue < 0 or yValue > 0):
                print("Y - Axis")
            # If x is greater or less than 0 and y = 0, it's on the X axis
            elif (xValue < 0 or xValue > 0) and yValue == 0:
                print("X - Axis")
            # Anything else and we need to check for quadrants
else:
                # If x is a positive number and y is a positive number then it's in Quadrant 1
if xValue > 0 and yValue > 0:
print("Quadrant I")
# If x is a negative number and y is a positive number then its in Quadrant 2
elif xValue < 0 and yValue > 0:
print("Quadrant II")
# If x is a negative number and y is negative number then its in Quadrant 3
elif xValue < 0 and yValue < 0:
print("Quadrant III")
# If x is a positive number and y is a negative number then its in Quadrant 4
elif xValue > 0 and yValue < 0:
print("Quadrant IV")
# If they typed anything but 2 numbers separated by a comma then ask for the input again
else:
print("Please type the input value as x,y")
print("Example: 1,-9")
|
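A compact, hedged restatement of the quadrant logic above as a reusable function; it is not part of the original assignment file, just an illustration of the same branching.

```python
# Illustrative refactor sketch of the classification logic above.
def quadrant(x, y):
    if x == 0 and y == 0:
        return "Origin"
    if x == 0:
        return "Y - Axis"
    if y == 0:
        return "X - Axis"
    if x > 0 and y > 0:
        return "Quadrant I"
    if x < 0 and y > 0:
        return "Quadrant II"
    if x < 0 and y < 0:
        return "Quadrant III"
    return "Quadrant IV"

assert quadrant(1, -9) == "Quadrant IV"
assert quadrant(0, 3) == "Y - Axis"
```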
| sajuptpm/murano | contrib/plugins/murano_exampleplugin/murano_exampleplugin/cfg.py | Python | apache-2.0 | 822 | 0 |
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
def init_config(conf):
opts = [
        cfg.IntOpt('api_version', default=2),
        cfg.StrOpt('endpoint_type', default='publicURL')
]
conf.register_opts(opts, group="glance")
    return conf.glance
|
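A hedged sketch of how the plugin's init_config() above is typically exercised; the ConfigOpts instance and the empty-argv call follow the standard oslo.config pattern and are not part of the plugin file.

```python
# Hypothetical usage sketch; only init_config() comes from the plugin above.
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf([])                           # parse an empty command line so defaults are usable
glance_opts = init_config(conf)
print(glance_opts.api_version)     # 2
print(glance_opts.endpoint_type)   # 'publicURL'
```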
| duncan-r/SHIP | ship/utils/fileloaders/datloader.py | Python | mit | 9,666 | 0.001552 |
"""
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
    messing with something that they probably shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
"""
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
    All unknown data within the file is contained within UnknownUnit units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0
self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
first_word = line.split()[0].strip()
else:
                first_word = 'Nothing'
            if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
                    # Reset the reach for the UnknownUnit
                    unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False:
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No:
|
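The SHIP record above is cut off mid-method, but the loadFile() docstring is complete enough for a hedged usage sketch; the file path is a placeholder.

```python
# Hypothetical usage sketch based on the loadFile() docstring above; 'model.dat' is a placeholder.
from ship.utils.fileloaders.datloader import DatLoader

loader = DatLoader()
units = loader.loadFile('model.dat')   # returns a DatCollection of FMP units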
| sacharya/nova | nova/tests/conductor/test_conductor.py | Python | apache-2.0 | 83,151 | 0.000457 |
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
import mox
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.compute import test_compute
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_notifier
from nova.tests.objects import test_migration
from nova import utils
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db == None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
    def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
migration_p = jsonutils.to_primitive(migration)
migration = self.conductor.migration_update(self.context, migration_p,
'finished')
self.assertEqual(migration['status'], 'finished')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertTrue(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertFalse(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get(self):
aggregate_ref = self._setup_aggregate_with_host()
aggregate = self.conductor.aggregate_get(self.context,
aggregate_ref['id'])
self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get_by_host(self):
self._setup_aggregate_with_host()
aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
self.assertEqual(aggregates[0]['availability_zone'], 'foo')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('resu
|
| mathcamp/aws-formula | _states/elb.py | Python | mit | 3,106 | 0.001288 |
"""
This state is used to create and manage ELBs.
Examples
========
.. code-block:: yaml
.webserver-elb:
elb.managed:
- name: webserver-elb
- region: us-west-1
- zones:
- us-west-1a
- us-west-1c
        - listeners:
          - [80, 80, 'http', 'http']
- [443, 80, 'https', 'http', 'my_ssl_certificate']
- subnets:
- subnet1
- subnet2
- security_groups:
- my_elb_security_group
- my_other_elb_security_group
- scheme: internet-facing
- health_check:
target: HTTP:80/health
timeout: 3
interval: 30
healthy_threshold: 4
unhealthy_threshold: 2
        - policies:
            80:
type: app
cookie_name: my_cookie
443:
type: lb
cookie_expire: 60
- instances:
- i-deadbeef
- i-01234abc
.bad-elb:
elb.absent:
- name: bad-elb
- region: us-west-1
.add-server:
elb.add:
- name: my-server
- region: us-west-1
- elb: webserver-elb
.rm-badserver:
elb.remove:
- name: badserver
- region: us-west-1
- elb: webserver-elb
"""
# This prevents pylint from yelling at me
__opts__ = {}
__salt__ = {}
def managed(
name,
region,
zones,
listeners=None,
subnets=None,
security_groups=None,
scheme=None,
health_check=None,
policies=None,
instances=None):
"""
Ensure an ELB exists
The arguments are the same as the ``elb.manage`` module
"""
return __salt__['aws_util.run_aws_module'](
'elb.manage', 'ELB', name, region, name, region, zones, listeners,
subnets, security_groups, scheme, health_check, policies, instances,
__opts__['test'])
def absent(name, region):
"""
Ensure an ELB does not exist
Parameters
----------
name : str
The name of the ELB
region : str
The AWS region the ELB is in
"""
return __salt__['aws_util.run_aws_module'](
'elb.delete', 'ELB', name, region, name, region, test=__opts__['test'])
def add(
name,
region,
elb):
"""
Add a server to an ELB
Parameters
----------
name : str
The name or instance id of the server
region : str
The AWS region
elb : str
The name of the ELB to add the server to
"""
return __salt__['aws_util.run_aws_module'](
'elb.add', "ELB", elb, region, name, region,
elb, test=__opts__['test'])
def remove(
name,
region,
elb):
"""
Remove a server from an ELB
Parameters
----------
name : str
The name or instance id of the server
region : str
The AWS region
elb : str
The name of the ELB to remove the server from
"""
return __salt__['aws_util.run_aws_module'](
'elb.remove', "ELB", elb, region, name, region,
elb, test=__opts__['test'])
|
| jameshy/libtree | tests/__init__.py | Python | mit | 35 | 0 |
# Copyright (c) 2015 Fabian Kochem
|
| lnls-fac/apsuite | apsuite/commisslib/measure_respmat_tbbo.py | Python | mit | 8,002 | 0.000125 |
"""."""
import numpy as np
import pyaccel
from siriuspy.namesys import SiriusPVName as _PVName
from siriuspy.devices import SOFB
from ..optimization import SimulAnneal
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class Params(_ParamsBaseClass):
"""."""
def __init__(self):
"""."""
super().__init__()
self.deltas = {
'CH': 0.3e-3, 'CV': 0.15e-3, 'InjSept': 0.3e-3, 'InjKckr': 0.3e-3}
self.wait_time = 2
self.timeout_orb = 10
self.num_points = 10
class MeasureRespMatTBBO(_BaseClass):
"""."""
def __init__(self, all_corrs):
"""."""
super().__init__(params=Params(), target=self._measure_matrix_thread)
self.devices = {
'bo_sofb': SOFB(SOFB.DEVICES.BO),
'tb_sofb': SOFB(SOFB.DEVICES.TB),
}
self._all_corrs = all_corrs
self._matrix = dict()
self._corrs_to_measure = []
@property
def trajx(self):
"""."""
return np.hstack(
[self.devices['tb_sofb'].trajx, self.devices['bo_sofb'].trajx])
@property
def trajy(self):
"""."""
return np.hstack(
[self.devices['tb_sofb'].trajy, self.devices['bo_sofb'].trajy])
def wait(self, timeout=10):
"""."""
self.devices['tb_sofb'].wait_buffer(timeout=timeout)
self.devices['bo_sofb'].wait_buffer(timeout=timeout)
def reset(self, wait=0):
"""."""
if self._stopevt.wait(wait):
return False
self.devices['tb_sofb'].cmd_reset()
self.devices['bo_sofb'].cmd_reset()
if self._stopevt.wait(1):
return False
return True
@property
def corr_names(self):
"""."""
corrs = sorted([
c for c in self._all_corrs if not c.dev.startswith('CV')])
corrs.extend(sorted([
c for c in self._all_corrs if c.dev.startswith('CV')]))
return corrs
@property
def corrs_to_measure(self):
"""."""
if not self._corrs_to_measure:
return sorted(self._all_corrs.keys() - self._matrix.keys())
return self._corrs_to_measure
@corrs_to_measure.setter
def corrs_to_measure(self, value):
"""."""
self._corrs_to_measure = sorted([_PVName(n) for n in value])
@property
def matrix(self):
"""."""
mat = np.zeros([len(self._all_corrs), 2*self.trajx.size], dtype=float)
for i, cor in enumerate(self.corr_names):
line = self._matrix.get(cor)
if line is not None:
mat[i, :] = line
return mat
@property
def nr_points(self):
"""."""
return min(
self.devices['tb_sofb'].nr_points,
self.devices['bo_sofb'].nr_points)
@nr_points.setter
def nr_points(self, value):
self.devices['tb_sofb'].nr_points = int(value)
self.devices['bo_sofb'].nr_points = int(value)
def _measure_matrix_thread(self):
self.nr_points = self.params.num_points
corrs = self.corrs_to_measure
print('Starting...')
for i, cor in enumerate(corrs):
print('{0:2d}|{1:2d}: {2:20s}'.format(i, len(corrs), cor), end='')
orb = []
delta = self.params.deltas[cor.dev]
origkick = self._all_corrs[cor].strength
print('orig ', end='')
if not self.reset(self.params.wait_time):
break
self.wait(self.params.timeout_orb)
orb.append(-np.hstack([self.trajx, self.trajy]))
sig = -2*int(origkick > 0) + 1
print('pos' if sig > 0 else 'neg')
self._all_corrs[cor].strength = origkick + sig*delta
            if not self.reset(self.params.wait_time):
break
self.wait(self.params.timeout_orb)
orb.append(np.hstack([self.trajx, self.trajy]))
self._all_corrs[cor].strength = origkick
            if self._stopevt.is_set():
                print('Stopped!')
break
else:
self._matrix[cor] = np.array(orb).sum(axis=0)/(sig*delta)
else:
print('Finished!')
def calc_model_respmatTBBO(
tb_mod, model, corr_names, elems, meth='middle', ishor=True):
"""."""
bpms = np.array(pyaccel.lattice.find_indices(model, 'fam_name', 'BPM'))[1:]
_, cumulmat = pyaccel.tracking.find_m44(
model, indices='open', fixed_point=[0, 0, 0, 0])
matrix = np.zeros((len(corr_names), 2*bpms.size))
for idx, corr in enumerate(corr_names):
elem = elems[corr]
indcs = np.array(elem.model_indices)
if corr.sec == 'BO':
print('Booster ', corr)
indcs += len(tb_mod)
cortype = elem.magnet_type
kxl = kyl = ksxl = ksyl = 0
if corr.dev == 'InjSept':
# kxl = tb_mod[indcs[0][1]].KxL
# kyl = tb_mod[indcs[0][1]].KyL
# ksxl = tb_mod[indcs[0][1]].KsxL
# ksyl = tb_mod[indcs[0][1]].KsyL
midx = pyaccel.lattice.find_indices(
tb_mod, 'fam_name', 'InjSeptM66')
for m in midx:
kxl += tb_mod[m].KxL
kyl += tb_mod[m].KyL
ksxl += tb_mod[m].KsxL
ksyl += tb_mod[m].KsyL
if not ishor and corr.dev in {'InjSept', 'InjKckr'}:
cortype = 'vertical'
matrix[idx, :] = _get_respmat_line(
cumulmat, indcs, bpms, length=elem.model_length,
kxl=kxl, kyl=kyl, ksxl=ksxl, ksyl=ksyl,
cortype=cortype, meth=meth)
return matrix
def _get_respmat_line(
cumul_mat, indcs, bpms, length, kxl=0, kyl=0, ksxl=0, ksyl=0,
cortype='vertical', meth='middle'):
idx = 3 if cortype.startswith('vertical') else 1
cor = indcs[0]
if meth.lower().startswith('end'):
cor = indcs[-1]+1
elif meth.lower().startswith('mid'):
# create a symplectic integrator of second order
# for the last half of the element:
drift = np.eye(4, dtype=float)
drift[0, 1] = length/2 / 2
drift[2, 3] = length/2 / 2
quad = np.eye(4, dtype=float)
quad[1, 0] = -kxl/2
quad[3, 2] = -kyl/2
quad[1, 2] = -ksxl/2
quad[3, 0] = -ksyl/2
half_cor = np.dot(np.dot(drift, quad), drift)
m0c = cumul_mat[cor]
if meth.lower().startswith('mid'):
m0c = np.linalg.solve(half_cor, m0c)
mat = np.linalg.solve(m0c.T, cumul_mat[bpms].transpose((0, 2, 1)))
mat = mat.transpose(0, 2, 1)
# if meth.lower().startswith('mid'):
# mat = np.dot(mat, half_cor)
respx = mat[:, 0, idx]
respy = mat[:, 2, idx]
respx[bpms < indcs[0]] = 0
respy[bpms < indcs[0]] = 0
return np.hstack([respx, respy])
class FindSeptQuad(SimulAnneal):
"""."""
def __init__(self, tb_model, bo_model, corr_names, elems,
respmat, nturns=5, save=False, in_sept=True):
"""."""
super().__init__(save=save)
self.tb_model = tb_model
self.bo_model = bo_model
self.corr_names = corr_names
self.elems = elems
self.nturns = nturns
self.respmat = respmat
self.in_sept = in_sept
def initialization(self):
"""."""
return
def calc_obj_fun(self):
"""."""
if self.in_sept:
sept_idx = pyaccel.lattice.find_indices(
self.tb_model, 'fam_name', 'InjSept')
else:
sept_idx = self.elems['TB-04:MA-CV-2'].model_indices
k, ks = self._position
pyaccel.lattice.set_attribute(self.tb_model, 'K', sept_idx, k)
pyaccel.lattice.set_attribute(self.tb_model, 'Ks', sept_idx, ks)
respmat = calc_model_respmatTBBO(
self.tb_model, self.bo_model, self.corr_names, self.elems)
respmat -= self.respmat
return np.sqrt(np.mean(respmat*respmat))
|
uranusjr/django
|
django/test/client.py
|
Python
|
bsd-3-clause
| 26,876
| 0.000595
|
import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# JSON Vendor Tree spec: https://tools.ietf.org/html/rfc6838#section-3.2
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(vnd\..+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
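# Illustrative sketch (not part of Django): FakePayload enforces its
# content-length contract, mirroring what a real network stream would allow.
#
#     payload = FakePayload(b'abcdef')
#     payload.read(4)    # b'abcd'
#     payload.read()     # b'ef' (all remaining bytes)
#     payload.read(1)    # AssertionError: cannot read past the available bytes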
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
            response.streaming_content = []
else:
response.content = b''
return response
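# Illustrative sketch (not part of Django): the helper above empties the body
# while leaving the rest of the response untouched, e.g. for a HEAD request.
# (HttpResponse is assumed to be imported; this file itself does not use it.)
#
#     request = HttpRequest()
#     request.method = 'HEAD'
#     response = HttpResponse(b'payload')
#     conditional_content_removal(request, response)
#     assert response.content == b''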
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
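# Illustrative sketch (not part of Django): driving ClientHandler directly with
# a hand-built WSGI environ, roughly what RequestFactory assembles. The keys
# shown are a minimal assumption; real requests carry more entries.
#
#     handler = ClientHandler(enforce_csrf_checks=False)
#     environ = {
#         'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'wsgi.url_scheme': 'http',
#         'SERVER_NAME': 'testserver', 'SERVER_PORT': '80',
#         'wsgi.input': FakePayload(b''),
#     }
#     response = handler(environ)   # HttpResponse with .wsgi_request attached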
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
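# Illustrative sketch (not part of Django): encoding a simple form payload with
# the helper above; list values become repeated fields with the same name.
#
#     body = encode_multipart(BOUNDARY, {'name': 'Alice', 'tags': ['a', 'b']})
#     # body is bytes ready to be posted with content type MULTIPART_CONTENT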
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and
|
eri-trabiccolo/exaile
|
xlgui/devices.py
|
Python
|
gpl-2.0
| 4,732
| 0.000634
|
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import logging
import threading
import gtk
from xl.nls import gettext as _
from xl import xdg, settings, event, devices
from xlgui import collection
logger = logging.getLogger(__name__)
class ManagerDialog(object):
"""
the device manager dialog
"""
def __init__(self, parent, main):
self.main = main
self.parent = parent
self.device_manager = self.main.exaile.devices
self.builder = gtk.Builder()
self.builder.add_from_file(xdg.get_data_path('ui/device_manager.ui'))
self.window = self.builder.get_object('device_manager')
self.window.set_transient_for(self.parent)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.window.connect('delete-event', self.on_close)
self.builder.connect_signals({
'on_btn_connect_clicked': self.on_connect,
'on_btn_disconnect_clicked': self.on_disconnect,
'on_btn_edit_clicked': self.on_edit,
'on_btn_add_clicked': self.on_add,
'on_btn_remove_clicked': self.on_remove,
'on_btn_close_clicked': self.on_close,
})
# TODO: make these actually work. For now, they are hidden
for item in ('add', 'edit', 'remove'):
self.builder.get_object('btn_%s' % item).destroy()
        # object should really be devices.Device, but it doesn't work :/
self.model = gtk.ListStore(object, gtk.gdk.Pixbuf, str, str)
self.tree = self.builder.get_object('tree_devices')
self.tree.set_model(self.model)
render = gtk.CellRendererPixbuf()
col = gtk.TreeViewColumn(_("Icon"), render)
col.add_attribute(render, "pixbuf", 1)
self.tree.append_column(col)
render = gtk.CellRendererText()
col = gtk.TreeViewColumn(_("Device"), render)
col.set_expand(True)
col.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
col.add_attribute(render, "text", 2)
self.tree.append_column(col)
render = gtk.CellRendererText()
col = gtk.TreeViewColumn(_("Driver"), render)
col.add_attribute(render, "text", 3)
        self.tree.append_column(col)
self.populate_tree()
event.add_callback(self.populate_tree, 'device_added')
event.add_callback(self.populate_tree, 'device_removed')
def populate_tree(self, *args):
self.model.clear()
for d in self.device_manager.list_devices():
self.model.append([d, None, d.get_name(), d.__class__.__name__])
def _get_selected_devices(self):
sel = self.tree.get_selection()
(model, paths) = sel.get_selected_rows()
devices = []
for path in paths:
iter = self.model.get_iter(path)
device = self.model.get_value(iter, 0)
devices.append(device)
return devices
def on_connect(self, *args):
devices = self._get_selected_devices()
for d in devices:
d.connect()
def on_disconnect(self, *args):
devices = self._get_selected_devices()
for d in devices:
d.disconnect()
def on_edit(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_add(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_remove(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_close(self, *args):
self.window.hide()
self.window.destroy()
def run(self):
self.window.show_all()
|
markdrago/caboose
|
src/test/repo/date_iterator_tests.py
|
Python
|
mit
| 1,409
| 0.002839
|
import nose
from nose.tools import *
from unittest import TestCase
from datetime import datetime, timedelta
from repo.date_iterator import DateIterator
class DateIteratorTests(TestCase):
def test_date_iterator_returns_self_on_iter(self):
d = DateIterator(datetime.now(), datetime.now())
eq_(d, d.__iter__())
def test_date_iterator_gives_first_date_as_start_date(self):
start = datetime(2011, 3, 3)
end = datetime(2011, 3, 4)
d = DateIterator(start, end)
first = d.next()
eq_(start, first)
def test_date_iterator_gives_next_date_30_days_by_default(self):
start = datetime(2011, 3, 3)
next = datetime(2011, 4, 2)
end = datetime(2011, 4, 3)
d = DateIterator(start, end)
first = d.next()
second = d.next()
eq_(next, second)
    def test_date_iterator_gives_next_date_7_days(self):
start = datetime(2011, 3, 3)
next = datetime(2011, 3, 10)
end = datetime(2011, 3, 14)
        d = DateIterator(start, end, delta=timedelta(days=7))
first = d.next()
second = d.next()
eq_(next, second)
@raises(StopIteration)
def test_date_iterator_raises_stop_exception(self):
start = datetime(2011, 3, 3)
end = datetime(2011, 4, 1)
d = DateIterator(start, end)
first = d.next()
second = d.next()
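        # --- Illustrative sketch (assumption, not the real repo.date_iterator) ---
        # A DateIterator consistent with the tests above would look roughly like:
        #
        #     class DateIterator(object):
        #         def __init__(self, start, end, delta=timedelta(days=30)):
        #             self.current, self.end, self.delta = start, end, delta
        #         def __iter__(self):
        #             return self
        #         def next(self):
        #             if self.current > self.end:
        #                 raise StopIteration
        #             value = self.current
        #             self.current += self.delta
        #             return value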
|
delattreb/TemperatureHumidityServer
|
src/dal/dal_dht22.py
|
Python
|
gpl-3.0
| 1,407
| 0.004975
|
"""
dal_dht11 v1.0.0
Author: Bruno DELATTRE
Date : 19/09/2016
"""
from lib import com_logger
class DAL_DHT22:
def __init__(self, connection, cursor):
self.connection = connection
self.cursor = cursor
self.logger = com_logger.Logger('DHT22 DAL')
""" Select"""
def get_dht22(self, lastdate):
try:
            self.cursor.execute('SELECT date, name, temperature, humidity FROM DHT22 WHERE date > "' + lastdate + '"')
rows = self.cursor.fetchall()
return rows
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
def get_lastdata(self):
try:
self.cursor.execute('SELECT MAX(date) FROM DHT22')
rows = self.cursor.fetchall()
return rows
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
""" Insert """
def set_dht22(self, name, temperature, humidity):
try:
self.cursor.execute(
'INSERT INTO DHT22 (date, name, temperature, humidity) VALUES (datetime("now","localtime"),"' + str(name) + '","' + str(temperature)[:4] + '","' + str(humidity)[:4] + '")')
self.connection.commit()
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
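    # --- Illustrative note (sketch, not part of this class) --------------------
    # The queries above splice values into SQL strings. If the cursor is a
    # sqlite3 cursor (the datetime("now","localtime") call suggests SQLite),
    # bound parameters avoid quoting and injection issues, e.g.:
    #
    #     self.cursor.execute(
    #         'INSERT INTO DHT22 (date, name, temperature, humidity) '
    #         'VALUES (datetime("now","localtime"), ?, ?, ?)',
    #         (str(name), str(temperature)[:4], str(humidity)[:4]))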
|
timj/scons
|
src/engine/SCons/Tool/sgicc.py
|
Python
|
mit
| 1,780
| 0.001685
|
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from . import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gerhc/django-encrypted-fields
|
encrypted_fields/fields.py
|
Python
|
mit
| 9,173
| 0.000654
|
import os
import types
import binascii
from django.db import models
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_str as smart_text
from keyczar import keyczar
class EncryptedFieldException(Exception):
pass
# Simple wrapper around keyczar to standardize the initialization
# of the crypter object and allow for others to extend as needed.
class KeyczarWrapper(object):
def __init__(self, keyname, *args, **kwargs):
self.crypter = keyczar.Crypter.Read(keyname)
def encrypt(self, cleartext):
return self.crypter.Encrypt(cleartext)
def decrypt(self, ciphertext):
return self.crypter.Decrypt(ciphertext)
class EncryptedFieldMixin(object, metaclass=models.SubfieldBase):
"""
EncryptedFieldMixin will use keyczar to encrypt/decrypt data that is being
marshalled in/out of the database into application Django model fields.
This is very helpful in ensuring that data at rest is encrypted and
minimizing the effects of SQL Injection or insider access to sensitive
databases containing sensitive information.
The most basic use of this mixin is to have a single encryption key for all
data in your database. This lives in a Keyczar key directory specified by:
the setting - settings.ENCRYPTED_FIELDS_KEYDIR -
Optionally, you can name specific encryption keys for data-specific purposes
in your model such as:
special_data = EncrytpedCharField( ..., keyname='special_data' )
The Mixin will handle the encryption/decryption seamlessly, but native
SQL queries may need a way to filter data that is encrypted. Using the
optional 'prefix' kwarg will prepend a static identifier to your encrypted
data before it is written to the database.
There are other use cases where you may not wish to encrypt all of the data
in a database. For example, if you have a survey application that allows
users to enter arbitrary questions and answers, users may request sensitive
information to be stored such as SSN, Driver License #, Credit Card, etc.
Your application can detect these sensitive fields, manually encrypt the
data and store that in the database mixed with other cleartext data.
The model should then only decrypt the specific fields needed. Use the
kwarg 'decrypt_only' to specify this behavior and the model will not
encrypt the data inbound and only attempt to decrypt outbound.
Encrypting data will significantly change the size of the data being stored
and this may cause issues with your database column size. Before storing
any encrypted data in your database, ensure that you have the proper
column width otherwise you may experience truncation of your data depending
on the database engine in use.
To have the mixin enforce max field length, either:
a) set ENFORCE_MAX_LENGTH = True in your settings files
b) set 'enforce_max_length' to True in the kwargs of your model.
A ValueError will be raised if the encrypted length of the data (including
prefix if specified) is greater than the max_length of the field.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the EncryptedFieldMixin with the following
optional settings:
* keyname: The name of the keyczar key
* crypter_klass: A custom class that is extended from Keyczar.
* prefix: A static string prepended to all encrypted data
* decrypt_only: Boolean whether to only attempt to decrypt data coming
from the database and not attempt to encrypt the data
being written to the database.
"""
# Allow for custom class extensions of Keyczar.
self._crypter_klass = kwargs.pop('crypter_klass', KeyczarWrapper)
self.keyname = kwargs.pop('keyname', None)
# If settings.DEFAULT_KEY_DIRECTORY, then the key
# is located in DEFAULT_KEY_DIRECTORY/keyname
if self.keyname:
if hasattr(settings, 'DEFAULT_KEY_DIRECTORY'):
self.keydir = os.path.join(
settings.DEFAULT_KEY_DIRECTORY,
self.keyname
)
else:
raise ImproperlyConfigured(
'You must set settings.DEFAULT_KEY_DIRECTORY'
'when using the keyname kwarg'
)
# If the keyname is not defined on a per-field
# basis, then check for the global data encryption key.
if not self.keyname and hasattr(settings, 'ENCRYPTED_FIELDS_KEYDIR'):
self.keydir = settings.ENCRYPTED_FIELDS_KEYDIR
# If we still do not have a keydir, then raise an exception
if not self.keydir:
raise ImproperlyConfigured(
'You must set settings.ENCRYPTED_FIELDS_KEYDIR '
'or name a key with kwarg `keyname`'
)
# The name of the keyczar key without path for logging purposes.
self.keyname = os.path.dirname(self.keydir)
# Prefix encrypted data with a static string to allow filtering
# of encrypted data vs. non-encrypted data using vanilla MySQL queries.
self.prefix = kwargs.pop('prefix', '')
# Allow for model decryption-only, bypassing encryption of data.
# Useful for models that have a sparse amount of data that is required
# to be encrypted.
self.decrypt_only = kwargs.pop('decrypt_only', False)
self._crypter = self._crypter_klass(self.keydir)
        # Ensure the encrypted data does not exceed the max_length
# of the database. Data truncation is a possibility otherwise.
self.enforce_max_length = getattr(settings, 'ENFORCE_MAX_LENGTH', False)
if not self.enforce_max_length:
self.enforce_max_length = kwargs.pop('enforce_max_length', False)
super(EncryptedFieldMixin, self).__init__(*args, **kwargs)
def crypter(self):
return self._crypter
def get_internal_type(self):
return 'TextField'
def to_python(self, value):
if value is None or not isinstance(value, str):
return value
if self.prefix and value.startswith(self.prefix):
value = value[len(self.prefix):]
try:
value = self.crypter().decrypt(value)
# value = value.decode('unicode_escape')
except keyczar.errors.KeyczarError:
pass
except UnicodeEncodeError:
pass
except binascii.Error:
pass
return super(EncryptedFieldMixin, self).to_python(value)
def get_prep_value(self, value):
value = super(EncryptedFieldMixin, self).get_prep_value(value)
if value is None or value == '' or self.decrypt_only:
return value
if isinstance(value, str):
value = value.encode('unicode_escape')
# value = value.encode('ascii')
else:
value = str(value)
return self.prefix + self.crypter().encrypt(value)
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
if self.enforce_max_length:
if (
value
and hasattr(self, 'max_length')
and self.max_length
and len(value) > self.max_length
):
raise ValueError(
'Field {0} max_length={1} encrypted_len={2}'.format(
self.name,
self.max_length,
len(value),
)
)
return value
class EncryptedCharField(EncryptedFieldMixin, models.CharField):
pass
class EncryptedTextField(EncryptedFieldMixin, models.TextField):
pass
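# Illustrative sketch (assumed usage, not part of this package): wiring the
# concrete fields above into a model, using the kwargs described in the
# EncryptedFieldMixin docstring. Field names and sizes are made up.
#
#     class Patient(models.Model):
#         ssn = EncryptedCharField(max_length=512, keyname='special_data',
#                                  prefix='enc:', enforce_max_length=True)
#         notes = EncryptedTextField(decrypt_only=True)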
class EncryptedDateTimeField(EncryptedFieldM
|
dsuch/ConcurrentLogHandler
|
src/portalocker.py
|
Python
|
apache-2.0
| 3,780
| 0.003704
|
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open("somefile", "r+")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write("foo")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
Exceptions:
LockException
Notes:
For the 'nt' platform, this module requires the Python Extensions for Windows.
Be aware that this may not work as expected on Windows 95/98/ME.
History:
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>,
Lowell Alleman <lalleman@mfps.com>,
Rick van Hattem <Rick.van.Hattem@Fawo.nl>
Version: 0.3
URL: https://github.com/WoLpH/portalocker
"""
__all__ = [
"lock",
"unlock",
"LOCK_EX",
"LOCK_SH",
"LOCK_NB",
"LockException",
]
import os
class LockException(Exception):
# Error codes:
LOCK_FAILED = 1
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
else:
raise RuntimeError("PortaLocker only defined for nt and posix platforms")
if os.name == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
            # error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.')
if exc_value[0] == 33:
raise LockException(LockException.LOCK_FAILED, exc_value[2])
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
if exc_value[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
elif os.name == 'posix':
def lock(file, flags):
try:
fcntl.flock(file.fileno(), flags)
except IOError, exc_value:
# The exception code varies on different systems so we'll catch
# every IO error
raise LockException(*exc_value)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
if __name__ == '__main__':
from time import time, strftime, localtime
import sys
import portalocker
log = open('log.txt', "a+")
portalocker.lock(log, portalocker.LOCK_EX)
timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time()))
log.write( timestamp )
print "Wrote lines. Hit enter to release lock."
dummy = sys.stdin.readline()
log.close()
|
hydroshare/hydroshare
|
hs_app_timeseries/receivers.py
|
Python
|
bsd-3-clause
| 16,631
| 0.002465
|
import os
import shutil
import logging
import csv
from dateutil import parser
from django.dispatch import receiver
from hs_core.signals import pre_create_resource, pre_add_files_to_resource, \
pre_delete_file_from_resource, post_add_files_to_resource, post_create_resource, \
pre_metadata_element_create, pre_metadata_element_update
from hs_core.hydroshare import utils, delete_resource_file_only, resource_modified
from hs_app_timeseries.models import TimeSeriesResource, TimeSeriesMetaData
from .forms import SiteValidationForm, VariableValidationForm, MethodValidationForm, \
ProcessingLevelValidationForm, TimeSeriesResultValidationForm, UTCOffSetValidationForm
from hs_file_types.models.timeseries import extract_metadata, validate_odm2_db_file, \
extract_cv_metadata_from_blank_sqlite_file, validate_csv_file, add_blank_sqlite_file
FILE_UPLOAD_ERROR_MESSAGE = "(Uploaded file was not added to the resource)"
@receiver(pre_create_resource, sender=TimeSeriesResource)
def resource_pre_create_handler(sender, **kwargs):
# if needed more actions can be taken here before the TimeSeries resource is created
pass
@receiver(pre_add_files_to_resource, sender=TimeSeriesResource)
def pre_add_files_to_resource_handler(sender, **kwargs):
# file upload is not allowed if the resource already
# has either a sqlite file or a csv file
resource = kwargs['resource']
files = kwargs['files']
validate_files_dict = kwargs['validate_files']
source_names = kwargs['source_names']
if __debug__:
assert(isinstance(source_names, list))
if files or source_names:
if resource.has_sqlite_file or resource.has_csv_file:
validate_files_dict['are_files_valid'] = False
validate_files_dict['message'] = 'Resource already has the necessary content files.'
@receiver(pre_delete_file_from_resource, sender=TimeSeriesResource)
def pre_delete_file_from_resource_handler(sender, **kwargs):
# if any of the content files (sqlite or csv) is deleted then reset the 'is_dirty' attribute
# for all extracted metadata to False
resource = kwargs['resource']
def reset_metadata_elements_is_dirty(elements):
# filter out any non-dirty element
elements = [element for element in elements if element.is_dirty]
for element in elements:
element.is_dirty = False
element.save()
if resource.metadata.is_dirty:
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
# metadata object is_dirty attribute for some reason can't be set using the following
# 2 lines of code
# resource.metadata.is_dirty=False
# resource.metadata.save()
reset_metadata_elements_is_dirty(resource.metadata.sites.all())
reset_metadata_elements_is_dirty(resource.metadata.variables.all())
reset_metadata_elements_is_dirty(resource.metadata.methods.all())
reset_metadata_elements_is_dirty(resource.metadata.processing_levels.all())
reset_metadata_elements_is_dirty(resource.metadata.time_series_results.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_variable_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_variable_names.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_speciations.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_elevation_datums.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_site_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_method_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_units_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_statuses.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_aggregation_statistics.all())
@receiver(post_add_files_to_resource, sender=TimeSeriesResource)
def post_add_files_to_resource_handler(sender, **kwargs):
resource = kwargs['resource']
files = kwargs['files']
validate_files_dict = kwargs['validate_files']
user = kwargs['user']
source_names = kwargs['source_names']
if __debug__:
assert(isinstance(source_names, list))
if files:
file_name = files[0].name
elif source_names:
file_name = os.path.basename(source_names[0])
# extract metadata from the just uploaded file
uploaded_file_to_process = None
uploaded_file_ext = ''
for res_file in resource.files.all():
_, res_file_name, uploaded_file_ext = utils.get_resource_file_name_and_extension(res_file)
if res_file_name == file_name:
uploaded_file_to_process = res_file
break
if uploaded_file_to_process:
if uploaded_file_ext == ".sqlite":
_process_uploaded_sqlite_file(user, resource, uploaded_file_to_process,
validate_files_dict,
delete_existing_metadata=True)
elif uploaded_file_ext == ".csv":
_process_uploaded_csv_file(resource, uploaded_file_to_process, validate_files_dict,
user, delete_existing_metadata=True)
@receiver(post_create_resource, sender=TimeSeriesResource)
def post_create_resource_handler(sender, **kwargs):
resource = kwargs['resource']
validate_files_dict = kwargs['validate_files']
user = kwargs['user']
# extract metadata from the just uploaded file
res_file = resource.files.all().first()
if res_file:
# check if the uploaded file is a sqlite file or csv file
file_ext = utils.get_resource_file_name_and_extension(res_file)[2]
if file_ext == '.sqlite':
# metadata can exist at this point if a timeseries resource is created
# using REST API since the API caller can pass metadata information. Before
# metadata can be extracted from the sqlite file and populated to database, existing
# metadata needs to be deleted.
_process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
delete_existing_metadata=True)
elif file_ext == '.csv':
_process_uploaded_csv_file(resource, res_file, validate_files_dict, user,
delete_existing_metadata=False)
# since we are extracting metadata after resource creation
# metadata xml files need to be regenerated - so need to set the
# dirty bag flags
utils.set_dirty_bag_flag(resource)
def _process_uploaded_csv_file(resource, res_file, validate_files_dict, user,
delete_existing_metadata=True):
# get the csv file from iRODS to a temp directory
fl_obj_name = utils.get_file_from_irods(res_file)
validate_err_message = validate_csv_file(fl_obj_name)
if not validate_err_message:
# first delete relevant existing metadata elements
if delete_existing_metadata:
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
_delete_extracted_metadata(resource)
# delete the sqlite file if it exists
_delete_resource_file(resource, ".sqlite")
# add the blank sqlite file
add_blank_sqlite_file(resource, upload_folder='')
resource_modified(resource, user, overwrite_bag=False)
# populate CV metadata django models from the blank sqlite file
extract_cv_metadata_from_blank_sqlite_file(resource)
else: # file validation failed
# delete the invalid file just uploaded
delete_resource_file_only(resource, res_file)
validate_files_dict['are_files_valid'] = False
validate_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
validate_files_dict['message'] = validate_err_message
# cleanup the temp csv file
if os.path.exists(fl_obj_name):
shutil.rmtree(os.path.dirname(fl_obj_name))
def _process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
delete_existing
|
rplevka/robottelo
|
tests/foreman/api/test_remoteexecution.py
|
Python
|
gpl-3.0
| 3,759
| 0.001862
|
"""Test for Remote Execution
:Requirement: Remoteexecution
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: RemoteExecution
:Assignee: pondrejk
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from nailgun import client
from nailgun.entity_mixins import TaskFailedError
from robottelo.api.utils import wait_for_tasks
CAPSULE_TARGET_VERSION = '6.10.z'
@pytest.mark.tier4
def test_positive_run_capsule_upgrade_playbook(capsule_configured, default_sat):
"""Run Capsule Upgrade playbook against an External Capsule
:id: 9ec6903d-2bb7-46a5-8002-afc74f06d83b
:steps:
1. Create a Capsule VM, add REX key.
2. Run the Capsule Upgrade Playbook.
:expectedresults: Capsule is upgraded successfully
:CaseImportance: Medium
"""
template_id = (
default_sat.api.JobTemplate()
.search(query={'search': 'name="Capsule Upgrade Playbook"'})[0]
.id
)
capsule_configured.add_rex_key(satellite=default_sat)
job = default_sat.api.JobInvocation().run(
synchronous=False,
data={
'job_template_id': template_id,
'inputs': {
'target_version': CAPSULE_TARGET_VERSION,
'whitelist_options': 'repositories-validate,repositories-setup',
},
'targeting_type': 'static_query',
'search_query': f'name = {capsule_configured.hostname}',
},
)
wait_for_tasks(f'resource_type = JobInvocation and resource_id = {job["id"]}')
result = default_sat.api.JobInvocation(id=job['id']).read()
assert result.succeeded == 1
result = default_sat.execute('foreman-maintain health check')
assert result.status == 0
    for line in result.stdout:
assert 'FAIL' not in line
result = default_sat.api.SmartProxy(
id=default_sat.api.SmartProxy(name=default_sat.hostname).search()[0].id
).refresh()
feature_list = [feat['name'] for feat in result['features']]
assert {'Discovery', 'Dynflow', 'Ansible', 'SSH', 'Logs', 'Pulp'}.issubset(feature_list)
@pytest.mark.destructive
def test_negative_run_capsule_upgrade_playbook_on_satellite(default_sat):
"""Run Capsule Upgrade playbook against the Satellite itself
:id: 99462a11-5133-415d-ba64-4354da539a34
:steps:
1. Add REX key to the Satellite server.
2. Run the Capsule Upgrade Playbook.
3. Check the job output for proper failure reason.
:expectedresults: Should fail
:CaseImportance: Medium
"""
sat = default_sat.nailgun_host
template_id = (
default_sat.api.JobTemplate()
.search(query={'search': 'name="Capsule Upgrade Playbook"'})[0]
.id
)
default_sat.add_rex_key(satellite=default_sat)
with pytest.raises(TaskFailedError) as error:
default_sat.api.JobInvocation().run(
data={
'job_template_id': template_id,
'inputs': {
'target_version': CAPSULE_TARGET_VERSION,
'whitelist_options': "repositories-validqqate,repositories-setup",
},
'targeting_type': "static_query",
'search_query': f"name = {sat.name}",
}
)
assert 'A sub task failed' in error.value.args[0]
job = default_sat.api.JobInvocation().search(
query={'search': f'host={sat.name},status=failed,description="Capsule Upgrade Playbook"'}
)[0]
response = client.get(
f'{default_sat.url}/api/job_invocations/{job.id}/hosts/{sat.id}',
auth=(default_sat.username, default_sat.password),
verify=False,
)
assert 'This playbook cannot be executed on a Satellite server.' in response.text
|
philipl/mplayer
|
TOOLS/mphelp_check.py
|
Python
|
gpl-2.0
| 2,073
| 0.002412
|
#!/usr/bin/python
# Tool to compare MPlayer translation files against a base file. Reports
# conflicting definitions, mismatching arguments, extra definitions
# not present in the base file and (optionally) missing definitions.
# Written by Uoti Urpala
import sys
import re
def parse(filename):
r = {}
f = open(filename)
it = iter(f)
cur = ''
for line in it:
line = line.strip()
if not line.startswith('#define'):
while line and line[-1] == '\\':
line = it.next().strip()
continue
try:
_, name, value = line.split(None, 2)
except ValueError:
if name in r:
continue
value = value.strip('"')
while line[-1] == '\\':
line = it.next().strip()
value += line.rstrip('\\').strip('"')
if name in r:
print 'Conflict: ', name
print r[name]
print value
print
r[name] = value
f.close()
return r
def compare(base, other, show_missing=False):
r = re.compile('%[^diouxXeEfFgGaAcspn%]*[diouxXeEfFgGaAcspn%]')
missing = []
for key in base:
if key not in other:
missing.append(key)
continue
if re.findall(r, base[key]) != re.findall(r, other[key]):
print 'Mismatch: ', key
print base[key]
print other[key]
print
del other[key]
if other:
extra = other.keys()
extra.sort()
        print 'Extra: ', ' '.join(extra)
if show_missing and missing:
missing.sort()
print 'Missing: ', ' '.join(missing)
if len(sys.argv) < 3:
print 'Usage:\n'+sys.argv[0]+' [--missing] base_helpfile otherfile1 '\
'[otherfile2 ...]'
sys.exit(1)
i = 1
show_missing = False
if sys.argv[i] in ( '--missing', '-missing' ):
show_missing = True
i = 2
base = parse(sys.argv[i])
for filename in sys.argv[i+1:]:
print '*****', filename
compare(base, parse(filename), show_missing)
print '\n'
|
DedMemez/ODS-August-2017
|
contextlib.py
|
Python
|
apache-2.0
| 2,267
| 0.002647
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: contextlib
import sys
from functools import wraps
from warnings import warn
__all__ = ['contextmanager', 'nested', 'closing']
class GeneratorContextManager(object):
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
return exc is not value
except:
if sys.exc_info()[1] is not value:
raise
return
def contextmanager(func):
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
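# Illustrative sketch (not part of this module): typical use of the decorator
# defined above; code before the yield runs on __enter__, code after it (or in
# the finally clause) runs on __exit__.
#
#     @contextmanager
#     def opened(path):
#         f = open(path)
#         try:
#             yield f
#         finally:
#             f.close()
#
#     with opened('log.txt') as f:
#         data = f.read()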
@contextmanager
def nested(*managers):
warn('With-statements now directly support multiple context managers', DeprecationWarning, 3)
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
raise exc[0], exc[1], exc[2]
return
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
|
Nickito12/stepmania-server
|
test/factories/user_factory.py
|
Python
|
mit
| 1,150
| 0.00087
|
""" User factory """
import factory
from smserver import models
from test.factories import base
from test.factories.room_factory import RoomFactory
class UserFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.User
name = factory.Sequence(lambda n: "User %s" % (n+1))
rank = 1
stepmania_version = "123"
@classmethod
def _after_postgeneration(cls, obj, _create, _results):
obj._room_level = {}
class AdminFactory(UserFactory):
""" Create an Admin user """
rank = 10
class PrivilegeFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.Privilege
level = 1
    room = factory.SubFactory(RoomFactory)
user = factory.SubFactory(UserFactory)
class UserWithRoomFactory(UserFactory):
""" User with a new room """
    room = factory.SubFactory(RoomFactory)
def user_with_room_privilege(level=1, **kwargs):
""" Return a User with privileges for a room """
user = UserWithRoomFactory(**kwargs)
PrivilegeFactory(user=user, room=user.room, level=level)
return user
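# Illustrative usage sketch (assumed test code, not part of these factories):
#
#     user = user_with_room_privilege(level=5)
#     # user comes from UserWithRoomFactory, so user.room is populated, and a
#     # Privilege row now links user and user.room at level 5.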
|
austin987/bandit
|
docs/source/conf.py
|
Python
|
apache-2.0
| 2,480
| 0
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bandit'
copyright = u'2015, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme_options = {}
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
|
rwstauner/run_control
|
python/startup.py
|
Python
|
mit
| 665
| 0.010526
|
# pylint: disable=unused-import, unused-variable, missing-docstring
def _readline():
try:
import readline
except ImportError:
print("Module readline not available.")
else:
import rlcompleter
readline.parse_and_bind("tab: complete")
import os
        histfile = os.path.join(os.environ["HOME"], 'python', '.history')
        try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
del os, histfile
_readline()
del _readline
import sys
sys.ps1 = "\001\033[01;33m\002>>>\001\033[00m\002 "
sys.ps2 = "\001\033[01;33m\002...\001\033[00m\002 "
|
kinglyduck/hackerspace
|
src/hackerspace_online/settings/production.py
|
Python
|
gpl-2.0
| 1,941
| 0.003606
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
#root of project: ...../src
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# This is a non-secret key. A different key is used in the productions settings file.
SECRET_KEY = '8(@^b-s07o7a(*durcp#sx!-8=cnq2-shiq61!7nznn=h$az7n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# ALLOWED_HOSTS = [www.hackerspace.sd72.bc.ca, hackerspace.sd72.bc.ca]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'timberline.hackerspace@gmail.com'
EMAIL_HOST_PASSWORD =""
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images) ####################
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
# Set in production settings for deployment
STATIC_ROOT = "/home/couture/www/hackerspace/static"
# STATIC_ROOT = "/home/90158/www/hackerspace/static"
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_in_project", "static_root"),
# '/var/www/static/',
)
MEDIA_URL = "/media/"
# The absolute path to the directory where collectstatic will collect static files for deployment.
# Set properly in production settings for deployment
MEDIA_ROOT = "/home/couture/www/hackerspace/media"
# MEDIA_ROOT = "/home/90158/www/hackerspace/media"
# END STATIC #######################################
|
iulian787/spack
|
var/spack/repos/builtin/packages/rocblas/package.py
|
Python
|
lgpl-2.1
| 4,299
| 0.002326
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rocblas(CMakePackage):
"""Radeon Open Compute BLAS library"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocBLAS/"
url = "https://github.com/ROCmSoftwarePlatform/rocBLAS/archive/rocm-3.5.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('3.10.0', sha256='9bfd0cf99662192b1ac105ab387531cfa9338ae615db80ed690c6a14d987e0e8')
version('3.9.0', sha256='3ecd2d9fd2be0e1697a191d143a2d447b53a91ae01afb50231d591136ad5e2fe')
version('3.8.0', sha256='568a9da0360349b1b134d74cc67cbb69b43c06eeca7c33b50072cd26cd3d8900')
version('3.7.0', sha256='9425db5f8e8b6f7fb172d09e2a360025b63a4e54414607709efc5acb28819642')
version('3.5.0', sha256='8560fabef7f13e8d67da997de2295399f6ec595edfd77e452978c140d5f936f0')
tensile_architecture = ('all', 'gfx803', 'gfx900', 'gfx906', 'gfx908')
variant('tensile_architecture', default='all', values=tensile_architecture, multi=False)
depends_on('cmake@3:', type='build')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']:
depends_on('rocm-cmake@' + ver, type='build', when='@' + ver)
depends_on('rocm-device-libs@' + ver, type='build', when='@' + ver)
depends_on('hip@' + ver, when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
# used in Tensile
depends_on('rocm-smi@' + ver, type='build', when='@' + ver)
depends_on('llvm-amdgpu@' + ver, type='build', when='@' + ver)
# This is the default library format since 3.7.0
depends_on('msgpack-c@3:', when='@3.7:')
depends_on('python', type='build')
depends_on('py-virtualenv', type='build')
depends_on('perl-file-which', type='build')
depends_on('py-pyyaml', type='build')
depends_on('py-wheel', type='build')
depends_on('py-msgpack', type='build')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='f842a1a4427624eff6cbddb2405c36dec9a210cd',
when='@3.5.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='af71ea890a893e647bf2cf4571a90297d65689ca',
when='@3.7.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='9123205f9b5f95c96ff955695e942d2c3b321cbf',
when='@3.8.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='b68edc65aaeed08c71b2b8622f69f83498b57d7a',
when='@3.9.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='ab44bf46b609b5a40053f310bef2ab7511f726ae',
when='@3.10.0')
# Status: https://github.com/ROCmSoftwarePlatform/Tensile/commit/a488f7dadba34f84b9658ba92ce9ec5a0615a087
# Not yet landed in 3.7.0, nor 3.8.0.
patch('0001-Fix-compilation-error-with-StringRef-to-basic-string.patch', when='@:3.8')
def setup_build_environment(self, env):
env.set('CXX', self.spec['hip'].hipcc)
def cmake_args(self):
arch = self.spec.variants['tensile_architecture'].value
        tensile = join_path(self.stage.source_path, 'Tensile')
args = [
'-Damd_comgr_DIR={0}'.format(self.spec['comgr'].prefix),
'-DBUILD_CLIENTS_TESTS=OFF',
'-DBUILD_CLIENTS_BENCHMARKS=OFF',
'-DBUILD_CLIENTS_SAMPLES=OFF',
'-DRUN_HEADER_TESTING=OFF',
'-DBUILD_WITH_TENSILE=ON',
'-DTensile_TEST_LOCAL_PATH={0}'.format(tensile),
'-DTensile_COMPILER=hipcc',
'-DTensile_ARCHITECTURE={0}'.format(arch),
'-DTensile_LOGIC=asm_full',
'-DTensile_CODE_OBJECT_VERSION=V3',
'-DBUILD_WITH_TENSILE_HOST={0}'.format(
'ON' if '@3.7.0:' in self.spec else 'OFF'
)
]
if '@3.7.0:' in self.spec:
args.append('-DTensile_LIBRARY_FORMAT=msgpack')
return args
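        # Illustrative usage (assumed spec, not part of this package): restrict
        # Tensile to a single GPU architecture instead of the 'all' default:
        #
        #     spack install rocblas@3.10.0 tensile_architecture=gfx906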
|
stormi/tsunami
|
src/primaires/objet/commandes/oedit/__init__.py
|
Python
|
bsd-3-clause
| 3,339
| 0.002402
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'oedit'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.objet.editeurs.oedit.presentation import EdtPresentation
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
class CmdOedit(Commande):
"""Commande 'oedit'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "oedit", "oedit")
self.groupe = "administrateur"
self.schema = "<ident>"
self.nom_categorie = "batisseur"
self.aide_courte = "ouvre l'éditeur d'objet"
self.aide_longue = \
"Cette commande permet d'accéder à l'éditeur d'objet. Elle " \
"prend en paramètre l'identifiant de l'objet (que des " \
"minuscules, des chiffres et le signe |ent|_|ff|). Si l'objet " \
"n'existe pas, il est créé."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
ident_objet = dic_masques["ident"].ident
if ident_objet in type(self).importeur.objet.prototypes:
prototype = type(self).importeur.objet.prototypes[ident_objet]
enveloppe = EnveloppeObjet(EdtPresentation, prototype, "")
contexte = enveloppe.construire(personnage)
personnage.contextes.ajouter(contexte)
contexte.actualiser()
else:
editeur = type(self).importeur.interpreteur.construire_editeur(
"oedit", personnage, ident_objet)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
|
MaximeRaynal/SimpleNote
|
src/SimpleNote/wsgi.py
|
Python
|
mit
| 395
| 0.002532
|
"""
WSGI config for SimpleNote project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SimpleNote.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
|
kirienko/gourmet
|
tests/test_importer.py
|
Python
|
gpl-2.0
| 1,162
| 0.018933
|
import unittest
from gourmet.importers import importer
class TestImporter (unittest.TestCase):
def setUp (self):
self.i = importer.Importer()
def _get_last_rec_ (self):
return self.i.added_recs[-1]
def testRecImport (self):
self.i.start_rec()
attrs = [('title','Foo'),('cuisine','Bar'),('yields',3),('yield_unit','cups')]
for att,val in attrs:
self.i.rec[att] = val
self.i.commit_rec()
rec = self._get_last_rec_()
for att,val in attrs:
self.assertEqual(getattr(rec,att),val)
def testIngredientImport (self):
self.i.start_rec()
self.i.rec['title']='Ingredient Import Test'
self.i.start_ing()
        self.i.add_amt(2)
        self.i.add_unit('cups')
        self.i.add_item('water')
self.i.commit_ing()
self.i.commit_rec()
ings = self.i.rd.get_ings(self._get_last_rec_())
self.assertEqual(len(ings),1)
ing = ings[0]
self.assertEqual(ing.amount,2)
self.assertEqual(ing.unit,'cups')
self.assertEqual(ing.item,'water')
if __name__ == '__main__':
unittest.main()
|
Yelp/occam
|
occam/util.py
|
Python
|
mit
| 715
| 0.004196
|
import json
from dateutil import parser as datetime_parser
from occam.app import get_redis
from occam.runtime import OCCAM_SERVER_CONFIG_KEY
def get_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
return servers.items()
def iterate_servers():
redis = get_redis()
    servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
for server_name, server_location in servers.iteritems():
yield server_name, server_location
def sorted_by_time_element(l, element_getter=None):
if not element_getter:
element_getter = lambda x: x
key_getter = lambda x: datetime_parser.parse(element_getter(x))
return sorted(l, key=key_getter)
|
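A minimal usage sketch for the time-sorting helper defined above; the build records and the timestamp field name are illustrative, not taken from the occam source, and the occam package is assumed to be importable:

from occam.util import sorted_by_time_element

builds = [
    {'name': 'late', 'finished_at': '2015-03-02T12:00:00'},
    {'name': 'early', 'finished_at': '2015-03-01T08:30:00'},
]
# element_getter extracts the timestamp string; dateutil parses it for the sort key
ordered = sorted_by_time_element(builds, element_getter=lambda b: b['finished_at'])
assert [b['name'] for b in ordered] == ['early', 'late']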
wright-group/PyCMDS
|
pycmds/hardware/opas/opas.py
|
Python
|
mit
| 19,921
| 0.001456
|
### import ####################################################################
import os
import time
import pathlib
import shutil
import collections
import appdirs
import toml
import numpy as np
from PySide2 import QtWidgets
import WrightTools as wt
import attune
import pycmds.project.project_globals as g
import pycmds.project.widgets as pw
import pycmds.project.classes as pc
from pycmds.hardware import hardware as hw
from pycmds.hardware.opas.PoyntingCorrection.ZaberCorrectionDevice import ZaberCorrectionDevice
### driver ####################################################################
class Driver(hw.Driver):
def __init__(self, *args, **kwargs):
self.index = kwargs["index"]
self.motor_positions = collections.OrderedDict()
self.homeable = {} # TODO:
self.poynting_type = kwargs.pop("poynting_type", None)
self.poynting_correction = None
hw.Driver.__init__(self, *args, **kwargs)
if not hasattr(self, "motor_names"): # for virtual...
self.motor_names = ["Delay", "Crystal", "Mixer"]
if not hasattr(self, "curve_paths"): # for virtual...
self.curve_paths = collections.OrderedDict()
if not hasattr(self, "interaction_string_combo"): # for virtual...
self.interaction_string_combo = pc.Combo(allowed_values=["sig"])
if self.poynting_type is not None:
self.motor_names += ["Phi", "Theta"] # TODO: Generalize
self.curve = None
# poynting correction
if self.poynting_type == "zaber":
self.poynting_correction = ZaberCorrectionDevice(
kwargs.pop("poynting_port"), kwargs.pop("poynting_indexes")
)
else:
self.poynting_correction = None
self.poynting_type = None
if self.poynting_correction:
self.curve_paths["Poynting"] = pc.Filepath(initial_value=self.poynting_curve_path)
if self.model == "Virtual":
self.load_curve()
def _home_motors(self, motor_names):
raise NotImplementedError
def _load_curve(self, interaction):
if self.model == "Virtual":
colors = np.linspace(400, 10000, 17)
motors = []
motors.append(attune.Dependent(((colors - 500) / 1e4) ** 2, "Delay"))
motors.append(attune.Dependent(-((colors - 90) ** 0.25), "Crystal"))
motors.append(attune.Dependent((colors - 30) ** 0.25, "Mixer"))
name = "curve"
interaction = "sig"
kind = "Virtual"
colors = attune.Setpoints(colors, "Colors", units="nm")
self.curve = attune.Curve(colors, motors, name, interaction, kind)
self.curve.convert(self.native_units)
else:
raise NotImplementedError
def _set_motors(self, motor_destinations):
if self.model == "Virtual":
# Virtual hardware, just set the position directly
for k, v in motor_destinations.items():
self.motor_positions[k].write(v)
else:
raise NotImplementedError
def _update_api(self, interaction):
pass
def _wait_until_still(self, inputs=[]):
while self.is_busy():
time.sleep(
0.1
) # I've experienced hard crashes when wait set to 0.01 - Blaise 2015.12.30
self.get_motor_positions()
self.get_motor_positions()
def get_position(self):
position = self.hardware.destination.read()
self.position.write(position, self.native_units)
return position
def get_motor_positions(self):
pass
    def home_all(self, inputs=[]):
        names = [i for i in self.motor_names if self.homeable.get(i)]
if self.poynting_correction:
self.poynting_correction.home()
for n in self.poynting_correction.motor_names:
                if n in names:
                    names.remove(n)
self._home_motors(names)
def home_motor(self, inputs):
        # TODO: clean up for new inputs behavior
motor_name = inputs[0]
if self.poynting_correction:
if motor_name in self.poynting_correction.motor_names:
self.poynting_correction.home(motor_name)
return
if self.homeable.get(motor_name):
self._home_motors([motor_name])
def initialize(self):
# virtual stuff
if self.model == "Virtual":
self.motor_positions["Delay"] = pc.Number(0.0, display=True)
self.motor_positions["Crystal"] = pc.Number(0.0, display=True)
self.motor_positions["Mixer"] = pc.Number(0.0, display=True)
if self.poynting_correction:
# initialize
self.poynting_correction.initialize(self)
for name in self.poynting_correction.motor_names:
self.homeable[name] = True
number = self.poynting_correction.motor_positions[name]
self.motor_positions[name] = number
self.recorded[self.name + "_" + name] = [number, None, 1.0, name]
# get position
self.load_curve()
self.get_motor_positions()
self.get_position()
hw.Driver.initialize(self)
def load_curve(self, name=None, path=None, update=True):
interaction = self.interaction_string_combo.read()
# update curve_paths
if name is not None:
old_directory = os.path.dirname(str(self.curve_paths[name]))
p = shutil.copy(path, old_directory)
self.curve_paths[name].write(os.path.abspath(p))
# remake own curve object/
curve = self._load_curve(interaction)
if self.poynting_correction:
p = self.curve_paths["Poynting"].read()
self.curve = attune.Curve.read(p, subcurve=curve)
self.curve.kind = "poynting"
self.save_status()
self.curve.convert(self.native_units)
# update limits
self.limits.write(*self.curve.get_limits(), self.native_units)
if update:
self._update_api(interaction)
def set_motor(self, motor_name, destination, wait=True):
if self.poynting_correction:
if motor_name in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(motor_name, destination)
return
self._set_motors({motor_name: destination})
if wait:
self.wait_until_still()
def set_motors(self, motor_names, motor_positions, wait=True):
destinations = {n: p for n, p in zip(motor_names, motor_positions)}
if self.poynting_correction:
for name, pos in zip(motor_names, motor_positions):
if name in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(name, pos)
destinations.pop(name)
self._set_motors(destinations)
if wait:
self.wait_until_still()
def set_position(self, destination):
# coerce destination to be within current tune range
destination = np.clip(destination, *self.curve.get_limits())
# get destinations from curve
motor_destinations = self.curve(destination, self.native_units)
# poynting
if self.poynting_correction:
for m in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(m, motor_destinations.pop(m))
# OPA
self._set_motors(motor_destinations)
time.sleep(0.01)
# finish
self.wait_until_still()
self.get_position()
self.save_status()
def set_position_except(self, destination, exceptions):
"""
set position, except for motors that follow
does not wait until still...
"""
self.hardware.destination.write(destination, self.native_units)
self.position.write(destination, self.native_units)
motor_destinations = self.curve(destination, self.native_units)
for e in exceptions:
motor_destinations.pop(e, None)
if self.poynting_correction:
for m in self.poynti
|
bbsan2k/nzbToMedia
|
libs/rebulk/test/test_match.py
|
Python
|
gpl-3.0
| 20,081
| 0.00239
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unneeded-not
import pytest
import six
from ..match import Match, Matches
from ..pattern import StringPattern, RePattern
from ..formatters import formatters
class TestMatchClass(object):
def test_repr(self):
match1 = Match(1, 3, value="es")
assert repr(match1) == '<es:(1, 3)>'
match2 = Match(0, 4, value="test", private=True, name="abc", tags=['one', 'two'])
assert repr(match2) == '<test:(0, 4)+private+name=abc+tags=[\'one\', \'two\']>'
def test_names(self):
parent = Match(0, 10, name="test")
parent.children.append(Match(0, 10, name="child1", parent=parent))
parent.children.append(Match(0, 10, name="child2", parent=parent))
assert set(parent.names) == set(["child1", "child2"])
def test_equality(self):
match1 = Match(1, 3, value="es")
match2 = Match(1, 3, value="es")
other = object()
assert hash(match1) == hash(match2)
assert hash(match1) != hash(other)
assert match1 == match2
assert not match1 == other
def test_inequality(self):
match1 = Match(0, 2, value="te")
match2 = Match(2, 4, value="st")
match3 = Match(0, 2, value="other")
other = object()
assert hash(match1) != hash(match2)
assert hash(match1) != hash(match3)
assert match1 != other
assert match1 != match2
assert match1 != match3
def test_length(self):
match1 = Match(0, 4, value="test")
match2 = Match(0, 2, value="spanIsUsed")
assert len(match1) == 4
assert len(match2) == 2
def test_compare(self):
match1 = Match(0, 2, value="te")
match2 = Match(2, 4, value="st")
other = object()
assert match1 < match2
assert match1 <= match2
assert match2 > match1
assert match2 >= match1
if six.PY3:
with pytest.raises(TypeError):
match1 < other
with pytest.raises(TypeError):
match1 <= other
with pytest.raises(TypeError):
match1 > other
with pytest.raises(TypeError):
match1 >= other
else:
assert match1 < other
assert match1 <= other
assert not match1 > other
assert not match1 >= other
def test_value(self):
match1 = Match(1, 3)
match1.value = "test"
assert match1.value == "test"
class TestMatchesClass(object):
match1 = Match(0, 2, value="te", name="start")
match2 = Match(2, 3, value="s", tags="tag1")
match3 = Match(3, 4, value="t", tags=["tag1", "tag2"])
match4 = Match(2, 4, value="st", name="end")
def test_tag(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
assert "start" in matches.names
assert "end" in matches.names
assert "tag1" in matches.tags
assert "tag2" in matches.tags
tag1 = matches.tagged("tag1")
assert len(tag1) == 2
assert tag1[0] == self.match2
assert tag1[1] == self.match3
tag2 = matches.tagged("tag2")
assert len(tag2) == 1
assert tag2[0] == self.match3
start = matches.named("start")
assert len(start) == 1
assert start[0] == self.match1
end = matches.named("end")
assert len(end) == 1
assert end[0] == self.match4
def test_base(self):
matches = Matches()
matches.append(self.match1)
assert len(matches) == 1
assert repr(matches) == repr([self.match1])
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
assert len(matches) == 4
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
assert list(matches.range()) == [self.match1, self.match2, self.match4, self.match3]
assert list(matches.range(0)) == [self.match1, self.match2, self.match4, self.match3]
assert list(matches.range(0, 3)) == [self.match1, self.match2, self.match4]
assert list(matches.range(2, 3)) == [self.match2, self.match4]
assert list(matches.range(3, 4)) == [self.match4, self.match3]
matches.remove(self.match1)
assert len(matches) == 3
assert len(matches.starting(0)) == 0
assert len(matches.ending(2)) == 0
matches.clear()
assert len(matches) == 0
assert len(matches.starting(0)) == 0
assert len(matches.starting(2)) == 0
assert len(matches.starting(3)) == 0
assert len(matches.ending(2)) == 0
assert len(matches.ending(3)) == 0
assert len(matches.ending(4)) == 0
def test_get_slices(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
slice_matches = matches[1:3]
assert isinstance(slice_matches, Matches)
assert len(slice_matches) == 2
assert slice_matches[0] == self.match2
assert slice_matches[1] == self.match3
def test_remove_slices(self):
matches = Matches()
matches.append(self.match1)
        matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
del matches[1:3]
assert len(matches) == 2
assert matches[0] == self.match1
assert matches[1] == self.match4
def test_set_slices(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
matches[1:3] = self.match1, self.match4
assert len(matches) == 4
assert matches[0] == self.match1
assert matches[1] == self.match1
assert matches[2] == self.match4
assert matches[3] == self.match4
def test_set_index(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches[1] = self.match4
assert len(matches) == 3
assert matches[0] == self.match1
assert matches[1] == self.match4
assert matches[2] == self.match3
def test_constructor(self):
matches = Matches([self.match1, self.match2, self.match3, self.match4])
assert len(matches) == 4
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
def test_constructor_kwargs(self):
matches = Matches([self.match1, self.match2, self.match3, self.match4], input_string="test")
assert len(matches) == 4
assert matches.input_string == "test"
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
def test_crop(self):
input_string = "abcdefghijklmnopqrstuvwxyz"
match1 = Match(1, 10, input_string=input_string)
match2 = Match(0, 2, input_string=input_string)
match3 = Match(8, 15, input_string=input_string)
|
plamut/superdesk-core
|
tests/io/iptc7901_tests.py
|
Python
|
agpl-3.0
| 2,239
| 0.000447
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.tests import TestCase
from superdesk.io.iptc7901 import Iptc7901FileParser
def fixture(filename):
dirname = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dirname, 'fixtures', filename)
class IptcTestCase(TestCase):
parser = Iptc7901FileParser()
def open(self, filename):
provider = {'name': 'Test'}
        return self.parser.parse_file(fixture(filename), provider)
    def test_open_iptc7901_file(self):
        with self.app.app_context():
item = self.open('IPTC7901.txt')
self.assertEqual('preformatted', item['type'])
self.assertEqual('062', item['ingest_provider_sequence'])
self.assertEqual('i', item['anpa_category'][0]['qcode'])
self.assertEqual(211, item['word_count'])
self.assertEqual('Germany Social Democrats: Coalition talks with Merkel could fail =', item['headline'])
self.assertRegex(item['body_html'], '^\n Berlin')
self.assertEqual('Germany-politics', item['slugline'])
self.assertEqual(4, item['priority'])
self.assertEqual([{'qcode': 'i'}], item['anpa_category'])
self.assertTrue(item['ednote'].find('## Editorial contacts'))
def test_open_iptc7901_file_odd_charset(self):
with self.app.app_context():
item = self.open('IPTC7901_odd_charset.txt')
self.assertTrue(item['body_html'].find('Müller'))
self.assertTrue(item['ednote'].find('## Editorial contacts'))
def test_map_priority(self):
self.assertEqual(1, self.parser.map_priority("1"))
self.assertEqual(2, self.parser.map_priority("2"))
self.assertEqual(3, self.parser.map_priority("3"))
self.assertEqual(5, self.parser.map_priority("5"))
self.assertEqual(6, self.parser.map_priority("eee"))
self.assertEqual(6, self.parser.map_priority(None))
|
NOAA-ORR-ERD/hazpy.unit_conversion
|
hazpy/unit_conversion/lat_long.py
|
Python
|
unlicense
| 11,710
| 0.004953
|
#!/usr/bin/env python
"""
Assorted utilities for manipulating latitude and longitude values
"""
from __future__ import unicode_literals
__version__ = "1.4"
import math, struct
def signbit(value):
"""
Test whether the sign bit of the given floating-point value is
set. If it is set, this generally means the given value is
negative. However, this is not the same as comparing the value
to C{0.0}. For example:
>>> NEGATIVE_ZERO < 0.0
False
since negative zero is numerically equal to positive zero. But
the sign bit of negative zero is indeed set:
>>> signbit(NEGATIVE_ZERO)
True
>>> signbit(0.0)
False
@type value: float
@param value: a Python (double-precision) float value
@rtype: bool
@return: C{True} if the sign bit of C{value} is set;
C{False} if it is not set.
signbit and doubleToRawLongBits
are from Martin Jansche:
http://symptotic.com/mj/code.html (MIT license).
This is required to capture the difference between -0.0 and 0.0, which is
useful if someone wants to convert a latitude or longitude like:
-0.0degrees, 34minutes to 0d34'00"S
"""
return (doubleToRawLongBits(value) >> 63) == 1
def doubleToRawLongBits(value):
"""
@type value: float
@param value: a Python (double-precision) float value
@rtype: long
@return: the IEEE 754 bit representation (64 bits as a long integer)
of the given double-precision floating-point value.
"""
# pack double into 64 bits, then unpack as long int
return struct.unpack(b'Q', struct.pack(b'd', value))[0]
class LatLongConverter:
@classmethod
def ToDecDeg(self, d=0, m=0, s=0, ustring = False, max=180):
"""
DecDegrees = ToDecDeg(d=0, m=0, s=0)
converts degrees, minutes, seconds to decimal degrees (returned as a Float).
"""
if m < 0 or s < 0:
raise ValueError("Minutes and Seconds have to be positive")
if m > 60.0 or s > 60.0:
raise ValueError("Minutes and Seconds have to be between -180 and 180")
if abs(d) > max:
raise ValueError("Degrees have to be between -180 and 180")
if signbit(d):
Sign = -1
d = abs(d)
else:
Sign = 1
deg_has_fract = bool(math.modf(d)[0])
min_has_fract = bool(math.modf(m)[0])
if deg_has_fract and (m != 0.0 or s != 0.0):
raise ValueError("degrees cannot have fraction unless both minutes"
"and seconds are zero")
if min_has_fract and s != 0.0:
raise ValueError("minutes cannot have fraction unless seconds are zero")
DecDegrees = Sign * (d + m/60.0 + s/3600.0)
if ustring:
return u"%.6f\xb0"%(DecDegrees)
else:
return DecDegrees
@classmethod
def ToDegMin(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes
If the optional parameter: "ustring" is True,
a Unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = round((DecDegrees - Degrees + 1e-14) * 60, 10)# add a tiny bit then round to avoid binary rounding issues
if ustring:
if Sign == 1:
return u"%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return u"-%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return (Sign*float(Degrees), DecMinutes) # float to preserve -0.0
@classmethod
def ToDegMinSec(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes, Seconds
If the optional parameter: "ustring" is True,
a unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = (DecDegrees - Degrees + 1e-14) * 60 # add a tiny bit to avoid rounding issues
Minutes = int(DecMinutes)
Seconds = round(((DecMinutes - Minutes) * 60), 10 )
if ustring:
if Sign == 1:
return u"%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return u"-%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return (Sign * float(Degrees), Minutes, Seconds)
## These are classes used in our web apps: ResponseLink, etc.
## They provide a different interface to lat-long format conversion
class Latitude:
"""An object that can interpret a latitude in various formats.
Constructor:
Latitude(deg, min=0.0, sec=0.0, direction=None)
- 'deg' may be between -90.0 and 90.0.
- if 'min' is nonzero, 'deg' cannot have a fractional part.
(This means 5 and 5.0 are acceptable but 5.1 is not.)
- if 'sec' is nonzero, 'deg' and 'min' cannot have fractional parts.
- 'direction' may be a string beginning with 'N' or 'S' (case
insensitive), or None.
- if 'direction' is not None, 'deg' cannot be negative.
Attributes:
.value : a float in decimal degrees. Positive is North; negative is
South. (These apply to zero too; positive zero is North.)
Methods:
.degrees() -> (float, str)
.degrees_minutes() -> (int, float, str)
.degrees_minutes_seconds() -> (int, int, float, str)
The 'str' argument is the direction: "North" or "South".
Example:
>>> lat1 = Latitude(-120.7625)
>>> lat2 = Latitude(-120, 45.7500)
>>> lat3 = Latitude(-120, 45, 45)
>>> lat4 = Latitude(120.7625, direction='South')
>>> lat5 = Latitude(120, 45.7500, direction='S')
>>> lat6 = Latitude(120, 45, 45, direction='south')
>>> (lat1.value == lat2.value == lat3.value == lat4.value ==
... lat5.value == lat6.value)
True
>>> lat1.value
-120.7625
>>> lat1.degrees()
(120.7625, 'South')
>>> lat1.degrees_minutes()
(120, 45.750000000000171, 'South')
>>> lat1.degrees_minutes_seconds()
(120, 45, 45.000000000010232, 'South')
>>> print str(lat1)
Latitude(-120.762500)
"""
negative_direction = "South"
positive_direction = "North"
min = -90.0
max = 90.0
def __init__(self, deg, min=0.0, sec=0.0, direction=None):
ndir = self.negative_direction[0].upper()
pdir = self.positive_direction[0].upper()
if direction:
if deg < 0.0:
msg = "degrees cannot be negative if direction is specified"
raise ValueError(msg)
if direction[0].upper() == pdir:
pass
elif direction[0].upper() == ndir:
deg = -deg
else:
msg = "direction must start with %r or %r" % (pdir, ndir)
raise ValueError(msg)
        self.value = LatLongConverter.ToDecDeg(deg, min, sec, max=self.max)
def direction(self):
if self.value < 0.0:
return self.negative_direction
else:
return self.positive_direction
def degrees(self):
deg = abs(self.value)
return deg, self.direction()
    def degrees_minutes(self):
deg, min = LatLongConverter.ToDegMin(abs(self.value))
return deg, min, self.direction()
def degrees_minutes_seconds(self):
deg, min, sec = LatLongConverter.ToDegMinSec(abs(self.value))
return deg, min, sec, self.direction()
def __repr__(self):
try:
return "%s(%f)" % (self.__class__.__name__, self.value)
except AttributeError:
return "%s(uninitialized)" % self.__class__.__name__
def format(self, style):
"""
format(style)
returns formatt
|
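A minimal usage sketch for the converters above; the values are illustrative and the hazpy package is assumed to be importable:

from hazpy.unit_conversion.lat_long import LatLongConverter, Latitude

LatLongConverter.ToDecDeg(45, 30, 0)    # -> 45.5
LatLongConverter.ToDegMin(-45.5)        # -> (-45.0, 30.0)

lat = Latitude(45, 30, direction='S')   # southern latitudes are stored as negative values
lat.value                               # -> -45.5
lat.degrees_minutes()                   # -> (45.0, 30.0, 'South')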
genome/flow-core
|
flow/util/stats.py
|
Python
|
agpl-3.0
| 685
| 0.008759
|
import getpass
import statsd
import logging
LOG = logging.getLogger(__name__)
def increment_as_user(*label_components):
try:
        statsd.increment(assemble_label(label_components, getpass.getuser()))
statsd.increment(assemble_label(label_components, 'total'))
except:
LOG.exception('failed to increment as user %s', label_components)
def increment(*args, **kwargs):
try:
statsd.increment(*args, **kwargs)
except:
LOG.exception('failed to increment args=%s, kwargs=%s', args, kwargs)
def create_timer(name):
return statsd.StatsdTimer(name)
def assemble_label(rest, tail):
    lc = list(rest) + [tail]
return '.'.join(lc)
|
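A minimal usage sketch for the statsd helpers above; the label components are illustrative and a reachable statsd server is assumed:

from flow.util import stats

# builds a dotted label such as 'flow.shell_command.total'
label = stats.assemble_label(['flow', 'shell_command'], 'total')

# bumps both the per-user counter and the '.total' counter; failures are logged, not raised
stats.increment_as_user('flow', 'shell_command')

# plain guarded passthrough to statsd.increment
stats.increment('flow.shell_command.failures', 1)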
gasman/wagtaildemo
|
wagtaildemo/settings/base.py
|
Python
|
bsd-3-clause
| 6,927
| 0.000866
|
# Django settings for wagtaildemo project.
import os
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
BASE_DIR = PROJECT_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# Default to dummy email backend. Configure dev/production/local backend
# as per https://docs.djangoproject.com/en/dev/topics/email/#email-backends
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'wagtaildemo',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
# Note that with this set to True, Wagtail will fall back on using numeric dates
# in date fields, as opposed to 'friendly' dates like "24 Sep 2013", because
# Python's strptime doesn't support localised month names: https://code.djangoproject.com/ticket/13339
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DATE_FORMAT = 'j F Y'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# ** You would never normally put the SECRET_KEY in a public repository,
# ** however this is a demo app so we're using the default settings.
# ** Don't use this key in any non-demo usage!
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wq21wtjo3@d_qfjvd-#td!%7gfy2updj2z+nev^k$iy%=m4_tr'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
from django.conf import global_settings
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'wagtaildemo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wagtaildemo.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites', # Wagtail uses its own site management logic
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'wagtail.wagtailsites',
'demo',
)
EMAIL_SUBJECT_PREFIX = '[wagtaildemo] '
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
# django-compressor settings
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Auth settings
LOGIN_URL = 'django.contrib.auth.views.login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# WAGTAIL SETTINGS
WAGTAIL_SITE_NAME = 'wagtaildemo'
|
yardencsGitHub/tf_syllable_segmentation_annotation
|
src/tweetynet/network.py
|
Python
|
bsd-3-clause
| 8,302
| 0.00277
|
"""TweetyNet model"""
import torch
from torch import nn
from torch.nn import functional as F
class Conv2dTF(nn.Conv2d):
PADDING_METHODS = ('VALID', 'SAME')
"""Conv2d with padding behavior from Tensorflow
adapted from
https://github.com/mlperf/inference/blob/16a5661eea8f0545e04c86029362e22113c2ec09/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py#L40
as referenced in this issue:
https://github.com/pytorch/pytorch/issues/3867#issuecomment-507025011
used to maintain behavior of original implementation of TweetyNet that used Tensorflow 1.0 low-level API
"""
def __init__(self, *args, **kwargs):
super(Conv2dTF, self).__init__(*args, **kwargs)
padding = kwargs.get("padding", "SAME")
if not isinstance(padding, str):
raise TypeError(f"value for 'padding' argument should be a string, one of: {self.PADDING_METHODS}")
padding = padding.upper()
if padding not in self.PADDING_METHODS:
raise ValueError(
f"value for 'padding' argument must be one of '{self.PADDING_METHODS}' but was: {padding}"
)
self.padding = padding
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(
0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == "VALID":
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=0,
dilation=self.dilation,
groups=self.groups,
)
elif self.padding == "SAME":
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=self.dilation,
groups=self.groups,
)
class TweetyNet(nn.Module):
def __init__(self,
num_classes,
input_shape=(1, 513, 88),
padding='SAME',
conv1_filters=32,
conv1_kernel_size=(5, 5),
conv2_filters=64,
conv2_kernel_size=(5, 5),
pool1_size=(8, 1),
pool1_stride=(8, 1),
pool2_size=(8, 1),
pool2_stride=(8, 1),
hidden_size=None,
rnn_dropout=0.,
num_layers=1,
bidirectional=True,
):
"""initialize TweetyNet model
Parameters
----------
num_classes : int
number of classes to predict, e.g., number of syllable classes in an individual bird's song
input_shape : tuple
with 3 elements corresponding to dimensions of spectrogram windows: (channels, frequency bins, time bins).
i.e. we assume input is a spectrogram and treat it like an image, typically with one channel,
the rows are frequency bins, and the columns are time bins. Default is (1, 513, 88).
padding : str
type of padding to use, one of {"VALID", "SAME"}. Default is "SAME".
conv1_filters : int
Number of filters in first convolutional layer. Default is 32.
conv1_kernel_size : tuple
Size of kernels, i.e. filters, in first convolutional layer. Default is (5, 5).
conv2_filters : int
Number of filters in second convolutional layer. Default is 64.
conv2_kernel_size : tuple
Size of kernels, i.e. filters, in second convolutional layer. Default is (5, 5).
        pool1_size : two element tuple of ints
            Size of sliding window for first max pooling layer. Default is (8, 1).
        pool1_stride : two element tuple of ints
            Step size for sliding window of first max pooling layer. Default is (8, 1).
        pool2_size : two element tuple of ints
            Size of sliding window for second max pooling layer. Default is (8, 1).
        pool2_stride : two element tuple of ints
            Step size for sliding window of second max pooling layer. Default is (8, 1).
hidden_size : int
number of features in the hidden state ``h``. Default is None,
in which case ``hidden_size`` is set to the dimensionality of the
output of the convolutional neural network. This default maintains
the original behavior of the network.
rnn_dropout : float
If non-zero, introduces a Dropout
layer on the outputs of each LSTM layer except the last layer,
with dropout probability equal to dropout. Default: 0
num_layers : int
Number of recurrent layers. Default is 1.
bidirectional : bool
If True, make LSTM bidirectional. Default is True.
"""
super().__init__()
self.num_classes = num_classes
self.input_shape = input_shape
self.cnn = nn.Sequential(
Conv2dTF(in_channels=self.input_shape[0],
out_channels=conv1_filters,
kernel_size=conv1_kernel_size,
padding=padding
),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=pool1_size,
stride=pool1_stride),
Conv2dTF(in_channels=conv1_filters,
out_channels=conv2_filters,
kernel_size=conv2_kernel_size,
padding=padding,
),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=pool2_size,
stride=pool2_stride),
)
# determine number of features in output after stacking channels
# we use the same number of features for hidden states
# note self.num_hidden is also used to reshape output of cnn in self.forward method
batch_shape = tuple((1,) + input_shape)
tmp_tensor = torch.rand(batch_shape)
tmp_out = self.cnn(tmp_tensor)
channels_out, freqbins_out = tmp_out.shape[1], tmp_out.shape[2]
self.rnn_input_size = channels_out * freqbins_out
if hidden_size is None:
self.hidden_size = self.rnn_input_size
else:
self.hidden_size = hidden_size
self.rnn = nn.LSTM(input_size=self.rnn_input_size,
hidden_size=self.hidden_size,
num_layers=num_layers,
dropout=rnn_dropout,
bidirectional=bidirectional)
# for self.fc, in_features = hidden_size * 2 because LSTM is bidirectional
# so we get hidden forward + hidden backward as output
self.fc = nn.Linear(in_features=self.hidden_size * 2, out_features=num_classes)
def forward(self, x):
features = self.cnn(x)
# stack channels, to give tensor shape (batch, rnn_input_size, num time bins)
features = features.view(features.shape[0], self.rnn_input_size, -1)
# switch dimensions for feeding to rnn, to (num time bins, batch size, input size)
features = features.permute(2, 0, 1)
rnn_output, _ = self.rnn(features)
# permute back to (batch, time bins, hidden size) to project features down onto number of classes
rnn_output = rnn_output.permute(1, 0, 2)
logits = self.fc(rnn_output)
# permute yet again so that dimension order is (batc
|
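A minimal shape-check sketch for the network above; it assumes torch is installed, that the module is importable as tweetynet.network, and that the truncated tail of forward() returns the per-time-bin logits:

import torch
from tweetynet.network import TweetyNet

net = TweetyNet(num_classes=10, input_shape=(1, 513, 88))
spect = torch.rand(4, 1, 513, 88)   # a batch of 4 single-channel spectrogram windows
logits = net(spect)                 # one logit vector of length num_classes per time bin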
giulioribe/car-pooling
|
testLock2.py
|
Python
|
gpl-3.0
| 303
| 0
|
"""import portalocke
|
r
with portalocker.Lock('text.txt', timeout=5) as fh:
fh.write("Sono in testLoxk2.py")
"""
from lockfile import LockFile
lock = LockFile('text.txt')
with lock:
    print lock.path, 'is locked.'
with open('text.txt', "a") as file:
file.write("Sono in testLock2.py")
|
leonth/private-configs
|
sublime-text-3/Packages/SublimePythonIDE/server/decorators.py
|
Python
|
mit
| 598
| 0.001672
|
# Copyright (c) 2013 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module:: decorators
:platform: Unix, Windows
:synopsis: Decorators for SublimePython plugin
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import os
import functools
def debug(f):
    @functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
import traceback
            with open(os.path.expanduser("~/trace"), "w") as fl:
traceback.print_exc(file=fl)
return wrapped
|
saukrIppl/seahub
|
tests/ui/driver.py
|
Python
|
apache-2.0
| 2,783
| 0.001078
|
import os
import urlparse
import requests
import splinter
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from tests.common.utils import urljoin
class Browser(object):
'''Drives the browser in the functional test'''
def __init__(self, start_url):
        imp = os.environ.get('WEBDRIVER', 'firefox')
if imp in ('firefox', 'ff'):
driver = 'firefox'
else:
driver = 'phantomjs'
self.b = splinter.Browser(driver)
self.d = self.b.driver
self.d.set_window_size(1400, 1000)
self.start_url = start_url
def _el(self, selector):
return self.b.find_by_css(selector).first
@property
def title(self):
return self.b.title
@property
def path(self):
return urlparse.urlparse(self.b.url).path
def visit(self, url):
if not url.startswith('http'):
url = urljoin(self.start_url, url)
self.b.visit(url)
def gohome(self):
self.b.visit(self.start_url)
def click_link_by_text(self, text):
self.b.find_link_by_text(text).first.click()
def click_link_by_title(self, title):
self.b.find_by_xpath('//a[@title="%s"]' % title).first.click()
def find_link_by_text(self, text):
        return self.b.find_link_by_text(text).first
def element_text(self, selector):
return self._el(selector).text
def element_attr(self, selector, name):
return self._el(selector)._element.get_attribute(name)
def click(self, selector):
self._el(selector).click()
def fill_form(self, form_kvs):
self.b.fill_form(form_kvs)
def find_by_name(self, name):
return self.b.find_by_name(name)
def submit(self, form_sel):
self._el(form_sel)._element.submit()
def submit_by_input_name(self, name):
self.b.find_by_name(name).first._element.submit()
def fill(self, name, value):
self.b.fill(name, value)
def fill_input_by_label(self, label, value):
# TODO: implement this, and use it to locate inputs in tests, instead
# of locating inputs by css selector. This is better for blackbox testing.
pass
def click_btn_with_text(self, text):
# TODO: same as fill_input_by_label
pass
def quit(self):
self.b.quit()
def wait_for_element(self, selector, timeout):
wait = WebDriverWait(self.d, timeout)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
def get_file_content(self, url):
sessionid = self.d.get_cookie('sessionid')['value']
return requests.get(url, cookies={'sessionid': sessionid}).text
|
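A minimal usage sketch for the test driver above; the URL, form field names, and CSS selector are illustrative, and splinter plus a webdriver are assumed to be installed:

from tests.ui.driver import Browser

b = Browser('http://localhost:8000')
b.visit('/accounts/login/')
b.fill('username', 'admin@example.com')
b.fill('password', 'secret')
b.submit_by_input_name('password')
b.wait_for_element('#my-own-repos', timeout=10)
b.quit()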
nicolashainaux/mathmakerlib
|
tests/00_main/01_exceptions_test.py
|
Python
|
gpl-3.0
| 2,289
| 0
|
# -*- coding: utf-8 -*-
# Mathmaker Lib offers lualatex-printable mathematical objects.
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker Lib.
# Mathmaker Lib is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker Lib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker Lib; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmakerlib.calculus import Number
from mathmakerlib.exceptions import MathmakerLibError, StopCalculation
from mathmakerlib.exceptions import ZeroBipoint, ZeroVector
from mathmakerlib.exceptions import ZeroLengthLineSegment
def test_MathmakerLibError():
"""Check the main mathmakerlib exception."""
with pytest.raises(MathmakerLibError) as excinfo:
raise MathmakerLibError
assert str(excinfo.value) == 'An error occured in Mathmaker Lib'
def test_StopCalculation():
"""Check StopCalculation exception."""
with pytest.raises(StopCalculation) as excinfo:
raise StopCalculation(Number('7.6'))
assert str(excinfo.value) == 'No further calculation can be done on ' \
'Number(\'7.6\').'
def test_ZeroBipoint():
"""Check ZeroBipoint exception."""
with pytest.raises(ZeroBipoint) as excinfo:
raise ZeroBipoint
assert str(excinfo.value) == 'Abusive use of a zero Bipoint.'
def test_ZeroVector():
"""Check ZeroVector exception."""
with pytest.raises(ZeroVector) as excinfo:
raise ZeroVector
assert str(excinfo.value) == 'Abusive use of a zero Vector.'
def test_ZeroLengthLineSegment():
"""Check ZeroLengthLineSegment exception."""
with pytest.raises(ZeroLengthLineSegment) as excinfo:
raise ZeroLengthLineSegment
assert str(excinfo.value) == 'Abusive use of a zero-length LineSegment.'
|
cprakashagr/PythonClass
|
src/scraper/scraper/items.py
|
Python
|
mit
| 286
| 0
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScraperItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
SUSE/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/virtual_machine_image.py
|
Python
|
mit
| 2,245
| 0.001782
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .virtual_machine_image_resource import VirtualMachineImageResource
class VirtualMachineImage(VirtualMachineImageResource):
"""Describes a Virtual Machine Image.
:param id: Resource Id
:type id: str
:param name: The name of the resource.
:type name: str
:param location: The supported Azure location of the resource.
:type location: str
:param tags: The tags attached to the resource.
:type tags: dict
:param plan:
:type plan: :class:`PurchasePlan
<azure.mgmt.compute.compute.v2015_06_15.models.PurchasePlan>`
:param os_disk_image:
:type os_disk_image: :class:`OSDiskImage
<azure.mgmt.compute.compute.v2015_06_15.models.OSDiskImage>`
:param data_disk_images:
:type data_disk_images: list of :class:`DataDiskImage
<azure.mgmt.compute.compute.v2015_06_15.models.DataDiskImage>`
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
}
def __init__(self, name, location, id=None, tags=None, plan=None, os_disk_image=None, data_disk_images=None):
super(VirtualMachineImage, self).__init__(id=id, name=name, location=location, tags=tags)
self.plan = plan
self.os_disk_image = os_disk_image
self.data_disk_images = data_disk_images
|
simonzhangsm/voltdb
|
tools/voter.d/voter.py
|
Python
|
agpl-3.0
| 2,792
| 0.013968
|
# This file is part of VoltDB.
# Copyright (C) 2008-2018 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# All the commands supported by the Voter application.
import os
@VOLT.Command(description = 'Build the Voter application and catalog.',
              options = VOLT.BooleanOption('-C', '--conditional', 'conditional',
'only build when the catalog file is missing'))
def build(runner):
if not runner.opts.conditional or not os.path.exists('voter.jar'):
runner.java.compile('obj', 'src/voter/*.java', 'src/voter/procedures/*.java')
runner.call('volt.compile', '-c', 'obj', '-o', 'voter.jar', 'ddl.sql')
@VOLT.Command(description = 'Clean the Voter build output.')
def clean(runner):
runner.shell('rm', '-rfv', 'obj', 'debugoutput', 'voter.jar', 'voltdbroot')
@VOLT.Server('create',
description = 'Start the Voter VoltDB server.',
command_arguments = 'voter.jar',
classpath = 'obj')
def server(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.AsyncBenchmark', classpath = 'obj',
description = 'Run the Voter asynchronous benchmark.')
def async(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SyncBenchmark', classpath = 'obj',
description = 'Run the Voter synchronous benchmark.')
def sync(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.JDBCBenchmark', classpath = 'obj',
description = 'Run the Voter JDBC benchmark.')
def jdbc(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SimpleBenchmark', classpath = 'obj',
description = 'Run the Voter simple benchmark.')
def simple(runner):
runner.call('build', '-C')
runner.go()
|
bsipocz/ginga
|
ginga/aggw/AggHelp.py
|
Python
|
bsd-3-clause
| 1,635
| 0.005505
|
#
# AggHelp.py -- help classes for the Agg drawing
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import aggdraw as agg
from ginga import colors
class AggContext(object):
def __init__(self, canvas):
self.canvas = canvas
def set_canvas(self, canvas):
self.canvas = canvas
def get_color(self, color):
if isinstance(color, str):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
return (int(r*255), int(g*255), int(b*255))
def get_pen(self, color, linewidth=1):
# if hasattr(self, 'linestyle'):
# if self.linestyle == 'dash':
# cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
p = agg.Pen(self.get_color(color), width=linewidth)
return p
def get_brush(self, color):
p = agg.Brush(self.get_color(color))
return p
def get_font(self, name, size, color):
color = self.get_color(color)
# TODO: what kind of lookup can we use for this?
filename = '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
f = agg.Font(color, filename, size=size)
return f
def text_extents(self, text, font):
wd, ht = self.canvas.textsize(text, font)
return wd, ht
#END
|
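A minimal usage sketch for the Agg helpers above; it assumes the aggdraw package is installed, and the canvas size and colors are illustrative:

import aggdraw as agg
from ginga.aggw.AggHelp import AggContext

canvas = agg.Draw("RGB", (200, 100), "white")
ctx = AggContext(canvas)
pen = ctx.get_pen('blue', linewidth=2)
brush = ctx.get_brush((1.0, 0.5, 0.0))   # float RGB tuple scaled to 8-bit RGB
canvas.rectangle((10, 10, 190, 90), pen, brush)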
eswartz/RenderPipeline
|
rpcore/pynative/shadow_atlas.py
|
Python
|
mit
| 3,799
| 0.001316
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function, division
from rplibs.six.moves import range # pylint: disable=import-error
from panda3d.core import LVecBase4i, LVecBase4
class ShadowAtlas(object):
""" Please refer to the native C++ implementation for docstrings and comments.
    This is just the python implementation, which does not contain documentation! """
def __init__(self, size, tile_size=32):
self._size = size
self._tile_size = tile_size
self._num_used_tiles = 0
self.init_tiles()
def init_tiles(self):
self._num_tiles = self._size // self._tile_size
def row():
return [False for i in range(self._num_tiles)] # pylint: disable=unused-variable
self._flags = [row() for j in range(self._num_tiles)] # pylint: disable=unused-variable
def get_num_used_tiles(self):
return self._num_used_tiles
num_used_tiles = property(get_num_used_tiles)
def get_coverage(self):
return self._num_used_tiles / float(self._num_tiles ** 2)
coverage = property(get_coverage)
def reserve_region(self, x, y, w, h):
self._num_used_tiles += w * h
for x_offset in range(w):
for y_offset in range(h):
self._flags[x + x_offset][y + y_offset] = True
def find_and_reserve_region(self, tile_width, tile_height):
for x in range(self._num_tiles - tile_height + 1):
for y in range(self._num_tiles - tile_width + 1):
if self.region_is_free(x, y, tile_width, tile_height):
self.reserve_region(x, y, tile_width, tile_height)
return LVecBase4i(x, y, tile_width, tile_height)
print("Failed to find a free region of size", tile_width, "x", tile_height)
return LVecBase4i(-1)
def free_region(self, region):
self._num_used_tiles -= region.z * region.w
for x in range(region.z):
for y in range(region.w):
self._flags[region.x + x][region.y + y] = False
def get_tile_size(self):
return self._tile_size
def region_is_free(self, x, y, w, h):
for x_offset in range(w):
for y_offset in range(h):
if self._flags[x + x_offset][y + y_offset]:
return False
return True
def get_required_tiles(self, resolution):
if resolution % self._tile_size != 0:
print("ShadowAtlas: Invalid atlas resolution!")
return
return resolution // self._tile_size
def region_to_uv(self, region):
flt = LVecBase4(region.x, region.y, region.z, region.w)
return flt * (self._tile_size / self._size)
|
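A minimal usage sketch for the atlas above; panda3d is assumed to be available for LVecBase4i, and the resolutions are illustrative:

from rpcore.pynative.shadow_atlas import ShadowAtlas

atlas = ShadowAtlas(size=4096, tile_size=32)
tiles = atlas.get_required_tiles(512)                 # a 512 px shadow map needs 16 tiles per side
region = atlas.find_and_reserve_region(tiles, tiles)  # LVecBase4i(x, y, w, h) in tile units
uv = atlas.region_to_uv(region)                       # normalized coordinates within the atlas
print(atlas.coverage)                                 # fraction of tiles currently in use
atlas.free_region(region)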
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/bin/checker.py
|
Python
|
apache-2.0
| 749
| 0
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from onnx import load, checker, NodeProto
def check_model(): # type: () -> None
parser = argparse.ArgumentParser('check-model')
parser.add_argument('model_pb', type=argparse.FileType('rb'))
args = parser.parse_args()
model = load(args.model_pb)
checker.check_model(model)
def check_node(): # type: () -> None
parser = argparse.ArgumentParser('check-node')
parser.add_argument('node_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()
node = NodeProto()
node.ParseFromString(args.node_pb.read())
checker.check_node(node)
|
CSC301H-Fall2013/ElectionSimulation
|
Code/ElectionSimulationInstaller/ElectionSimulation/manage.py
|
Python
|
mit
| 261
| 0.003831
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ElectionSimulation.settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
skosukhin/spack
|
var/spack/repos/builtin/packages/cppunit/package.py
|
Python
|
lgpl-2.1
| 1,545
| 0
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cppunit(AutotoolsPackage):
"""Obsolete Unit testing framework for C++"""
homepage = "https://wiki.freedesktop.org/www/Software/cppunit/"
url = "http://dev-www.libreoffice.org/src/cppunit-1.13.2.tar.gz"
version('1.13.2', '0eaf8bb1dcf4d16b12bec30d0732370390d35e6f')
|
bitlinker/ArduWeather
|
Software/NarodmonDaemon/OregonSensor.py
|
Python
|
mit
| 2,140
| 0.001402
|
# Copyright (c) 2015 bitlinker@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from Sensor import Sensor
from SensorValue import SensorValue
# Oregon weather station sensor class
class OregonSensor(Sensor):
SENSOR_TYPE_THN132N = 'THN132N'
SENSOR_TYPE_THGN132N = 'THGN132N'
VALUE_BATTERY = 'B'
__type = None
__battery = None
__id = None
__channel = None
def __init__(self, type, id, channel, batteryHigh):
        Sensor.__init__(self, type)
self.__type = type
self.__id = id
self.__channel = channel
self.__battery = batteryHigh
def getType(self):
return self.__type
def getBatteryHigh(self):
return self.__battery
def getId(self):
return self.__id
def getChannel(self):
return self.__channel
    def getUUID(self):
return self.getName() + self.__id + str(self.__channel)
def getValuesList(self):
result = Sensor.getValuesList(self)
if (self.__battery):
result.append(SensorValue(self.getUUID(), self.VALUE_BATTERY, self.__battery))
return result
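# A minimal usage sketch (not part of the original file); the id and channel
# values below are placeholder assumptions, and the Sensor base class is
# defined in the separate Sensor module imported above.
if __name__ == '__main__':
    sensor = OregonSensor(OregonSensor.SENSOR_TYPE_THN132N, '2D', 1, True)
    print(sensor.getType(), sensor.getId(), sensor.getChannel(), sensor.getBatteryHigh())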
|
gdefias/StudyC
|
VS/test_CcallPY/Test_ccallpy/helloWorld.py
|
Python
|
gpl-2.0
| 153
| 0.045752
|
#coding=utf8
def hello(instr):
bufstr = " helloWorld!"
return (instr + bufstr), 123
if __name__ == "__main__":
k = "yzh"
    print hello(k)
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/external/org_mozilla_bleach/bleach/version.py
|
Python
|
bsd-2-clause
| 136
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
VERSION = (1, 5, 0)
__version__ = '.'.join([str(n) for n in VERSION])
|
codepython/CollectorCity-Market-Place
|
stores/apps/sell/forms.py
|
Python
|
apache-2.0
| 1,696
| 0.014741
|
import re
from django import forms
from sell.models import ShippingData
from django.contrib.localflavor.us.forms import USStateSelect, USZipCodeField
class ShippingDataForm(forms.ModelForm):
state = forms.CharField(widget=USStateSelect)
    save_shipping_info = forms.BooleanField(label="Save Shipping Information", widget=forms.CheckboxInput(), required=False)
class Meta:
model = ShippingData
def clean_zip(self):
zip = self.cleaned_data.get("zip", "")
if zip.strip() == "": raise forms.ValidationError("Zip is a required field.")
if not (re.match("[0-9]{5}(-[0-9]{4})?$", zip)): raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
return zip
def clean(self):
first_name = self.cleaned_data.get("first_name", "")
last_name = self.cleaned_data.get("last_name", "")
country = self.cleaned_data.get("country", "")
street = self.cleaned_data.get("street_address", "")
city = self.cleaned_data.get("city", "")
if first_name.strip() == "": raise forms.ValidationError("First name is a required field.")
        if last_name.strip() == "": raise forms.ValidationError("Last name is a required field.")
if street.strip() == "": raise forms.ValidationError("Street is a required field.")
if city.strip() == "": raise forms.ValidationError("City is a required field.")
if country.strip() == "": raise forms.ValidationError("Country is a required field.")
return self.cleaned_data
def save_shipping(self):
return self.cleaned_data.get("save_shipping_info", False)
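# A minimal usage sketch (not part of the original file); the field names other
# than state and zip are assumptions about the ShippingData model behind this
# ModelForm, and the values are placeholders.
def example_validation():
    form = ShippingDataForm(data={
        'first_name': 'Jane', 'last_name': 'Doe', 'country': 'US',
        'street_address': '1 Main St', 'city': 'Springfield',
        'state': 'IL', 'zip': '62701', 'save_shipping_info': True,
    })
    if form.is_valid():
        print(form.save_shipping())
    else:
        print(form.errors)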
|
blesscat/flux_line_bot
|
fluxclient/scanner/scan_settings.py
|
Python
|
agpl-3.0
| 1,386
| 0
|
#!/usr/bin/env python3
from math import pi, atan
class ScanSetting(object):
"""docstring for ScanSetting"""
def __init__(self):
        super(ScanSetting, self).__init__()
# for scan
self.scan_step = 400 # steps
self.theta_a = pi / 6 # radius between center and laser
self.img_width = 640
self.img_height = 480
self.sensorWidth = 3.67
self.sensorHeight = 2.74 + 0.08
self.focalLength = 3.6
# ######### mockup 2, measure by solidwork###
self.cab_m = self.img_width / 2
self.cab_l = self.img_width / 2
self.cab_r = self.img_width / 2
self.cameraX = 0.0
self.cameraY = 22.28 + 8
self.cameraZ = -174.70
self.laserX_L = -53.61
self.laserY_L = 31.62
self.laserZ_L = -76.47
self.laserX_R = 53.61
self.laserY_R = 31.62
self.laserZ_R = -76.47
self.theta_a = atan(self.laserX_L / self.laserZ_L)
self.MAXLaserRange = 65
self.LaserRangeMergeDistance = 65
self.MINLaserRange = 3
self.MagnitudeThreshold = 3
self.LLaserAdjustment = 0
self.RLaserAdjustment = 0
# for modeling
self.NoiseNeighbors = 50
self.NeighborhoodDistance = 10
self.SegmentationDistance = 2
self.CloseBottom = -1000
self.CloseTop = 1000
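# A minimal usage sketch (not part of the original file): instantiate the
# settings object and read a few of the defaults assigned above.
if __name__ == '__main__':
    setting = ScanSetting()
    print(setting.scan_step, setting.theta_a, setting.MAXLaserRange)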
|
leveille/blog.v1
|
wurdig/controllers/js.py
|
Python
|
mit
| 1,892
| 0.006342
|
import logging
import hashlib
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to, etag_cache
from pylons.decorators import jsonify
from pylons.i18n.translation import _
from wurdig.lib.base import BaseController, render
log = logging.getLogger(__name__)
class JsController(BaseController):
@jsonify
def _json(self):
translations = {
'Are you positive you want to do that?': _('Are you positive '
'you want to do that?'),
'The item has successfully been deleted.': _('The item has '
'successfully been deleted.'),
'Disapprove': _('Disapprove'),
'The item has successfully been approved.': _('The item has '
'successfully been approved.'),
'Approve': _('Approve'),
'The item has successfully been disapproved.': _('The item has successfully '
'been disapproved.'),
'Your+request+has+been+completed+successfully': _('Your+request+has+been+'
'completed+successfully'),
'An unexpected error has occurred.': _('An unexpected error has occurred.'),
'Enter key word(s)': _('Enter key word(s)')
}
return translations
def translations(self):
json_string = "if(!this.WURDIG) {var WURDIG = {};}WURDIG.translate = %s" % self._json()
etag_cache(key=hashlib.md5(json_string).hexdigest())
response.content_type = 'application/x-javascript; charset=utf-8'
response.cache_control = 'max-age=2592000'
response.pragma = ''
return json_string
|
smartczm/python-learn
|
Old-day01-10/s13-day12/pub-sub/publish02.py
|
Python
|
gpl-2.0
| 289
| 0.003745
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# Author: ChenLiang
import channel02
obj = channel02.RedisHelper()
while True:
    inp = input('>> ')
if inp == '':
        print("Input is empty, please enter again...")
continue
else:
obj.public(inp, 'fm103.7')
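# A hedged sketch (not part of the original file): the RedisHelper.public call
# above conceptually maps onto the real redis-py publish API, roughly:
#
#     import redis
#     conn = redis.Redis(host='localhost', port=6379)
#     conn.publish('fm103.7', inp)
#
# Host, port and the helper's internals are assumptions; the channel02 module
# itself is not shown here.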
|
franklingu/leetcode-solutions
|
questions/1-bit-and-2-bit-characters/Solution.py
|
Python
|
mit
| 1,231
| 0.004874
|
"""
We have two special characters. The first character can be represented by one bit 0. The second character can be represented by two bits (10 or 11).
Now given a string represented by several bits. Return whether the last character must be a one-bit character or not. The given string will always end with a zero.
Example 1:
Input:
bits = [1, 0, 0]
Output: True
Explanation:
The only way to decode it is two-bit character and one-bit character. So the last character is one-bit character.
Example 2:
Input:
bits = [1, 1, 1, 0]
Output: False
Explanation:
The only way to decode it is two-bit character and two-bit character. So the last character is NOT one-bit character.
Note:
1 <= len(bits) <= 1000.
bits[i] is always 0 or 1.
"""
class Solution(object):
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
skip_next, curr = False, None
for i in bits:
if skip_next:
skip_next = False
curr = 2
continue
if i == 1:
skip_next = True
curr = 2
else:
skip_next = False
curr = 1
return curr == 1
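# A minimal usage sketch (not part of the original file), mirroring the two
# examples from the problem statement above.
if __name__ == '__main__':
    solver = Solution()
    print(solver.isOneBitCharacter([1, 0, 0]))     # expected: True
    print(solver.isOneBitCharacter([1, 1, 1, 0]))  # expected: False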
|
bullxpfs/lustre-shine
|
tests/Configuration/ConfigFileSystemTest.py
|
Python
|
gpl-2.0
| 21,146
| 0.003168
|
#!/usr/bin/env python
# Shine.Configuration.FileSystem class
# Copyright (C) 2009-2017 CEA
"""Unit test for Shine.Configuration.FileSystem"""
import unittest
import textwrap
import time
from Utils import makeTempFile, setup_tempdirs, clean_tempdirs
from Shine.Configuration.FileSystem import FileSystem, ModelFileIOError, ConfigDeviceNotFoundError
from Shine.Configuration.Exceptions import ConfigException, ConfigInvalidFileSystem
from Shine.Configuration.TargetDevice import TargetDevice
from Shine.Configuration.Backend.Backend import Backend
class FileSystemTest(unittest.TestCase):
def setUp(self):
self._fs = None
self._testfile = None
setup_tempdirs()
def tearDown(self):
# Remove file from cache
if self._fs:
self._fs.unregister()
# Delete the temp cache directory
clean_tempdirs()
def makeConfFileSystem(self, text):
"""
Create a temporary file instance and returns a FileSystem with it.
"""
self._testfile = makeTempFile(text)
fsconf = FileSystem.create_from_model(self._testfile.name)
return fsconf
def testLoadFile(self):
"""create a FileSystem from model example.lmf"""
fs = FileSystem(filename="../conf/models/example.lmf")
self.assertEqual(len(fs.model), 15)
def test_missing_config_file(self):
"""test missing config file detection"""
self.assertRaises(ModelFileIOError, FileSystem, filename="/bad/file")
def testMGSOnly(self):
"""filesystem with only a MGS"""
self._fs = self.makeConfFileSystem("""
fs_name: mgs
nid_map: nodes=foo1 nids=foo1@tcp
mgt: node=foo1 dev=/dev/dummy
""")
self.assertEqual(len(self._fs.model), 3)
def testRouterOnly(self):
"""filesystem with only routers"""
self._fs = self.makeConfFileSystem("""
fs_name: router
nid_map: nodes=foo1 nids=foo1@tcp
router: node=foo1
""")
self.assertEqual(len(self._fs.model), 3)
def testClientOnly(self):
"""filesystem with only clients"""
self._fs = self.makeConfFileSystem("""
fs_name: clients
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp
mgt: node=foo1 dev=/dev/dummy
client: node=foo[2-3]
""")
self.assertEqual(len(self._fs.model), 4)
def testMDTnoMGT(self):
"""filesystem with a MDT and no MGT"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: mdtnomgt
nid_map: nodes=foo1 nids=foo1@tcp
mdt: node=foo1 dev=/dev/dummy
""")
def testOSTnoMGT(self):
"""filesystem with OSTs and no MGT"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: ostnomgt
nid_map: nodes=foo[1,2] nids=foo[1,2]@tcp
ost: node=foo1 dev=/dev/dummy
ost: node=foo2 dev=/dev/dummy
""")
def testMGTandMDTnoOST(self):
"""filesystem with both MGT and MDT and no OST"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: example
nid_map: nodes=foo1 nids=foo1@tcp
mgt: node=foo1 dev=/dev/dummy2
mdt: node=foo1 dev=/dev/dummy1
""")
def testMultipleNidMap(self):
"""filesystem with complex nid setup"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
nid_map: nodes=foo[1-2] nids=foo[1-2]-bone@tcp1
mgt: node=foo1 ha_node=foo2
""")
self.assertEqual(len(self._fs.model), 3)
self.assertEqual(self._fs.get_nid('foo1'), ['foo1@tcp0', 'foo1-bone@tcp1'])
self.assertEqual(self._fs.get_nid('foo2'), ['foo2@tcp0', 'foo2-bone@tcp1'])
def test_unbalanced_nid_map(self):
"""filesystem with nids with several ranges."""
self._fs = self.makeConfFileSystem("""
fs_name: nids
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp
nid_map: nodes=bar[1-3] nids=bar[1-3]@tcp
""")
self.assertEqual(self._fs.get_nid('foo1'), ['foo1@tcp'])
self.assertEqual(self._fs.get_nid('foo2'), ['foo2@tcp'])
self.assertEqual(self._fs.get_nid('bar1'), ['bar1@tcp'])
self.assertEqual(self._fs.get_nid('bar2'), ['bar2@tcp'])
self.assertEqual(self._fs.get_nid('bar3'), ['bar3@tcp'])
    def test_big_nid_map_scalable(self):
        """filesystem with nids with several ranges."""
before = time.time()
self._fs = self.makeConfFileSystem("""
fs_name: nids
nid_map: nodes=foo[1-9999] nids=bar[1-9999]@tcp
""")
elapsed = time.time() - before
self.assertTrue(elapsed < 2, "%.2fs exceeds 2s threshold" % elapsed)
self.assertEqual(len(self._fs.nid_map), 9999)
def testNoIndexDefined(self):
"""filesystem with no index set"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2
ost: node=foo1
""")
self.assertEqual(len(self._fs.get('ost')), 2)
self.assertEqual(self._fs.get('ost')[0].get('node'), 'foo2')
self.assertEqual(self._fs.get('ost')[0].get('index'), 0)
self.assertEqual(self._fs.get('ost')[1].get('node'), 'foo1')
self.assertEqual(self._fs.get('ost')[1].get('index'), 1)
def testSomeIndexedDefined(self):
"""filesystem with not all indexes set"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2
ost: node=foo1 index=0
""")
self.assertEqual(len(self._fs.get('ost')), 2)
self.assertEqual(self._fs.get('ost')[0].get('node'), 'foo2')
self.assertEqual(self._fs.get('ost')[0].get('index'), 1)
self.assertEqual(self._fs.get('ost')[1].get('node'), 'foo1')
self.assertEqual(self._fs.get('ost')[1].get('index'), 0)
def testSameIndexedDefined(self):
"""filesystem with same index used twice"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2 index=0
ost: node=foo1 index=0
""")
def make_fs_with_backend(self, backend, text):
"""
Create a FileSystem instance from text with a specific backend
instance.
"""
self._testfile = makeTempFile(text)
fs = FileSystem(self._testfile.name)
fs.backend = backend
fs.setup_target_devices()
return fs
def test_match_device_simple_ha_node(self):
"""test target.match_device() with a simple ha_node"""
# Dummy backend
class DummyBackend(Backend):
def start(self):
pass
def get_target_devices(self, target, fs_name=None, update_mode=None):
return [TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo2']}),
TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo3']})]
# Test with 1 matching ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1 ha_node=foo2
""")
self.assertEqual(len(fs.get('mgt')), 1)
# Test with 1 matching ha_node (bis)
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1 ha_node=foo3
""")
self.assertEqual(len(fs.get('mgt')), 1)
# Test without ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1
""")
fs.setup_target_devices()
# Test with no matching ha_node
self.assertRaises(ConfigDeviceNotFoundError, self.make_fs_with_backend,
DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-4] nids=foo[1-4]@tcp0
mgt: node=foo1 ha_node=foo4
""")
def test_match_device_multiple_ha_node(self):
"""test target.match_device() with a several ha_node"""
# Dummy backend
class DummyBackend(Backend):
def start(self):
pass
def get_target_devices(self, target, fs_name=None, update_mode=None):
return [TargetDevice('mgt', {'node': 'foo1', 'ha_node
|
noironetworks/networking-cisco
|
networking_cisco/tests/unit/ml2_drivers/nexus/test_trunk.py
|
Python
|
apache-2.0
| 6,254
| 0.00016
|
# Copyright (c) 2017 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from networking_cisco import backwards_compatibility as bc
from networking_cisco.ml2_drivers.nexus import trunk
from neutron.tests.unit.db import test_db_base_plugin_v2
PORT_ID = 'fake_port_id'
TRUNK_ID = 'fake_trunk_id'
DNS_NAME = 'test_dns_name'
VM_NAME = 'test_vm_name'
SEGMENTATION_VLAN = 'vlan'
SEGMENTATION_ID1 = 101
SEGMENTATION_ID2 = 102
SUBPORTS = [
{'segmentation_type': SEGMENTATION_VLAN, 'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID1},
{'segmentation_type': SEGMENTATION_VLAN, 'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID2}]
TRUNK = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'sub_ports': SUBPORTS,
'name': 'trunk0',
'admin_state_up': 'true',
'tenant_id': 'fake_tenant_id',
'project_id': 'fake_project_id',
'port_id': PORT_ID,
'id': TRUNK_ID,
'description': 'fake trunk port'}
PROFILE_BAREMETAL = [{"switch_info": "test_value"}]
SUBPORT = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID1}
PORT_BAREMETAL = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'id': PORT_ID,
bc.portbindings.VNIC_TYPE: bc.portbindings.VNIC_BAREMETAL,
bc.dns.DNSNAME: DNS_NAME,
bc.portbindings.PROFILE: {"local_link_information": PROFILE_BAREMETAL},
'trunk_details': {'trunk_id': TRUNK_ID, 'sub_ports': SUBPORTS}}
PORT_VM = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'id': PORT_ID,
bc.portbindings.VNIC_TYPE: bc.portbindings.VNIC_NORMAL,
bc.portbindings.HOST_ID: VM_NAME,
bc.portbindings.PROFILE: {},
'trunk_details': {'trunk_id': TRUNK_ID, 'sub_ports': SUBPORTS}}
class TestSubPort(object):
port_id = PORT_ID
trunk_id = TRUNK_ID
segmentation_type = SEGMENTATION_VLAN
segmentation_id = SEGMENTATION_ID1
class TestTrunk(object):
admin_state_up = 'test_admin_state'
id = TRUNK_ID
tenant_id = 'test_tenant_id'
name = 'test_trunk_name'
port_id = PORT_ID
status = bc.constants.PORT_STATUS_ACTIVE
sub_ports = SUBPORTS
update = mock.Mock()
@testtools.skipIf(bc.NEUTRON_VERSION < bc.NEUTRON_OCATA_VERSION,
"Test not applicable prior to stable/ocata.")
class TestNexusTrunkHandler(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
super(TestNexusTrunkHandler, self).setUp()
self.handler = trunk.NexusMDTrunkHandler()
self.plugin = bc.get_plugin()
self.plugin.get_port = mock.Mock()
self.plugin.update_port = mock.Mock()
self.mock_subport_get_object = mock.patch.object(
bc.trunk_objects.SubPort, 'get_object',
return_value=TestSubPort).start()
self.mock_trunk_get_object = mock.patch.object(
bc.trunk_objects.Trunk, 'get_object',
return_value=TestTrunk).start()
self.mock_trunk_get_object = mock.patch.object(
bc.trunk_objects.Trunk, 'get_object').start()
def _test_update_subports(self, port, host_id):
self.handler.update_subports(port)
self.assertEqual(2, self.plugin.update_port.call_count)
self.plugin.update_port.assert_called_with(mock.ANY, PORT_ID,
{'port':
{bc.portbindings.HOST_ID: host_id,
'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}})
self.mock_trunk_get_object.called_once_with(mock.ANY, id=TRUNK_ID)
TestTrunk.update.called_once_with(
status=bc.trunk_consts.ACTIVE_STATUS)
self.mock_trunk_get_object.assert_called_once_with(
mock.ANY, id=TRUNK_ID)
def test_is_trunk_parentport(self):
return_value = self.handler.is_trunk_parentport(PORT_VM)
self.assertTrue(return_value)
def test_is_trunk_parentport_no_trunk(self):
PORT_VM_NO_TRUNK = PORT_VM.copy()
del PORT_VM_NO_TRUNK['trunk_details']
return_value = self.handler.is_trunk_parentport(PORT_VM_NO_TRUNK)
self.assertFalse(return_value)
def test_is_trunk_subport(self):
PORT_VM['device_owner'] = bc.trunk_consts.TRUNK_SUBPORT_OWNER
return_value = self.handler.is_trunk_subport(PORT_VM)
self.assertTrue(return_value)
def test_is_trunk_subport_invalid_deviceowner(self):
PORT_VM['device_owner'] = 'fake_owner'
return_value = self.handler.is_trunk_subport(PORT_VM)
self.assertFalse(return_value)
def test_update_subports_baremetal(self):
self._test_update_subports(PORT_BAREMETAL, DNS_NAME)
def test_is_trunk_subport_baremetal(self):
self.plugin.get_port.return_value = PORT_BAREMETAL
return_value = self.handler.is_trunk_subport_baremetal(PORT_BAREMETAL)
self.assertTrue(return_value)
self.mock_subport_get_object.assert_called_once_with(
mock.ANY, port_id=PORT_BAREMETAL['id'])
self.mock_trunk_get_object.assert_called_once_with(
mock.ANY, id=TestSubPort().trunk_id)
def test_is_trunk_subport_baremetal_no_subport(self):
self.mock_subport_get_object.return_value = None
return_value = self.handler.is_trunk_subport_baremetal(PORT_BAREMETAL)
self.assertFalse(return_value)
self.mock_subport_get_object.assert_called_once_with(
mock.ANY, port_id=PORT_BAREMETAL['id'])
self.assertFalse(self.mock_trunk_get_object.call_count)
def test_is_trunk_subport_baremetal_vm_port(self):
self.plugin.get_port.return_value = PORT_VM
return_value = self.handler.is_trunk_subport_baremetal(PORT_VM)
self.assertFalse(return_value)
|
EnviroCentre/jython-upgrade
|
jython/lib/test/test_time.py
|
Python
|
mit
| 10,491
| 0.002288
|
from test import test_support
import time
import unittest
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_missing_module_attribute(self):
self.assertEqual(time.clock.__module__, 'time')
self.assertEqual(time.time.__module__, 'time')
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_clock(self):
time.clock()
def test_conversions(self):
self.assertTrue(time.ctime(self.t)
== time.asctime(time.localtime(self.t)))
self.assertTrue(long(time.mktime(time.localtime(self.t)))
== long(self.t))
def test_sleep(self):
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
def test_strftime_bounds_checking(self):
# Make sure that strftime() checks the bounds of the various parts
#of the time tuple (0 is valid for *all* values).
# XXX: Jython supports more dates than CPython
if not test_support.is_jython:
# Check year [1900, max(int)]
self.assertRaises(ValueError, time.strftime, '',
(1899, 1, 1, 0, 0, 0, 0, 1, -1))
if time.accept2dyear:
self.assertRaises(ValueError, time.strftime, '',
(-1, 1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(100, 1, 1, 0, 0, 0, 0, 1, -1))
# Check month [1, 12] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default values.
# No test for daylight savings since strftime() does not change output
# based on its value.
if not test_support.is_jython:
expected = "2000 01 01 00 00 00 1 001"
else:
# XXX: Jython doesn't support the "two digits years" hack (turned
# on/off by time.accept2dyears), so year 0 means exactly that
# and it is not converted to 2000.
expected = "0000 01 01 00 00 00 1 001"
result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# throwing an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_empty(self):
try:
time.strptime('', '')
except ValueError:
self.fail('strptime failed on empty args.')
def test_asctime(self):
time.asctime(time.gmtime(self.t))
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
        # XXX: Posix compliant asctime should refuse to convert
# year > 9999, but Linux implementation does not.
# self.assertRaises(ValueError, time.asctime,
# (12345, 1, 0, 0, 0, 0, 0, 0, 0))
# XXX: For now, just make sure we don't have a crash:
try:
time.asctime((12345, 1, 1, 0, 0, 0, 0, 1, 0))
except ValueError:
pass
    @unittest.skipIf(not hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ
|
fabiansinz/pipeline
|
python/pipeline/legacy/trk.py
|
Python
|
lgpl-3.0
| 20,366
| 0.00545
|
import warnings
from pprint import pprint
import datajoint as dj
import pandas as pd
from djaddon import hdf5
try:
from pupil_tracking import PupilTracker
except ImportError:
    warnings.warn("Failed to import pupil_tracking library. You won't be able to populate trk.EyeFrame")
schema = dj.schema('pipeline_pupiltracking', locals())
from . import rf
import numpy as np
import matplotlib.pyplot as plt
from IPython import embed
import glob
@schema
class Roi(dj.Manual):
definition = """
# table that stores the correct ROI of the Eye in the video
->rf.Eye
---
x_roi_min : int # x coordinate of roi
y_roi_min : int # y coordinate of roi
x_roi_max : int # x coordinate of roi
y_roi_max : int # y coordinate of roi
"""
# embed()
@schema
class ParamEyeFrame(dj.Lookup):
definition = """
# table that stores the paths for the params for pupil_tracker
pupil_tracker_param_id : int # id for param collection
---
convex_weight_high = Null : float # parameter for tracking
convex_weight_low = Null : float # parameter for tracking
thres_perc_high = Null : float # parameter for tracking
thres_perc_low = Null : float # parameter for tracking
pupil_left_limit = Null : float # parameter for tracking
pupil_right_limit = Null : float # parameter for tracking
min_radius = Null : float # parameter for tracking
max_radius = Null : float # parameter for tracking
centre_dislocation_penalty : float # parameter for tracking
distance_sq_pow : float # parameter for tracking
"""
contents = [
{'pupil_tracker_param_id': 0, 'convex_weight_high': 0.5, 'convex_weight_low': 0.5, 'thres_perc_high': 99, 'distance_sq_pow': 1,
'thres_perc_low': 1, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180,
'centre_dislocation_penalty': 0.001},
{'pupil_tracker_param_id': 1, 'convex_weight_high': 0.5, 'convex_weight_low': 0.5, 'thres_perc_high': 98, 'distance_sq_pow': 0.5,
'thres_perc_low': 2, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180,
'centre_dislocation_penalty': 0.05}
]
@schema
class EyeFrame(dj.Computed):
definition = """
# eye tracking info for each frame of a movie
-> Roi
-> ParamEyeFrame
frame : int # frame number in movie
---
eye_frame_ts=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def populated_from(self):
return Roi()
def _make_tuples(self, key):
print("Populating: ")
# embed()
param = (ParamEyeFrame() & 'pupil_tracker_param_id=1').fetch.as_dict()[0]
# key.update(param)
key['pupil_tracker_param_id'] = param['pupil_tracker_param_id']
pprint(key)
eye_roi = (Roi() & key).fetch1['x_roi_min', 'y_roi_min', 'x_roi_max', 'y_roi_max']
print("Populating for trk.Roi and roi = ", eye_roi)
p, f = (rf.Session() & key).fetch1['hd5_path', 'file_base']
n = (rf.Scan() & key).fetch1['file_num']
avi_path = glob.glob(r"{p}/{f}{n}*.avi".format(f=f, p=p, n=n))
# print(avi_path)
# embed()
assert len(avi_path) == 1, "Found 0 or more than 1 videos: {videos}".format(videos=str(avi_path))
tr = PupilTracker(param)
trace = tr.track_without_svm(avi_path[0], eye_roi)
# CODE to insert data after tracking
print("Tracking complete... Now inserting data to datajoint")
# embed()
efd = EyeFrame.Detection()
for index, data in trace.iterrows():
key['frame'] = index
self.insert1(key)
if pd.notnull(data['pupil_x']):
values = data.to_dict()
values.update(key)
efd.insert1(values)
class Detection(dj.Part):
definition = """
# eye frames with detected eye
->EyeFrame
---
pupil_x : float # pupil x position
pupil_y : float # pupil y position
pupil_r_minor : float # pupil radius minor axis
pupil_r_major : float # pupil radius major axis
pupil_angle : float # angle of major axis vs. horizontal axis in radians
pupil_x_std : float # pupil x position std
pupil_y_std : float # pupil y position std
pupil_r_minor_std : float # pupil radius minor axis std
pupil_r_major_std : float # pupil radius major axis std
pupil_angle_std : float # angle of major axis vs. horizontal axis in radians
intensity_std : float # standard deviation of the ROI pixel values
"""
@schema
class SelectionProtocol(dj.Lookup):
definition = """
# groups of filtering steps to reject bad frames
filter_protocol_id : int # id of the filtering protocol
---
protocol_name : char(50) # descriptive name of the protocol
"""
contents = [
{'filter_protocol_id': 0, 'protocol_name': 'frame_intensity'},
{'filter_protocol_id': 1, 'protocol_name': 'int_and_ran_pupil_x_50_2'},
{'filter_protocol_id': 2, 'protocol_name': 'int_and_ran_pupil_x_75_2'},
{'filter_protocol_id': 3, 'protocol_name': 'int_and_ran_pupil_x_25_2'},
{'filter_protocol_id': 4, 'protocol_name': 'int_and_ran_pupil_pos'},
{'filter_protocol_id': 5, 'protocol_name': 'int_and_ran_pupil_pos_spikes_removed'},
{'filter_protocol_id': 6, 'protocol_name': 'int_and_ran_pupil_pos_spike_filter2'}
]
def apply(self, frames, key):
print("Applying filter with protocol id :", key['filter_protocol_id'])
        for step in (ProtocolStep() & key).fetch.order_by('priority').as_dict():
# embed()
print("....for protocol id:", key['filter_protocol_id'], "applying filter with filter_id = ",
step['filter_id'])
frames = FrameSelector().apply(frames, step, param=step['filter_param'])
return frames
@schema
class FrameSelector(dj.Lookup):
definition = """
# single filters to reject frames
filter_id : tinyint # id of the filter
---
filter_name : char(50) # descriptive name of the filter
"""
contents = [
{'filter_id': 0, 'filter_name': 'intensity_filter'},
{'filter_id': 1, 'filter_name': 'ran_pupil_x_th'},
{'filter_id': 2, 'filter_name': 'ran_pupil_pos'},
{'filter_id': 3, 'filter_name': 'spike_filter'},
{'filter_id': 4, 'filter_name': 'spike_filter2'}
]
def apply(self, frames, key, param):
"""
Apply takes a restriction of EyeFrame.Detection() and returns an even more restricted set of frames
:param frames: restriction of EyeFrame.Detection()
:param key: key that singles out a single filter
:param param: parameters to the filter
:return: an even more restricted set of frames
"""
which = (self & key).fetch1['filter_name']
if which == 'intensity_filter':
i = frames.fetch['intensity_std']
th = np.percentile(i, param[0]) / param[1]
return frames & 'intensity_std>{threshold}'.format(threshold=th)
if which == 'ran_pupil_x_th':
|
nxnfufunezn/qtile
|
libqtile/manager.py
|
Python
|
mit
| 59,901
| 0.00025
|
# vim: tabstop=4 shiftwidth=4 expandtab
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
try:
import tracemalloc
except ImportError:
tracemalloc = None
from libqtile.log_utils import init_log
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import logging
import os
import pickle
import shlex
import signal
import sys
import traceback
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
from six.moves import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .state import QtileState
from .utils import QtileError, get_cache_dir
from .widget.base import _Widget
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
if sys.version_info >= (3, 3):
def _import_module(module_name, dir_path):
import importlib
file_name = os.path.join(dir_path, module_name) + '.py'
f = importlib.machinery.SourceFileLoader(module_name, file_name)
module = f.load_module()
return module
else:
def _import_module(module_name, dir_path):
import imp
try:
fp, pathname, description = imp.find_module(module_name, [dir_path])
module = imp.load_module(module_name, fp, pathname, description)
finally:
if fp:
fp.close()
return module
class Qtile(command.CommandObject):
"""
This object is the __root__ of the command graph.
"""
def __init__(self, config,
displayName=None, fname=None, no_spawn=False, log=None,
state=None):
logkwargs = {}
if hasattr(config, "log_level"):
logkwargs["log_level"] = config.log_level
if hasattr(config, "log_path"):
logkwargs["log_path"] = config.log_path
self.log = log or init_log(**logkwargs)
self.no_spawn = no_spawn
self._eventloop = None
self._finalize = False
if not displayName:
displayName = os.environ.get("DISPLAY")
if not displayName:
raise QtileError("No DISPLAY set.")
if not fname:
# Dots might appear in the host part of the display name
# during remote X sessions. Let's strip the host part first.
displayNum = displayName.partition(":")[2]
if "." not in displayNum:
displayName = displayName + ".0"
fname = command.find_sockfile(displayName)
self.conn = xcbq.Connection(displayName)
self.config = config
self.fname = fname
hook.init(self)
self.windowMap = {}
self.widgetMap = {}
self.groupMap = {}
self.groups = []
self.keyMap = {}
# Find the modifier mask for the numlock key, if there is one:
nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
self.numlockMask = xcbq.ModMasks[self.conn.get_modifier(nc)]
self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
# Because we only do Xinerama multi-screening,
# we can assume that the first
# screen's root is _the_ root.
self.root = self.conn.default_screen.root
self.root.set_attribute(
eventmask=(
EventMask.StructureNotify |
EventMask.SubstructureNotify |
EventMask.SubstructureRedirect |
EventMask.EnterWindow |
EventMask.LeaveWindow
)
)
self.root.set_property(
'_NET_SUPPORTED',
[self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
)
self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
self.root.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
# setup the default cursor
self.root.set_cursor('left_ptr')
wmname = getattr(self.config, "wmname", "qtile")
self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
self.supporting_wm_check_window.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
if config.main:
config.main(self)
self.dgroups = None
if self.config.groups:
key_binder = None
if hasattr(self.config, 'dgroups_key_binder'):
key_binder = self.config.dgroups_key_binder
self.dgroups = DGroups(self, self.config.groups, key_binder)
if hasattr(config, "widget_defaults") and config.widget_defaults:
_Widget.global_defaults = config.widget_defaults
else:
_Widget.global_defaults = {}
for i in self.groups:
self.groupMap[i.name] = i
self.setup_eventloop()
self.server = command._Server(self.fname, self, config, self._eventloop)
self.currentScreen = None
self.screens = []
self._process_screens()
self.currentScreen = self.screens[0]
self._drag = None
self.ignoreEvents = set([
xcffib.xproto.KeyReleaseEvent,
xcffib.xproto.ReparentNotifyEvent,
xcffib.xproto.CreateNotifyEvent,
# DWM handles this to help "broken focusing windows".
xcffib.xproto.MapNotifyEvent,
            xcffib.xproto.LeaveNotifyEvent,
xcffib.xproto.FocusOutEvent,
xcffib.xproto.FocusInEvent,
xcffib.xproto.NoExposureEvent
])
self.conn.flush()
self.conn.xsync()
self._xpoll()
# Map and Grab keys
        for key in self.config.keys:
self.mapKey(key)
# It fixes problems with focus when clicking windows of some specific clients like xterm
def noop(qtile):
pass
self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
self.mouseMap = {}
for i in self.config.mouse:
if self.mouseMap.get(i.button_code) is None:
self.mouseMap[i.button_code] = []
self.mouseMap[i.button_code].append(i)
self.grabMouse()
# no_spawn is set when we are restarting; we only want to run the
# startup hook once.
if not no_spawn:
hook.fire("startup_once")
hook.fire("startup")
self.scan()
self.update_net_desktops()
hook.subscribe.setgroup(self.update_net_desktops)
if state:
st = pickle.load(six.BytesIO(state.encode()))
try:
st.apply(self)
except:
log.exception("failed restoring state")
self.selection = {
"PRIMARY": {"owner": None, "selection": ""},
"CLIPBOARD": {"owner": None, "selection": ""}
}
self.setup_selection()
def setup_selection(self):
PRIMARY = self.conn.atoms["PRIMARY"]
CLIPBOARD = self.conn.atoms["
|
apyrgio/snf-ganeti
|
test/py/ganeti.hypervisor.hv_kvm_unittest.py
|
Python
|
bsd-2-clause
| 16,077
| 0.003981
|
#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing the hypervisor.hv_kvm module"""
import threading
import tempfile
import unittest
import socket
import os
import struct
import re
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from ganeti import objects
from ganeti import errors
from ganeti import utils
from ganeti import pathutils
from ganeti.hypervisor import hv_kvm
import ganeti.hypervisor.hv_kvm.netdev as netdev
import ganeti.hypervisor.hv_kvm.monitor as monitor
import testutils
class QmpStub(threading.Thread):
"""Stub for a QMP endpoint for a KVM instance
"""
_QMP_BANNER_DATA = {
"QMP": {
"version": {
"package": "",
"qemu": {
"micro": 50,
"minor": 13,
"major": 0,
},
"capabilities": [],
},
}
}
_EMPTY_RESPONSE = {
"return": [],
}
_SUPPORTED_COMMANDS = {
"return": [
{"name": "command"},
{"name": "query-kvm"},
{"name": "eject"},
{"name": "query-status"},
{"name": "query-name"},
]
}
def __init__(self, socket_filename, server_responses):
"""Creates a QMP stub
@type socket_filename: string
    @param socket_filename: filename of the UNIX socket that will be created
      by this class and used for the communication
@type server_responses: list
@param server_responses: list of responses that the server sends in response
to whatever it receives
"""
threading.Thread.__init__(self)
self.socket_filename = socket_filename
self.script = server_responses[:]
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.socket_filename)
self.socket.listen(1)
def run(self):
# Hypothesis: the messages we receive contain only a complete QMP message
# encoded in JSON.
conn, addr = self.socket.accept()
# Send the banner as the first thing
conn.send(self.encode_string(self._QMP_BANNER_DATA))
# Expect qmp_capabilities and return an empty response
conn.recv(4096)
conn.send(self.encode_string(self._EMPTY_RESPONSE))
# Expect query-commands and return the list of supported commands
conn.recv(4096)
conn.send(self.encode_string(self._SUPPORTED_COMMANDS))
while True:
# We ignore the expected message, as the purpose of this object is not
# to verify the correctness of the communication but to act as a
# partner for the SUT (System Under Test, that is QmpConnection)
msg = conn.recv(4096)
if not msg:
break
if not self.script:
break
response = self.script.pop(0)
if isinstance(response, str):
conn.send(response)
elif isinstance(response, list):
for chunk in response:
conn.send(chunk)
else:
raise errors.ProgrammerError("Unknown response type for %s" % response)
conn.close()
def encode_string(self, message):
return (serializer.DumpJson(message) +
hv_kvm.QmpConnection._MESSAGE_END_TOKEN)
class TestQmpMessage(testutils.GanetiTestCase):
def testSerialization(self):
test_data = {
"execute": "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
for k, v in test_data.items():
self.assertEqual(message[k], v)
serialized = str(message)
self.assertEqual(len(serialized.splitlines()), 1,
msg="Got multi-line message")
rebuilt_message = hv_kvm.QmpMessage.BuildFromJsonString(serialized)
self.assertEqual(rebuilt_message, message)
self.assertEqual(len(rebuilt_message), len(test_data))
def testDelete(self):
toDelete = "execute"
test_data = {
toDelete: "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
oldLen = len(message)
del(message[toDelete])
newLen = len(message)
self.assertEqual(oldLen - 1, newLen)
class TestQmp(testutils.GanetiTestCase):
REQUESTS = [
{"execute": "query-kvm", "arguments": []},
{"execute": "eject", "arguments": {"device": "ide1-cd0"}},
{"execute": "query-status", "arguments": []},
{"execute": "query-name", "arguments": []},
]
SERVER_RESPONSES = [
# One message, one send()
'{"return": {"enabled": true, "present": true}}\r\n',
# Message sent using multiple send()
['{"retur', 'n": {}}\r\n'],
# Multiple messages sent using one send()
'{"return": [{"name": "quit"}, {"name": "eject"}]}\r\n'
'{"return": {"running": true, "singlestep": false}}\r\n',
]
EXPECTED_RESPONSES = [
{"enabled": True, "present": True},
{},
[{"name": "quit"}, {"name": "eject"}],
{"running": True, "singlestep": False},
]
def testQmp(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Set up the QMP connection
qmp_connection = hv_kvm.QmpConnection(socket_file.name)
qmp_connection.connect()
# Format the script
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp_connection.Execute(request["execute"],
request["arguments"])
self.assertEqual(response, expected_response)
msg = hv_kvm.QmpMessage({"return": expected_response})
self.assertEqual(len(str(msg).splitlines()), 1,
msg="Got multi-line message")
self.assertRaises(monitor.QmpCommandNotSupported,
qmp_connection.Execute,
"unsupported-command")
def testQmpContextManager(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Test the context manager functionality
with hv_kvm.QmpConnection(socket_file.name) as qmp:
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp.Execute(request["execute"], request["arguments"])
self.assertEqual(response, expected_response)
class TestConsole(unittest.TestCase):
def _Test(self, instance, node, hvparams):
cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, hvparams, {})
self.assertTrue(cons.Validate())
return cons
def testSerial(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node6017-uuid")
node = objects.Node(name="node6017", uuid="node6017-uuid")
hvparams = {
constants.HV_SERIAL_CONSOLE: True,
|
NileshPS/OS-and-Networking-programs
|
7_rpc/client.py
|
Python
|
gpl-3.0
| 331
| 0.006042
|
from xmlrpc.client import ServerProxy
import sys
def help():
    print("Usage : remote_finger [-lmsp] user..")
if __name__ == '__main__':
sys.argv = sys.argv[1:]
if len(sys.argv) == 0:
help()
sys.exit(1)
client = ServerProxy('http://localhost:8000')
print(client.finger(sys.argv))
sys.exit(0)
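# A hedged sketch (not part of the original file) of the server side this
# client expects: an XML-RPC server on localhost:8000 exposing a finger()
# function. How finger is implemented server-side is an assumption; only the
# endpoint and method name are implied by the client code above.
#
#     from xmlrpc.server import SimpleXMLRPCServer
#     import subprocess
#
#     def finger(args):
#         # run the local finger(1) command with the forwarded arguments
#         return subprocess.run(['finger'] + list(args),
#                               capture_output=True, text=True).stdout
#
#     server = SimpleXMLRPCServer(('localhost', 8000))
#     server.register_function(finger, 'finger')
#     server.serve_forever()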
|
giubil/trackit
|
api/files/api/app/views/aws/cost/stats.py
|
Python
|
apache-2.0
| 43,920
| 0.003575
|
from datetime import datetime
from app import app
from app.authentication import with_login
from flask import Blueprint, jsonify, request, Response
from app.generate_csv import generate_csv_clean
from app.msol_util import get_next_update_estimation_message_aws
from app.es.awsmetric import AWSMetric
from app.es.awsstat import AWSStat
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from app.aws_keys import with_multiple_aws_accounts
from dateutil.relativedelta import relativedelta
from app.generate_csv import generate_csv
from app.cache import compressed_json, decompressed_json, cache, with_cache
from hashlib import sha256
from .. import AWS_KEY_PROCESSING_INTERVAL_HOURS
import itertools
import calendar
import config
aws_cost_stats_bp = Blueprint('aws_cost_stats_bp', __name__)
def cut_cost_by_product(products, cut):
res = []
other = {'product': 'Other Services', 'cost': 0}
i = 0
for p in products:
if i < cut and p['cost'] >= 0.01:
res.append(p)
else:
other['cost'] += p['cost']
i += 1
if other['cost'] >= 0.01:
res.append(other)
return res
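# Worked example (a sketch, not part of the original file): with cut=2 the
# first two entries are kept (the input is assumed to be pre-sorted by cost,
# descending) and the remainder is folded into 'Other Services'.
#
#     cut_cost_by_product([{'product': 'EC2', 'cost': 10.0},
#                          {'product': 'S3', 'cost': 2.0},
#                          {'product': 'SQS', 'cost': 0.5}], 2)
#     -> [{'product': 'EC2', 'cost': 10.0},
#         {'product': 'S3', 'cost': 2.0},
#         {'product': 'Other Services', 'cost': 0.5}]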
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycost(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
total_cost:
type: number
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
data = AWSDetailedLineitem.get_monthly_cost(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)
return jsonify(data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/totalcost/<string:time_arg>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_totalcost(accounts, time_arg):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get total cost
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
total_cost:
type: number
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
this_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
this_month = this_day.replace(day=1)
time_val = {
'ever': AWSDetailedLineitem.get_first_date([account.get_aws_user_id() for account in accounts]),
'currentyear': this_month - relativedelta(months=this_month.month),
'currentmonth': this_month,
}
date_from = time_val.get(time_arg, now)
date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)
return jsonify(raw_data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregion(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by region
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
products:
type: array
items:
properties:
cost:
type: number
region:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)['intervals']['buckets']
res = [
{
'month': data['key_as_string'].split('T')[0],
'regions': [
{
'region': region['key'],
'cost': region['cost']['value'],
}
for region in data['regions']['buckets']
],
}
for data in raw_data
]
if 'csv' in request.args:
return Response(generate_csv(res, 'regions', 'region'), mimetype='text/csv')
return jsonify(months=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbyaccount(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by region for each account
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
products:
type: array
it
|
SEL-Columbia/commcare-hq
|
corehq/apps/receiverwrapper/tests/test_repeater.py
|
Python
|
bsd-3-clause
| 6,713
| 0.000447
|
from StringIO import StringIO
from datetime import datetime, timedelta
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V1
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.receiverwrapper.models import (
CaseRepeater,
FormRepeater,
RepeatRecord,
)
from couchforms.models import XFormInstance
case_id = "ABC123CASEID"
instance_id = "XKVB636DFYL38FNX3D38WV5EH"
update_instance_id = "ZYXKVB636DFYL38FNX3D38WV5"
case_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00Z</date_modified>
<create>
<case_type_id>repeater_case</case_type_id>
<user_id>O2XLT0WZW97W1A91E2W1Y0NJG</user_id>
<case_name>ABC 123</case_name>
<external_id>ABC 123</external_id>
</create>
</case>
""" % case_id
update_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00Z</date_modified>
<update>
<case_name>ABC 234</case_name>
</update>
</case>
""" % case_id
xform_xml_template = """<?xml version='1.0' ?>
<data xmlns:jrm="http://dev.commcarehq.org/jr/xforms" xmlns="https://www.commcarehq.org/test/repeater/">
<woman_name>Alpha</woman_name>
<husband_name>Beta</husband_name>
<meta>
<deviceID>O2XLT0WZW97W1A91E2W1Y0NJG</deviceID>
<timeStart>2011-10-01T15:25:18.404-04</timeStart>
<timeEnd>2011-10-01T15:26:29.551-04</timeEnd>
<username>admin</username>
<userID>O2XLT0WZW97W1A91E2W1Y0NJG</userID>
<instanceID>%s</instanceID>
</meta>
%s
</data>
"""
xform_xml = xform_xml_template % (instance_id, case_block)
update_xform_xml = xform_xml_template % (update_instance_id, update_block)
class RepeaterTest(TestCase):
def setUp(self):
self.client = Client()
self.domain = "test-domain"
create_domain(self.domain)
self.case_repeater = CaseRepeater(
domain=self.domain,
url='case-repeater-url',
version=V1,
)
self.case_repeater.save()
self.form_repeater = FormRepeater(
domain=self.domain,
url='form-repeater-url',
)
self.form_repeater.save()
self.log = []
self.post_xml(xform_xml)
def post_xml(self, xml):
f = StringIO(xml)
f.name = 'form.xml'
self.client.post(
reverse('receiver_post', args=[self.domain]), {
'xml_submission_file': f
}
)
def clear_log(self):
for i in range(len(self.log)): self.log.pop()
def make_post_fn(self, status_codes):
status_codes = iter(status_codes)
def post_fn(data, url, headers=None):
status_code = status_codes.next()
self.log.append((url, status_code, data, headers))
class resp:
status = status_code
return resp
return post_fn
def tearDown(self):
self.case_repeater.delete()
self.form_repeater.delete()
XFormInstance.get(instance_id).delete()
repeat_records = RepeatRecord.all()
for repeat_record in repeat_records:
repeat_record.delete()
def test_repeater(self):
CommCareCase.get(case_id)
def now():
return datetime.utcnow()
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 2)
self.clear_log()
records_by_repeater_id = {}
for repeat_record in repeat_records:
repeat_record.fire(post_fn=self.make_post_fn([404, 404, 404]))
repeat_record.save()
records_by_repeater_id[repeat_record.repeater_id] = repeat_record
for (url, status, data, headers) in self.log:
self.assertEqual(status, 404)
self.clear_log()
next_check_time = now() + timedelta(minutes=60)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=now() + timedelta(minutes=15),
)
self.assertEqual(len(repeat_records), 0)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time + timedelta(seconds=2),
)
self.assertEqual(len(repeat_records), 2)
for repeat_record in repeat_records:
self.assertLess(abs(next_check_time - repeat_record.next_check),
timedelta(seconds=2))
repeat_record.fire(post_fn=self.make_post_fn([404, 200]))
repeat_record.save()
self.assertEqual(len(self.log), 4)
# The following is pretty fickle and depends on which of
# - corehq.apps.receiverwrapper.signals
        # - casexml.apps.case.signals
# gets loaded first.
# This is deterministic but easily affected by minor code changes
# check case stuff
        rec = records_by_repeater_id[self.case_repeater.get_id]
self.assertEqual(self.log[1][:2], (self.case_repeater.get_url(rec), 200))
self.assertIn('server-modified-on', self.log[1][3])
check_xml_line_by_line(self, self.log[1][2], case_block)
# check form stuff
rec = records_by_repeater_id[self.form_repeater.get_id]
self.assertEqual(self.log[3][:3],
(self.form_repeater.get_url(rec), 200, xform_xml))
self.assertIn('received-on', self.log[3][3])
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time,
)
for repeat_record in repeat_records:
self.assertEqual(repeat_record.succeeded, True)
self.assertEqual(repeat_record.next_check, None)
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 0)
self.post_xml(update_xform_xml)
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 2)
class RepeaterLockTest(TestCase):
def testLocks(self):
r = RepeatRecord(domain='test')
r.save()
r2 = RepeatRecord.get(r._id)
self.assertTrue(r.acquire_lock(datetime.utcnow()))
r3 = RepeatRecord.get(r._id)
self.assertFalse(r2.acquire_lock(datetime.utcnow()))
self.assertFalse(r3.acquire_lock(datetime.utcnow()))
r.release_lock()
r4 = RepeatRecord.get(r._id)
self.assertTrue(r4.acquire_lock(datetime.utcnow()))
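# Hedged sketch (not part of the original tests): the locking contract exercised
# by RepeaterLockTest implies that production callers would guard delivery along
# these lines; `record_id` and `post_fn` below are hypothetical placeholders.
#
#   record = RepeatRecord.get(record_id)
#   if record.acquire_lock(datetime.utcnow()):
#       try:
#           record.fire(post_fn=post_fn)
#       finally:
#           record.release_lock()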
|
priya-pp/Tacker
|
releasenotes/source/conf.py
|
Python
|
apache-2.0
| 8,504
| 0.000118
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# tacker documentation build configuration file, created by
# sphinx-quickstart on Tue May 31 19:07:30 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'oslosphinx',
'reno.sphinxext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tacker Release Notes'
copyright = u'2016, Tacker Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pbr.version
tacker_version = pbr.version.VersionInfo('tacker')
release = tacker_version.version_string_with_vcs()
version = tacker_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to
# use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tackerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'TackerReleaseNotes.tex',
u'Tacker Release Notes Documentation',
u'Tacker Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tackerreleasenotes', u'Tacker Release Notes Documentation',
[u'Tacker Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TackerReleaseNotes', u'Tacker Release Notes Documentation',
u'Tacker Developers',
|
dper/pumptweet
|
setup.py
|
Python
|
mit
| 1,006
| 0.036779
|
#!/usr/bin/env python2
# setup.py
from setuptools import setup, find_packages
setup(name='pumptweet',
version='2.1',
description='Cross posts from Pump.io to Twitter.',
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='README.md',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
|
'Topic :: Communications',
],
url='http://github.com/dper/pumptweet',
author='Douglas Paul Perkins',
author_email='contact@dperkins.org',
license='MIT',
packages=['pumptweet'],
install_requires=[
'pypump >= 0.7',
'python-twitter >= 3.1',
],
include_package_data=True,
scripts=[
'pt.py',
'pt.sh',
],
|
zip_safe=False)
|
bbondy/brianbondy.gae
|
libs/html5lib/serializer/__init__.py
|
Python
|
mit
| 585
| 0.005128
|
from html5lib import treewalkers
from htmlserializer import HTMLSerializer
from xhtmlserializer import XHTMLSerializer
def serialize(input, tree="simpletree", format="html", encoding=None,
**serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
if format == "html":
s = HTMLSerializer(**serializer_opts)
elif format == "xhtml":
s = XHTMLSerializer(**serializer_opts)
else:
        raise ValueError("type must be either html or xhtml")
return s.render(walker(input), encoding)
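if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: parse a tiny
    # document with html5lib and re-emit it through serialize(). The sample
    # markup and the "simpletree" tree name are illustrative assumptions.
    import html5lib
    doc = html5lib.parse("<p>Hello <b>world</b></p>", treebuilder="simpletree")
    print serialize(doc, tree="simpletree", format="html")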
|
praveen-pal/edx-platform
|
lms/djangoapps/courseware/features/lti.py
|
Python
|
agpl-3.0
| 6,277
| 0.000956
|
#pylint: disable=C0111
from django.contrib.auth.models import User
from lettuce import world, step
from lettuce.django import django_url
from common import course_id
from student.models import CourseEnrollment
@step('I view the LTI and it is not rendered$')
def lti_is_not_rendered(_step):
# lti div has no class rendered
assert world.is_css_not_present('div.lti.rendered')
# error is shown
assert world.css_visible('.error_message')
# iframe is not visible
assert not world.css_visible('iframe')
#inside iframe test content is not presented
    with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_not_present_by_css('.result', wait_time=5)
@step('I view the LTI and it is rendered$')
def lti_is_rendered(_step):
# lti div has class rendered
assert world.is_css_present('div.lti.rendered')
# error is hidden
assert not world.css_visible('.error_message')
# iframe is visible
|
assert world.css_visible('iframe')
#inside iframe test content is presented
with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=5)
assert ("This is LTI tool. Success." == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
# lti div has class rendered
assert world.is_css_present('div.lti.rendered')
# error is hidden
assert not world.css_visible('.error_message')
# iframe is visible
assert world.css_visible('iframe')
#inside iframe test content is presented
with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=5)
assert ("Wrong LTI signature" == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('the course has correct LTI credentials$')
def set_correct_lti_passport(_step):
coursenum = 'test_course'
metadata = {
'lti_passports': ["correct_lti_id:{}:{}".format(
world.lti_server.oauth_settings['client_key'],
world.lti_server.oauth_settings['client_secret']
)]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
coursenum = 'test_course'
metadata = {
'lti_passports': ["test_lti_id:{}:{}".format(
world.lti_server.oauth_settings['client_key'],
"incorrect_lti_secret_key"
)]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has an LTI component filled with correct fields$')
def add_correct_lti_to_course(_step):
category = 'lti'
world.ItemFactory.create(
# parent_location=section_location(course),
parent_location=world.scenario_dict['SEQUENTIAL'].location,
category=category,
display_name='LTI',
metadata={
'lti_id': 'correct_lti_id',
'launch_url': world.lti_server.oauth_settings['lti_base'] + world.lti_server.oauth_settings['lti_endpoint']
}
)
course = world.scenario_dict["COURSE"]
chapter_name = world.scenario_dict['SECTION'].display_name.replace(
" ", "_")
section_name = chapter_name
path = "/courses/{org}/{num}/{name}/courseware/{chapter}/{section}".format(
org=course.org,
num=course.number,
name=course.display_name.replace(' ', '_'),
chapter=chapter_name,
section=section_name)
url = django_url(path)
world.browser.visit(url)
@step('the course has an LTI component with incorrect fields$')
def add_incorrect_lti_to_course(_step):
category = 'lti'
world.ItemFactory.create(
parent_location=world.scenario_dict['SEQUENTIAL'].location,
category=category,
display_name='LTI',
metadata={
'lti_id': 'incorrect_lti_id',
'lti_url': world.lti_server.oauth_settings['lti_base'] + world.lti_server.oauth_settings['lti_endpoint']
}
)
course = world.scenario_dict["COURSE"]
chapter_name = world.scenario_dict['SECTION'].display_name.replace(
" ", "_")
section_name = chapter_name
path = "/courses/{org}/{num}/{name}/courseware/{chapter}/{section}".format(
org=course.org,
num=course.number,
name=course.display_name.replace(' ', '_'),
chapter=chapter_name,
section=section_name)
url = django_url(path)
world.browser.visit(url)
def create_course(course, metadata):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course',
metadata=metadata
)
# Add a section to the course to contain problems
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
display_name='Test Section'
)
world.scenario_dict['SEQUENTIAL'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category='sequential',
display_name='Test Section')
def i_am_registered_for_the_course(course, metadata):
# Create the course
create_course(course, metadata)
# Create the user
world.create_user('robot', 'test')
usr = User.objects.get(username='robot')
# If the user is not already enrolled, enroll the user.
CourseEnrollment.enroll(usr, course_id(course))
world.log_in(username='robot', password='test')
|
adbar/url-tools
|
courlan/clean.py
|
Python
|
gpl-2.0
| 5,520
| 0.002899
|
"""
Functions performing URL trimming and cleaning
"""
## This file is available from https://github.com/adbar/courlan
## under GNU GPL v3 license
import logging
import re
from collections import OrderedDict
from urllib.parse import parse_qs, urlencode, urlparse, ParseResult
from .filters import validate_url
from .settings import ALLOWED_PARAMS, CONTROL_PARAMS,\
TARGET_LANG_DE, TARGET_LANG_EN
PROTOCOLS = re.compile(r'https?://')
SELECTION = re.compile(r'(https?://[^">&? ]+?)(?:https?://)|(?:https?://[^/]+?/[^/]+?[&?]u(rl)?=)(https?://[^"> ]+)')
MIDDLE_URL = re.compile(r'https?://.+?(https?://.+?)(?:https?://|$)')
NETLOC_RE = re.compile(r'(?<=\w):(?:80|443)')
PATH1 = re.compile(r'/+')
PATH2 = re.compile(r'^(?:/\.\.(?![^/]))+')
def clean_url(url, language=None):
'''Helper function: chained scrubbing and normalization'''
try:
        return normalize_url(scrub_url(url), language=language)
except (AttributeError, ValueError):
return None
def scrub_url(url):
'''Strip unnecessary parts and make sure only one URL is considered'''
# trim
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
# remove leading and trailing white space and unescaped control chars
url = url.strip('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \r\n')
# clean the input string
url = url.replace('[ \t]+', '')
# <![CDATA[http://www.urbanlife.de/item/260-bmw-i8-hybrid-revolution-unter-den-sportwagen.html]]>
if url.startswith('<![CDATA['): # re.match(r'<!\[CDATA\[', url):
url = url.replace('<![CDATA[', '') # url = re.sub(r'^<!\[CDATA\[', '', url)
url = url.replace(']]>', '') # url = re.sub(r'\]\]>$', '', url)
# markup rests
url = re.sub(r'</?a>', '', url)
# &
if '&' in url:
url = url.replace('&', '&')
#if '"' in link:
# link = link.split('"')[0]
# double/faulty URLs
protocols = PROTOCOLS.findall(url)
if len(protocols) > 1 and not 'web.archive.org' in url:
logging.debug('double url: %s %s', len(protocols), url)
match = SELECTION.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
else:
match = MIDDLE_URL.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
# too long and garbled URLs e.g. due to quotes URLs
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
if len(url) > 500: # arbitrary choice
match = re.match(r'(.*?)[<>"\'\r\n ]', url)
if match:
url = match.group(1)
if len(url) > 500:
logging.debug('invalid-looking link %s of length %d',
url[:50] + '...', len(url))
# trailing ampersand
url = url.strip('&')
# trailing slashes in URLs without path or in embedded URLs
if url.count('/') == 3 or url.count('://') > 1:
url = url.rstrip('/')
# lower
# url = url.lower()
return url
def clean_query(parsed_url, strict=False, language=None):
'''Strip unwanted query elements'''
if len(parsed_url.query) > 0:
qdict = parse_qs(parsed_url.query)
newqdict = OrderedDict()
for qelem in sorted(qdict.keys()):
teststr = qelem.lower()
# control param
if strict is True and \
teststr not in ALLOWED_PARAMS and teststr not in CONTROL_PARAMS:
continue
# control language
if language is not None and teststr in CONTROL_PARAMS:
found_lang = str(qdict[qelem][0])
if (language == 'de' and found_lang not in TARGET_LANG_DE) or \
(language == 'en' and found_lang not in TARGET_LANG_EN) or \
found_lang != language:
logging.debug('bad lang: %s %s %s', language, qelem, found_lang)
raise ValueError
# insert
newqdict[qelem] = qdict[qelem]
newstring = urlencode(newqdict, doseq=True)
parsed_url = parsed_url._replace(query=newstring)
return parsed_url
def normalize_url(parsed_url, strict=False, language=None):
'''Takes a URL string or a parsed URL and returns a (basically) normalized URL string'''
if not isinstance(parsed_url, ParseResult):
parsed_url = urlparse(parsed_url)
# port
if parsed_url.port is not None and parsed_url.port in (80, 443):
parsed_url = parsed_url._replace(netloc=NETLOC_RE.sub('', parsed_url.netloc))
    # path: https://github.com/saintamh/alcazar/blob/master/alcazar/utils/urls.py
newpath = PATH1.sub('/', parsed_url.path)
# Leading /../'s in the path are removed
newpath = PATH2.sub('', newpath)
# fragment
if strict is True:
newfragment = ''
else:
newfragment = parsed_url.fragment
    # lowercase + remove fragments
parsed_url = parsed_url._replace(
scheme=parsed_url.scheme.lower(),
netloc=parsed_url.netloc.lower(),
path=newpath,
fragment=newfragment
)
# strip unwanted query elements
parsed_url = clean_query(parsed_url, strict, language)
# rebuild
return parsed_url.geturl()
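if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: the URL below is a
    # made-up example. clean_url() chains scrub_url() and normalize_url() and
    # returns None when the input cannot be salvaged.
    print(clean_url(' https://www.Example.org:443//path/?utm_source=feed#top '))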
|
ProkopHapala/SimpleSimulationEngine
|
python/pyRay/tests/testSceneList.py
|
Python
|
mit
| 624
| 0.110577
|
#!/usr/bin/python
import sys
sys.path.append("../../")
#import pyRay as ra
import pyRay.scene as scn
# TODO : how to pass arguments from function header?
object1 = ("obj1",(), [( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),( "S","sdSphere","%s",(1.2,) )])
object2 = ("obj1",("f","f3"),[( "U","sdBox" ,"%s",("2",) ),( "S","sdSphere","%s",("1",) )])
object3 = ("obj2",("f","f2"),[( "U","sdBox" ,"%s",(("2",1.
|
0),) ),( "S","sdSphere","%s",("1",) )])
scene = [
( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),
( "S","sdSphere","%s",(1.2,) ),
]
scene_src = scn.parseSceneList(scene)
print scene_src
|
google/grr
|
grr/server/grr_response_server/flows/file.py
|
Python
|
apache-2.0
| 16,923
| 0.006736
|
#!/usr/bin/env python
"""Flows to collect file contents and metadata."""
from typing import Any, Mapping, Optional
from grr_response_core import config
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import flow_base
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
_MAX_FILE_SIZE = 1024 * 1024 * 1024 * 10 # 10 GiB.
# Although MultiGetFileLogic is a leaky, complex, and overall problematic Mixin
# it seems to be the best choice to fetch the stat, hashes, and contents of a file.
# At the time of writing, none of the flows exposed all three to the caller in
# a sensible way.
class CollectSingleFile(transfer.MultiGetFileLogic, flow_base.FlowBase):
"""Fetches contents of a single file from the specified absolute path."""
friendly_name = "File content"
category = "/Filesystem/"
args_type = rdf_file_finder.CollectSingleFileArgs
result_types = (rdf_file_finder.CollectSingleFileResult,)
progress_type = rdf_file_finder.CollectSingleFileProgress
behaviours = flow_base.BEHAVIOUR_DEBUG
def GetProgress(self) -> rdf_file_finder.CollectSingleFileProgress:
return self.state.progress
def Start(self):
super().Start(file_size=self.args.max_size_bytes)
self.state.progress = rdf_file_finder.CollectSingleFileProgress(
status=rdf_file_finder.CollectSingleFileProgress.Status.IN_PROGRESS)
pathspec = rdf_paths.PathSpec.OS(path=self.args.path)
self.StartFileFetch(pathspec)
def ReceiveFetchedFile(self,
stat_entry,
hash_obj,
request_data=None,
is_duplicate=False):
"""See MultiGetFileLogic."""
del request_data, is_duplicate # Unused.
result = rdf_file_finder.CollectSingleFileResult(
stat=stat_entry, hash=hash_obj)
self.SendReply(result)
self.state.progress.result = result
self.state.progress.status = (
rdf_file_finder.CollectSingleFileProgress.Status.COLLECTED)
def FileFetchFailed(self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None):
"""See MultiGetFileLogic."""
if (self.client_os == "Windows" and
pathspec.pathtype == rdf_paths.PathSpec.PathType.OS):
# Retry with raw filesystem access on Windows,
# the file might be locked for reads.
raw_pathspec = rdf_paths.PathSpec(
path=self.args.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
self.StartFileFetch(raw_pathspec)
elif status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype)
# TODO: this is a really bad hack and should be fixed by
# passing the 'not found' status in a more structured way.
if "File not found" in status.error_message:
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.NOT_FOUND
else:
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.FAILED
self.state.progress.error_description = error_description
raise flow_base.FlowError(error_description)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype))
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.FAILED
self.state.progress.error_description = error_description
raise flow_base.FlowError(error_description)
@classmethod
def GetDefaultArgs(cls, username=None):
"""See base class."""
del username # Unused.
return rdf_file_finder.CollectSingleFileArgs(
path="", max_size_bytes="1 GiB")
# Although MultiGetFileLogic is a leaky, complex, and overall problematic Mixin
# it seems to be the best choice to fetch the stat, hashes, and contents of a file.
# At the time of writing, none of the flows exposed all three to the caller in
# a sensible way.
class CollectFilesByKnownPath(transfer.MultiGetFileLogic, flow_base.FlowBase):
"""Fetches specified absolute path file contents."""
friendly_name = "File contents by exact path"
category = "/Filesystem/"
behaviours = flow_base.BEHAVIOUR_DEBUG
args_type = rdf_file_finder.CollectFilesByKnownPathArgs
result_types = (rdf_file_finder.CollectFilesByKnownPathResult,)
progress_type = rdf_file_finder.CollectFilesByKnownPathProgress
def GetProgress(self) -> rdf_file_finder.CollectFilesByKnownPathProgress:
return self.state.progress
def Start(self):
super().Start(file_size=_MAX_FILE_SIZE)
self.state.progress = rdf_file_finder.CollectFilesByKnownPathProgress(
num_in_progress=0,
num_raw_fs_access_retries=0,
num_collected=0,
num_failed=0,
)
if self.args.collection_level == rdf_file_finder.CollectFilesByKnownPathArgs.CollectionLevel.STAT:
self.state.stop_at_stat = True
elif self.args.collection_level == rdf_file_finder.CollectFilesByKnownPathArgs.CollectionLevel.HASH:
self.state.stop_at_hash = True
for path in self.args.paths:
pathspec = rdf_paths.PathSpec.OS(path=path)
self.StartFileFetch(
pathspec, request_data=dict(requested_pathspec=pathspec))
self.state.progress.num_in_progress += 1
def ReceiveFetchedFileStat(self,
stat_entry: rdf_client_fs.StatEntry,
request_data: Optional[Mapping[str, Any]] = None):
"""This method will be called for each new file stat successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_stat:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, status=status)
self.SendReply(result)
def ReceiveFetchedFileHash(self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None):
"""This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_hash:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
      status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, hash=file_hash, status=status)
self.SendReply(result)
def ReceiveFetchedFile(self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None,
is_duplicate: bool = False):
"""This method will be called for each new file successf
|
ully fetched.
Args:
stat_entry: rdf_client_fs.StatE
|
caktus/rapidsms-groups
|
groups/views.py
|
Python
|
bsd-3-clause
| 3,191
| 0
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from rapidsms.models import Contact
from groups.models import Group
from groups.forms import GroupForm, ContactForm
@login_required
def list_groups(request):
groups = Group.objects.annotate(count=Count('contacts')).order_by('name')
return render(request, 'groups/groups/list.html', {
'groups': groups,
})
@login_required
@transaction.commit_on_success
def create_edit_group(request, group_id=None):
group = None
if group_id:
group = get_object_or_404(Group, pk=group_id)
if not group.is_editable:
return HttpResponseForbidden('Access denied')
if request.method == 'POST':
form = GroupForm(request.POST, instance=group)
if form.is_valid():
form.save()
messages.info(request, 'Group saved successfully')
return HttpResponseRedirect(reverse('list-groups'))
else:
form = GroupForm(instance=group)
return render(request, 'groups/groups/create_edit.html', {
'form': form,
'group': group,
})
@login_required
@transaction.commit_on_success
def delete_group(request, group_id):
group = get_object_or_404(Group, pk=group_id)
if not group.is_editable:
return HttpResponseForbidden('Access denied')
if request.method == 'POST':
group.delete()
messages.info(request, 'Group successfully deleted')
return HttpResponseRedirect(reverse('list-groups'))
return render(request, 'groups/groups/delete.html', {
'group': group,
})
@login_required
def list_contacts(request):
contacts = Contact.objects.all().order_by('name')
return render(request, 'groups/contacts/list.html', {
'contacts': contacts,
})
@login_required
@transaction.commit_on_success
def create_edit_contact(request, contact_id=None):
contact = None
if contact_id:
contact = get_object_or_404(Contact, pk=contact_id)
if request.method == 'POST':
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
form.save()
messages.info(request, 'Contact saved successfully')
return HttpResponseRedirect(reverse('list-contacts'))
else:
form = ContactForm(instance=contact)
return render(request, 'groups/contacts/create_edit.html', {
'form': form,
'contact': contact,
})
@login_required
@transaction.commit_on_success
def delete_contact(request, contact_id):
contact = get_object_or_404(Contact, pk=contact_id)
if request.method == 'POST':
contact.delete()
messages.info(request, 'Contact successfully deleted')
return HttpResponseRedirect(reverse('list-contacts'))
return render(request, 'groups/contacts/delete.html', {
'contact': contact,
})
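# Hedged wiring sketch (not part of this module): the reverse() calls above rely
# on URL names such as 'list-groups' and 'list-contacts'. An app-level urls.py
# could look roughly like the following; regexes and extra names are illustrative.
#
#   from django.conf.urls import patterns, url
#
#   urlpatterns = patterns('groups.views',
#       url(r'^groups/$', 'list_groups', name='list-groups'),
#       url(r'^groups/new/$', 'create_edit_group', name='create-group'),
#       url(r'^contacts/$', 'list_contacts', name='list-contacts'),
#   )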
|
Dawny33/Code
|
Code_Forces/A_string_task.py
|
Python
|
gpl-3.0
| 152
| 0.006579
|
T = raw_input().lower()
vowels = "aeiouy"
output = ""
for i in range(0,len(T)):
if T[i] not in vowels:
output += "." + T
|
[i]
print output
|
wangming28/syzygy
|
syzygy/scripts/test_bot/PRESUBMIT.py
|
Python
|
apache-2.0
| 1,582
| 0.005689
|
#!python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Additional presubmit script. This will be run for changes to files in this
# subdirectory, as well as the root syzygy/PRESUBMIT.py.
#
# This script will be read as a string and intepreted, so __file__ is not
# available. However, it is guaranteed to be run with this file's directory as
# the current working directory.
def CheckChange(input_api, output_api, dummy_committing):
# We only check Python files in this tree. The others are checked by the
# PRESUBMIT in the root Syzygy directory.
white_list = [r'^.*\.py$']
black_list = []
disabled_warnings = []
results = input_api.canned_checks.RunPylint(
input_api,
output_api,
white_list=white_list,
black_list=black_list,
disabled_warnings=disabled_warnings)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api, False)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api, True)
|
rmasters/inbox
|
migrations/versions/061_remove_easfoldersyncstatus_folder_rows_.py
|
Python
|
agpl-3.0
| 1,547
| 0.002586
|
"""Remove EASFolderSyncStatus + Folder rows f
|
or folders we never sync
Revision ID: 2a748760ac63
Revises: 4af5952e8a5b
Create Date: 2014-07-19 00:28:08.258857
"""
# revision identifiers, used by Alembic.
revision = 'bb4f204f192'
down_revision = '2a748760ac63'
from inbox.ignition import engine
from inbox.models.session import session_scope
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
Base = declarative_base()
Base.metadata.reflect(engine)
def upgrade():
if 'easfoldersyncstatus' in Base.metadata.tables:
from inbox.models.backends.eas import EASFolderSyncStatus
from inbox.models import Folder
from inbox.util.eas.constants import SKIP_FOLDERS
with session_scope(versioned=False, ignore_soft_deletes=False) as \
db_session:
statuses = db_session.query(EASFolderSyncStatus).filter(
EASFolderSyncStatus.eas_folder_type.in_(SKIP_FOLDERS)).all()
for s in statuses:
db_session.delete(s)
db_session.delete(s.folder)
try:
for status in db_session.query(EASFolderSyncStatus)\
.join(Folder).filter(
Folder.name == 'RecipientInfo').all():
db_session.delete(status)
db_session.delete(status.folder)
except NoResultFound:
pass
db_session.commit()
def downgrade():
raise Exception("Nope, not needed.")
|
kikokubo/Sick-Beard-TPB
|
lib/subliminal/services/usub.py
|
Python
|
gpl-3.0
| 4,305
| 0.00813
|
# -*- coding: utf-8 -*-
# Copyright 2013 Julien Goret <jgoret@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import urllib
logger = logging.getLogger("subliminal")
class Usub(ServiceBase):
server_url = 'http://www.u-sub.net/sous-titres'
site_url = 'http://www.u-sub.net/'
api_based = False
languages = language_set(['fr'])
videos = [Episode]
require_video = False
#required_features = ['permissive']
def list_checked(self, video, languages):
return self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
    def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None):
        ## Check that we really got information about our episode
if series and season and episode:
            request_series = series.lower().replace(' ', '-')
if isinstance(request_series, unicode):
request_series = request_series.encode('utf-8')
logger.debug(u'Getting subtitles for %s season %d episode %d with language %r' % (series, season, episode, languages))
r = self.session.get('%s/%s/saison_%s' % (self.server_url, urllib.quote(request_series),season))
if r.status_code == 404:
print "Error 404"
logger.debug(u'Could not find subtitles for %s' % (series))
return []
else:
print "One or more parameter missing"
raise ServiceError('One or more parameter missing')
        ## Check that we didn't get a big and nasty http error
if r.status_code != 200:
print u'Request %s returned status code %d' % (r.url, r.status_code)
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
        ## Edit the episode information so we can use it in our search
if episode < 10 :
episode_num='0'+str(episode)
else :
episode_num=str(episode)
season_num = str(season)
series_name = series.lower().replace(' ', '.')
possible_episode_naming = [season_num+'x'+episode_num,season_num+episode_num]
## Actually parsing the page for the good subtitles
soup = BeautifulSoup(r.content, self.required_features)
subtitles = []
subtitles_list = soup.find('table', {'id' : 'subtitles_list'})
link_list = subtitles_list.findAll('a', {'class' : 'dl_link'})
for link in link_list :
link_url = link.get('href')
splited_link = link_url.split('/')
filename = splited_link[len(splited_link)-1]
for episode_naming in possible_episode_naming :
if episode_naming in filename :
for language in languages:
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s' % (link_url))
subtitles.append(subtitle)
return subtitles
def download(self, subtitle):
## All downloaded files are zip files
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = Usub
|
ipa-led/airbus_coop
|
airbus_docgen/src/airbus_docgen/digraph/spline.py
|
Python
|
apache-2.0
| 717
| 0.001395
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SPLINE:
Ortho = 'ortho'
|
unho/pootle
|
pootle/apps/accounts/apps.py
|
Python
|
gpl-3.0
| 566
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import importlib
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = "accounts"
    verbose_name = "Accounts"
version = "0.1.1"
def ready(self):
importlib.import_module("accounts.getters")
importlib.import_module("accounts.receivers")
|
pevma/PtP
|
ProtoIPv4/IPv4_HTTP.py
|
Python
|
gpl-2.0
| 182,209
| 0.017381
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
## ##
# Author: Peter Manev #
# peter.manev@openinfosecfoundation.org #
## ##
## !!! IMPORTANT - LATEST DEV Scapy is needed !!!
# REMOVE your current scapy installation !!!
# then ->
# hg clone http://hg.secdev.org/scapy-com
# python setup.py install
from scapy.all import *
import sys, urllib , os, subprocess, random
from itertools import *
import Global_Vars
class pacifyIpv4Http:
def writeIPv4HttpRule(self, sid_id_http, http_method, http_uri_string, \
http_content_all, directory, src_name):
##creating and writing a sid.rules file
rule_file = open('%s/%s.rules' % (directory,sid_id_http), 'w+')
content_http_uri_string_ready_for_rule = None
content_http_uri_string_ready_for_rule = ""
if (len(http_uri_string) > 250):
content_http_uri_string_array = [http_uri_string[i:i+250] for i in range(0, len(http_uri_string), 250)]
for i in content_http_uri_string_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
content_http_uri_string_ready_for_rule + \
("content:\"%s\"; http_raw_uri; " % (i))
else:
http_uri_string = http_uri_string.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
("content:\"%s\"; http_raw_uri; " % (http_uri_string))
content_all_ready_for_rule = None
content_all_ready_for_rule = ""
if (len(http_content_all) > 250):
content_http_all_array = [http_content_all[i:i+250] for i in range(0, len(http_content_all), 250)]
for i in content_http_all_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
content_all_ready_for_rule + \
("content:\"%s\"; " % (i))
else:
http_content_all = http_content_all.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
("content:\"%s\"; " % (http_content_all))
rule_file.write ( \
"alert http any any -> any any (msg:\"HTTP requests tests - sid %s , \
pcap - %s \"; \
content:\"%s\"; http_method; %s %s \
reference:url,%s; sid:%s; rev:1;)" % \
(sid_id_http, sid_id_http, http_method, \
content_http_uri_string_ready_for_rule, \
content_all_ready_for_rule, \
src_name, sid_id_http) )
rule_file.close()
def rebuildIPv4HttpSessionExtraTcpSAs(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#We rebuild the http session , however inject some extra SAs
session_packets = list()
session_packets_fragmented = list()
#print packet[TCP][Raw]
#print packet[Ether].src
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint((2**10),(2**16))
# We make sure ack_num_extra* are never going to be the same numbering
# as ack_num
ack_num_extra_1 = random.randint((2**22)+1 , (2**32)-1)
ack_num_extra_2 = random.randint((2**16)+1,(2**22)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack_extra_1 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_1, ack=syn.seq+1)
synack_extra_2 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_2, ack=syn.seq+1)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
p_frag_synack = fragment(synack, fragsize=1 )
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA",
sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# BEFORE the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack_extra_1)
session_packets.append(synack_extra_2)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
sessi
|
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/start_task.py
|
Python
|
mit
| 5,044
| 0.000595
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
start task runs. When this is specified, all directories recursively below
the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the
node) are mapped into the container, all task environment variables are
mapped into the container, and the task command line is executed in the
container.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param user_identity: The user identity under which the start task runs.
    If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: ~azure.batch.models.UserIdentity
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
    For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and failure info details. If false, the
Batch service will not wait for the start task to complete. In this case,
other tasks can start executing on the compute node while the start task
is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
|
vaygr/ansible
|
lib/ansible/modules/monitoring/uptimerobot.py
|
Python
|
gpl-3.0
| 3,698
| 0.001622
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: uptimerobot
short_description: Pause and start Uptime Robot monitoring
description:
- This module will let you start and pause Uptime Robot Monitoring
author: "Nate Kingsley (@nate-kingsley)"
version_added: "1.9"
requirements:
- Valid Uptime Robot API Key
options:
state:
description:
- Define whether or not the monitor should be running or paused.
required: true
choices: [ "started", "paused" ]
monitorid:
description:
- ID of the monitor to check.
required: true
apikey:
description:
- Uptime Robot API key.
required: true
notes:
- Support for adding and removing monitors and alert contacts has not yet been implemented.
'''
EXAMPLES = '''
# Pause the monitor with an ID of 12345.
- uptimerobot:
monitorid: 12345
apikey: 12345-1234512345
state: paused
# Start the monitor with an ID of 12345.
- uptimerobot:
    monitorid: 12345
apikey: 12345-1234512345
state: started
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
API_BASE = "http://api.uptimerobot.com/"
API_ACTIONS = dict(
status='getMonitors?',
editMonitor='editMonitor?'
)
API_FORMAT = 'json'
API_NOJSONCALLBACK = 1
CHANGED_STATE = False
SUPPORTS_CHECK_MODE = False
def checkID(module, params):
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['status'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult
def startMonitor(module, params):
params['monitorStatus'] = 1
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult['stat']
def pauseMonitor(module, params):
params['monitorStatus'] = 0
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult['stat']
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['started', 'paused']),
apikey=dict(required=True),
monitorid=dict(required=True)
),
supports_check_mode=SUPPORTS_CHECK_MODE
)
params = dict(
apiKey=module.params['apikey'],
monitors=module.params['monitorid'],
monitorID=module.params['monitorid'],
format=API_FORMAT,
noJsonCallback=API_NOJSONCALLBACK
)
check_result = checkID(module, params)
if check_result['stat'] != "ok":
module.fail_json(
msg="failed",
result=check_result['message']
)
if module.params['state'] == 'started':
monitor_result = startMonitor(module, params)
else:
monitor_result = pauseMonitor(module, params)
module.exit_json(
msg="success",
result=monitor_result
)
if __name__ == '__main__':
main()
|
cdeil/ctools
|
scripts/obsutils.py
|
Python
|
gpl-3.0
| 9,202
| 0.040535
|
# ==========================================================================
# This script provides a number of functions that are useful for handling
# CTA observations.
#
# Copyright (C) 2011-2013 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
from ctools import *
from gammalib import *
# ===================== #
# Simulate observations #
# ===================== #
def sim(obs, log=False, debug=False, seed=0, nbins=0, binsz=0.05, npix=200):
"""
Simulate events for all observations in the container.
Parameters:
obs - Observation container
Keywords:
log - Create log file(s)
debug - Create screen dump
seed - Seed value for simulations (default: 0)
nbins - Number of energy bins (default: 0=unbinned)
binsz - Pixel size for binned simulation (deg/pixel)
npix - Number of pixels in X and Y for binned simulation
"""
# Allocate ctobssim application and set parameters
sim = ctobssim(obs)
sim['seed'].integer(seed)
# Optionally open the log file
if log:
sim.logFileOpen()
    # Optionally switch on debug mode
if debug:
sim["debug"].boolean(True)
# Run ctobssim application. This will loop over all observations in the
    # container and simulate the events for each observation. Note that
# events are not added together, they still apply to each observation
# separately.
sim.run()
# Binned option?
if nbins > 0:
# Determine common energy boundaries for observations
emin = None
emax = None
for run in sim.obs():
run_emin = run.events().ebounds().emin().TeV()
run_emax = run.events().ebounds().emax().TeV()
            if emin is None:
emin = run_emin
elif run_emin > emin:
emin = run_emin
            if emax is None:
emax = run_emax
elif run_emax > emax:
emax = run_emax
# Allocate ctbin application and set parameters
bin = ctbin(sim.obs())
bin["emin"].real(emin)
bin["emax"].real(emax)
bin["enumbins"].integer(nbins)
bin["usepnt"].boolean(True) # Use pointing for map centre
bin["nxpix"].integer(npix)
bin["nypix"].integer(npix)
bin["binsz"].real(binsz)
bin["coordsys"].string("GAL")
bin["proj"].string("TAN")
# Optionally open the log file
if log:
bin.logFileOpen()
        # Optionally switch on debug mode
if debug:
bin["debug"].boolean(True)
# Run ctbin application. This will loop over all observations in
# the container and bin the events in counts maps
bin.run()
# Make a deep copy of the observation that will be returned
        # (the ctbin object will go out of scope once the function is
# left)
obs = bin.obs().copy()
else:
# Make a deep copy of the observation that will be returned
        # (the ctobssim object will go out of scope once the function is
# left)
obs = sim.obs().copy()
# Delete the simulation
del sim
# Return observation container
return obs
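# A minimal usage sketch for sim() (illustrative only; assumes an observation
# container `observations` that already holds CTA observations with attached
# models, e.g. built with the set() helper defined further below):
#
#     obs_unbinned = sim(observations, log=True, seed=1)   # unbinned event lists
#     obs_binned   = sim(observations, nbins=20)           # additionally bin into counts maps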
# ================ #
# Fit observations #
# ================ #
def fit(obs, log=False, debug=False):
"""
Perform maximum likelihood fitting of observations in the container.
Parameters:
obs - Observation container
Keywords:
log - Create log file(s)
debug - Create screen dump
"""
# Allocate ctlike application
like = ctlike(obs)
# Optionally open the log file
if log:
like.logFileOpen()
    # Optionally switch on debug mode
if debug:
like["debug"].boolean(True)
# Run ctlike application.
like.run()
# Return observations
return like
# ================= #
# Create counts map #
# ================= #
def cntmap(obs, proj="TAN", coord="GAL", xval=0.0, yval=0.0, \
binsz=0.05, nxpix=200, nypix=200, \
outname="cntmap.fits"):
"""
Creates a counts map by combining the events of all observations.
The counts map will be a summed map over all energies.
Parameters:
obs - Observation container
Keywords:
proj - Projection type (e.g. TAN, CAR, STG, ...) (default: TAN)
coord - Coordinate type (GAL, CEL) (default: GAL)
     xval    - Reference longitude value [deg] (default: 0.0)
yval - Reference latitude value [deg] (default: 0.0)
binsz - Pixel size [deg/pixel] (default: 0.05)
nxpix - Number of pixels in X direction (default: 200)
     nypix   - Number of pixels in Y direction (default: 200)
outname - Counts map FITS filename (default: cntmap.fits)
"""
# Allocate counts map
map = GSkymap(proj, coord, xval, yval, -binsz, binsz, nxpix, nypix, 1)
# Fill all observations
for run in obs:
# Loop over all events
for event in run.events():
# Determine sky pixel
skydir = GCTAInstDir(event.dir()).dir()
pixel = map.dir2pix(skydir)
# Set pixel
map[pixel] += 1.0
# Save sky map. The clobber flag is set to True, so any existing FITS
# file will be overwritten.
map.save(outname, True)
# Return counts map
return map
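# A minimal usage sketch for cntmap() (illustrative only; assumes `observations`
# is a filled observation container, e.g. the result of sim() above):
#
#     counts = cntmap(observations, binsz=0.02, nxpix=400, nypix=400,
#                     outname="cntmap.fits")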
# ================ #
# Create model map #
# ================ #
def modmap(obs, eref=0.1, proj="TAN", coord="GAL", xval=0.0, yval=0.0, \
binsz=0.05, nxpix=200, nypix=200, \
outname="modmap.fits"):
"""
Make model map for a given reference energy by combining all observations.
The model map will be evaluated for a given reference energy 'eref' and will
be given in units of [counts/(sr MeV s)].
Parameters:
obs - Observation container
Keywords:
eref - Reference energy for which model is created [TeV] (default: 0.1)
proj - Projection type (e.g. TAN, CAR, STG, ...) (default: TAN)
coord - Coordinate type (GAL, CEL) (default: GAL)
xval - Reference longitude value [deg] (default: 0.0)
yval - Reference latitude value [deg] (default: 0.0)
binsz - Pixel size [deg/pixel] (default: 0.05)
nxpix - Number of pixels in X direction (default: 200)
nypix - Number of pixels in Y direction (default: 200)
outname - Model map FITS filename (default: modmap.fits)
"""
# Allocate model map
map = GSkymap(proj, coord, xval, yval, -binsz, binsz, nxpix, nypix, 1)
# Set reference energy, time and direction. The time is not initialised and is
# in fact not used (as the IRF is assumed to be time independent for now).
# The sky direction is set later using the pixel values.
energy = GEnergy()
time = GTime()
instdir = GCTAInstDir()
energy.TeV(eref)
# Loop over all map pixels
for pixel in range(map.npix()):
# Get sky direction
skydir = map.pix2dir(pixel)
instdir.dir(skydir)
# Create event atom for map pixel
atom = GCTAEventAtom()
atom.dir(instdir)
atom.energy(energy)
atom.time(time)
# Initialise model value
value = 0.0
# Loop over all observations
for run in obs:
value += obs.models().eval(atom, run)
# Set map value
map[pixel] = value
# Save sky map
map.save(outname, True)
# Return model map
return map
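# A minimal usage sketch for modmap() (illustrative only; assumes `observations`
# carries fitted models, e.g. the .obs() of the ctlike object returned by fit()):
#
#     model_map = modmap(observations, eref=1.0, outname="modmap.fits")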
# ======================= #
# Set one CTA observation #
# ======================= #
def set(pntdir, tstart=0.0, duration=1800.0, deadc=0.95, \
emin=0.1, emax=100.0, rad=5.0, \
irf="cta_dummy_irf", caldb="$GAMMALIB/share/caldb/cta"):
"""
Returns a single CTA observation. By looping over this function we can
add CTA observations to the observation container.
Parameters:
pntdir - Pointing direction
Keywords:
tstart - Start time [seconds] (default: 0.0)
duration - Duration of observation [seconds] (default: 1800.0)
deadc - Deadtime correction factor (default: 0.95)
emin - Minimum event energy [TeV] (default: 0.1)
emax - Maximum event energy [TeV] (default: 100.0)
rad - ROI radius used for analysis [deg] (default: 5.0
|
tantSinnister/doto
|
doto/model/timerecord.py
|
Python
|
bsd-3-clause
| 2,998
| 0.000667
|
import doto.model
import doto.model.task
CREATE_CMD = """
CREATE TABLE IF NOT EXISTS
timerecords (
id INTEGER NOT NULL,
task_id INTEGER,
start TIMESTAMP,
end TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY(task_id) REFERENCES tasks (id)
)
"""
class Timerecord(object):
"""
    A timerecord is a time span for which one worked on a task.
    A timerecord is a time span that is associated with an event.
    The sum of all timerecords is the total amount of work that was put into the Task.
    This can be used to track the amount of time one worked on a specific task.
This should come in handy for freelancers (like me).
"""
def __init__(self, start, end=None, task_event=None):
"""
"""
self.id = None
self.span = doto.model.TimeSpan(start=start, end=end)
self.task = task_event
@staticmethod
def row_to_obj(row, store):
"""
Create Task from database row
"""
timerecord = doto.model.unwrap_row(store,
row,
Timerecord,
('start', 'end'),
('id',))
task_id = row['task_id']
if task_id is None:
timerecord.task = None
else:
timerecord.task = doto.model.task.get(store, task_id)
return timerecord
@staticmethod
def obj_to_row(obj):
row_dict = doto.model.unwrap_obj(obj, ignore_list=['span', 'task'])
row_dict['task_id'] = obj.task.id if obj.task is not None else None
row_dict['start'] = obj.span.start
row_dict['end'] = obj.span.end
return row_dict
def get_started_timerecords(store):
"""
    Get all timerecords which have been started but not yet ended.

    @param store the store to query

    @return A list of timerecords whose end timestamp is still unset
"""
return store.query(Timerecord.row_to_obj, 'SELECT * FROM timerecords WHERE end IS NULL;', ())
insert_query = """INSERT INTO timerecords ( task_id, start, end)
VALUES (:task_id, :start, :end)
;
"""
update_query = """UPDATE timerecords SET task_id = :task_id,
start = :start,
end = :end
WHERE id = :id;
"""
delete_query = 'DELETE FROM timerecords WHERE id = ?;'
update = doto.model.crud.update(update_query, Timerecord)
add_new = doto.model.crud.insert(insert_query, Timerecord)
delete = doto.model.crud.delete(delete_query)
doto.model.setup_module(CREATE_CMD, ())
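# A minimal usage sketch (illustrative only; assumes a `store` handle created by
# doto.model elsewhere, and that the callables produced by doto.model.crud accept
# (store, object) -- neither detail is shown in this module):
#
#     record = Timerecord(start=some_start_timestamp)
#     add_new(store, record)
#     open_records = get_started_timerecords(store)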
|
bazzile/imScrape
|
Scripts/v1/dev/auxiliary/move_imagery.py
|
Python
|
mit
| 1,527
| 0.001972
|
import os
import shutil
import re
import zipfile
import xml.etree.ElementTree as ET
from tempfile import TemporaryDirectory
import psycopg2
conn = psycopg2.connect(
database='innoter', user='postgres', password='postgres', host='192.168.0.107', port='5432')
cursor = conn.cursor()
dst_dir = r"\\nas1\storage\DG_archive\sat"
path_list = []
cursor.execute(
"""SELECT path, order_id
FROM geoarchive.dg_orders
WHERE aero is not TRUE""")
results = cursor.fetchall()
for i, result in enumerate(results):
zip_path, order_id = result[0], result[1]
print(i + 1, zip_path)
dst_filepath = os.path.join(dst_dir, os.path.basename(zip_path))
shutil.move(zip_path, dst_filepath)
cursor.execute("""UPDATE geoarchive.dg_orders
SET path = %s
WHERE order_id = %s""", [dst_filepath, order_id, ], )
conn.commit()
print('Done...\n')
# with zipfile.ZipFile(zip_path) as zf:
# order_shape = [fnm for fnm in zf.namelist() if re.match(r'.+ORDER_SHAPE.+', fnm, re.I)]
# if not order_shape:
# # for fnm in zf.namelist():
#         #     if re.match(r'.+ORDER_SHAPE.+', fnm, re.I) is None:
# cursor.execute("""UPDATE geoarchive.dg_orders
# SET aero = TRUE
#                 WHERE order_id = %s""", [order_id, ],)
# conn.commit()
# print(80*'=', order_id, 80*'=')
# aero_list.append(order_id)
#
# print('\nDone:\n', len(aero_list))
# for i in aero_list:
# print(i)
|
christopherjenness/ML-lib
|
ML/treemethods.py
|
Python
|
mit
| 31,697
| 0.000126
|
"""
Tree based methods of learning (classification and regression)
"""
import abc
import numpy as np
import networkx as nx
from scipy.stats import mode
class BaseTree(object):
"""
    Base Tree for classification/regression. Written for a single
    variable/value binary split criterion. Many methods need to be
    rewritten if a more complex split criterion is desired.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""
Attributes:
graph (nx.DiGraph): Directed graph which stores tree
nodes (int): Current number of nodes in tree
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
learned (bool): Keeps track of if model has been fit
"""
self.graph = nx.DiGraph()
self.graph.add_node(1)
self.nodes = 1
self.X = None
self.y = None
self.learned = False
def fit(self, X, y, height, weights=None):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
height (int): height of tree
weights (np.array): array of sample weights
if None, all samples are weighted evenly
Returns: an instance of self
"""
self.X = X
self.y = y
self.weights = weights
for layer in range(height):
self.add_layer()
self.compute_class_averages()
self.learned = True
return self
def predict(self, x):
"""
Args:
x (np.array): Training data of shape[n_features,]
Returns:
float: predicted value
Raises:
ValueError if model has not been fit
Notes:
Currently, only a single data instance can be predicted at a time.
"""
if not self.learned:
raise NameError('Fit model first')
current_node = 1
leaves = self.get_leaves()
while current_node not in leaves:
children = self.graph.successors(current_node)
current_variable = self.graph.node[current_node]['variable']
current_cutoff = self.graph.node[current_node]['cutoff']
if current_variable is None:
return self.graph.node[current_node]['classval']
if x[current_variable] > current_cutoff:
current_node = children[1]
else:
current_node = children[0]
return self.graph.node[current_node]['classval']
def add_layer(self):
"""
        Used by fit() to add a single layer at the bottom of the tree
"""
leaves = self.get_leaves()
for leaf in leaves:
data_indices = self.partition_data(leaf)
leaf_X = self.X[data_indices, :]
leaf_y = self.y[data_indices]
self.add_split(leaf, leaf_X, leaf_y)
def get_leaves(self):
"""
Used by add_layer() to get the leaves of the tree.
"""
leaves = []
for node in self.graph.nodes():
if len(self.graph.successors(node)) == 0:
leaves.append(node)
return leaves
def add_split(self, node_number, data, values):
"""
Used by add_layer() to add two children at a leaf in the tree
Args:
node_number (int): Node in tree which a new split is added to
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
"""
min_feature, min_split = self.learn_split(data, values)
self.graph.node[node_number]['variable'] = min_feature
self.graph.node[node_number]['cutoff'] = min_split
for i in range(2):
self.nodes += 1
self.graph.add_edge(node_number, self.nodes)
def partition_data(self, node_number):
"""
Partitions the training data at a given node. Traverses the
        entire tree down to the indicated node.
Args:
node_number (int): Node in tree to partition data down to
Returns:
np.array: Array of indices from training data which
partition to node
"""
predecessors = self.get_predecessors(node_number)
predecessors.reverse()
predecessors.append(node_number)
data_indices = np.array(range(len(self.y)))
node_count = 0
while node_count < len(predecessors) - 1:
current_node = predecessors[node_count]
next_node = predecessors[node_count + 1]
current_variable = self.graph.node[current_node]['variable']
current_cutoff = self.graph.node[current_node]['cutoff']
if current_cutoff is None:
return []
if next_node == min(self.graph.successors(current_node)):
data_indices = data_indices[self.X[data_indices,
current_variable]
< current_cutoff]
else:
data_indices = data_indices[self.X[data_indices,
current_variable]
> current_cutoff]
node_count += 1
return data_indices
def get_predecessors(self, node_number):
"""
        Used by partition_data() to get predecessors of a given node
(to walk down the tree).
"""
predecessors = []
current_node = node_number
while len(self.graph.predecessors(current_node)) > 0:
current_node = self.graph.predecessors(current_node)[0]
predecessors.append(current_node)
return predecessors
@abc.abstractmethod
def compute_class_averages(self):
"""
Method to compute average value for all nodes in the tree
"""
return
@abc.abstractmethod
def learn_split(self, inputs, values):
"""
Method to learn split given a data set (inputs) with
target values (values)
"""
return
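# A minimal usage sketch for the tree interface above (illustrative only; uses the
# RegressionTree subclass defined below on a tiny in-memory data set):
#
#     import numpy as np
#     X = np.array([[0.0], [1.0], [2.0], [3.0]])
#     y = np.array([0.0, 0.1, 0.9, 1.0])
#     tree = RegressionTree().fit(X, y, height=2)
#     prediction = tree.predict(np.array([1.5]))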
class RegressionTree(BaseTree):
"""
    Regression Tree implementing the CART algorithm
"""
def __init__(self):
BaseTree.__init__(self)
def learn_split(self, inputs, values):
"""
CART algorithm to learn split at node in tree.
Minimizes mean squared error of the two classes generated.
Args:
            inputs (np.ndarray): data of shape[n_samples, n_features]
                Data which node split will be based off of
            values (np.array): values of shape[n_samples,]
                Target values which node split will be based off of
        Returns: (min_feature, min_split)
            min_feature (int): feature number to split data by
                Essentially, the column number from the data which
                split is performed on
            min_split (float): feature value at which to split
"""
if self.weights is None:
weights = np.ones(len(values))
else:
weights = np.array(self.weights)
min_error = np.inf
min_feature = None
min_split = None
for feature in range(np.shape(inputs)[1]):
feature_vector = inputs[:, feature]
sorted_vector = np.unique(np.sort(feature_vector))
feature_splits = (sorted_vector[1:] + sorted_vector[:-1]) / 2
for split in feature_splits:
lower_class_average = np.mean(values[feature_vector < split])
upper_class_average = np.mean(values[feature_vector > split])
lower_class_errors = (values[feature_vector < split] -
lower_class_average) * \
weights[feature_vector < split]
upper_class_errors = (values[feature_vector > split] -
|
jekhokie/scriptbox
|
python--advent-of-code/2020/5/solve.py
|
Python
|
mit
| 960
| 0.015625
|
#!/usr/bin/env python3
from math import floor, ceil
lines = []
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
def get_val(line, start_pos, end_pos, lhalf, uhalf, uhalf_char):
for x in line[start_pos:end_pos:]:
if x == uhalf_char: # take lower half
uhalf -= ceil((uhalf - lhalf) / 2)
else: # take upper half
lhalf += ceil((uhalf - lhalf) / 2)
    if lhalf != uhalf:
        raise Exception("Something went wrong: {} != {}".format(lhalf, uhalf))
return uhalf
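# Worked example: for the boarding pass "FBFBBFFRLR",
# get_val(line, 0, 7, 0, 127, 'F') bisects rows 0-127 down to row 44 and
# get_val(line, 7, 10, 0, 7, 'L') bisects columns 0-7 down to column 5,
# giving seat ID 44 * 8 + 5 = 357.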
#--- challenge 1
seat_ids = []
for boarding_pass in lines:
row = get_val(boarding_pass, 0, 7, 0, 127, 'F')
column = get_val(boarding_pass, 7, 10, 0, 7, 'L')
seat_ids.append(row * 8 + column)
print("Solution to challenge 1: {}".format(max(seat_ids)))
#--- challenge 2
seat_ids.sort()
missing_seat = ""
for x in range(seat_ids[0], seat_ids[-1]):
if x not in seat_ids:
        missing_seat = x
print("Solution to challenge 2: {}".format(missing_seat))
|
dmccloskey/SBaaS_quantification
|
SBaaS_quantification/lims_quantitationMethod_io.py
|
Python
|
mit
| 26,562
| 0.03569
|
import json
import re
from SBaaS_LIMS.lims_calibratorsAndMixes_query import lims_calibratorsAndMixes_query
from SBaaS_LIMS.lims_sample_query import lims_sample_query
from .lims_quantitationMethod_query import lims_quantitationMethod_query
from .stage01_quantification_MQResultsTable_query import stage01_quantification_MQResultsTable_query
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from SBaaS_base.sbaas_template_io import sbaas_template_io
from ddt_python.ddt_container import ddt_container
class lims_quantitationMethod_io(lims_quantitationMethod_query,
stage01_quantification_MQResultsTable_query,
lims_calibratorsAndMixes_query,
#lims_msMethod_query,
lims_sample_query,
sbaas_template_io
):
def import_calibration_sampleAndComponents(self, filename):
'''import calibration curve sample and component information'''
data = base_importData();
data.read_csv(filename);
data.format_data();
# split into separate data structures
samplesComponents_data = [];
for d in data.data:
samplesComponents_data.append({'sample_name':d['Sample Name'],
'sample_type':d['Sample Type'],
'met_id':d['Component Group Name']});
data.clear_data();
return samplesComponents_data;
def export_calibrationConcentrations(self, sampleAndComponent_fileName_I, concentrations_fileName_O):
'''export calibrator concentrations for "cut&paste" into Actual Concentration column in MultiQuant
when filtering Analytes only'''
#Input:
# sampleAndComponent_fileName_I = .csv file specifying sample_name, sample_type, and component_group_name
#Output:
# concentrations_fileName_O = .csv file specifying sample_name, sample_type, component_group_name, and actual_concentration
concentrations_O = [];
met_id_conv_dict = {'Hexose_Pool_fru_glc-D':'glc-D',
'Pool_2pg_3pg':'3pg'};
#import sampleAndComponents
samplesComponents = [];
samplesComponents = self.import_calibration_sampleAndComponents(sampleAndComponent_fileName_I);
#data = base_importData();
#data.read_csv(sampleAndComponent_fileName_I);
#samplesComponents = data.data;
for sc in samplesComponents:
# if met_id is a pool of metabolites, convert to the metabolite
# that is logged in calibrator tables and standards tables
if sc['met_id'] in list(met_id_conv_dict.keys()):
met_id_conv = met_id_conv_dict[sc['met_id']];
else:
met_id_conv = sc['met_id'];
#query calibrator_id and calibrator_level from sample
calibrator_id,calibrator_level = None,None;
calibrator_id,calibrator_level = self.get_calibratorIDAndLevel_sampleNameAndSampleType_sample(sc['sample_name'],sc['sample_type']);
#query calibrator_concentration from calibrator_concentrations
calibrator_concentration, concentration_units = 'N/A', None;
if calibrator_id and calibrator_level:
calibrator_concentration, concentration_units = self.get_calibratorConcentrationAndUnit_metIDAndCalibratorIDAndLevel_calibratorConcentrations(met_id_conv,calibrator_id,calibrator_level);
concentrations_O.append({'sample_name':sc['sample_name'], 'sample_type':sc['sample_type'],'component_group_name':sc['met_id'], 'actual_concentration':calibrator_concentration});
# write calibration curve concentrations to file
export = base_exportData(concentrations_O);
export.write_dict2csv(concentrations_fileName_O);
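    # The exported file has one row per sample/component pair; a sketch of the
    # layout (values below are placeholders, not real calibrator data):
    #
    #   sample_name,sample_type,component_group_name,actual_concentration
    #   Cal1,Standard,glc-D,0.5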
def import_quantitationMethod_add(self,QMethod_id_I, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_quantitationMethod(QMethod_id_I, data.data);
data.clear_data();
def export_quantitationMethod_js(self,QMethod_id_I,component_names_I=[],data_dir_I='tmp'):
'''Export the quantitation and calibrators to ddt'''
#get the calibrator data
data_1 = [];
data_2 = [];
data_1a = [];
# get the sample names that were used to generate the calibration curve:
if component_names_I:
component_names = component_names_I;
else:
component_names = [];
component_names = self.get_components(QMethod_id_I);
for cn in component_names:
# get the quant method parameters for each component
fit,weighting,use_area = self.get_quantMethodParameters(QMethod_id_I,cn);
# get the sample names for that component
sample_names = [];
sample_names = self.get_sampleNames_QMethodIDAndComponentNameAndSampleType(QMethod_id_I,cn,sample_type_I='Standard');
if not sample_names: continue;
concentrations = []
ratios = [];
for sn in sample_names:
# get the quant method rows
row = {};
row = self.get_row_sampleNameAndComponentName(sn,cn);
if row and not row is None and not row['concentration_ratio'] is None:
if use_area: row['ratio'] = row['area_ratio'];
else: row['ratio'] = row['height_ratio'];
row['acquisition_date_and_time'] = None;
data_1.append(row);
concentrations.append(row['concentration_ratio']);
ratios.append(row['ratio']);
if not concentrations: continue;
# get the quant method statistics
row = {};
row = self.get_row_QMethodIDAndComponentNamequantitationMethod(QMethod_id_I,cn);
if row:
data_2.append(row);
# generate the line of best fit
min_ratio = min(ratios);
max_ratio = max(ratios);
            index_min = [cnt for cnt,x in enumerate(ratios) if x == min_ratio][0];
index_max = [cnt for cnt,x in enumerate(ratios) if x == max_ratio][0];
conc_min = min(concentrations);
conc_max = max(concentrations);
sample_name_min = sample_names[index_min];
sample_name_max = sample_names[index_max];
data_1a.append({'concentration_ratio':row['lloq'],
'ratio':min_ratio,
'component_name':cn,
'sample_name':sample_name_min,
'id':QMethod_id_I});
data_1a.append({'concentration_ratio':row['uloq'],
'ratio':max_ratio,
'component_name':cn,
'sample_name':sample_name_max,
'id':QMethod_id_I});
# dump chart parameters to a js files
data1_keys = [
'id',
'concentration_ratio',
'sample_name',
'component_name',
'ratio',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':'sample_name'};
data2_keys = ['id',
'q1_mass',
'q3_mass',
'met_id',
'component_name',
'is_name',
'fit',
'weighting',
'intercept',
'slope',
'correlation',
'use_area',
'lloq',
|
chemelnucfin/tensorflow
|
tensorflow/contrib/distribute/python/keras_backward_compat_test.py
|
Python
|
apache-2.0
| 43,076
| 0.011213
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, (tpu_strategy.TPUStrategy,
tpu_strategy.TPUStrategyV1)):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras w
|
ith DS."""
training_epochs = 2
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
not distributed_training_utils.global_batch_size_supported(
with_distribution))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': tra
|
ncliam/serverpos
|
openerp/addons/email_template/tests/test_mail.py
|
Python
|
agpl-3.0
| 14,322
| 0.004538
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
def setUp(self):
super(test_message_compose, self).setUp()
# create a 'pigs' and 'bird' groups that will be used through the various tests
self.group_bird_id = self.mail_group.create(self.cr, self.uid,
{'name': 'Bird', 'description': 'I am angry !'})
def test_00_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
user_admin = self.res_users.browse(cr, uid, uid)
p_a_id = user_admin.partner_id.id
group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)
# Mail data
_subject1 = 'Pigs'
_subject2 = 'Bird'
_body_html1 = 'Fans of Pigs, unite !'
_body_html2 = 'I am angry !'
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# Create template on mail.group, with attachments
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': False,
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d'
})
# ----------------------------------------
# CASE1: comment and save as template
# ----------------------------------------
# 1. Comment on pigs
compose_id = mail_compose.create(cr, uid,
{'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
{'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]})
compose = mail_compose.browse(cr, uid, compose_id)
# 2. Save current composition form as a template
mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
# Test: email_template subject, body_html, model
last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
last_template = email_template.browse(cr, uid, last_template_id)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
# ----------------------------------------
        # CASE2: comment with template, save as template
# ----------------------------------------
# 1. Comment on pigs
context = {
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_use_template': False,
'default_template_id': email_template_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# Test: mail.compose.message: subject, body, partner_ids
self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# Test: mail.compose.message: attachments (owner has not been modified)
for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# Test: mail.message: attachments
mail_compose.send_mail(cr, uid, [compose_id])
group_pigs.refresh()
message_pigs = group_pigs.message_ids[0]
for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# ----------------------------------------
# CASE3: mass_mail with template
# ----------------------------------------
# 1. Mass_mail on pigs and bird, with a default_partner_ids set to check he is correctly added
context = {
'default_composition_mode': 'mass_mail',
'default_notify': True,
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_template_id': email_template_id,
'default_partner_ids': [p_a_id],
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, co
|
szecsi/Gears
|
GearsPy/Project/Components/Temporal/CellLti7.py
|
Python
|
gpl-2.0
| 1,209
| 0.033085
|
import Gears as gears
from .. import *
from .Filter import *
class CellLti7(Filter) :
def applyWithArgs(
self,
stimulus,
) :
sequence = stimulus.getSequence().getPythonObject()
stimulus.setLtiMatrix(
[
0, 0.47494, -0.0966925, 0.150786, -0.0647224, 0.0574935, 0.0180677 , 0.00359244 ,
0.383115, 0.865133, 0.366234, -0.100492, -0.017631, 0.0105551, 0.00101862 , 0.000257363 ,
0.330595, -0.366234, 0.768675, 0.175282, 0.1717, -0.0244711, -0.00537899 , -0.000588159 ,
-0.247068, 0.100492, 0.175282, 0.810277, -0.302384, 0.0166167, 0.00747239 , 0.000383462 ,
            0.157351, -0.017631, -0.1717, 0.302384, 0.393383, 0.3339, 0.0221983 , 0.00646065 ,
0.0351307, -0.0105551, -0.0244711, 0.0166167, -0.3339, 0.265154, -0.33863 , -0.0135562 ,
            -0.00584964, 0.00101862, 0.00537899, -0.00747239, 0.0221983, 0.33863, 0.186403 , -0.308048 ,
0.000798099, -0.000257363, -0.000588159, 0.000383462, -0.00646065, -0.0135562, 0.308048 , 0.11294 ,
]
)
|
kirbyfan64/hytest
|
setup.py
|
Python
|
mit
| 1,421
| 0.004222
|
try:
from setuptools import setup
from setuptools.command.build_py import build_py
setuptools = True
except:
from distutils.core import setup
from distutils.command.build_py import build_py
setuptools = False
import os, re
# XXX: This is a hack
def patch(func):
setattr(build_py, func.__name__, func)
@patch
def find_modules(self):
    return [('', 'hytest', 'hytest.hy')]
@patch
def get_module_outfile(self, build_dir, *_):
return os.path.join(build_dir, 'hytest.hy')
this_dir = os.path.dirname(__file__)
with open(os.path.join(this_dir, 'README.rst')) as f:
readme = f.read()
with open(os.path.join(this_dir, 'hytest.hy')) as f:
version = re.search(r'\(def __version__ "([^"]+)"\)', f.read()).group(1)
with open(os.path.join(this_dir, 'requirements.txt')) as f:
hy_ver = f.read().strip()
kw = {}
if setuptools:
kw['install_requires'] = hy_ver
setup(
name='HyTest',
version=version,
description='A testing framework for Hy',
long_description=readme,
author='Ryan Gonzalez',
author_email='rymg19@gmail.com',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Testing'
],
requires=[hy_ver.replace('>= ', '(>=')+')'],
scripts=['hytest'],
py_modules=['hytest'],
url='https://github.com/kirbyfan64/hytest',
**kw)
|
ecolell/pfamserver
|
tests/api/v0/test_version.py
|
Python
|
agpl-3.0
| 394
| 0
|
from __future__ import unicode_literals
import json
def test_get_version(app, client, current_version):
    headers = [('Accept', 'application/json'),
('Content-Type', 'application/json')]
res = client.get('/api/v0/version', headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
    assert data['version'] == current_version
|
google-research/google-research
|
etcmodel/layers/embedding_test.py
|
Python
|
apache-2.0
| 9,033
| 0.001661
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for embedding layers."""
from absl.testing import parameterized
import tensorflow as tf
from etcmodel import layers as etc_layers
class LayersTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_2d_ids_no_mask(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[
[1.3, -1.3], #
[1.2, -1.2], #
[1.1, -1.1], #
], #
[
[1.4, -1.4], #
[1.0, -1.0], #
[1.4, -1.4], #
], #
]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_2d_ids_with_mask(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, -1, 5], #
])
input_mask = tf.constant([
[1, 1, 0], #
[1, 0, 0], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[
[1.3, -1.3], #
[1.2, -1.2], #
[0.0, 0.0], #
], #
[
[1.4, -1.4], #
[0.0, 0.0], #
[0.0, 0.0], #
], #
]
result = layer(input_ids, input_mask=input_mask)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_1d_ids(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([1, 0, 0, 3])
input_mask = tf.constant([1, 1, 0, 1])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[1.1, -1.1], #
[1.0, -1.0], #
[0.0, 0.0], #
[1.3, -1.3], #
]
result = layer(input_ids, input_mask=input_mask)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_3d_ids(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([[
[3, 2, 1], #
[4, 0, 4], #
]])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [[
[
[1.3, -1.3], #
[1.2, -1.2], #
[1.1, -1.1], #
], #
[
[1.4, -1.4], #
[1.0, -1.0], #
[1.4, -1.4], #
], #
]]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_random_init_no_mask(self, use_one_hot_lookup):
vocab_size = 5
embedding_size = 2
input_ids = tf.constant([1, 0, 0, 3])
input_size = input_ids.shape.as_list()[0]
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(result)
self.assertAllEqual([input_size, embedding_size], result.shape)
@parameterized.named_parameters(
('no_projection', 0),
('embedding_size_equals_projection_size', 3),
)
def test_embedding_lookup_no_projection(self, projection_size):
# Create an embedding table with width = projection_size
embedding_table = tf.constant([
[1.0, -1.0, 0.5], #
[1.1, -1.1, -0.5], #
[1.2, -1.2, -0.2], #
[1.3, -1.3, 0.3], #
[1.4, -1.4, 0.4], #
])
    vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
projection_size=projection_size,
use_one_hot_lookup=True)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
    expected = [
[
[1.3, -1.3, 0.3], #
[1.2, -1.2, -0.2], #
[1.1, -1.1, -0.5], #
], #
[
[1.4, -1.4, 0.4], #
[1.0, -1.0, 0.5], #
[1.4, -1.4, 0.4], #
], #
]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
def test_embedding_lookup_with_projection(self):
# Create an embedding table with width != projection_size
embedding_table = tf.constant([
[1.0, -1.0, 0.5], #
[1.1, -1.1, -0.4], #
[1.2, -1.2, -0.5], #
[1.3, -1.3, 0.8], #
[1.4, -1.4, 0.9], #
])
projection_size = 2 # Different from the embedding_dimension.
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
input_mask = tf.constant([
[1, 0, 0], #
[0, 0, 1], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
projection_size=projection_size,
use_one_hot_lookup=True)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
# Dense layer to use for projection. Note that, we have
|
ericholscher/sublime-rst-completion
|
helpers.py
|
Python
|
bsd-3-clause
| 1,480
| 0.001351
|
import re
from sublime import Region
import sublime_plugin
class BaseBlockCommand(sublime_plugin.TextCommand):
def _get_row_text(self, row):
if row < 0 or row > self.view.rowcol(self.view.size())[0]:
raise RuntimeError('Cannot find table bounds.')
point = self.view.text_point(row, 0)
region = self.view.line(point)
text = self.view.substr(region)
return text
def get_cursor_position(self):
return self.view.rowcol(self.view.sel()[0].begin())
def get_block_bounds(self):
"""given the cursor position as started point,
returns the limits and indentation"""
row, col = self.get_cursor_position()
upper = lower = row
try:
while self._get_row_text(upper - 1).strip():
upper -= 1
except Exception as e:
print(e)
pass
else:
upper += 1
try:
            while self._get_row_text(lower + 1).strip():
lower += 1
except Exception as e:
print(e)
pass
else:
lower -= 1
block_region = Region(self.view.text_point(upper - 1, 0),
self.view.text_point(lower + 2, 0))
lines = [self.view.substr(region) for region in self.view.lines(block_region)]
indent = re.match('^(\s*).*$', self._get_row_text(upper - 1)).group(1)
return block_region, lines, indent
|