repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
jtsommers/dijkd | p1.py | Python | gpl-2.0 | 3,077 | 0.039649 | from p1_support import load_level, show_level
from math import sqrt
from heapq import heappush, heappop
import operator
VERBOSE = False
def debug(*args):
if (VERBOSE):
print ''.join([str(arg) for arg in args])
def dijkstras_shortest_path(src, dst, graph, adj):
dist = {}
prev = {}
dist[src] = 0
prev[src] = None # parent of the source node
queue = []
# Python heapq (heap, item) : item can be a tuple or single value
# If tuple is used, the first element will be used as key (key, data)
heappush(queue, (dist[src], src))
while queue:
pathCost, node = heappop(queue)
if node == dst:
break
adjacent = adj(graph, node)
# Extract (position, cost) from list of adjacent states
for neighbor, cost in adjacent:
totalCost = pathCost + cost
#print totalCost
if neighbor not in dist or totalCost < dist[neighbor]:
dist[neighbor] = totalCost
prev[neighbor] = node # parent of [ neighbor ] is node
heappush(queue, ( totalCost, neighbor))
path = []
# Path found build it, else return empty path
if node == dst:
# Traverse up the parent tree
while node: # while there is a parent (prev[src] = None)
path.append(node)
node = prev[node] # update to the parent
# Path is from dst to src, reverse it
path.reverse()
if path:
debug("Path: ", path)
debug("Path cost: ", pathCost)
return path
def navigation_edges(level, cell):
# Valid movement deltas
deltas = {
'LEFT_DOWN': (-1, -1),
'LEFT': (-1, 0),
'LEFT_UP': (-1, 1),
'DOWN': (0, -1),
'UP': (0, 1),
'RIGHT_DOWN': (1, -1),
'RIGHT': (1, 0),
'RIGHT_UP': (1, 1)
};
validMoves = []
for delta in deltas.values():
# Calculate new position
position = (cell[0]+delta[0], cell[1]+delta[1])
if position in level['spaces']:
# Calculate edge cost
cost = sqrt(delta[0] ** 2 + delta[1] ** 2)
# Valid move is a tuple (nextState, edgeCost)
validMoves.append((position, cost))
return validMoves
def test_route(filename, src_waypoint, dst_waypoint):
level = load_level(filename)
if VERBOSE:
print("Level layout:")
show_level(level)
src = level['waypoints'][src_waypoint]
dst = level['waypoints'][dst_waypoint]
path = dijkstras_shortest_path(src, dst, level, navigation_edges)
if path:
show_level(level, path)
else:
print "No path possible!"
# Show the level if the user hasn't already seen it
if not VERBOSE:
show_level(level, [])
if __name__ == '__main__':
import sys
# Use command line optio | ns
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] level_file src_waypoint dst_waypoint")
parser.add_option("-v", "--verbose", dest="verbose", help="use verbose logging", action=" | store_true", default=False)
(options, args) = parser.parse_args()
# Make sure the appropriate number of arguments was supplied
if (len(args) != 3):
print "Unexpected argument count."
parser.print_help()
else:
VERBOSE = options.verbose
filename, src_waypoint, dst_waypoint = args
test_route(filename, src_waypoint, dst_waypoint)
|
Resmin/Resmin | resmin/utils/models.py | Python | gpl-3.0 | 1,570 | 0 | import hashlib
from sorl.thumbnail import get_thumbnail
from django.db import models
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from resmin.libs.baseconv import base62
class BaseModel(models.Model):
owner = models.ForeignKey(User, verbose_name=_('Owner'))
created_at = models.DateTimeField(auto_now_add=True,
verbose_name=_('Created at'))
def get_owner(self):
if hasattr(self, 'is_anonymouse'):
return AnonymousUser if self.is_anonymouse else self.owner
else:
return self.owner
@property
def base62_id(self):
return base62.from_decimal(self.id)
class Meta:
abstract = True
class UniqueFileModel(models.Model):
"""
TODO: add unique=True property to given file field when initializing.
"""
FILE_FIELD = 'image'
md5sum = models.CharField(max_length=36, blank=True)
def _update_md5sum(self):
md5 = hashlib.md5()
for chunk in getattr(self, self.FILE_FIELD).chunks():
md5.update(chunk)
self.md5sum = md5.hexdigest()
@property
def thumbnail_url(self, size='100x100'):
return get_thumbnail(self.image, siz | e, crop='center').url
def serialize(self):
return {'cPk': self.p | k,
'cTp': ContentType.objects.get_for_model(self).pk,
'thumbnail_url': self.thumbnail_url}
class Meta:
abstract = True
|
edx/ecommerce-scripts | transifex/pull.py | Python | agpl-3.0 | 4,324 | 0.002544 | #!/usr/bin/env python3
"""
This script can be used to automatically pull translations from Transifex,
commit, push, and merge them to their respective repos.
To use, export an environment variable `GITHUB_ACCESS_TOKEN`. The token requires
GitHub's "repo" scope.
Run the script from the root of this repo.
python transifex/pull.py git@github.com:edx/course-discovery.git
If you want to use a custom merge method pass the --merge-method option.
python transifex/pull.py git@github.com:edx/course-discovery.git --merge-method rebase
If you want to skip the compile messages step, pass the --skip-compilemessages option.
python transifex/pull.py git@github.com:edx/course-discovery.git --skip-com | pilemessages
"""
import os
import shutil
from argparse import ArgumentParser
from utils import DEFAULT_MERGE_METHOD, MERGE_METHODS, logger, repo_context
# The name of the branch to use.
BRANCH_NAME = 'transifex-bot-update-translations'
# The commit message to use.
MESSAGE = 'chore(i18n): update translations'
# Environment variable needed to run paver compilejsi18n command
os.environ['LMS_CFG']='../lms.yml'
os.environ['STUDIO_CFG'] = '../studio.yml'
os.environ['REVISION_CFG'] | = ''
os.environ['SKIP_NPM_INSTALL'] = 'True'
os.environ['LANG'] = 'C.UTF-8'
# Configuration repo to fetch lms/studio settings
CONFIGURATION_REPO_URL = 'https://github.com/edx/configuration.git'
def pull(clone_url, repo_owner, merge_method=DEFAULT_MERGE_METHOD, skip_compilemessages=False,
skip_check_changes=False):
"""Pulls translations for the given repo.
If applicable, commits them, pushes them to GitHub, opens a PR, waits for
status checks to pass, then merges the PR and deletes the branch.
"""
with repo_context(CONFIGURATION_REPO_URL, repo_owner, BRANCH_NAME, MESSAGE, merge_method=merge_method) as config_repo:
logger.info('Pulling lms/studio settings from [%s].', config_repo.name)
shutil.copy('./docker/build/edxapp/lms.yml', '../')
shutil.copy('./docker/build/edxapp/studio.yml', '../')
with repo_context(clone_url, repo_owner, BRANCH_NAME, MESSAGE, merge_method=merge_method) as repo:
logger.info('Pulling translations for [%s].', repo.name)
repo.pull_translations()
if skip_compilemessages:
logger.info('Skipping compilemessages.')
else:
compilemessages_succeeded = repo.compilemessages()
repo.commit_push_and_open_pr(skip_check_changes)
if repo.pr:
if not (skip_compilemessages or compilemessages_succeeded):
# Notify the team that message compilation failed.
repo.pr.create_issue_comment(
'@{owner} failing message compilation prevents this PR from being automatically merged. '
'Refer to the build log for more details.'.format(
owner=repo.owner
)
)
# Fail job immediately, without trying to merge the PR. We don't
# want to merge PRs without compiled messages.
raise RuntimeError('Failed to compile messages.')
repo.merge_pr()
def parse_arguments():
parser = ArgumentParser()
parser.add_argument(
'clone_url',
help='URL to use to clone the repository.'
)
parser.add_argument(
'repo_owner',
help='This is the user/team that will be pinged when errors occur.'
)
parser.add_argument(
'--merge-method',
choices=MERGE_METHODS,
default=DEFAULT_MERGE_METHOD,
help='Method to use when merging the PR. See https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-button for details.'
)
parser.add_argument(
'--skip-compilemessages',
action='store_true',
help='Skip the message compilation step.'
)
parser.add_argument(
'--skip-check-changes',
action='store_true',
help='Skip the check changes step.'
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
pull(
args.clone_url,
args.repo_owner,
merge_method=args.merge_method,
skip_compilemessages=args.skip_compilemessages,
skip_check_changes=args.skip_check_changes,
)
|
antoinecarme/pyaf | tests/artificial/transf_Integration/trend_MovingAverage/cycle_12/ar_/test_artificial_32_Integration_MovingAverage_12__100.py | Python | bsd-3-clause | 271 | 0.084871 | import pyaf.Benc | h.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Int | egration", sigma = 0.0, exog_count = 100, ar_order = 0); |
attakei/openshift-ansible | playbooks/aws/openshift-cluster/library/ec2_ami_find.py | Python | apache-2.0 | 9,777 | 0.007262 | #!/usr/bin/env python2
#pylint: skip-file
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: Tom Bamford
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
- A hypervisor type type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
- Optional attribute which with to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
| required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
require | d: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
instance_type: m4.medium
key_name: mykey
wait: yes
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
' |
dennisfrancis/PacketManipulator | umit/pm/gui/core/__init__.py | Python | gpl-2.0 | 971 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Adriano Monteiro Marques
#
# Author | : Francesco Piccinno <stack.box@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warra | nty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Gui core module contains:
- App
+ MainWindow
+ Icons
- Paned
- FallbackPaned
+ Views
"""
|
Bassintag551/spark-python-sdk | sparkpy/Message.py | Python | mit | 513 | 0 | #
# Message for spark-python-sdk
# Started on 19/04/2017 by Antoine
#
class Message:
def __init__(self):
self.id = None
self.roomId = None |
self.toPersonId = None
self.toPersonEmail = None
self.personId = None
self.personEmail = None
self.text = None
self.file = None
self.roomType = None
self.created = None
self.files = None
self.markdown = N | one
self.html = None
self.mentionedPeople = None
|
roadmapper/ansible | lib/ansible/plugins/terminal/dellos6.py | Python | gpl-3.0 | 3,474 | 0.002015 | #
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Bad secret"),
re.compile(br"(\bInterface is part of a port-channel\b)"),
re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Invalid|invalid.*$", re.I),
re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
]
terminal_initial_prompt = br"\(y/n\)"
terminal_initial_answer = b"y"
terminal_inital_prompt_newline = False
def on_open_shell(self):
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameter | s')
def on_be | come(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
# in dellos6 the terminal settings are accepted after the privilege mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
|
nmc-probe/emulab-nome | robots/vmcd/camera_data/test_lin_blend3.py | Python | agpl-3.0 | 7,084 | 0.009034 | #!/usr/local/bin/python
#
# Copyright (c) 2005 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
# This file is part of the Emulab network testbed software.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
# }}}
#
# test_lin_blend3.py - 9 point concentric version.
# Quick prototype of piecewise triangular linear blending.
#
# The object is to apply error corrections from the center, edge-midpoints,
# and corners of a camera grid to the whole grid.
#
# To use this program, first make a file_dumper input file specifying the
# calibration data at the center, edge midpoints, and corner points in meters.
# The required order is left-to-right, bottom-to-top, so the lower-left corner
# comes first on the bottom row, then the middle and top rows. Gather these
# points using file_dumper, and analyze them through dump_analyzer.
#
# Gather a finer grid of points to be corrected using file_dumper. (If the
# corners and origin above are part of the fine grid, you can extract a subset
# of this file_dumper data and analyze it to make the above file.)
#
# Usage: There are two filename args:
#
# ptAnal - The dump_analyzer output file for the center and corner points.
# The order must be as above. The section headers give the target
# world coordinates in meters. The mean_[ab][xy] give Mezzanine
# fiducial blob coordinates in pixels, and the mean_[xy]_offset tells
# the world coordinate error in meters (difference from the target)
# to be canceled out at the center pixel between the fiducial blobs.
#
# gridData - The file_dumper output file containing data to be corrected.
# Each data frame contains blob [ab] coordinates and world
# coordinates. Th | e blob coordinates are p | assed through the error
# correction blend, producing a world coordinate offset which is
# added to the and new world coordinates in the data frame line,
# which is then output. Everything else streams straight through.
import sys
import getopt
import string
import re
import geom
import blend_tris
import read_analysis
opts,args = getopt.getopt(sys.argv[1:], 'td')
printTris = False
debug = False
for o,a in opts:
if o == "-t":
printTris = True
pass
if o == "-d":
debug = True
pass
pass
if len(args) != 2:
print "Read the comments for usage."
pass
# Read in the calibration points.
nPts = 9
loc,target,offset = read_analysis.data(args[0], nPts)
# XXX Horrid Hack Warning - We need right-handed coordinates,
# but the image Y coordinate goes down from 0 at the top.
# Negate it internally.
for l in loc:
l[1] = l[1] * -1.0
if debug:
print "loc "+str(loc)
print "target "+str(target)
print "offset "+str(offset)
# Make the triangles.
#
# The required point order is left-to-right, bottom-to-top, so the lower-left
# corner comes first on the bottom row, then the middle and top rows.
# Triangles are generated clockwise from the bottom row, first inner and then
# outer. Vertices are listed clockwise from the center in each triangle, so
# edges will have the inside on the right.
#
# p6 ------ p7 ----- p8
# | / | \ |
# | t6 / | \ t7 |
# | / | \ |
# | / | \ |
# | / t2 | t3 \ |
# | / | \ |
# p3 ------ p4 ----- p5
# | \ | / |
# | \ t1 | t0 / |
# | \ | / |
# | \ | / |
# | t5 \ | / t4 |
# | \ | / |
# p0 ------ p1 ----- p2
#
def mkTri(i0, i1, i2):
return blend_tris.BlendTri((loc[i0], loc[i1], loc[i2]),
(target[i0], target[i1], target[i2]),
(offset[i0], offset[i1], offset[i2]))
triangles = [ mkTri(4,5,1), mkTri(4,1,3), mkTri(4,3,7), mkTri(4,7,5),
mkTri(2,1,5), mkTri(0,3,1), mkTri(6,7,3), mkTri(8,5,7) ]
# Optionally output only gnuplot lines for the triangles.
if printTris:
for tri in triangles:
for v in tri.target:
print '%f, %f'%tuple(v)
print "%f, %f\n"%tuple(tri.target[0])
pass
sys.exit(0)
pass
#================================================================
# Regexes for parsing file_dumper output.
fpnum = "\s*(\-*\d+\.\d+)\s*"
reDumperSection = re.compile("section:\s*\("+fpnum+","+fpnum+"\)")
reFrameData_line = re.compile("(\[[0-9]+\] a\("+fpnum+","+fpnum+"\)\s*"
"b\("+fpnum+","+fpnum+"\)\s*-- wc)"
"\("+fpnum+","+fpnum+","+fpnum+"\)\s*")
gridData = file(args[1])
gridLine = gridData.readline()
##print "gridLine: "+gridLine
while gridLine != "":
# Chop the newline.
gridLine = gridLine.strip('\n')
##print "gridLine = "+gridLine
m1 = reFrameData_line.match(gridLine)
if m1 == None:
# Everything else streams straight through.
print gridLine
else:
# Frame data.
lineHead = m1.group(1)
data = [float(f) for f in m1.groups()[1:]]
# XXX Horrid Hack Warning - We need right-handed coordinates,
# but the image Y coordinate goes down from 0 at the top.
# Negate it internally.
pixLoc = geom.ptBlend((data[0], -data[1]), (data[2], -data[3]))
wcLoc = (data[4], data[5])
wAng = data[6]
# Find the quadrant by looking at the center edges of the triangles.
# We can actually blend linearly past the outer edges...
for iTri in range(nPts-1):
# Get the barycentric coords of the image point.
bcs = triangles[iTri].baryCoords(pixLoc)
# In the "concentric" layout, the first four triangles are inside,
# so we pay attention to all 3 of their edges. The second four
# triangles are on the outside, so two of their edges are outside.
if iTri <= 3 and bcs[0] >= 0.0 and bcs[1] >= 0.0 and bcs[2] >= 0.0 \
or iTri >= 4 and bcs[0] >= 0.0:
# This is the triangle containing this image point.
newLoc = geom.ptOffset(wcLoc, triangles[iTri].errorBlend(bcs))
##print "pixLoc %s, iTri %d, bcs %s"%(pixLoc, iTri, bcs)
##print "triangles[%d] %s"%(iTri, triangles[iTri])
print "%s(%f,%f,%f)"%(lineHead, newLoc[0], newLoc[1], wAng)
break
pass
pass
gridLine = gridData.readline()
pass
|
akrherz/iem | scripts/iemre/precip_ingest.py | Python | mit | 3,827 | 0 | """Ingest Stage IV Hourly Files.
1. Copies to hourly stage IV netCDF files
2. Copies hourly stage IV netCDF to hourly IEMRE
"""
import os
import datetime
import sys
import numpy as np
from scipy.interpolate import NearestNDInterpolator
import pygrib
from pyiem import iemre
from pyiem.util import utc, ncopen, logger
LOG = logger()
def get_p01m_status(valid):
"""Figure out what our current status is of this hour."""
nc = ncopen(
("/mesonet/data/stage4/%s_stage4_hourly.nc") % (valid.year,),
timeout=300,
)
tidx = iemre.hourly_offset(valid)
# 2 prism_adjust_stage4 ran
# 1 copied hourly data in
# 0 nothing happened
p01m_status = nc.variables["p01m_status"][tidx]
nc.close()
LOG.debug("p01m_status is %s for valid %s", p01m_status, valid)
return p01m_status
def ingest_hourly_grib(valid):
"""Copy the hourly grib data into the netcdf storage.
Returns:
int value of the new p01m_status
"""
tidx = iemre.hourly_offset(valid)
fn = valid.strftim | e(
"/mesonet/ARCHIVE/data/%Y/%m/%d/stage4/ST4.%Y%m%d%H.01h.grib"
)
if not os.path.isfile(fn):
LOG.info("stage4_ingest: missing file %s", fn)
return 0
gribs = pygrib.open(fn)
grb = gribs[1]
val = grb.values
# values over 10 inches are bad
val = np.where(val > 250.0, 0, val)
ncfn = f"/mesonet/data/stage4/{valid.year}_stage4_hourly.nc"
with n | copen(ncfn, "a", timeout=300) as nc:
p01m = nc.variables["p01m"]
# account for legacy grid prior to 2002
if val.shape == (880, 1160):
p01m[tidx, 1:, :] = val[:, 39:]
else:
p01m[tidx, :, :] = val
nc.variables["p01m_status"][tidx] = 1
LOG.debug(
"write p01m to stage4 netcdf min: %.2f avg: %.2f max: %.2f",
np.min(val),
np.mean(val),
np.max(val),
)
return 1
def copy_to_iemre(valid):
"""verbatim copy over to IEMRE."""
tidx = iemre.hourly_offset(valid)
ncfn = f"/mesonet/data/stage4/{valid.year}_stage4_hourly.nc"
with ncopen(ncfn, "a", timeout=300) as nc:
lats = nc.variables["lat"][:]
lons = nc.variables["lon"][:]
val = nc.variables["p01m"][tidx]
# Our data is 4km, iemre is 0.125deg, so we stride some to cut down on mem
stride = slice(None, None, 3)
lats = np.ravel(lats[stride, stride])
lons = np.ravel(lons[stride, stride])
vals = np.ravel(val[stride, stride])
nn = NearestNDInterpolator((lons, lats), vals)
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
res = nn(xi, yi)
# Lets clip bad data
# 10 inches per hour is bad data
res = np.where(np.logical_or(res < 0, res > 250), 0.0, res)
# Open up our RE file
nc = ncopen(iemre.get_hourly_ncname(valid.year), "a", timeout=300)
nc.variables["p01m"][tidx, :, :] = res
LOG.debug(
"wrote data to hourly IEMRE min: %.2f avg: %.2f max: %.2f",
np.min(res),
np.mean(res),
np.max(res),
)
nc.close()
def workflow(valid):
"""Our stage IV workflow."""
# Figure out what the current status is
p01m_status = get_p01m_status(valid)
if np.ma.is_masked(p01m_status) or p01m_status < 2:
# merge in the raw hourly data
ingest_hourly_grib(valid)
copy_to_iemre(valid)
def main(argv):
"""Go Main"""
if len(argv) == 5:
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
workflow(ts)
return
# Otherwise we are running for an explicit 12z to 12z period, copy only
ets = utc(int(argv[1]), int(argv[2]), int(argv[3]), 12)
now = ets - datetime.timedelta(hours=23)
while now <= ets:
copy_to_iemre(now)
now += datetime.timedelta(hours=1)
if __name__ == "__main__":
main(sys.argv)
|
elPistolero/DeepFried2 | DeepFried2/datasets/cifar100.py | Python | mit | 1,879 | 0.001597 | from DeepFried2.zoo.download import download as _download
import numpy as _np
from tarfile import open as _taropen
try: # Py2 compatibility
import cPickle as _pickle
except ImportError:
import pickle as _pickle
def data():
fname = _download('http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz')
with _taropen(fname, 'r') as f:
with f.extractfile('cifar-100-python/train') as train:
train = _pickle.load(train, encoding='latin1')
Xtr = _np.array(train['data'], dtype=_np.float32)
ytr_c = _np.array(train['coarse_labels'])
ytr_f = _np.array(train['fine_labels'])
Xtr /= 255
# There is no "official" validation set here that I know of!
# But the maxout paper uses the last 10k samples as validation.
Xtr, Xva = Xtr[:-10000], Xtr[-10000:]
ytr_c, yva_c = ytr_c[:-10000], ytr_c[-10000:]
ytr_f, yva_f = ytr_f[:-10000], ytr_f[-10000:]
with f.extractfile('cifar-100-python/test') as test:
test = _pickle.load(test, encoding='latin1')
Xte = _np.array(test['data'], dtype=_np.float32)
yte_c = _np.array(test['coarse_labels'])
yte_f = _np.array(test['fine_labels'])
Xte /= 255
# Get the label names additionally.
| with f.extractfile('cifar-100-python/meta') as m:
m = _pickle.load(m, encoding='latin1')
try:
from sklearn.preprocessing import LabelEncoder
le_c = LabelEncoder()
le_c.classes_ = _np.array(m['coarse_label_names'])
le_f = LabelEncoder()
| le_f.classes_ = _np.array(m['fine_label_names'])
except ImportError:
le_c = _np.array(m['coarse_label_names'])
le_f = _np.array(m['fine_label_names'])
return (Xtr, ytr_c, ytr_f), (Xva, yva_c, yva_f), (Xte, yte_c, yte_f), (le_c, le_f)
|
openqt/algorithms | leetcode/python/lc069-sqrtx.py | Python | gpl-3.0 | 925 | 0.008649 | # coding=utf-8
import unittest
"""69. Sqrt(x)
https://leetcode.com/problems/sqrtx/description/
Implement `int sqrt(int x)`.
Compute and return the square root of _x_ , where _x_ is guaranteed to be a
non-negative integer.
Since the return type is an integer, the decimal digits are truncated an | d only
the integer part of the result is returned.
**Example 1:**
**Input:** 4
**Output:** 2
**Example 2:**
**Input:** 8
**Output:** 2
**Explanation:** The square root of 8 is 2.82842..., and since
the decimal part is truncated, 2 is returned.
Similar Questions:
Pow(x, n) (powx-n)
Valid Perfect Square (valid-perfect-square)
"""
class Solution(object):
def mySqrt(self, x):
"""
: | type x: int
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
jcu-eresearch/TDH-dc24-ingester-platform | dc24_ingester_platform/service/repodb.py | Python | bsd-3-clause | 13,401 | 0.007537 | """
Created on Oct 5, 2012
@author: nigel
"""
from dc24_ingester_platform.utils import format_timestamp, parse_timestamp
from dc24_ingester_platform.service import BaseRepositoryService
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DECIMAL, Boolean, ForeignKey, DateTime
import sqlalchemy.orm as orm
from sqlalchemy import create_engine
from sqlalchemy.orm.exc import NoResultFound
import decimal
import logging
import os
import shutil
from jcudc24ingesterapi.schemas.data_types import FileDataType
from jcudc24ingesterapi.models.data_entry import DataEntry, FileObject
from jcudc24ingesterapi.models.metadata import DatasetMetadataEntry, DataEntryMetadataEntry
from jcudc24ingesterapi.schemas import ConcreteSchema
from jcudc24ingesterapi.search import SearchResults
logger = logging.getLogger(__name__)
Base = declarative_base()
def obj_to_dict(obj, klass=None):
"""Maps an object of base class BaseManagementObject to a dict.
"""
ret = {}
for attr in dir(obj):
if attr.startswith("_") or attr == "metadata": continue
if type(getattr(obj, attr)) in (str, int, float, unicode, dict):
ret[attr] = getattr(obj, attr)
elif type(getattr(obj, attr)) == decimal.Decimal:
ret[attr] = float(getattr(obj, attr))
if klass != None: ret["class"] = klass
elif hasattr(obj, "__xmlrpc_class__"): ret["class"] = obj.__xmlrpc_class__
return ret
def dict_to_object(dic, obj):
for attr in dir(obj):
if attr.startswith("_"): continue
if dic.has_key(attr): setattr(obj, attr, dic[attr])
class DatasetMetadata(Base):
__tablename__ = "DATASET_METADATA"
id = Column(Integer, primary_key=True)
dataset = Column(Integer)
schema = Column(Integer)
attrs = orm.relationship("DatasetMetadataAttr")
class DatasetMetadataAttr(Base):
__tablename__ = "DATASET_METADATA_ATTRS"
id = Column(Integer, primary_key=True)
metadata_entry = Column(Integer, ForeignKey('DATASET_METADATA.id'))
name = Column(String(255))
value = Column(String(255))
class Observation(Base):
__tablename__ = "OBSERVATIONS"
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime)
dataset = Column(Integer)
attrs = orm.relationship("ObservationAttr")
class ObservationAttr(Base):
__tablename__ = "OBSERVATION_ATTRS"
id = Column(Integer, primary_key=True)
observation = Column(Integer, ForeignKey('OBSERVATIONS.id'))
name = Column(String(255))
value = Column(String(255))
class DataEntryMetadata(Base):
__tablename__ = "DATA_ENTRY_METADATA"
id = Column(Integer, primary_key=True)
data_entry = Column(Integer, ForeignKey('OBSERVATIONS.id'))
schema = Column(Integer)
attrs = orm.relationship("DataEntryMetadataAttr")
class DataEntryMetadataAttr(Base):
__tablename__ = "DATA_ENTRY_METADATA_ATTRS"
id = Column(Integer, primary_key=True)
metadata_entry = Column(Integer, ForeignKey('DATA_ENTRY_METADATA.id'))
name = Column(String(255))
value = Column(String(255))
def merge_parameters(col_orig, col_new, klass, name_attr="name", value_attr="value"):
"""This method updates col_orig removing any that aren't in col_new, updating those that are, and adding new ones
using klass as the constructor
col_new is a dict
col_orig is a list
klass is a type
"""
working = col_new.copy()
to_del = []
for obj in col_orig:
if getattr(obj,name_attr) in working:
# Update
setattr(obj, value_attr, working[obj.name].f_path if isinstance(working[obj.name], FileObject) else working[obj.name])
del working[obj.name]
else:
# Delete pending
to_del.append(obj)
# Delete
for obj in to_del:
col_orig.remove(obj)
# Add
for k in working:
obj = klass()
setattr(obj, name_attr, k)
setattr(obj, value_attr, working[obj.name].f_path if isinstance(working[obj.name], FileObject) else working[obj.name])
col_orig.append(obj)
class RepositoryDB(BaseRepositoryService):
"""This service provides DAO operations for the ingester service.
All objects/DTOs passed in and out of this service are dicts. This service protects the storage layer.
"""
def __init__(self, config):
self.engine = create_engine(config["db"])
self.repo = config["files"]
if not os.path.exists(self.repo):
os.makedirs(self.repo)
Observation.metadata.create_all(self.engine, checkfirst=True)
def reset(self):
Observation.metadata.drop_all(self.engine, checkfirst=True)
Observation.metadata.create_all(self.engine, checkfirst=True)
def copy_files(self, attrs, schema, cwd, obj, obj_type):
"""Copy file attributes into place and update the File Objects
to point to the destination path."""
obj_path = os.path.join(self.repo, obj_type)
if not os.path.exists(obj_path): os.makedirs(obj_path)
for k in attrs:
if isinstance(schema[k], FileDataType):
dest_file_name = os.path.join(obj_path, "%d-%s"%(obj.id, k))
shutil.copyfile(os.path.join(cwd, attrs[k].f_path), dest_file_name)
attrs[k].f_path = dest_file_name
def find_data_entries(self, dataset, offset, limit, start_time=None, end_time=None):
"""Find all observations within this dataset that match the given criteria"""
s = orm.sessionmaker(bind=self.engine)()
try:
dataset = self.service.get_dataset(dataset.id)
schema = ConcreteSchema(self.service.get_schema_tree(dataset.schema))
objs = s.query(Observation).filter(Observation.dataset == dataset.id)
if start_time != None:
objs = objs.filter(Observation.timestamp >= start_time)
if end_time != None:
objs = objs.filter(Observation.timestamp <= end_time)
count = objs.count()
objs = objs.limit(limit).offset(offset)
return SearchResults([self._create_data_entry(obs, schema) for obs in objs.all()], offset, limit, count)
finally:
s.close()
def find_dataset_metadata(self, dataset, offset, limit):
s = orm.sessionmaker(bind=self.engine)()
try:
objs = s.query(DatasetMetadata).filter(DatasetMetadata.dataset == dataset.id)
count = objs.count()
return SearchResults([self._create_dataset_metadata(s, obj) for obj in objs.offset(offset).limit(limit).all()], \
offset, limit, count)
finally:
s.close()
def find_data_entry_metadata(self, data_entry, offset, limit):
s = orm.sessionma | ker(bind=self.engine)()
try:
objs = s.query(DataEntryMetadata).filter(DataEntryMetadata.data_entry == data_entry.id)
count = objs.count()
return SearchResults([self._create_data_entry_metadata(obj) for obj in objs.offset(offset).limit(limit).all()], \
| offset, limit, count)
finally:
s.close()
def _create_dataset_metadata(self, session, obj):
"""Internal method for creating the DataEntry domain object from a database
observation
"""
schema = ConcreteSchema(self.service.get_schema_tree(obj.schema))
entry = DatasetMetadataEntry()
entry.metadata_schema = obj.schema
entry.id = obj.id
entry.object_id = obj.dataset
for attr in obj.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
def _create_data_entry_metadata(self, session, obj):
"""Internal method for creating the DataEntry domain object from a database
observation
"""
schema = ConcreteSchema(self.service.get_schema_tr |
qianqians/Screw | 3rdparty/duktape/tools/configure.py | Python | lgpl-2.1 | 41,654 | 0.005714 | #!/usr/bin/env python2
#
# Prepare a duk_config.h and combined/separate sources for compilation,
# given user supplied config options, built-in metadata, Unicode tables, etc.
#
# This is intended to be the main tool application build scripts would use
# before their build step, so convenient, versions, Python compatibility,
# etc all matter.
#
# When obsoleting options, leave the option definitions behind (with
# help=optparse.SUPPRESS_HELP) and give useful suggestions when obsolete
# options are used. This makes it easier for users to fix their build
# scripts.
#
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(name)-21s %(levelname)-7s %(message)s')
logger = logging.getLogger('configure.py')
logger.setLevel(logging.INFO)
import os
import re
import shutil
import glob
import optparse
import tarfile
import json
import yaml
import tempfile
import subprocess
import atexit
import genconfig
# Helpers
def exec_get_stdout(cmd, input=None, default=None, print_stdout=False):
try:
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.communicate(input=input)
if print_stdout:
sys.stdout.write(ret[0])
sys.stdout.flush()
if proc.returncode != 0:
sys.stdout.write(ret[1]) # print stderr on error
sys.stdout.flush()
if default is not None:
logger.info('WARNING: command %r failed, return default' % cmd)
return default
raise Exception('command failed, return code %d: %r' % (proc.returncode, cmd))
return ret[0]
except:
if default is not None:
logger.info('WARNING: command %r failed, return default' % cmd)
return default
raise
def exec_print_stdout(cmd, input=None):
ret = exec_get_stdout(cmd, input=input, print_stdout=True)
def mkdir(path):
os.mkdir(path)
def copy_file(src, dst):
with open(src, 'rb') as f_in:
with open(dst, 'wb') as f_out:
f_out.write(f_in.read())
def copy_files(filelist, srcdir, dstdir):
for i in filelist:
copy_file(os.path.join(srcdir, i), os.path.join(dstdir, i))
def copy_and_replace(src, dst, rules):
# Read and write separately to allow in-place replacement
keys = sorted(rules.keys())
res = []
with open(src, 'rb') as f_in:
for line in f_in:
for k in keys:
line = line.replace(k, rules[k])
res.append(line)
with open(dst, 'wb') as f_out:
f_out.write(''.join(res))
def copy_and_cquote(src, dst):
with open(src, 'rb') as f_in:
with open(dst, 'wb') as f_out:
f_out.write('/*\n')
for line in f_in:
line = line.decode('utf-8')
f_out.write(' * ')
for c in line:
if (ord(c) >= 0x20 and ord(c) <= 0x7e) or (c in '\x0a'):
f_out.write(c.encode('ascii'))
else:
f_out.write('\\u%04x' % ord(c))
f_out.write(' */\n')
def read_file(src, strip_last_nl=False):
with open(src, 'rb') as f:
data = f.read()
if len(data) > 0 and data[-1] == '\n':
data = data[:-1]
return data
def delete_matching_files(dirpath, cb):
for fn in os.listdir(dirpath):
if os.path.isfile(os.path.join(dirpath, fn)) and cb(fn):
logger.debug('Deleting %r' % os.path.join(dirpath, fn))
os.unlink(os.path.join(dirpath, fn))
def create_targz(dstfile, filelist):
# https://docs.python.org/2/library/tarfile.html#examples
def _add(tf, fn): # recursive add
logger.debug('Adding to tar: ' + fn)
if os.path.isdir(fn):
for i in sorted(os.listdir(fn)):
_add(tf, os.path.join(fn, i))
elif os.path.isfile(fn):
tf.add(fn)
else:
raise Exception('invalid file: %r' % fn)
with tarfile.open(dstfile, 'w:gz') as tf:
for fn in filelist:
_add(tf, fn)
def cstring(x):
return '"' + x + '"' # good enough for now
# DUK_VERSION is grepped from duktape.h.in: it is needed for the
# public API and we want to avoid defining it in two places.
def get_duk_version(apiheader_filename):
r = re.compile(r'^#define\s+DUK_VERSION\s+(.*?)L?\s*$')
with open(apiheader_filename, 'rb') as f:
for line in f:
m = r.match(line)
if m is not None:
duk_version = int(m.group(1))
duk_major = duk_version / 10000
duk_minor = (duk_version % 10000) / 100
duk_patch = duk_version % 100
duk_version_formatted = '%d.%d.%d' % (duk_major, duk_minor, duk_patch)
return duk_version, duk_major, duk_minor, duk_patch, duk_version_formatted
raise Exception('cannot figure out duktape version')
# Python module check and friendly errors
def check_python_modules():
# dist.py doesn't need yaml but other dist utils will; check for it and
# warn if it is missing.
failed = False
def _warning(module, aptPackage, pipPackage):
sys.stderr.write('\n')
sys.stderr.write('*** NOTE: Could not "import %s" needed for dist. Install it using e.g.:\n' % module)
sys.stderr.write('\n')
sys.stderr.write(' # Linux\n')
sys.stderr.write(' $ sudo apt-get install %s\n' % aptPackage)
sys.stderr.write('\n')
sys.stderr.write(' # Windows\n')
sys.stderr.write(' > pip install %s\n' % pipPackage)
try:
import yaml
except ImportError:
_warning('yaml', 'python-yaml', 'PyYAML')
failed = True
if failed:
sys.stderr.write('\n')
raise Exception('Missing some required Python modules')
check_python_modules()
# Option parsing
def main():
parser = optparse.OptionParser(
usage='Usage: %prog [options]',
description='Prepare Duktape source files and a duk_config.h configuration header for compilation. ' + \
'Source files can be combined (amalgamated) or kept separate. ' + \
'See http://wiki.duktape.org/Configuring.html for examples.'
)
# Forced options from multiple sources are gathered into a shared list
# so that the override order remains the same as on the command line.
force_options_yaml = []
def add_force_option_yaml(option, opt, value, parser):
# XXX: check that YAML parses
force_options_yaml.append(value)
def add_force_option_file(option, opt, value, parser):
# XXX: check that YAML parses
| with open(value, 'rb') as f:
force_options_yaml.append(f.read())
def add_force_option_define(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: True }
elif len(tmp) == 2:
doc = { tmp[0]: | tmp[1] }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
def add_force_option_undefine(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: False }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
fixup_header_lines = []
def add_fixup_header_line(option, opt, value, parser):
fixup_header_lines.append(value)
def add_fixup_header_file(option, opt, value, parser):
with open(value, 'rb') as f:
for line in f:
if line[-1] == '\n':
line = line[:-1]
fixup_header_lines.append(line)
# Options for configure.py tool itself.
parser.add_option('--source-directory', dest='source_directory', default=None, help='Directory with raw input sources (defaulted based on configure.py script path)')
parser.add_option('--output-directory', dest='output_directory', default=None, help='Directory for output files (created aut |
lampslave/sorl-thumbnail | tests/settings/default.py | Python | bsd-3-clause | 1,335 | 0 | from os.path import join as pjoin, abspath, dirname, pardir
import django
SECRET_KEY = 'SECRET'
PROJ_ROOT = abspath(pjoin(dirname(__file__), pardir))
DATA_ROOT = pjoin(PROJ_ROOT, 'data')
THUMBNAIL_PREFIX = 'test/cache/'
THUMBNAIL_DEBUG = True
THUMBNAIL_LOG_HANDLER = {
'class': 'sorl.thumbnail.log.ThumbnailLogHandler',
'level': 'ERROR',
}
THUMBNAIL_KVSTORE = 'tests.thumbnail_tests.kvstore.TestKVStore'
THUMBNAIL_STORAGE = 'tests.thumbnail_tests.storage.TestStorage'
DEFAULT_FILE_STORAGE = 'tests.thumbnail_tests.storage.TestStorage'
ADMINS = (
('Sorl', 'thumbnail@sorl.net'),
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
MEDIA_ROOT = pjoin(PROJ_ROOT, 'media')
MEDIA_URL = '/media/'
ROOT_URLCONF = 'tests.thumbnail_tests.urls'
INSTALLED_APPS = (
'sorl.thumbnail',
'tests.thumbnail_tests',
)
TE | MPLATE_CONTEXT_PROCESSORS = (
'django.core.context_proc | essors.request',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
|
chasedog/EpisodeDiscussions | stats.py | Python | apache-2.0 | 4,633 | 0.005612 | from db import DB
from itertools import groupby
import operator
from datetime import datetime, timedelta
import classifiers, re
class Season:
def __init__(self, number, isRewatch):
self.number = number
self.episodes = []
self.is_rewatch = isRewatch
def addEpisode(self, episode):
self.episodes.append(episode)
def serialize(self):
return {
'season_number': self.number,
'episodes': [e.serialize() for e in self.episodes],
'is_rewatch': self.is_rewatch
}
class Episode:
def __init__(self, title, url, number, date_utc, name, score, num_comments):
self.name = name
self.title = title
self.url = url
self.number = number
self.date_utc = date_utc
self.date_pacific = (datetime.utcfromtimestamp(self.date_utc) - timedelta(hours=7)).strftime("%m/%d/%Y")
self.score = score
self.num_comments = num_comments
titleLowered = title.lower()
containsPre = " pre" in titleLowered
containsPost = " post" in titleLowered
self.prePostText = "pre" if containsPre else "post" if containsPost else ""
def serialize(self):
return {
'name': self.name,
'title': self.title,
'url': self.url,
'episode_number': self.number,
'date_utc': self.date_utc,
'date_pacific': self.date_pacific,
'score': self.score,
'num_comments': self.num_comments,
}
def contains(word, capture):
return 1 if word in capture else 0
def extractSeasonsAndEpisodes(data):
for item in data:
print(item)
titleLowered = item["title"].lower()
| matches = re.findall(classifiers.ff, titleLowered)
count = len(matches)
if count == 0:
item["season"] = -1
item["episode"] = - | 1
elif count == 1:
season = matches[0][2].strip()
episode = matches[0][4].strip()
seasonAndEpisode = str(season) + episode
if len(season) >= 1 and len(episode) >= 1:
print("A", matches[0])
item["season"] = int(season)
item["episode"] = int(episode)
elif len(matches[0][0].strip()) <= 2 and len(seasonAndEpisode) <= 2:
print("B", matches[0])
item["season"] = -1
item["episode"] = -1
else:
print("C", matches[0])
item["season"] = int(seasonAndEpisode[0])
item["episode"] = int(seasonAndEpisode[1:])
else:
scores = {}
for idx, match in enumerate(matches):
capture = match[0].lower()
scores[idx] = sum([contains(score, capture) for score in [".", "s", "e", "x"]])
maxScore = max(scores.keys(), key=(lambda key: scores[key]))
item["season"] = int(matches[maxScore][2])
item["episode"] = int(matches[maxScore][4])
#print(item["season"], item["episode"], item["title"])
episodeName = re.search(classifiers.somethingInQuotesRegex, item["title"])
item["episodeName"] = None if episodeName is None else episodeName.group(0)
flair = "" if item["link_flair_text"] is None else item["link_flair_text"].lower()
if re.search(r"re-?watch", titleLowered) is not None or re.search(r"re-?watch", flair) is not None:
item["isRewatch"] = True
else:
item["isRewatch"] = False
data = sorted(data, key=lambda x: (x["subreddit"], x["season"], x["isRewatch"], x["episode"], x["created_utc"]))
response = []
for subreddit, group in groupby(data, lambda x: x["subreddit"]):
#subredditSorted = sorted(group, key=lambda x: (x["season"], x["episode"], x["created_utc"]))
for season, seasonGroup in groupby(group, lambda x: (x["season"], x["isRewatch"])):
#sortedEpisodes = sorted(seasonGroup, key=lambda x: (x["episode"], x["created_utc"]))
season = Season(season[0], season[1])
for item in seasonGroup:
episode = Episode(item["title"], item["url"], item["episode"], item["created_utc"], item["episodeName"], item["score"], item["num_comments"])
season.addEpisode(episode)
#date = datetime.utcfromtimestamp(item["created_utc"]).strftime("%m/%d/%Y")
response.append(season)
return response
if __name__ == "__main__":
conn = DB()
data = conn.getValidDiscussionData()
extractSeasonsAndEpisodes(data)
conn.close() |
cpcloud/numba | numba/tests/doc_examples/test_structref_usage.py | Python | bsd-2-clause | 4,851 | 0 | # "magictoken" is used for markers as beginning and ending of example text.
import unittest
# magictoken.ex_structref_type_definition.begin
import numpy as np
from numba import njit
from numba.core import types
from numba.experimental import structref
from numba.tests.support import skip_unless_scipy
# Define a StructRef.
# `structref.register` associates the type with the default data model.
# This will also install getters and setters to the fields of
# the StructRef.
@structref.register
class MyStructType(types.StructRef):
def preprocess_fields(self, fields):
# This method is called by the type constructor for additional
# preprocessing on the fields.
# Here, we don't want the struct to take Literal types.
return tuple((name, types.unliteral(typ)) for name, typ in fields)
# Define a Python type that can be use as a proxy to the StructRef
# allocated inside Numba. Users can construct the StructRef via
# the constructor for this type in python code and jit-code.
class MyStruct(structref.StructRefProxy):
def __new__(cls, name, vector):
# Overriding the __new__ method is optional, doing so
# allows Python code to use keyword arguments,
# or add other customized behavior.
# The default __new__ takes `*args`.
# IMPORTANT: Users should not override __init__.
return structref.StructRefProxy.__new__(cls, name, vector)
# By default, the proxy type does not reflect the attributes or
# methods to the Python side. It is up to users to define
# these. (This may be automated in the future.)
@property
def name(self):
# To access a field, we can define a function that simply
# retu | rn the field in jit-code.
# The definition of MyStruct_get_name is shown later.
return MyStruct_get_name(self)
@property
def vector(self):
# The definition of MyStruct_get_vector is shown later.
return MyStruct_get_vector(self)
@njit
def | MyStruct_get_name(self):
# In jit-code, the StructRef's attribute is exposed via
# structref.register
return self.name
@njit
def MyStruct_get_vector(self):
return self.vector
# This associates the proxy with MyStructType for the given set of
# fields. Notice how we are not contraining the type of each field.
# Field types remain generic.
structref.define_proxy(MyStruct, MyStructType, ["name", "vector"])
# magictoken.ex_structref_type_definition.end
@skip_unless_scipy
class TestStructRefUsage(unittest.TestCase):
def test_type_definition(self):
np.random.seed(0)
# Redirect print
buf = []
def print(*args):
buf.append(args)
# magictoken.ex_structref_type_definition_test.begin
# Let's test our new StructRef.
# Define one in Python
alice = MyStruct("Alice", vector=np.random.random(3))
# Define one in jit-code
@njit
def make_bob():
bob = MyStruct("unnamed", vector=np.zeros(3))
# Mutate the attributes
bob.name = "Bob"
bob.vector = np.random.random(3)
return bob
bob = make_bob()
# Out: Alice: [0.5488135 0.71518937 0.60276338]
print(f"{alice.name}: {alice.vector}")
# Out: Bob: [0.88325739 0.73527629 0.87746707]
print(f"{bob.name}: {bob.vector}")
# Define a jit function to operate on the structs.
@njit
def distance(a, b):
return np.linalg.norm(a.vector - b.vector)
# Out: 0.4332647200356598
print(distance(alice, bob))
# magictoken.ex_structref_type_definition_test.end
self.assertEqual(len(buf), 3)
def test_overload_method(self):
# magictoken.ex_structref_method.begin
from numba.core.extending import overload_method
from numba.core.errors import TypingError
# Use @overload_method to add a method for
# MyStructType.distance(other)
# where *other* is an instance of MyStructType.
@overload_method(MyStructType, "distance")
def ol_distance(self, other):
# Guard that *other* is an instance of MyStructType
if not isinstance(other, MyStructType):
raise TypingError(
f"*other* must be a {MyStructType}; got {other}"
)
def impl(self, other):
return np.linalg.norm(self.vector - other.vector)
return impl
# Test
@njit
def test():
alice = MyStruct("Alice", vector=np.random.random(3))
bob = MyStruct("Bob", vector=np.random.random(3))
# Use the method
return alice.distance(bob)
# magictoken.ex_structref_method.end
self.assertIsInstance(test(), float)
|
vedujoshi/tempest | tempest/api/identity/admin/v3/test_domains_negative.py | Python | apache-2.0 | 2,964 | 0 | # Copyright 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
@decorators.attr(type=['negative', 'gate'])
@decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
def test_delete_active_domain(self):
domain = self.create_domain()
domain_id = domain['id']
self.addCleanup(self.delete_domain, domain_id)
# domain need to be disabled before deleting
self.assertRaises(lib_exc.Forbidden, self.domains_client.delete_domain,
domain_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9018461d-7d24-408d-b3fe-ae37e8cd5c9e')
def test_create_domain_with_empty_name(self):
# Domain name should not be empty
self.assertRaises(lib_exc.BadRequest,
self.domains_client.create_domain, name='')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('37b1bbf2-d664-4785-9a11-333438586eae')
def test_create_domain_with_name_length_over_64(self):
# Domain name length should not ne greater than 64 characters
d_name = 'a' * 65
self.assertRaises(lib_exc.BadRequest,
self.domains_client.create_domain,
name=d_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('43781c07-764f-4cf2-a405-953c1916f605')
def test_delete_non_existent_domain(self):
# Attempt to delete a non existent domain should fail
self.assertRaises(lib_exc.NotFound, self.domains_client.delete_domain,
data_utils.rand_uuid_hex())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e6f9e4a2-4f36-4be8-bdbc-4e199ae29427')
def test_domain_create_duplicate(self):
domain_name = data_util | s.rand_name('domain-dup')
domain = self.domains_client.create_domain(name=domain_name)['domain']
domain_id = domain['id']
self.addCleanup(self. | delete_domain, domain_id)
# Domain name should be unique
self.assertRaises(
lib_exc.Conflict, self.domains_client.create_domain,
name=domain_name)
|
ByrdOfAFeather/AlphaTrion | Community/migrations/0021_auto_20170817_2233.py | Python | mit | 775 | 0.00129 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 02:33
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = | [
('Community', '0020_auto_20170816_1400'),
]
operations = [
migrations.AlterField(
model_name='communitygameratings',
name='game_rating',
field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')]),
),
migrations.AlterField(
model_name='communityinst',
name='date',
field=models.DateField(default=da | tetime.date(2017, 8, 17)),
),
]
|
colloquium/rhevm-api | python/host-nics-test.py | Python | lgpl-2.1 | 1,769 | 0.013002 | #!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
import time
from testutils import | *
opts = parseOptions()
(host, cluster, network) = (None, None, None)
if len(opts['oargs']) >= 3:
(host, cluster, network) = opts['oarg | s'][0:3]
links = http.HEAD_for_links(opts)
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
if host is None:
continue
h = t.find(links['hosts'], host)
c = t.find(links['clusters'], cluster)
nic = fmt.HostNIC()
nic.name = 'bond0'
nic.network = fmt.Network()
nic.network.name = network
nic.slaves = []
slave = fmt.HostNIC()
slave.name = 'dummy0'
nic.slaves.append(slave)
slave = fmt.HostNIC()
slave.name = 'dummy1'
nic.slaves.append(slave)
net = t.find(links['networks'], network)
net = t.create(c.link['networks'].href, net)
nic = t.create(h.link['nics'].href, nic)
t.delete(nic.href)
t.delete(net.href)
|
GT-IDEaS/SkillsWorkshop2017 | Week01/Problem04/cruiz_04.py | Python | bsd-3-clause | 242 | 0.024793 | #!/usr/bin/env python
number = 0
for a in range(999,99,-1):
for b in range(999,99,-1):
pal=a*b
if (str(pal) == str(pal)[::-1]) | :
if (pal > number):
number = pal
break
print(number | )
|
delink/SA-kvstore_migrator | bin/splunkkvstore.py | Python | apache-2.0 | 6,834 | 0.036874 | #####
# splunkkvstore.py - Class for manipulating kvstore collections in Splunk
#####
import sys
import requests
import json
import logging
logging.getLogger(__name__)
class splunkkvstore(object):
# On instaniation, only collect the details. Do not do anything until login is called.
def __init__(self,url,*args):
self.url = url;
if len(args) == 2:
self.username = args[0]
self.password = args[1]
self.session_key = ""
elif len(args) == 1:
self.session_key = args[0]
self.username = ""
self.password = ""
# Generic function to make an API call and return the results
def api(self,method,api_endpoint,*payload):
api_endpoint = api_endpoint + "?output_mode=json"
try:
payload = payload[0]
except:
pass
if method.lower() == 'get':
try:
results = self.session.get(self.url+api_endpoint,verify=False,headers={"content-type":"application/json"})
except:
logging.error("Unable to retrieve from Splunk API: {}".format(sys.exc_info()[0]))
raise
elif method.lower() == 'post':
try:
results = self.session.post(self.url+api_endpoint,data=payload,verify=False,headers={"content-type":"application/json"})
except:
logging.error("Unable to send to Splunk API: {}".format(sys.exc_info()[0]))
raise
elif method.lower() == 'delete':
try:
results = self.session.delete(self.url+api_endpoint,verify=False,headers={"content-type":"application/json"})
except:
logging.error("Unable to delete in Splunk API: {}".format(sys.exc_info()[0]))
raise
else:
raise ValueError("Unknown method: {}".format(method))
return None
results_json = ""
try:
results_json = results.json()
except:
pass
if 'messages' in results_json:
for json_error in results_json['messages']:
if json_error['type'] == "ERROR":
raise RuntimeError(json_error['text'])
elif json_error['type'] == "WARN" and (json_error['text'] == "Login failed" or json_error['text'] == "call not properly authenticated"):
raise RuntimeError(json_error['text'])
return results
# Retrieve the session key inside of a requests.Session() object and store it in the object.
def login(self):
self.session = requests.Session()
if self.session_key != "":
requests.utils.cookiejar_from_dict({'splunkd_8089': self.session_key},self.session.cookies)
else:
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTaccess
api_endpoint = "/services/auth/login"
creds_payload = { 'cookie': '1', 'username': self.username, 'password': self.password }
login_request = self.api("POST",api_endpoint,creds_payload)
return None
# Get the list of collection names from a particular scope. Use "-" for no scope
# This will return a dict with keys of base collection names and values as the acl dict
def get_collection_list(self,owner_scope,app_scope):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/config".format(owner_scope,app_scope)
get_coll_list_request = self.api("GET",api_endpoint)
results = get_coll_list_request.json()
coll_list = {}
for entry in results['entry']:
coll_list[entry['id'].split("/")[-1]] = entry['acl']
return coll_list
def create_collection(self,owner_scope,app_scope,coll_name):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collec | tions/config".format(owner_scope,app_scope)
payload = { 'name': coll_name }
create_coll_request = self.api("POST",api_endpoint,payload | )
results = create_coll_request.json()
return results
def delete_collection(self,owner_scope,app_scope,coll_name):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/config/{}".format(owner_scope,app_scope,coll_name)
delete_coll_request = self.api("DELETE",api_endpoint)
return None
# This method returns the collection's configuration as a JSON string
def get_collection_config(self,owner_scope,app_scope,coll_name):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/config/{}".format(owner_scope,app_scope,coll_name)
get_coll_config_request = self.api("GET",api_endpoint)
return get_coll_config_request.text
# This method returns the collection's data as a JSON string
def get_collection_data(self,owner_scope,app_scope,coll_name):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/data/{}".format(owner_scope,app_scope,coll_name)
get_coll_data_request = self.api("GET",api_endpoint)
return get_coll_data_request.text
# This method sets the collection's configuration using the provided JSON string
def set_collection_config(self,owner_scope,app_scope,coll_name,configuration):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/config/{}".format(owner_scope,app_scope,coll_name)
payload = json.loads(configuration)
set_coll_config_request = self.api("POST",api_endpoint,payload)
results = set_coll_config_request.json()
return results
# This method sets the collection's data using the provided JSON string
def set_collection_data(self,owner_scope,app_scope,coll_name,data):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/data/{}/batch_save".format(owner_scope,app_scope,coll_name)
payload = json.loads(data)
set_coll_data_request = self.api("POST",api_endpoint,payload)
results = set_coll_data_request.json()
return results
# This method deletes the collection's data while leaving the collection itself intact
def delete_collection_data(self,owner_scope,app_scope,coll_name):
if not owner_scope:
owner_scope = "-"
if not app_scope:
app_scope = "-"
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTkvstore
api_endpoint = "/servicesNS/{}/{}/storage/collections/data/{}".format(owner_scope,app_scope,coll_name)
set_coll_data_request = self.api("DELETE",api_endpoint)
return None
|
census-instrumentation/opencensus-python | contrib/opencensus-ext-azure/opencensus/ext/azure/common/transport.py | Python | apache-2.0 | 10,424 | 0 | # Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
import requests
from azure.core.exceptions import ClientAuthenticationError
from azure.identity._exceptions import CredentialUnavailableError
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
_MAX_CONSECUTIVE_REDIRECTS = 10
_MONITOR_OAUTH_SCOPE = "https://monitor.azure.com//.default"
_requests_lock = threading.Lock()
_requests_map = {}
class TransportMixin(object):
def _check_stats_collection(self):
return not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL") and (not hasattr(self, '_is_stats') or not self._is_stats) # noqa: E501
def _transmit_from_storage(self):
if self.storage:
for blob in self.storage.gets():
# give a few more seconds for blob lease operation
# to reduce the chance of race (for perf consideration)
if blob.lease(self.options.timeout + 5):
envelopes = blob.get()
result = self._transmit(envelopes)
if result > 0:
blob.lease(result)
else:
blob.delete()
def _transmit(self, envelopes):
"""
Transmit the data envelopes to the ingestion service.
Return a negative value for partial success or non-retryable failure.
Return 0 if all envelopes have been successfully ingested.
Return the next retry time in seconds for retryable failure.
This function should never throw exception.
"""
if not envelopes:
return 0
exception = None
try:
start_time = time.time()
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json; charset=utf-8',
}
endpoint = self.options.endpoint
if self.options.credential:
token = self.options.credential.get_token(_MONITOR_OAUTH_SCOPE)
headers["Authorization"] = "Bearer {}".format(token.token)
endpoint += '/v2.1/track'
if self._check_stats_collection():
with _requests_lock:
_requests_map['count'] = _requests_map.get('count', 0) + 1 # noqa: E501
response = requests.post(
url=endpoint,
data=json.dumps(envelopes),
headers=headers,
timeout=self.options.timeout,
proxies=json.loads(self.options.proxies),
allow_redirects=False,
)
except requests.Timeout:
logger.warning(
'Request time out. Ingestion may be backed up. Retrying.')
exception = self.options.minimum_retry_interval
except requests.RequestException as ex:
logger.warning(
'Retrying due to transient client side error %s.', ex)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
# client side error (retryable)
exception = self.options.minimum_retry_interval
except CredentialUnavailableError as ex:
logger.warning('Credential error. %s. Dropping telemetry.', ex)
exception = -1
except ClientAuthenticationError as ex:
logger.warning('Authentication error %s', ex)
| exception = self.options.minimum_retry_interval
except Exception as ex:
logger.warning(
'Error when sending request %s. Dropping telemetry.', ex)
if self._check_stats_collection():
with _requests_lock:
_re | quests_map['exception'] = _requests_map.get('exception', 0) + 1 # noqa: E501
# Extraneous error (non-retryable)
exception = -1
finally:
end_time = time.time()
if self._check_stats_collection():
with _requests_lock:
duration = _requests_map.get('duration', 0)
_requests_map['duration'] = duration + (end_time - start_time) # noqa: E501
if exception is not None:
return exception
text = 'N/A'
data = None
try:
text = response.text
except Exception as ex:
logger.warning('Error while reading response body %s.', ex)
else:
try:
data = json.loads(text)
except Exception:
pass
if response.status_code == 200:
self._consecutive_redirects = 0
if self._check_stats_collection():
with _requests_lock:
_requests_map['success'] = _requests_map.get('success', 0) + 1 # noqa: E501
return 0
# Status code not 200 counts as failure
if self._check_stats_collection():
with _requests_lock:
_requests_map['failure'] = _requests_map.get('failure', 0) + 1 # noqa: E501
if response.status_code == 206: # Partial Content
if data:
try:
resend_envelopes = []
for error in data['errors']:
if error['statusCode'] in (
429, # Too Many Requests
500, # Internal Server Error
503, # Service Unavailable
):
resend_envelopes.append(envelopes[error['index']])
else:
logger.error(
'Data drop %s: %s %s.',
error['statusCode'],
error['message'],
envelopes[error['index']],
)
if resend_envelopes:
self.storage.put(resend_envelopes)
except Exception as ex:
logger.error(
'Error while processing %s: %s %s.',
response.status_code,
text,
ex,
)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return -response.status_code
# cannot parse response body, fallback to retry
if response.status_code in (
206, # Partial Content
429, # Too Many Requests
500, # Internal Server Error
503, # Service Unavailable
):
logger.warning(
'Transient server side error %s: %s.',
response.status_code,
text,
)
# server side error (retryable)
if self._check_stats_collection():
with _requests_lock:
# 429 counts as throttle instead of retry
if response.status_code == 429:
_requests_map['throttle'] = _requests_map.get('throttle', 0) + 1 # noqa: E501
else:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return self.options.minimum_retr |
spulec/moto | moto/sdb/__init__.py | Python | apache-2.0 | 179 | 0 | """sdb module initialization; sets value for base decorator."""
from ..core.models import base_decorator
from .models import sdb_backends

# Decorator that activates the mocked SimpleDB backends.
mock_sdb = base_decorator(sdb_backends)
|
xyvab/graphTheory | test.py | Python | gpl-3.0 | 561 | 0.024955 | #import sys
#sys.path.append("/home/vxv/Workspace/graphTheory/graph")
#print (sys.path)
from graph.graph import Graph_Adj, Graph_Mat

if __name__ == "__main__":
    # Adjacency-list demo graph.
    adjacency = {
        "a": ["d"],
        "b": ["c"],
        "c": ["b", "c", "d", "e"],
        "d": ["a", "c"],
        "e": ["c"],
        "f": [],
    }
    graph = Graph_Adj(adjacency)
    print("Vertices of graph:")
    print(graph.edges())

if __name__ == '__main__':
    # Adjacency-matrix demo graph.
    matrix = [
        [1, 0, 1],
        [0, 0, 1],
        [1, 1, 1],
    ]
    graph = Graph_Mat(matrix)
    print(graph.vertices())
FuegoFro/KeepTalkingBot | src/modules/whos_on_first.py | Python | mit | 3,598 | 0.002779 | import os
import time
from tesserocr import PSM, PyTessBaseAPI
import cv2
from constants import MODULE_CLASSIFIER_DIR, MODULE_SPECIFIC_DIR
from cv_helpers import apply_offset_to_locations, get_classifier_directories, inflate_classifier, ls, \
ls_debug
from modules import ModuleSolver, Type
from modules.whos_on_first_cv import get_buttons_and_positions, get_screen_content
from modules.whos_on_first_solution import button_to_press
from mouse_helpers import MouseButton, click_pixels, post_c | lick_delay
# Number of rounds solve() plays before the module is considered done.
NUM_TIMES_TO_SOLVE = 3
# Directory holding the trained classifier for this module's buttons.
WHOS_ON_FIRST_BUTTON_CLASSIFIER_DIR = os.path.join(MODULE_SPECIFIC_DIR, "whos_on_first", "buttons")
def test():
    """Offline harness: run screen OCR over saved module screenshots."""
    tesseract = _get_tesseract()
    # The classifier and directory lookups are kept for parity with the
    # original harness (they have load-time side effects) even though only
    # the OCR result is exercised below.
    classifier = inflate_classifier(WHOS_ON_FIRST_BUTTON_CLASSIFIER_DIR)
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
        get_classifier_directories(MODULE_CLASSIFIER_DIR)
    num_processed = 0
    for path in ls_debug(1486, 1486):
        num_processed += 1
        im = cv2.imread(path)
        screen_text = get_screen_content(im, tesseract, 9999)
def _get_tesseract():
    """Build a PyTessBaseAPI tuned for this module's button labels:
    single-line text limited to upper-case letters, apostrophe and space."""
    ocr = PyTessBaseAPI()
    ocr.SetVariable("tessedit_char_whitelist", "ABCDEFGHIJKLMNOPQRSTUVWXYZ' ")
    ocr.SetPageSegMode(PSM.SINGLE_LINE)
    return ocr
class WhosOnFirstSolver(ModuleSolver):
def __init__(self):
super(WhosOnFirstSolver, self).__init__()
self._button_classifier = inflate_classifier(WHOS_ON_FIRST_BUTTON_CLASSIFIER_DIR)
self._tesseract = _get_tesseract()
self._debug_image = 0
def get_type(self):
return Type.whos_on_first
def solve(self, image, offset, sides_info, screenshot_helper, current_module_position):
first_time = True
for _ in range(NUM_TIMES_TO_SOLVE):
if not first_time:
# Wait for the screen to redraw. Takes surprisingly long
time.sleep(4)
first_time = False
image, offset = screenshot_helper.get_current_module_screenshot_and_position()
print "\n----- In game try %s -----" % self._debug_image
cv2.imwrite(os.path.join(MODULE_SPECIFIC_DIR, "whos_on_first", "in_game_%i.png" % self._debug_image), image)
screen_text = get_screen_content(image, self._tesseract, self._debug_image)
buttons, positions = get_buttons_and_positions(
image, self._button_classifier, self._debug_image)
print screen_text
print buttons
print positions
to_press = button_to_press(screen_text, buttons)
print "Pressing", to_press
x, y = apply_offset_to_locations(positions, offset)[to_press.value]
click_pixels(MouseButton.left, x, y)
post_click_delay()
self._debug_image += 1
# Allow running this module directly to exercise the offline test harness.
if __name__ == '__main__':
    test()
|
ityaptin/encounter | encounter/models/npc.py | Python | mit | 924 | 0.002165 | __author__ = 'it'
import re
from encounter.models import base
"""Contains models for NPCs and monsters."""
# "Str 18" style ability tokens: three-letter name followed by a score.
ABILITY_TEMPLATE = re.compile(r"([a-zA-Z]{3}) (\d+)")
# "Fort +4" style save tokens: 3-4 letter name followed by a signed bonus.
SAVE_TEMPLATE = re.compile(r"([a-zA-Z]{3,4}) ([+-]\d+)")


def read_ability_scores(abilities_raw="Str 0, Dex 0, Con 0, "
                                      "Int 0, Wis 0, Cha 0"):
    """Parse an ability-score line such as "Str 18, Dex 12" into
    {'Str': 18, 'Dex': 12}; unrecognized text is ignored."""
    return {name: int(score)
            for name, score in ABILITY_TEMPLATE.findall(abilities_raw)}
def read_saves(saves_raw='Fort +4, Ref +5, Will +2'):
    """Parse a saving-throw line such as "Fort +4, Ref +5" into
    {'Fort': 4, 'Ref': 5}; unrecognized text is ignored."""
    pairs = SAVE_TEMPLATE.findall(saves_raw)
    return dict((name, int(bonus)) for name, bonus in pairs)
class NPC(base.BaseModel):
    """A non-player character or monster."""

    __name__ = "NPC"

    def __repr__(self):
        # Fall back to placeholder values when attributes were never set.
        display_name = getattr(self, "name", "npc")
        challenge = getattr(self, "cr", 0)
        return "{clazz}:name={name}, CR={cr}".format(
            clazz=self.__class__.__name__,
            name=display_name,
            cr=challenge,
        )
|
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/Supp_fall_summer_baselinebars.py | Python | mit | 1,857 | 0.012386 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/25/14
###Function: barplot comparing number of "any diagnosis" visits during the 7 week fall baseline (wks 40-6) and 7 week summer baseline (wks 33-39)
###Import data:
###Command Line: python Supp_fall_summer_baselinebars.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
bw = fxn.gp_barwidth
### functions ###
### import data ###
anydiagin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/fall_summer_baselinebars.csv','r')
anydiagin.readline() # rm header
anydiag=csv.reader(anydiagin, delimiter=',')
### process baseline data ###
# dict_anydiag[season] = (# anydiag fall BL per week, # anydiag summer BL per week)
d_anydiag = fxn.anydiag_baseline_comparison(anydiag)
# plot values
fallBL = [d_anydiag[s][0] for s in ps]
summerBL = [d_anydiag[s][1] for s in ps]
print fallBL
print summerBL
# bar chart of normalized child attack rates
xloc = np.arange(len(ps))
fig, ax = plt.subplots()
f | all = ax.bar(xloc, fallBL, bw, color='green', align='center')
summer = ax.bar(xloc+bw, summerBL, bw, color='orange', align='center')
ax.legend([fall[0], summer[0]], ('Fall BL', 'Summer BL'), loc='upper left')
ax.set_xticks(xloc+bw/2)
ax.set_xticklabels(sl, fontsize=fssml)
ax.set_ylabel('Any Diagnosis Visits', fontsize=fs)
ax.set_xlabel('Season', fontsize=fs)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Sup | p/fall_summer_baselinebars.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
|
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/format.py | Python | mit | 88,798 | 0.000631 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable=W0141
import sys
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
import numpy as np
import itertools
import csv
docstring_to_string = """
Parameters
----------
frame : DataFrame
object to render
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
    the print configuration (controlled by set_option), 'right' out
    of the box.
index_names : bool, optional
Prints the names of the indexes, default True
force_unicode : bool, default False
Always return a unicode result. Deprecated in v0.10.0 as string
formatting is now rendered to unicode by default.
Returns
-------
formatted : string (or unicode, depending on data and options)"""
class CategoricalFormatter(object):
    """Console formatter for Categorical data: renders the values as
    "[v1, v2, ...]" plus an optional footer (name, length, categories)."""

    def __init__(self, categorical, buf=None, length=True,
                 na_rep='NaN', name=False, footer=True):
        # buf defaults to a fresh in-memory unicode buffer.
        # length/name/footer toggle the corresponding footer pieces;
        # na_rep is the string used for missing values.
        self.categorical = categorical
        self.buf = buf if buf is not None else StringIO(u(""))
        self.name = name
        self.na_rep = na_rep
        self.length = length
        self.footer = footer

    def _get_footer(self):
        """Assemble the footer: optional 'Name: ...', 'Length: ...' and the
        category-level information (always appended, on its own line)."""
        footer = ''
        if self.name:
            name = com.pprint_thing(self.categorical.name,
                                    escape_chars=('\t', '\r', '\n'))
            footer += ('Name: %s' % name if self.categorical.name is not None
                       else '')
        if self.length:
            if footer:
                footer += ', '
            footer += "Length: %d" % len(self.categorical)
        level_info = self.categorical._repr_categories_info()
        # Levels are added in a newline
        if footer:
            footer += '\n'
        footer += level_info
        return compat.text_type(footer)

    def _get_formatted_values(self):
        """Format the categorical's values via the shared array formatter."""
        return format_array(self.categorical.get_values(), None,
                            float_format=None,
                            na_rep=self.na_rep)

    def to_string(self):
        """Return the full console representation (values plus footer)."""
        categorical = self.categorical
        # Empty categorical: just the footer (or an empty string).
        if len(categorical) == 0:
            if self.footer:
                return self._get_footer()
            else:
                return u('')
        fmt_values = self._get_formatted_values()
        result = ['%s' % i for i in fmt_values]
        result = [i.strip() for i in result]
        result = u(', ').join(result)
        result = [u('[')+result+u(']')]
        if self.footer:
            footer = self._get_footer()
            if footer:
                result.append(footer)
        return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self._chk_truncate()
def _chk_truncate(self):
from pandas.tools.merge import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
    def _get_footer(self):
        """Assemble the series footer: freq, name, length, dtype and, for
        categorical data, the category-level information."""
        name = self.series.name
        footer = u('')
        # Index frequency comes first, when the index defines one.
        if getattr(self.series.index, 'freq', None) is not None:
            footer += 'Freq: %s' % self.series.index.freqstr
        if self.name is not False and name is not None:
            if footer:
                footer += ', '
            series_name = com.pprint_thing(name,
                                           escape_chars=('\t', '\r', '\n'))
            footer += ("Name: %s" %
                       series_name) if name is not None else ""
        if self.length:
            if footer:
                footer += ', '
            footer += 'Length: %d' % len(self.series)
        if self.dtype is not False and self.dtype is not None:
            # NOTE: rebinds *name* to the dtype name; the categorical check
            # below deliberately reuses this rebound value.
            name = getattr(self.tr_series.dtype, 'name', None)
            if name:
                if footer:
                    footer += ', '
                footer += 'dtype: %s' % com.pprint_thing(name)
        # level infos are added to the end and in a new line, like it is done for Categoricals
        # Only added when we request a name
        if name and com.is_categorical_dtype(self.tr_series.dtype):
            level_info = self.tr_series.values._repr_categories_info()
            if footer:
                footer += "\n"
            footer += level_info
        return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series.get_values(), None,
float_format=self.float_format,
na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
maxlen = max(len(x) for x in fmt_index) # max index len
pad_space = min(maxlen, 60)
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = len(fmt_values[row_num-1])
if width |
smmribeiro/intellij-community | python/testData/refactoring/introduceVariable/argumentToUnnamedParameter.py | Python | apache-2.0 | 90 | 0.011111 | from typing import NewType
SomeType = NewType("SomeType", bytes)
SomeType(b"va<caret | >lue") | |
hlzz/dotfiles | graphics/VTK-7.0.0/Filters/General/Testing/Python/WarpVectorImage.py | Python | bsd-3-clause | 1,372 | 0.000729 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Pipeline: read a rectilinear grid, warp it along its vectors, then
# extract a volume of interest and render it colored by scalars.
reader = vtk.vtkDataSetReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/RectGrid2.vtk")
reader.Update()

warper = vtk.vtkWarpVector()
warper.SetInputConnection(reader.GetOutputPort())
warper.SetScaleFactor(0.2)

extract = vtk.vtkExtractGrid()
extract.SetInputConnection(warper.GetOutputPort())
extract.SetVOI(0, 100, 0, 100, 7, 15)

mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(extract.GetOutputPort())
mapper.SetScalarRange(0.197813, 0.710419)

actor = vtk.vtkActor()
actor.SetMapper(mapper)

# Graphics: render window, renderer and interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actor, then configure background, size and the camera.
ren1.AddActor(actor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.76213, 10.712)
cam1.SetFocalPoint(-0.0842503, -0.136905, 0.610234)
cam1.SetPosition(2.53813, 2.2678, -5.22172)
cam1.SetViewUp(-0.241047, 0.930635, 0.275343)

# Render the image.
iren.Initialize()
#iren.Start()
|
jpmunic/udest | watson_examples/conversation_v1.py | Python | mit | 771 | 0.003891 | import json
from watson_developer_cloud import ConversationV1

conversation = ConversationV1(
    username='c4129348-b14c-4d2d-80ca-25b1acc59ca7',
    password='dHfhKbO2pXH4',
    version='2016-09-20')

# replace with your own workspace_id
workspace_id = 'b42ee794-c019-4a0d-acd2-9e4d1d016767'

response = conversation.message(workspace_id=workspace_id,
                                message_input={'text': 'What\'s the weather like?'})
print(json.dumps(response, indent=2))

# When you send multiple requests for the same conversation, include the
# context object from the previous response, e.g.:
# response = conversation.message(workspace_id=workspace_id, message_input={'text': 'turn the wipers on'},
#                                 context=response['context'])
# print(json.dumps(response, indent=2))
|
ceumicrodata/adatmesterseg | code/class11.py | Python | cc0-1.0 | 1,190 | 0.001681 | import | pandas
s = pandas.Series([1, 2, 3])
print s
data = {'part': ['fidesz', 'jobbik'], 'szavazat': [60, 40]}
frame = pandas.DataFrame(data)
print frame
df = pandas.read_csv('../data/result_part.csv')
print df.head()
print df[:100 | ]
print df['part']
print df['szavazat']
df['uj'] = 'NaN'
print df.tail()
new = df[df['megye'] == 1][df['telepules'] == 1][df['szavazokor'] == 1]
print df[df.megye == 1][df.telepules == 1][df.szavazokor == 1]
print new
print df.describe()
print df.drop_duplicates()
print df['part'].unique()
print df.groupby('part').mean()
group_megye_part = df['szavazat'].groupby([df['megye'], df['part']])
print group_megye_part.mean()
print group_megye_part.sum()
print group_megye_part.min()
print group_megye_part.median()
print group_megye_part.mean().unstack('part')
osszes_szavazat = df['szavazat'].sum()
group_partok_sum = df['szavazat'].groupby(df['part']).sum()
def calculate_percent(i):
szazalekos_eredmeny = 100 * i / float(osszes_szavazat)
return szazalekos_eredmeny
print group_partok_sum.apply(calculate_percent)
group_partok_megye_sum = group_megye_part.sum()
print group_partok_megye_sum.groupby(level=0).apply(calculate_percent)
|
sharhalakis/vdns | src/vdns/zone0.py | Python | gpl-3.0 | 14,983 | 0.014683 | #!/usr/bin/env python
# coding=UTF-8
#
import copy
import time
import socket
import logging
import vdns.common
class Zone0:
"""
Base class for producing zone files
"""
def __init__(self, dt):
self.dt=dt
def fmttd(self, td):
"""
Format a timedelta value to something that's appropriate for
zones
"""
lst=((1, '', 'second', 'seconds'),
(60, 'M', 'minute', 'minutes'),
(3600, 'H', 'hour', 'hours'),
(86400, 'D', 'day', 'days'),
(86400*7, 'W', 'week', 'weeks'))
ts=int(td.total_seconds())
# Find the first value that doesn't give an exact result
ent=lst[0]
for i in lst:
if (ts % i[0]) != 0:
break
ent=i
ret1="%d%s" % (int(ts/ent[0]), ent[1])
# Now form the human readable string
rem=ts
ret2=[]
for i in reversed(lst):
t=int(rem / i[0])
rem=rem % i[0]
if t==0:
continue
if t==1:
unit=i[2]
else:
unit=i[3]
st='%s %s' % (t, unit)
ret2.append(st)
# Speadup
if rem==0:
break
ret2st=', '.join(ret2)
ret=(ret1, ret2st)
return(ret)
def make_ptr_name(self, rec):
"""
Format the name of a PTR record (i.e. reverse IPv4 or IPv6)
"""
if rec['family']==4:
rev=rec['ip_str'].split('.')
rev.reverse()
rev='.'.join(rev)
ret=rev + '.in-addr.arpa'
elif rec['family']==6:
ip2=rec['ip_str'] + '/128'
ret=vdns.common.reverse_name(ip2)
# logging.error('Unhandled address family: %s' % (rec['family'], ))
# ret=''
else:
logging.error('Unknown address family: %s' % (rec['family'], ))
ret=''
# Get rid of the suffix if we can
domain=self.dt['_domain']
if ret[-len(domain):]==domain:
ret=ret[:-len(domain)-1]
return(ret)
# def make_soa(self, incserial):
    def make_soa(self):
        """Render the zone's SOA preamble: $ORIGIN, $TTL and the SOA record.

        Timer fields (ttl/refresh/retry/expire/minimum) come from self.dt as
        timedeltas; each is rendered via fmttd() in both its short form and a
        human-readable comment. Returns the preamble as one string.
        """
        dt=self.dt
        dt2={
            # 'serial': self.mkserial(dt, incserial),
            'serial': dt['serial'],
            'domain': dt['_domain'],
            'contact': dt['contact'],
            'ns0': dt['ns0'],
        }
        # Add both the short value (e.g. '1H') and the human-readable
        # description (e.g. '1 hour') for every timer field.
        times=('ttl', 'refresh', 'retry', 'expire', 'minimum')
        for i in times:
            t=self.fmttd(dt[i])
            dt2[i]=t[0]
            dt2[i+'2']=t[1]
        st="""\
$ORIGIN %(domain)s.
$TTL %(ttl)s ; %(ttl2)s
@ %(ttl)s IN SOA %(ns0)s. %(contact)s. (
                        %(serial)-10s ; serial
                        %(refresh)s ; refresh (%(refresh2)s)
                        %(retry)s ; retry (%(retry2)s)
                        %(expire)s ; expire (%(expire2)s)
                        %(minimum)s ; minimum (%(minimum2)s)
                        )
""" % dt2
        return(st)
def fmtrecord(self, name, ttl, rr, data):
"""
Format a record
This is a dump function that concatenates data, translating ttl
Use mkrecord instead
@param name The hostname
@param ttl The TTL in seconds
@param rr The type of the record
@param data A freeform string
@return The formed entry
"""
if ttl==None:
ttl2=''
else:
t=self.fmttd(ttl)
ttl2=' ' + t[0]
ret="%-16s%s IN %s %s" % \
(name, ttl2, rr, data)
return(ret)
def split_txt(self, data):
"""
Split TXT data to chunks of max 255 bytes to comply with bind
@param data An unquoted string of arbitrary length
@return A quoted string to be used as TXT record
"""
limit=255
items=[]
data2=copy.deepcopy(data)
while len(data2)>limit:
items.append(data2[:limit])
data2=data2[limit:]
items.append(data2)
ret='"' + '" "'.join(items) + '"'
return(ret)
def mkrecord(self, rr, rec):
"""
Create a record based on RR (the type)
@param rr The record type. One of: ns, mx, ds
@return The formed entry
"""
# If this is true then we will make sure that there is a dot
# at the end of the name
needsdot=False
# Allow this to be changed by a type (i.e. PTR)
hostname=None
if rr=='mx':
rrname='MX'
data="%-4d %s" % (rec['priority'], rec['mx'])
if rec['mx'].count('.')>=2:
needsdot=True
elif rr=='ns':
rrname='NS'
data=rec['ns']
if rec['ns'].count('.')>=2:
needsdot=True
elif rr=='ds':
rrname='DS'
data=[]
data.append("%d %d %d %s" % (rec['keyid'], rec['algorithm'],
1, rec['digest_sha1']))
data.append("%d %d %d %s" % (rec['keyid'], rec['algorithm'],
2, rec['digest_sha256']))
elif rr=='a':
rrname='A'
data=rec['ip_str'].split('/')[0]
elif rr=='aaaa':
rrname='AAAA'
data=rec['ip_str'].split('/')[0]
elif rr=='ptr':
# TODO: This is broken. We need to inverse the ip
# and take care of ipv6 as well
rrname='PTR'
data="%s.%s." % (rec['hostname'], rec['domain'])
hostname=self.make_ptr_name(rec)
needsdot=True
elif rr in ('cname', 'cnames'):
rrname | ='CNAME'
data=rec['hostname0']
if rec['hostname0'].count('.')>=2:
needsdot=True
elif rr=='txt':
rrname='TXT'
data='"%s"' % (rec['txt'],)
elif rr=='dnssec':
rrname='DNSKEY'
if rec['ksk']:
flags=257
else:
flags=256
# | rec['hostname']=rec['domain']
data='%s 3 %s %s' % (flags, rec['algorithm'], rec['key_pub'])
elif rr=='sshfp':
rrname='SSHFP'
data='%(keytype)d %(hashtype)d %(fingerprint)s' % rec
elif rr=='dkim':
rrname='TXT'
hostname='%(selector)s._domainkey' % rec
if 'hostname' in rec and rec['hostname']:
hostname+='.'+rec['hostname']
data0=[]
data0.append('v=DKIM1')
if rec['g']!=None: data0.append('g=' + rec['g'])
data0.append('k=' + rec['k'])
data0.append('s=email')
if rec['t'] or not rec['subdomains']:
if rec['t']:
if rec['subdomains']:
t='y'
else:
t='s:y'
else:
t='s'
data0.append('t='+t)
if rec['h']!=None: data0.append('h=' + rec['h'])
data0.append('p=' + rec['key_pub'])
data=self.split_txt('; '.join(data0))
elif rr=='srv':
rrname='SRV'
hostname='_%(service)s._%(protocol)s' % rec
if rec['name']!='':
hostname+='.' + rec['name']
data='%(priority)s %(weight)s %(port)s %(target)s' % rec
if rec['target'].count('.')>=1:
needsdot=True
else:
vdns.common.abort("Unhandled RR type %s: %s" % (rr, rec))
if type(data)!=list:
data=[data]
if needsdot:
for i in range(len(data)):
if data[i][-1]!='.':
data[i]+='.'
if hostname==None:
if 'hostname' in rec:
hostname=rec['hostname']
else:
hostname=''
if hostname=='.':
hostname=''
ttl=rec['ttl']
#ret=self.fmtrecord(hostname, self.dt['ttl'], rrname, data)
ret=''
|
antismap/MICshooter | sources/lib/eztext.py | Python | mit | 11,212 | 0.0198 | # input lib
from pygame.locals import *
import pygame, string
class ConfigError(KeyError):
    """Raised when ``options`` contains a key not declared in ``look_for``."""
    pass


class Config:
    """ A utility for configuration.

    Each entry of *look_for* is a ``(name, default_source)`` pair.  If *name*
    appears in *options* its value is copied onto ``self``; otherwise
    *default_source* -- a string of Python source -- is evaluated to produce
    the default.  Any option key that was not declared raises ConfigError.
    """
    def __init__(self, options, *look_for):
        declared = []
        for key in look_for:
            name = key[0]
            if name in options:
                # Bug-prone string-built exec replaced with setattr.
                setattr(self, name, options[name])
            else:
                # Defaults are given as source strings (some construct
                # pygame objects), so they must be evaluated lazily here.
                setattr(self, name, eval(key[1]))
            declared.append(name)
        # Reject any option the caller did not declare.
        for key in options:
            if key not in declared:
                raise ConfigError(key + ' not expected as option')
class Input:
""" A text input for pygame apps """
def __init__(self, **options):
""" Options: x, y, font, color, restricted, maxlength, prompt """
self.options = Config(options, ['x', '0'], ['y', '0'], ['font', 'pygame.font.Font(None, 32)'],
['color', '(0,0,0)'], ['restricted', '\'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!"#$%&\\\'()*+,-./:;<=>?@[\]^_`{|}~\''],
['maxlength', '-1'], ['prompt', '\'\''])
self.x = self.options.x; self.y = self.options.y
self.font = self.options.font
self.color = self.options.color
self.restricted = self.options.restricted
self.maxlength = self.options.maxlength
self.prompt = self.options.prompt; self.value = ''
self.shifted = False
    def set_pos(self, x, y):
        """ Set the position to x, y """
        self.x = x
        self.y = y
    def set_font(self, font):
        """ Set the font for the input (a pygame.font.Font instance). """
        self.font = font
    def draw(self, surface):
        """ Draw the text input to a surface """
        # Render prompt + current value in one pass; 1 enables antialiasing.
        text = self.font.render(self.prompt+self.value, 1, self.color)
        surface.blit(text, (self.x, self.y))
    def getText(self):
        """ Return the text typed so far (without the prompt). """
        return self.value
    def hasTyped(self):
        """ Return True once the user has typed at least one character. """
        if self.value =="":
            return False
        else:
            return True
def update(self, events):
""" Update the input based on passed events """
for event in events:
if event.type == KEYUP:
if event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = False
if event.type == KEYDOWN:
if event.key == K_BACKSPACE: self.value = self.value[:-1]
elif event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = True
elif event.key == K_SPACE: self.value += ' '
if not self.shifted:
if event.key == K_a and 'a' in self.restricted: self.value += 'a'
elif event.key == K_b and 'b' in self.restricted: self.value += 'b'
elif event.key == K_c and 'c' in self.restricted: self.value += 'c'
elif event.key == K_d and 'd' in self.restricted: self.value += 'd'
elif event.key == K_e and 'e' in self.restricted: self.value += 'e'
elif event.key == K_f and 'f' in self.restricted: self.value += 'f'
elif event.key == K_g and 'g' in self.restricted: self.value += 'g'
elif event.key == K_h and 'h' in self.restricted: self.value += 'h'
elif event.key == K_i and 'i' in self.restricted: self.value += 'i'
elif event.key == K_j and 'j' in self.restricted: self.value += 'j'
elif event.key == K_k and 'k' in self.restricted: self.value += 'k'
elif event.key == K_l and 'l' in self.restricted: self.value += 'l'
elif event.key == K_m and 'm' in self.restricted: self.value += 'm'
elif event.key == K_n and 'n' in self.restricted: self.value += 'n'
elif event.key == K_o and 'o' in self.restricted: self.value += 'o'
elif event.key == K_p and 'p' in self.restricted: self.value += 'p'
elif event.key == K_q and 'q' in self.restricted: self.value += 'q'
elif event.key == K_r and 'r' in self.restricted: self.value += 'r'
elif event.key == K_s and 's' in self.restricted: self.value += 's'
elif event.key == K_t and 't' in self.restricted: self.value += 't'
elif event.key == K_u and 'u' in self.restricted: self.value += 'u'
elif event.key == K_v and 'v' in self.restricted: self.value += 'v'
elif event.key == K_w and 'w' in self.restricted: self.value += 'w'
elif event.key == K_x and 'x' in self.restricted: self.value += 'x'
elif event.key == K_y and 'y' in self.restricted: self.value += 'y'
elif event.key == K_z and 'z' in self.restricted: self.value += 'z'
elif event.key == K_0 and '0' in self.restricted: self.value += '0'
elif event.key == K_1 and '1' in self.restricted: self.value += '1'
elif event.key == K_2 and '2' in self.restricted: self.value += '2'
elif event.key == | K_3 and '3' in self.restricted: self.value += '3'
elif event.key == K_4 and '4' in self.restricted: self.value | += '4'
elif event.key == K_5 and '5' in self.restricted: self.value += '5'
elif event.key == K_6 and '6' in self.restricted: self.value += '6'
elif event.key == K_7 and '7' in self.restricted: self.value += '7'
elif event.key == K_8 and '8' in self.restricted: self.value += '8'
elif event.key == K_9 and '9' in self.restricted: self.value += '9'
elif event.key == K_BACKQUOTE and '`' in self.restricted: self.value += '`'
elif event.key == K_MINUS and '-' in self.restricted: self.value += '-'
elif event.key == K_EQUALS and '=' in self.restricted: self.value += '='
elif event.key == K_LEFTBRACKET and '[' in self.restricted: self.value += '['
elif event.key == K_RIGHTBRACKET and ']' in self.restricted: self.value += ']'
elif event.key == K_BACKSLASH and '\\' in self.restricted: self.value += '\\'
elif event.key == K_SEMICOLON and ';' in self.restricted: self.value += ';'
elif event.key == K_QUOTE and '\'' in self.restricted: self.value += '\''
elif event.key == K_COMMA and ',' in self.restricted: self.value += ','
elif event.key == K_PERIOD and '.' in self.restricted: self.value += '.'
elif event.key == K_SLASH and '/' in self.restricted: self.value += '/'
elif self.shifted:
if event.key == K_a and 'A' in self.restricted: self.value += 'A'
elif event.key == K_b and 'B' in self.restricted: self.value += 'B'
elif event.key == K_c and 'C' in self.restricted: self.value += 'C'
elif event.key == K_d and 'D' in self.restricted: self.value += 'D'
elif event.key == K_e and 'E' in self.restricted: self.value += 'E'
elif event.key == K_f and 'F' in self.restricted: self.value += 'F'
elif event.key == K_g and 'G' in self.restricted: self.value += 'G'
elif event.key == K_h and 'H' in self.restricted: self.value += 'H'
elif event.key == K_i and 'I' in self.restricted: self.value += 'I'
elif event.key == K_j and 'J' in self.restricted: self.value += 'J'
elif event.key == K_k and 'K' in self.restricted: self.value += 'K'
elif event.key == K_l and 'L' in self.restricted: self.value += 'L'
elif event.key == K_m and 'M' in self.restricted: self.value += 'M'
elif event.key == K_n and 'N' in self.restricted: self.value += 'N'
elif event.key == K_o and 'O' in self.restricted: self.va |
scikit-optimize/scikit-optimize | skopt/sampler/base.py | Python | bsd-3-clause | 689 | 0 |
from collections import defaultdict
class InitialPointGenerator(object):
    """Base class for generators that produce initial candidate points."""

    def generate(self, dimensions, n_samples, random_state=None):
        """Create ``n_samples`` points within ``dimensions``.

        Subclasses must override this method.
        """
        # Bug fix: ``raise NotImplemented`` raised a TypeError at runtime
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError

    def set_params(self, **params):
        """
        Set the parameters of this initial point generator.

        Parameters
        ----------
        **params : dict
            Generator parameters.

        Returns
        -------
        self : object
            Generator instance.
        """
        if not params:
            # Simple optimization to gain speed (inspect is slow)
            return self
        for key, value in params.items():
            setattr(self, key, value)
        return self
|
B3AU/waveTree | sklearn/linear_model/omp.py | Python | bsd-3-clause | 31,732 | 0.000126 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import array2d, as_float_array, check_arrays
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.arrayfuncs import solve_triangular
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.
    y : array, shape (n_samples,)
        Input targets
    n_nonzero_coefs : int
        Targeted number of non-zero elements
    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.
    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution
    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector
    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)
    # Threshold below which squared inner products are treated as zero.
    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))
    # alpha[i] = <X_i, y>: correlation of each atom with the target.
    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping
    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    # L holds the (growing) lower-triangular Cholesky factor of the Gram
    # matrix of the active atoms; only the top-left n_active block is valid.
    L = np.empty((max_features, max_features), dtype=X.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)
    while True:
        # Greedily select the atom most correlated with the current residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Move the selected atom into the active block (column n_active),
        # keeping alpha and the index bookkeeping in sync.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        # Recompute the residual with the new coefficient vector.
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break
    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active]
    else:
        return gamma, indices[:n_active]
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L | [:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
| beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if tol |
reyoung/Paddle | python/paddle/fluid/tests/test_if_else_op.py | Python | apache-2.0 | 8,444 | 0.000237 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import split_lod_tensor
from paddle.fluid.layers.control_flow import merge_lod_tensor
from paddle.fluid.layers.control_flow import ConditionalBlock
import unittest
import numpy as np
class TestMNISTIfElseOp(unittest.TestCase):
    """Train tiny MNIST classifiers whose graphs branch on the label value.

    Both tests are disabled (``not_`` prefix) pending the FIXME below; each
    passes as soon as any batch reaches a loss below 1.0.
    """
    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_raw_api(self):
        # Branching built from the low-level primitives:
        # split_lod_tensor / ConditionalBlock / merge_lod_tensor.
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')
            label = layers.data(name='y', shape=[1], dtype='int64')
            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            # Route each sample into the true/false sub-network by the mask.
            true_image, false_image = split_lod_tensor(input=image, mask=cond)
            true_out = layers.create_tensor(dtype='float32')
            true_cond = ConditionalBlock([cond])
            with true_cond.block():
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=true_out)
            false_out = layers.create_tensor(dtype='float32')
            false_cond = ConditionalBlock([cond])
            with false_cond.block():
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=false_out)
            prob = merge_lod_tensor(
                in_true=true_out, in_false=false_out, mask=cond, x=image)
            loss = layers.cross_entropy(input=prob, label=label)
            avg_loss = layers.mean(loss)
            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=10)
        place = core.CPUPlace()
        exe = Executor(place)
        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)
                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                # Stop as soon as the loss target is reached.
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)
    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_ifelse(self):
        # Same network expressed with the higher-level layers.IfElse API.
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')
            label = layers.data(name='y', shape=[1], dtype='int64')
            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)
            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)
            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)
            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)
            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)
        place = core.CPUPlace()
        exe = Executor(place)
        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))
                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)
class TestIfElse(unittest.TestCase):
    """Check layers.IfElse against an equivalent NumPy computation.

    Elements below ``cond_value`` go through exp(), the rest through tanh();
    the reduced sums from the two implementations must match.
    """
    def set_test_case(self):
        # condition is: self.data < self.cond_value
        self.cond_value = 0.5
        self.data = np.random.rand(25, 1).astype(np.float32)
    def numpy_cal(self):
        """Reference result computed directly with NumPy."""
        s1 = self.data[np.where(self.data < self.cond_value)]
        res = np.sum(np.exp(s1))
        s2 = self.data[np.where(self.data >= self.cond_value)]
        res += np.sum(np.tanh(s2))
        return res
    def compare_ifelse_op_and_numpy(self, place):
        self.set_test_case()
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            src = layers.data(name='data', shape=[1], dtype='float32')
            cond = layers.fill_constant(
                [1], dtype='float32', value=self.cond_value)
            ifcond = layers.less_than(x=src, y=cond)
            ie = layers.IfElse(ifcond)
            with ie.true_block():
                true_target = ie.input(src)
                true_target = fluid.layers.exp(true_target)
                ie.output(true_target)
            with ie.false_block():
                false_target = ie.input(src)
                false_target = fluid.layers.tanh(false_target)
                ie.output(false_target)
            if_out = ie()
            out = layers.reduce_sum(if_out)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            fetch_list = [out]
            o1, = exe.run(fluid.default_main_program(),
                          feed={'data': self.data},
                          fetch_list=[out])
            o2 = self.numpy_cal()
            self.assertTrue(
                np.allclose(
                    o1, o2, atol=1e-8),
                "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))
    def test_cpu(self):
        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())
    def test_cuda(self):
        # Skip silently when this build has no CUDA support.
        if not core.is_compiled_with_cuda():
            return
        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))
class TestIfElseTrueBranch(TestIfElse):
    """Variant in which the condition is always true."""
    def set_test_case(self):
        # condition is: self.data < self.cond_value; with cond_value=10 every
        # sample from rand() (in [0, 1)) satisfies it, so only the true
        # branch is exercised.
        self.cond_value = 10.
        self.data = np.random.rand(25, 1).astype(np.float32)
|
shiquanwang/numba | numba/codegen/coerce.py | Python | bsd-2-clause | 8,887 | 0.0018 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm
from numba import *
from numba import nodes
from numba.typesystem import is_obj, promote_to_native
from numba.codegen.codeutils import llvm_alloca, if_badval
from numba.codegen import debug
class ObjectCoercer(object):
"""
Object that knows how to convert to/from objects using Py_BuildValue
and PyArg_ParseTuple.
"""
# TODO: do all of this in a specializer
type_to_buildvalue_str = {
char: "b",
short: "h",
int_: "i",
long_: "l",
longlong: "L",
Py_ssize_t: "n",
npy_intp: "n", # ?
size_t: "n", # ?
uchar: "B",
ushort: "H",
uint: "I",
ulong: "k",
ulonglong: "K",
float_: "f",
double: "d",
complex128: "D",
object_: "O",
bool_: "b", # ?
c_string_type: "s",
char.pointer() : "s",
}
def __init__(self, translator):
self.context = translator.context
self.translator = translator
self.builder = translator.builder
self.llvm_module = self.builder.basic_block.function.module
sig, self.py_buildvalue = self.context.external_library.declare(
self.llvm_module, 'Py_BuildValue')
sig, self.pyarg_parsetuple = self.context.external_library.declare(
self.llvm_module, 'PyArg_ParseTuple')
sig, self.pyerr_clear = self.context.external_library.declare(
self.llvm_module, 'PyErr_Clear')
self.function_cache = translator.function_cache
self.NULL = self.translator.visit(nodes.NULL_obj)
def check_err(self, llvm_result, callback=None, cmp=llvm.core.ICMP_EQ,
pos_node=None):
"""
Check for errors. If the result is NULL, and error should have been set
Jumps to translator.error_label if an exception occurred.
"""
assert llvm_result.type.kind == llvm.core.TYPE_POINTER, llvm_result.type
int_result = self.translator.builder.ptrtoint(llvm_result,
llvm_types._intp)
NULL = llvm.core.Constant.int(int_result.type, 0)
if callback:
if_badval(self.translator, int_result, NULL,
callback=callback or default_callback, cmp=cmp)
else:
test = self.builder.icmp(cmp, int_result, NULL)
name = 'no_error'
if hasattr(pos_node, 'lineno'):
name = 'no_error_%s' % error.format_pos(pos_node).rstrip(": ")
bb = self.translator.append_basic_block(name)
self.builder.cbranch(test, self.translator.error_label, bb)
self.builder.position_at_end(bb)
return llvm_result
def check_err_int(self, llvm_result, badval):
llvm_badval = llvm.core.Constant.int(llvm_result.type, badval)
if_badval(self.translator, llvm_result, llvm_badval,
callback=lambda b, *args: b.branch(self.trans | lator.error_label))
def _create_llvm_string(self, str):
return self.translator.visit(nod | es.ConstNode(str, c_string_type))
def lstr(self, types, fmt=None):
"Get an llvm format string for the given types"
typestrs = []
result_types = []
for type in types:
if is_obj(type):
type = object_
elif type.is_int:
type = promote_to_native(type)
result_types.append(type)
typestrs.append(self.type_to_buildvalue_str[type])
str = "".join(typestrs)
if fmt is not None:
str = fmt % str
if debug.debug_conversion:
self.translator.puts("fmt: %s" % str)
result = self._create_llvm_string(str)
return result_types, result
def buildvalue(self, types, *largs, **kwds):
# The caller should check for errors using check_err or by wrapping
# its node in an ObjectTempNode
name = kwds.get('name', '')
fmt = kwds.get('fmt', None)
types, lstr = self.lstr(types, fmt)
largs = (lstr,) + largs
if debug.debug_conversion:
self.translator.puts("building... %s" % name)
# func_type = object_(*types).pointer()
# py_buildvalue = self.builder.bitcast(
# self.py_buildvalue, func_type.to_llvm(self.context))
py_buildvalue = self.py_buildvalue
result = self.builder.call(py_buildvalue, largs, name=name)
if debug.debug_conversion:
self.translator.puts("done building... %s" % name)
nodes.print_llvm(self.translator.env, object_, result)
self.translator.puts("--------------------------")
return result
def npy_intp_to_py_ssize_t(self, llvm_result, type):
# if type == minitypes.npy_intp:
# lpy_ssize_t = minitypes.Py_ssize_t.to_llvm(self.context)
# llvm_result = self.translator.caster.cast(llvm_result, lpy_ssize_t)
# type = minitypes.Py_ssize_t
return llvm_result, type
def py_ssize_t_to_npy_intp(self, llvm_result, type):
# if type == minitypes.npy_intp:
# lnpy_intp = minitypes.npy_intp.to_llvm(self.context)
# llvm_result = self.translator.caster.cast(llvm_result, lnpy_intp)
# type = minitypes.Py_ssize_t
return llvm_result, type
def convert_single_struct(self, llvm_result, type):
types = []
largs = []
for i, (field_name, field_type) in enumerate(type.fields):
types.extend((c_string_type, field_type))
largs.append(self._create_llvm_string(field_name))
struct_attr = self.builder.extract_value(llvm_result, i)
largs.append(struct_attr)
return self.buildvalue(types, *largs, name='struct', fmt="{%s}")
def convert_single(self, type, llvm_result, name=''):
"Generate code to convert an LLVM value to a Python object"
llvm_result, type = self.npy_intp_to_py_ssize_t(llvm_result, type)
if type.is_struct:
return self.convert_single_struct(llvm_result, type)
elif type.is_complex:
# We have a Py_complex value, construct a Py_complex * temporary
new_result = llvm_alloca(self.translator.lfunc, self.builder,
llvm_result.type, name='complex_temp')
self.builder.store(llvm_result, new_result)
llvm_result = new_result
return self.buildvalue([type], llvm_result, name=name)
def build_tuple(self, types, llvm_values):
"Build a tuple from a bunch of LLVM values"
assert len(types) == len(llvm_values)
return self.buildvalue(lstr, *llvm_values, name='tuple', fmt="(%s)")
    def build_list(self, types, llvm_values):
        "Build a list from a bunch of LLVM values"
        # Py_BuildValue with a "[...]" format produces a Python list.
        assert len(types) == len(llvm_values)
        return self.buildvalue(types, *llvm_values, name='list', fmt="[%s]")
    def build_dict(self, key_types, value_types, llvm_keys, llvm_values):
        "Build a dict from a bunch of LLVM values"
        # Interleave key/value types and key/value LLVM operands so the
        # Py_BuildValue "{...}" format consumes them pairwise as k:v.
        types = []
        largs = []
        for k, v, llvm_key, llvm_value in zip(key_types, value_types,
                                              llvm_keys, llvm_values):
            types.append(k)
            types.append(v)
            largs.append(llvm_key)
            largs.append(llvm_value)
        return self.buildvalue(types, *largs, name='dict', fmt="{%s}")
def parse_tuple(self, lstr, llvm_tuple, types, name=''):
"Unpack a Python tuple into typed llvm variables"
lresults = []
for i, type in enumerate(types):
var = llvm_alloca(self.translator.lfunc, self.builder,
type.to_llvm(self.context),
name=name and "%s%d" % (name, i))
lresults.append(var)
largs = [llvm_tuple, lstr] + lresults
if debug.debug_conversion:
self.translator.puts("parsing tuple... %s" % (types,))
nodes.print_llv |
tartavull/google-cloud-python | trace/tests/__init__.py | Python | apache-2.0 | 575 | 0 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
typemytype/RoboFontExamples | fontGroups/changeGroupSingleQuote.py | Python | mit | 514 | 0.005837 |
for f in AllFonts():
hasEdits = False
for name, members in f.gro | ups.items():
groupHasEdits = False
new = []
for m in members:
if m[-1] == "'":
groupHasEdits = True
hasEdits = True
new.append(m[:-1])
else:
new.append(m)
f.groups[name]=new
if hasEdits:
print "edits made in ", f.info.fullName
f.save()
else:
print "no edits made", f.info.ful | lName |
tsdmgz/ansible | lib/ansible/module_utils/facts/hardware/darwin.py | Python | gpl-3.0 | 3,527 | 0.001985 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
class DarwinHardware(Hardware):
    """
    Darwin-specific subclass of Hardware. Defines memory and CPU facts:
    - processor
    - processor_cores
    - memtotal_mb
    - memfree_mb
    - model
    - osversion
    - osrevision
    """
    platform = 'Darwin'
    def populate(self, collected_facts=None):
        """Gather all Darwin hardware facts into a single dict."""
        hardware_facts = {}
        # One sysctl invocation fetches all three subtrees; the individual
        # fact getters below read from this cached mapping.
        self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
        mac_facts = self.get_mac_facts()
        cpu_facts = self.get_cpu_facts()
        memory_facts = self.get_memory_facts()
        hardware_facts.update(mac_facts)
        hardware_facts.update(cpu_facts)
        hardware_facts.update(memory_facts)
        return hardware_facts
    def get_system_profile(self):
        """Parse `system_profiler SPHardwareDataType` output into a dict."""
        rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
        if rc != 0:
            return dict()
        system_profile = dict()
        for line in out.splitlines():
            if ': ' in line:
                (key, value) = line.split(': ', 1)
                # Collapse runs of internal whitespace in the value.
                system_profile[key.strip()] = ' '.join(value.strip().split())
        return system_profile
    def get_mac_facts(self):
        """Collect model name and OS version/revision facts."""
        mac_facts = {}
        rc, out, err = self.module.run_command("sysctl hw.model")
        if rc == 0:
            # Output looks like "hw.model: MacBookPro14,1".
            mac_facts['model'] = out.splitlines()[-1].split()[1]
        mac_facts['osversion'] = self.sysctl['kern.osversion']
        mac_facts['osrevision'] = self.sysctl['kern.osrevision']
        return mac_facts
    def get_cpu_facts(self):
        """Collect processor name and core counts (Intel or PowerPC)."""
        cpu_facts = {}
        if 'machdep.cpu.brand_string' in self.sysctl: # Intel
            cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
            cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
        else: # PowerPC
            system_profile = self.get_system_profile()
            cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
            cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
        cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
        return cpu_facts
    def get_memory_facts(self):
        """Collect total and "free" memory in MiB."""
        memory_facts = {}
        memory_facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024
        # NOTE(review): 'hw.usermem' reports non-kernel memory rather than
        # free memory, so 'memfree_mb' is approximate -- confirm intent.
        rc, out, err = self.module.run_command("sysctl hw.usermem")
        if rc == 0:
            memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
        return memory_facts
class DarwinHardwareCollector(HardwareCollector):
    """Collector wiring: selects DarwinHardware on the 'Darwin' platform."""
    _fact_class = DarwinHardware
    _platform = 'Darwin'
|
palmer-dabbelt/riscv-qemu | scripts/tracetool/backend/ust.py | Python | gpl-2.0 | 804 | 0.006234 | #!/usr/bin/env | python
# -*- coding: utf-8 -*-
"""
LTTng User Space Tracing backend.
"""
__author__ = "Lluís Vilanova | <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PUBLIC = True
def generate_h_begin(events):
    """Emit the per-file prologue: LTTng UST headers for the trace events."""
    out('#include <lttng/tracepoint.h>',
        '#include "trace/generated-ust-provider.h"',
        '')
def generate_h(event):
    """Emit the per-event tracepoint invocation line for the header."""
    joined = ", ".join(event.args.names())
    if len(event.args) > 0:
        # Arguments follow the provider/event pair, so prefix with a comma.
        joined = ", " + joined
    out('    tracepoint(qemu, %(name)s%(tp_args)s);',
        name=event.name,
        tp_args=joined)
|
vandenheuvel/tribler | Tribler/community/allchannel/__init__.py | Python | lgpl-3.0 | 125 | 0.008 | """
The allchannel community is used to collect votes for channels and thereby discover which channels are most popular.
"""
|
tiredtyrant/CloudBot | plugins/chan_track.py | Python | gpl-3.0 | 23,414 | 0.000043 | """
Track channel ops for permissions checks
Requires:
server_info.py
"""
import gc
import json
import logging
import weakref
from collections import Mapping, Iterable, namedtuple
from contextlib import suppress
from numbers import Number
from operator import attrgetter
from irclib.parser import Prefix
import cloudbot.bot
from cloudbot import hook
from cloudbot.clients.irc import IrcClient
from cloudbot.util import web
from cloudbot.util.mapping import KeyFoldDict, KeyFoldMixin
logger = logging.getLogger("cloudbot")
class WeakDict(dict):
    """
    A subclass of dict to allow it to be weakly referenced
    (plain dict instances cannot be the target of a weakref).
    """
class MemberNotFoundException(KeyError):
    """
    Raised when a nick cannot be resolved to a member of a channel.

    Captures a snapshot of the channel's membership at raise time
    (members, nicks, hostmasks) to aid debugging.
    """

    def __init__(self, name, chan):
        msg = "No such member '{}' in channel '{}'".format(name, chan.name)
        super().__init__(msg)
        self.name = name
        self.chan = chan
        members = list(chan.users.values())
        self.members = members
        self.nicks = [m.user.nick for m in members]
        self.masks = [m.user.mask.mask for m in members]
class ChannelMembersDict(KeyFoldDict):
    """
    Case-insensitive nick -> Member mapping for a single channel.

    Failed lookups raise MemberNotFoundException (with channel context)
    rather than a bare KeyError; the channel is held via weakref to break
    the reference cycle with Channel.users.
    """

    def __init__(self, chan):
        super().__init__()
        self.chan = weakref.ref(chan)

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError as err:
            raise MemberNotFoundException(key, self.chan()) from err

    def __delitem__(self, key):
        try:
            super().__delitem__(key)
        except KeyError as err:
            raise MemberNotFoundException(key, self.chan()) from err

    def pop(self, key, *args, **kwargs):
        try:
            return super().pop(key, *args, **kwargs)
        except KeyError as err:
            raise MemberNotFoundException(key, self.chan()) from err
class KeyFoldWeakValueDict(KeyFoldMixin, weakref.WeakValueDictionary):
    """
    KeyFolded WeakValueDictionary: case-insensitive keys whose values are
    held weakly, so entries vanish when the value is garbage collected.
    """
class ChanDict(KeyFoldDict):
    """
    Case-insensitive channel-name -> Channel mapping for one network.
    """

    def __init__(self, conn):
        """
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        self.conn = weakref.ref(conn)

    def getchan(self, name):
        """
        Return the Channel for *name*, creating and caching it on first use.

        :type name: str
        """
        try:
            return self[name]
        except KeyError:
            chan = self[name] = Channel(name, self.conn())
            return chan
class UsersDict(KeyFoldWeakValueDict):
    """
    Case-insensitive nick -> User mapping for one network (weak values).
    """

    def __init__(self, conn):
        """
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        self.conn = weakref.ref(conn)

    def getuser(self, nick):
        """
        Return the User for *nick*, creating and caching it on first use.

        :type nick: str
        """
        try:
            return self[nick]
        except KeyError:
            user = self[nick] = User(nick, self.conn())
            return user
class MappingAttributeAdapter:
    """
    Adapter that lets item access fall through to attribute access.

    ``obj["x"]`` returns ``obj.x`` when that attribute exists and otherwise
    falls back to the side-band ``data`` dict; ``obj["x"] = v`` assigns the
    attribute when present, otherwise stores into ``data``.
    """

    def __init__(self):
        # Storage for keys that don't correspond to real attributes.
        self.data = {}

    def __getitem__(self, key):
        if hasattr(self, key):
            return getattr(self, key)
        return self.data[key]

    def __setitem__(self, key, value):
        if hasattr(self, key):
            setattr(self, key, value)
        else:
            self.data[key] = value
class Channel(MappingAttributeAdapter):
    """
    Represents a channel and relevant data
    """

    class Member(MappingAttributeAdapter):
        """
        Store a user's membership with the channel
        """

        def __init__(self, user, channel):
            self.user = user
            self.channel = channel
            self.conn = user.conn
            # Prefix statuses (op, voice, ...); kept sorted highest level first.
            self.status = []
            super().__init__()

        def add_status(self, status, sort=True):
            """
            Add a status to this membership
            :type status: plugins.core.server_info.Status
            :type sort: bool
            """
            if status in self.status:
                # Duplicate adds indicate desynced state; log, don't raise.
                logger.warning(
                    "[%s|chantrack] Attempted to add existing status "
                    "to channel member: %s %s",
                    self.conn.name, self, status
                )
            else:
                self.status.append(status)
                if sort:
                    self.sort_status()

        def remove_status(self, status):
            """
            Remove a status from this membership (logs if it wasn't set)
            :type status: plugins.core.server_info.Status
            """
            if status not in self.status:
                logger.warning(
                    "[%s|chantrack] Attempted to remove status not set "
                    "on member: %s %s",
                    self.conn.name, self, status
                )
            else:
                self.status.remove(status)

        def sort_status(self):
            """
            Ensure the status list is properly sorted: deduplicated and
            ordered by descending privilege level.
            """
            status = list(set(self.status))
            status.sort(key=attrgetter("level"), reverse=True)
            self.status = status

    def __init__(self, name, conn):
        """
        :type name: str
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        self.name = name
        # Proxy so this channel doesn't keep the connection alive.
        self.conn = weakref.proxy(conn)
        self.users = ChannelMembersDict(self)
        # True while a /NAMES reply for this channel is being received.
        self.receiving_names = False

    def get_member(self, user, create=False):
        """
        Look up *user*'s membership in this channel, optionally creating it.
        :type user: User
        :type create: bool
        :rtype: Channel.Member
        """
        try:
            data = self.users[user.nick]
        except KeyError:
            if not create:
                raise
            self.users[user.nick] = data = self.Member(user, self)
        return data
class User(MappingAttributeAdapter):
    """
    Represent a user on a network
    """

    def __init__(self, name, conn):
        """
        :type name: str
        :type conn: cloudbot.client.Client
        """
        self.mask = Prefix(name)
        self.conn = weakref.proxy(conn)
        # WHOIS-style metadata; populated lazily by other handlers.
        self.realname = None
        self._account = None
        self.server = None
        self.is_away = False
        self.away_message = None
        self.is_oper = False
        # Channel name -> Member; weak values so parted channels drop out.
        self.channels = KeyFoldWeakValueDict()
        super().__init__()

    def join_channel(self, channel):
        """
        Record this user joining *channel* and return the new membership.
        :type channel: Channel
        """
        self.channels[channel.name] = memb = channel.get_member(
            self, create=True
        )
        return memb

    @property
    def account(self):
        """
        The user's nickserv account
        """
        return self._account

    @account.setter
    def account(self, value):
        # '*' is the IRC wire encoding for "not logged in".
        if value == '*':
            value = None
        self._account = value

    @property
    def nick(self):
        """
        The user's nickname
        """
        return self.mask.nick

    @nick.setter
    def nick(self, value):
        # Rebuild the mask with the new nick, keeping ident and host.
        self.mask = Prefix(value, self.ident, self.host)

    @property
    def ident(self):
        """
        The user's ident/username
        """
        return self.mask.user

    @ident.setter
    def ident(self, value):
        self.mask = Prefix(self.nick, value, self.host)

    @property
    def host(self):
        """
        The user's host/address
        """
        return self.mask.host

    @host.setter
    def host(self, value):
        self.mask = Prefix(self.nick, self.ident, value)
# region util functions
def get_users(conn):
    """
    Fetch the user registry for *conn*, creating it on first use.

    :type conn: cloudbot.client.Client
    :rtype: UsersDict
    """
    try:
        return conn.memory["users"]
    except KeyError:
        users = conn.memory["users"] = UsersDict(conn)
        return users
def get_chans(conn):
    """
    Fetch the channel registry for *conn*, creating it on first use.

    :type conn: cloudbot.client.Client
    :rtype: ChanDict
    """
    try:
        return conn.memory["chan_data"]
    except KeyError:
        chans = conn.memory["chan_data"] = ChanDict(conn)
        return chans
# endregion util functions
def update_chan_data(conn, chan):
    # type: (IrcClient, str) -> None
    """
    Start the process of updating channel data from /NAMES

    :param conn: The current connection
    :param chan: The channel to update
    """
    channel = get_chans(conn).getchan(chan)
    # Reset the flag so the NAMES reply handler knows a fresh listing starts.
    channel.receiving_names = False
    conn.cmd("NAMES", chan)
def up |
leppa/home-assistant | tests/helpers/test_state.py | Python | apache-2.0 | 7,019 | 0.001282 | """Test state helpers."""
import asyncio
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON
from homeassistant.const import (
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_CLOSED,
STATE_HOME,
STATE_LOCKED,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNLOCKED,
)
import homeassistant.core as ha
from homeassistant.helpers import state
from homeassistant.util import dt as dt_util
from tests.common import async_mock_service
async def test_async_track_states(hass):
    """Test AsyncTrackStates context manager.

    Converted from the deprecated ``@asyncio.coroutine`` decorator (removed
    in Python 3.11) to a native coroutine, matching the rest of this module.
    """
    point1 = dt_util.utcnow()
    point2 = point1 + timedelta(seconds=5)
    point3 = point2 + timedelta(seconds=5)

    with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
        mock_utcnow.return_value = point2

        with state.AsyncTrackStates(hass) as states:
            mock_utcnow.return_value = point1
            hass.states.async_set("light.test", "on")

            mock_utcnow.return_value = point2
            hass.states.async_set("light.test2", "on")
            state2 = hass.states.get("light.test2")

            mock_utcnow.return_value = point3
            hass.states.async_set("light.test3", "on")
            state3 = hass.states.get("light.test3")

    # Only states set at/after the tracker's start time (point2) are kept.
    assert [state2, state3] == sorted(states, key=lambda state: state.entity_id)
async def test_call_to_component(hass):
    """Test calls to components state reproduction functions.

    Converted from the deprecated ``@asyncio.coroutine``/``yield from``
    style (removed in Python 3.11) to a native coroutine.
    """
    with patch(
        "homeassistant.components.media_player.reproduce_state.async_reproduce_states"
    ) as media_player_fun:
        media_player_fun.return_value = asyncio.Future()
        media_player_fun.return_value.set_result(None)

        with patch(
            "homeassistant.components.climate.reproduce_state.async_reproduce_states"
        ) as climate_fun:
            climate_fun.return_value = asyncio.Future()
            climate_fun.return_value.set_result(None)

            state_media_player = ha.State("media_player.test", "bad")
            state_climate = ha.State("climate.test", "bad")
            context = "dummy_context"

            await state.async_reproduce_state(
                hass,
                [state_media_player, state_climate],
                blocking=True,
                context=context,
            )

            # Each domain's reproduce function gets only its own states.
            media_player_fun.assert_called_once_with(
                hass, [state_media_player], context=context
            )
            climate_fun.assert_called_once_with(hass, [state_climate], context=context)
async def test_get_changed_since(hass):
    """Test get_changed_since."""
    point1 = dt_util.utcnow()
    point2 = point1 + timedelta(seconds=5)
    point3 = point2 + timedelta(seconds=5)

    with patch("homeassistant.core.dt_util.utcnow", return_value=point1):
        hass.states.async_set("light.test", "on")
        state1 = hass.states.get("light.test")

    with patch("homeassistant.core.dt_util.utcnow", return_value=point2):
        hass.states.async_set("light.test2", "on")
        state2 = hass.states.get("light.test2")

    with patch("homeassistant.core.dt_util.utcnow", return_value=point3):
        hass.states.async_set("light.test3", "on")
        state3 = hass.states.get("light.test3")

    # Only states changed at or after point2 are returned.
    assert state.get_changed_since([state1, state2, state3], point2) == [state2, state3]
async def test_reproduce_with_no_entity(hass):
    """Test reproduce_state with no entity."""
    turn_on_calls = async_mock_service(hass, "light", SERVICE_TURN_ON)

    await state.async_reproduce_state(hass, ha.State("light.test", "on"))
    await hass.async_block_till_done()

    # Nothing exists to reproduce onto: no service call, no state created.
    assert not turn_on_calls
    assert hass.states.get("light.test") is None
async def test_reproduce_turn_on(hass):
    """Test reproduce_state with SERVICE_TURN_ON."""
    turn_on_calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
    hass.states.async_set("light.test", "off")

    await state.async_reproduce_state(hass, ha.State("light.test", "on"))
    await hass.async_block_till_done()

    assert turn_on_calls
    last_call = turn_on_calls[-1]
    assert last_call.domain == "light"
    assert last_call.service == SERVICE_TURN_ON
    assert last_call.data.get("entity_id") == "light.test"
async def test_reproduce_turn_off(hass):
    """Test reproduce_state with SERVICE_TURN_OFF."""
    turn_off_calls = async_mock_service(hass, "light", SERVICE_TURN_OFF)
    hass.states.async_set("light.test", "on")

    await state.async_reproduce_state(hass, ha.State("light.test", "off"))
    await hass.async_block_till_done()

    assert turn_off_calls
    last_call = turn_off_calls[-1]
    assert last_call.domain == "light"
    assert last_call.service == SERVICE_TURN_OFF
    assert last_call.data.get("entity_id") == "light.test"
async def test_reproduce_complex_data(hass):
    """Test reproduce_state with complex service data."""
    turn_on_calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
    hass.states.async_set("light.test", "off")

    rgb = [255, 100, 100]
    await state.async_reproduce_state(
        hass, ha.State("light.test", "on", {"rgb_color": rgb})
    )
    await hass.async_block_till_done()

    # Non-scalar attributes must be forwarded to the service untouched.
    assert turn_on_calls
    last_call = turn_on_calls[-1]
    assert last_call.domain == "light"
    assert last_call.service == SERVICE_TURN_ON
    assert last_call.data.get("rgb_color") == rgb
async def test_reproduce_bad_state(hass):
    """Test reproduce_state with bad state."""
    turn_on_calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
    hass.states.async_set("light.test", "off")

    await state.async_reproduce_state(hass, ha.State("light.test", "bad"))
    await hass.async_block_till_done()

    # An unknown target state must not trigger any service call.
    assert not turn_on_calls
    assert hass.states.get("light.test").state == "off"
async def test_as_number_states(hass):
    """Test state_as_number with states."""
    expected = {
        STATE_OFF: 0,
        STATE_CLOSED: 0,
        STATE_UNLOCKED: 0,
        STATE_BELOW_HORIZON: 0,
        STATE_NOT_HOME: 0,
        STATE_ON: 1,
        STATE_OPEN: 1,
        STATE_LOCKED: 1,
        STATE_ABOVE_HORIZON: 1,
        STATE_HOME: 1,
    }
    for _state, value in expected.items():
        assert state.state_as_number(ha.State("domain.test", _state, {})) == value
async def test_as_number_coercion(hass):
    """Test state_as_number with number."""
    cases = [
        ("0", 0.0), ("0.0", 0.0), (0, 0.0), (0.0, 0.0),
        ("1", 1.0), ("1.0", 1.0), (1, 1.0), (1.0, 1.0),
    ]
    for _state, expected in cases:
        assert state.state_as_number(ha.State("domain.test", _state, {})) == expected
async def test_as_number_invalid_cases(hass):
    """Test state_as_number with invalid cases."""
    for invalid in ("", "foo", "foo.bar", None, False, True, object, object()):
        with pytest.raises(ValueError):
            state.state_as_number(ha.State("domain.test", invalid, {}))
|
codekoala/clip2zeus | clip2zeus/main.py | Python | mit | 581 | 0.006885 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Monitors the system clipboard for text that contains URLs for conversion
using 2ze.us
"""
from clip2zeus.common import *
def main():
    """Parse command-line options and launch the platform Clip2Zeus app."""
    from optparse import OptionParser

    parser = OptionParser()
    # type='int' converts a CLI-supplied port to an integer; optparse would
    # otherwise pass the option argument through as a string.
    parser.add_option('-p', '--port', dest='port', type='int',
                      default=DEFAULT_PORT,
                      help='The port for the daemon to listen on')
    options, args = parser.parse_args()

    params = dict(
        port=options.port,
    )

    clip2zeus = Clip2ZeusApp.for_platform()
    clip2zeus(**params).start()


if __name__ == '__main__':
    main()
|
ouyangshi/ahpucourses | config.py | Python | gpl-3.0 | 1,337 | 0 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    # Base configuration shared by all environments; subclasses pick the
    # database URI and debug flags per environment.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Outgoing mail (QQ SMTP over STARTTLS); credentials come from the
    # environment so they are never committed to source control.
    MAIL_SERVER = 'smtp.qq.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or ''
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or ''
    AHPUCOURSES_MAIL_SUBJECT_PREFIX = '[Ahpucourse]'
    AHPUCOURSES_MAIL_SENDER = 'Ahpucourses Admin <admin@example.com>'
    AHPUCOURSES_ADMIN = 'admin@ahpucourses.com'

    @staticmethod
    def init_app(app):
        """Hook for configuration-specific app setup; no-op in the base class."""
        pass
class DevelopmentConfig(Config):
    # Local development: debug mode on, SQLite database in the project dir.
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    # Test runs: TESTING flag set, isolated SQLite test database.
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    # Production: DATABASE_URL should point at the real database; the
    # SQLite file is only a fallback.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Named configuration registry used by the application factory; 'default'
# is chosen when no explicit configuration name is supplied.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
|
zvezdan/pip | src/pip/_vendor/distlib/database.py | Python | mit | 50,056 | 0.000479 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
    """
    Create an instance from a path, optionally including legacy (distutils/
    setuptools/distribute) distributions.

    :param path: The path to use, as a list of directories. If not specified,
                 sys.path is used.
    :param include_egg: If True, this instance will look for and return legacy
                        distributions as well as those based on PEP 376.
    """
    if path is None:
        path = sys.path
    self.path = path
    self._include_dist = True
    self._include_egg = include_egg

    # Separate caches for PEP 376 (.dist-info) and legacy egg distributions.
    self._cache = _Cache()
    self._cache_egg = _Cache()
    self._cache_enabled = True
    self._scheme = get_scheme('default')
def _get_cache_enabled(self):
    return self._cache_enabled

def _set_cache_enabled(self, value):
    self._cache_enabled = value

# Whether distribution lookups use (and populate) the internal caches.
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
    """
    Clears the internal cache (both the .dist-info and egg caches).
    """
    self._cache.clear()
    self._cache_egg.clear()
def _yield_distributions(self):
    """
    Yield .dist-info and/or .egg(-info) distributions.
    """
    # We need to check if we've seen some resources already, because on
    # some Linux systems (e.g. some Debian/Ubuntu variants) there are
    # symlinks which alias other files in the environment.
    seen = set()
    for path in self.path:
        finder = resources.finder_for_path(path)
        if finder is None:
            continue
        r = finder.find('')
        if not r or not r.is_container:
            continue
        rset = sorted(r.resources)
        for entry in rset:
            r = finder.find(entry)
            if not r or r.path in seen:
                continue
            if self._include_dist and entry.endswith(DISTINFO_EXT):
                # Try each known metadata filename inside the .dist-info dir.
                possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
                for metadata_filename in possible_filenames:
                    metadata_path = posixpath.join(entry, metadata_filename)
                    pydist = finder.find(metadata_path)
                    if pydist:
                        break
                else:
                    # for/else: no metadata file found -> not a usable dist.
                    continue

                with contextlib.closing(pydist.as_stream()) as stream:
                    metadata = Metadata(fileobj=stream, scheme='legacy')
                logger.debug('Found %s', r.path)
                seen.add(r.path)
                yield new_dist_class(r.path, metadata=metadata,
                                     env=self)
            elif self._include_egg and entry.endswith(('.egg-info',
                                                       '.egg')):
                logger.debug('Found %s', r.path)
                seen.add(r.path)
                yield old_dist_class(r.path, self)
def _generate_cache(self):
    """
    Scan the path for distributions and populate the cache with
    those that are found.
    """
    need_dist = not self._cache.generated
    need_egg = self._include_egg and not self._cache_egg.generated
    if not (need_dist or need_egg):
        # Both caches are already populated; nothing to do.
        return
    for dist in self._yield_distributions():
        if isinstance(dist, InstalledDistribution):
            self._cache.add(dist)
        else:
            self._cache_egg.add(dist)

    if need_dist:
        self._cache.generated = True
    if need_egg:
        self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
    """
    Build the ``<name>-<version>.dist-info`` directory name.

    The *name* has any ``'-'`` characters replaced with ``'_'`` (so the
    single ``'-'`` in the result unambiguously separates name from
    version), then the version and the ``.dist-info`` suffix are appended.

    :type name: string
    :type version: string
    :returns: directory name
    :rtype: string
    """
    return '{}-{}{}'.format(name.replace('-', '_'), version, DISTINFO_EXT)
def get_distributions(self):
    """
    Provides an iterator that looks for distributions and returns
    :class:`InstalledDistribution` or
    :class:`EggInfoDistribution` instances for each one of them.

    :rtype: iterator of :class:`InstalledDistribution` and
            :class:`EggInfoDistribution` instances
    """
    if not self._cache_enabled:
        # Caching disabled: re-scan the path on every call.
        for dist in self._yield_distributions():
            yield dist
    else:
        self._generate_cache()

        for dist in self._cache.path.values():
            yield dist

        if self._include_egg:
            for dist in self._cache_egg.path.values():
                yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_c |
atvKumar/open-tamil | transliterate/azhagi.py | Python | mit | 30,836 | 0.042643 | ## -*- coding: utf-8 -*-
# (C) 2013 Muthiah Annamalai
#
# Implementation of Azhagi rules - தமிழ் எழுத்துக்கள்
# Ref: B. Viswanathan, http://www.mazhalaigal.com/tamil/learn/keys.php
#
class Transliteration:
table = {}
# not implemented - many-to-one table
# Azhagi has a many-to-one mapping - using a Tamil language model and Baye's conditional probabilities
# to break the tie when two or more Tamil letters have the same English syllable. Currently
table["a"]=u"அ"
table["aa"]=u"ஆ"
table["A"]=u"ஆ"
table["i"]=u"இ"
table["ee"]=u"ஈ"
table["I"]=u"ஈ"
table["u"]=u"உ"
table["oo"]=u"ஊ"
table["U"]=u"ஊ"
table["e"]=u"எ"
table["E"]=u"ஏ"
table["ae"]=u"ஏ"
table["ai"]=u"ஐ"
table["o"]=u"ஒ"
table["O"]=u"ஓ"
table["oe"]=u"ஓ"
table["oa"]=u"ஓ"
table["ou"]=u"ஔ"
table["au"]=u"ஔ"
table["ow"]=u"ஔ"
table["q"]=u"ஃ"
table["ga"]=u"க"
table["ca"]=u"க"
table["kha"]=u"க"
table["gha"]=u"க"
table["ka"]=u"க"
table["Ka"]=u"க"
table["kaa"]=u"கா"
table["gaa"]=u"கா"
table["caa"]=u"கா"
table["khaa"]=u"கா"
table["ghaa"]=u"கா"
table["kA"]=u"கா"
table["gA"]=u"கா"
table["cA"]=u"கா"
table["khA"]=u"கா"
table["ghA"]=u"கா"
table["ki"]=u"கி"
table["gi"]=u"கி"
table["ci"]=u"கி"
table["khi"]=u"கி"
table["ghi"]=u"கி"
table["kii"]=u"கீ"
table["kee"]=u"கீ"
table["kI"]=u"கீ"
table["gee"]=u"கீ"
table["gI"]=u"கீ"
table["gii"]=u"கீ"
table["cee"]=u"கீ"
table["cI"]=u"கீ"
table["cii"]=u"கீ"
table["khee"]=u"கீ"
table["khI"]=u"கீ"
table["khii"]=u"கீ"
table["ghee"]=u"கீ"
table["ghI"]=u"கீ"
table["ghii"]=u"கீ"
table["gu"]=u"கு"
table["cu"]=u"கு"
table["khu"]=u"கு"
table["ghu"]=u"கு"
table["ku"]=u"கு"
table["koo"]=u"கூ"
table["kU"]=u"கூ"
table["goo"]=u"கூ"
table["gU"]=u"கூ"
table["guu"]=u"கூ"
table["coo"]=u"கூ"
table["cU"]=u"கூ"
table["cuu"]=u"கூ"
table["khoo"]=u"கூ"
table["khU"]=u"கூ"
table["khuu"]=u"கூ"
table["ghoo"]=u"கூ"
table["ghU"]=u"கூ"
table["ghuu"]=u"கூ"
table["kuu"]=u"கூ"
table["ge"]=u"கெ"
table["ce"]=u"கெ"
table["khe"]=u"கெ"
table["ghe"]=u"கெ"
table["ke"]=u"கெ"
table["kE"]=u"கே"
table["gE"]=u"கே"
table["gae"]=u"கே"
table["cE"]=u"கே"
table["cae"]=u"கே"
table["khE"]=u"கே"
table["khae"]=u"கே"
table["ghE"]=u"கே"
table["ghae"]=u"கே"
table["kae"]=u"கே"
table["gai"]=u"கை"
table["cai"]=u"கை"
table["khai"]=u"கை"
table["ghai"]=u"கை"
table["koa"]=u"கை"
table["koh"]=u"கொ"
table["ko"]=u"கொ"
table["kO"]=u"கொ"
table["kO"]=u"கொ"
table["kho"]=u"கோ"
table["kHO"]=u"கோ"
table["kou"]=u"கௌ"
table["kau"]=u"கௌ"
table["gow"]=u"கௌ"
table["gou"]=u"கௌ"
table["gau"]=u"கௌ"
table["cow"]=u"கௌ"
table["cou"]=u"கௌ"
table["cau"]=u"கௌ"
table["khow"]=u"கௌ"
table["khou"]=u"கௌ"
table["khau"]=u"கௌ"
table["ghow"]=u"கௌ"
table["ghou"]=u"கௌ"
table["ghau"]=u"கௌ"
table["kow"]=u"கௌ"
table["g"]=u"க்"
table["c"]=u"க்"
table["kh"]=u"க்"
table["gh"]=u"க்"
table["k"]=u"க்"
table["nga"]=u"ங"
table["nGa"]=u"ங"
table["nGA"]=u"ஙா"
table["ngaa"]=u"ஙா"
table["ngA"]=u"ஙா"
table["nGaa"]=u"ஙா"
table["ngi"]=u"ஙி"
table["nGi"]=u"ஙி"
table["nGee"]=u"ஙீ"
table["nGI"]=u"ஙீ"
table["ngee"]=u"ஙீ"
table["ngI"]=u"ஙீ"
table["ngii"]=u"ஙீ"
table["nGii"]=u"ஙீ"
table["ngu"]=u"ஙு"
table["nGu"]=u"ஙு"
table["nGoo"]=u"ஙூ"
table["nGU"]=u"ஙூ"
table["ngoo"]=u"ஙூ"
table["ngU"]=u"ஙூ"
table["nguu"]=u"ஙூ"
table["nGuu"]=u"ஙூ"
table["nge"]=u"ஙெ"
table["nGe"]=u"ஙெ"
table["nGE"]=u"ஙே"
table["ngE"]=u"ஙே"
table["ngae"]=u"ஙே"
table["nGae"]=u"ஙே"
table["ngai"]=u"ஙை"
table["nGai"]=u"ஙை"
table["ngo"]=u"ஙொ"
table["nGo"]=u"ஙொ"
table["nGO"]=u"ஙோ"
table["nGoe"]=u"ஙோ"
table["ngO"]=u"ஙோ"
table["ngoa"]=u"ஙோ"
table["ngoe"]=u"ஙோ"
table["nGoa"]=u"ஙோ"
table["nGou"]=u"ஙௌ"
table["nGau"]=u"ஙௌ"
table["ngow"]=u"ஙௌ"
table["ngou"]=u"ஙௌ"
table["ngau"]=u"ஙௌ"
table["nGow"]=u"ஙௌ"
table["ng"]=u"ங்"
table["nG"]=u"ங்"
table["cha"]=u"ச"
table["sa"]=u"ச"
table["sA"]=u"சா"
table["chaa"]=u"சா"
table["chA"]=u"சா"
table["saa"]=u"சா"
table["chi"]=u"சி"
table["si"]=u"சி"
table["see"]=u"சீ"
table["sI"]=u"சீ"
table["chee"]=u"சீ"
table["chI"]=u"சீ"
table["chii"]=u"சீ"
table["sii"]=u"சீ"
table["chu"]=u"சு"
table["su"]=u"சு"
table["soo"]=u"சூ"
table["sU"]=u"சூ"
table["choo"]=u"சூ"
table["chU"]=u"சூ"
table["chuu"]=u"சூ"
table["suu"]=u"சூ"
table["che"]=u"செ"
table["se"]=u"செ"
table["sE"]=u"சே"
table["chE"]=u"சே"
table["chae"]=u"சே"
table["sae"]=u"சே"
table["chai"]=u"சை"
table["sai"]=u"சை"
table["cho"]=u"சொ"
table["so"]=u"சொ"
table["sO"]=u"சோ"
table["soe"]=u"சோ"
table["chO"]=u"சோ"
table["choa"]=u"சோ"
table["choe"]=u"சோ"
table["soa"]=u"சோ"
table["sou"]=u"சௌ"
table["sau"]=u"சௌ"
table["chow"]=u"சௌ"
table["chou"]=u"சௌ"
table["chau"]=u"சௌ"
table["sow"]=u"சௌ"
table["ch"]=u"ச்"
table["s"]=u"ச்"
table["gna"]=u"ஞ"
table["nja"]=u"ஞ"
table["Gna"]=u"ஞ"
table["GnA"]=u"ஞா"
table["gnaa"]=u"ஞா"
table["gnA"]=u"ஞா"
table["njaa"]=u"ஞா"
table["njA"]=u"ஞா"
table["Gnaa"]=u"ஞா"
table["gni"]=u"ஞி"
table["nji"]=u"ஞி"
table["Gni"]=u"ஞி"
table["Gnee"]=u"ஞீ"
table["GnI"]=u"ஞீ"
table["gnee"]=u"ஞீ"
table["gnI"]=u"ஞீ"
table["gnii"]=u"ஞீ"
table["njee"]=u"ஞீ"
table["njI"]=u"ஞீ"
table["njii"]=u"ஞீ"
table["Gnii"]=u"ஞீ"
table["gnu"]=u"ஞு"
table["nju"]=u"ஞு"
table["Gnu"]=u"ஞு"
table["Gnoo"]=u"ஞூ"
table["GnU"]=u"ஞூ"
table["gnoo"]=u"ஞூ"
table["gnU"]=u"ஞூ"
table["gnuu"]=u"ஞூ"
table["njoo"]=u"ஞூ"
table["njU"]=u"ஞூ"
table["njuu"]=u"ஞூ"
table["Gnuu"]=u"ஞூ"
table["gne"]=u"ஞெ"
tab | le["nje"]=u"ஞெ"
table["Gne"]=u"ஞெ"
table["GnE"]=u"ஞே"
table["gnE"]=u"ஞே"
table["gnae"]=u"ஞே"
table["njE"]=u"ஞே"
table["njae"]=u"ஞே"
table["Gnae"]=u"ஞே"
table["gnai"]=u"ஞை"
table["njai"]=u"ஞை"
table["Gnai"]=u"ஞை"
table["gno"]=u"ஞொ"
table["njo"]=u"ஞொ"
table["Gno"]=u"ஞொ"
table["GnO"]=u"ஞோ"
table["Gnoe"]=u"ஞோ"
table["gnO"]=u"ஞ | ோ"
table["gnoa"]=u"ஞோ"
table["gnoe"]=u"ஞோ"
table["njO"]=u"ஞோ"
table["njoa"]=u"ஞோ"
table["njoe"]=u"ஞோ"
table["Gnoa"]=u"ஞோ"
table["Gnou"]=u"ஞௌ"
table["Gnau"]=u"ஞௌ"
table["gnow"]=u"ஞௌ"
table["gnou"]=u"ஞௌ"
table["gnau"]=u"ஞௌ"
table["njow"]=u"ஞௌ"
table["njou"]=u"ஞௌ"
table["njau"]=u"ஞௌ"
table["Gnow"]=u"ஞௌ"
table["gn"]=u"ஞ்"
table["nj"]=u"ஞ்"
table["Gn"]=u"ஞ்"
table["da"]=u"ட"
table["ta"]=u"ட"
table["tA"]=u"டா"
table["daa"]=u"டா"
table["dA"]=u"டா"
table["taa"]=u"டா"
table["di"]=u"டி"
table["ti"]=u"டி"
table["tee"]=u"டீ"
table["tI"]=u"டீ"
table["dee"]=u"டீ"
table["dI"]=u"டீ"
table["dii"]=u"டீ"
table["tii"]=u"டீ"
table["du"]=u"டு"
table["tu"]=u"டு"
table["too"]=u"டூ"
table["tU"]=u"டூ"
table["doo"]=u"டூ"
table["dU"]=u"டூ"
table["duu"]=u"டூ"
table["tuu"]=u"டூ"
table["de"]=u"டெ"
table["te"]=u"டெ"
table["tE"]=u"டே"
table["dE"]=u"டே"
table["dae"]=u"டே"
table["tae"]=u"டே"
table["dai"]=u"டை"
table["tai"]=u"டை"
table["do"]=u"டொ"
table["to"]=u"டொ"
table["tO"]=u"டோ"
table["toe"]=u"டோ"
table["dO"]=u"டோ"
table["doa"]=u"டோ"
table["doe"]=u"டோ"
table["toa"]=u"டோ"
table["tou"]=u"டௌ"
table["tau"]=u"டௌ"
table["dow"]=u"டௌ"
table["dou"]=u"டௌ"
table["dau"]=u"டௌ"
table["tow"]=u"டௌ"
table["d"]=u"ட்"
table["T"]=u"ட்"
table["t"]=u"ட்"
table["nda"]=u"ண"
table["Na"]=u"ண"
table["NA"]= |
joshua-cogliati-inl/raven | tests/framework/PostProcessors/Validation/DSS/lorentzAttractor_timeScale_I.py | Python | apache-2.0 | 2,032 | 0.023622 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from wikipedia: dx/dt = sigma*(y-x) ; dy/dt = x*(rho-z)-y dz/dt = x*y-beta*z
import numpy as np
def initialize(self, runInfoDict, inputFiles):
    """
    Set the canonical Lorenz system parameters on the model instance.
    @ In, runInfoDict, dict, dictionary of run info (unused here)
    @ In, inputFiles, list, data objects for initialization (unused here)
    @ Out, None
    """
    self.sigma = 10.0
    self.rho = 28.0
    self.beta = 8.0 / 3.0
def run(self, Input):
    """
    Integrate a time-scaled Lorenz system with explicit Euler stepping.
    @ In, Input, dict, input values; expects entries 'x0', 'y0', 'z0'
    @ Out, None (trajectories stored on self as x1, y1, z1 and time1)
    """
    disc = 2.0        # time-scale factor applied to the dynamics
    maxTime = 0.5
    tStep = 0.005
    self.sigma = 10.0
    self.rho = 28.0
    self.beta = 8.0 / 3.0

    nSteps = int(maxTime / tStep)
    self.x1 = np.zeros(nSteps)
    self.y1 = np.zeros(nSteps)
    self.z1 = np.zeros(nSteps)
    self.time1 = np.zeros(nSteps)

    self.x0 = Input['x0']
    self.y0 = Input['y0']
    self.z0 = Input['z0']
    self.x1[0] = Input['x0']
    self.y1[0] = Input['y0']
    self.z1[0] = Input['z0']
    self.time1[0] = 0.0

    for k in range(nSteps - 1):
        x, y, z = self.x1[k], self.y1[k], self.z1[k]
        self.time1[k + 1] = self.time1[k] + tStep
        self.x1[k + 1] = x + disc * self.sigma * (y - x) * tStep
        self.y1[k + 1] = y + disc * (x * (self.rho - z) - y) * tStep
        self.z1[k + 1] = z + disc * (x * y - self.beta * z) * tStep
|
richm/designate | designate/backend/impl_nsd4slave.py | Python | apache-2.0 | 4,900 | 0 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Artom Lifshitz <artom.lifshitz@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import os
import socket
import ssl
from designate import exceptions
from designate.backend import base
from designate.openstack.common import log as logging
from oslo.config import cfg
LOG = logging.getLogger(__name__)
CFG_GRP = 'backend:nsd4slave'
cfg.CONF.register_group(
cfg.OptGroup(name=CFG_GRP, title='Configuration for NSD4-slave backend')
)
cfg.CONF.register_opts([
cfg.StrOpt('keyfile', default='/etc/nsd/nsd_control.key',
help='Keyfile to use when connecting to the NSD4 servers over '
'SSL'),
cfg.StrOpt('certfile', default='/etc/nsd/nsd_control.pem',
help='Certfile to use when connecting to the NSD4 servers over '
'SSL'),
cfg.ListOpt('servers',
help='Comma-separated list of servers to control, in '
' <host>:<port> format. If <port> is omitted, '
' the default 8952 is used.'),
cfg.StrOpt('pattern', default='slave',
help='Pattern to use when creating zones on the NSD4 servers. '
'This pattern must be identically configured on all NSD4 '
'servers.'),
], group=CFG_GRP)
DEFAULT_PORT = 8952
class NSD4SlaveBackend(base.Backend):
    """Designate backend managing zones on a pool of NSD4 servers.

    Speaks the NSDCT1 control protocol (the wire protocol of nsd-control)
    to every configured server over an SSL socket. Only zone add/delete
    commands are issued here; the record-level hooks below are no-ops,
    presumably because record data reaches the slaves out of band (e.g.
    via zone transfer) — TODO confirm against the designate docs.
    """
    __plugin__name__ = 'nsd4slave'
    NSDCT_VERSION = 'NSDCT1'

    def __init__(self, central_service):
        self._keyfile = cfg.CONF[CFG_GRP].keyfile
        self._certfile = cfg.CONF[CFG_GRP].certfile
        # Make sure keyfile and certfile are readable to avoid cryptic SSL
        # errors later
        if not os.access(self._keyfile, os.R_OK):
            raise exceptions.NSD4SlaveBackendError(
                'Keyfile %s missing or permission denied' % self._keyfile)
        if not os.access(self._certfile, os.R_OK):
            raise exceptions.NSD4SlaveBackendError(
                'Certfile %s missing or permission denied' % self._certfile)
        self._pattern = cfg.CONF[CFG_GRP].pattern
        try:
            self._servers = [self._parse_server(cfg_server)
                             for cfg_server in cfg.CONF[CFG_GRP].servers]
        except TypeError:
            # cfg.CONF[CFG_GRP].servers is None when the option is unset,
            # which makes the iteration above raise TypeError.
            raise exceptions.ConfigurationError('No NSD4 servers defined')

    def _parse_server(self, cfg_server):
        """Parse a '<host>[:<port>]' string into a {'host', 'port'} dict.

        The port defaults to DEFAULT_PORT (8952) when omitted or when the
        value after ':' is not an integer.
        """
        try:
            (host, port) = cfg_server.split(':')
            port = int(port)
        except ValueError:
            # No ':' present (or a malformed port): treat the whole value
            # as a host name and fall back to the default port.
            host = str(cfg_server)
            port = DEFAULT_PORT
        return {'host': host, 'port': port}

    def create_domain(self, context, domain):
        """Add the zone on every server, using the configured NSD pattern."""
        command = 'addzone %s %s' % (domain['name'], self._pattern)
        self._all_servers(command)

    def update_domain(self, context, domain):
        # Nothing to do on zone metadata updates.
        pass

    def delete_domain(self, context, domain):
        """Remove the zone from every server."""
        command = 'delzone %s' % domain['name']
        self._all_servers(command)

    def _all_servers(self, command):
        """Run *command* on every configured server.

        Raises NSD4SlaveBackendError on the first transport failure or any
        reply other than 'ok'.
        """
        for server in self._servers:
            try:
                result = self._command(command, server['host'], server['port'])
            except (ssl.SSLError, socket.error) as e:
                raise exceptions.NSD4SlaveBackendError(e)
            if result != 'ok':
                raise exceptions.NSD4SlaveBackendError(result)

    def _command(self, command, host, port):
        """Send one control command over SSL; return the stripped reply."""
        sock = eventlet.wrap_ssl(eventlet.connect((host, port)),
                                 keyfile=self._keyfile,
                                 certfile=self._certfile)
        stream = sock.makefile()
        stream.write('%s %s\n' % (self.NSDCT_VERSION, command))
        stream.flush()
        result = stream.read()
        stream.close()
        sock.close()
        return result.rstrip()

    # The record/recordset/server hooks are intentionally no-ops for this
    # backend (see the class docstring).
    def update_recordset(self, context, domain, recordset):
        pass

    def delete_recordset(self, context, domain, recordset):
        pass

    def create_record(self, context, domain, recordset, record):
        pass

    def update_record(self, context, domain, recordset, record):
        pass

    def delete_record(self, context, domain, recordset, record):
        pass

    def create_server(self, context, server):
        pass

    def update_server(self, context, server):
        pass

    def delete_server(self, context, server):
        pass
|
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/static_lsps/__init__.py | Python | apache-2.0 | 13,426 | 0.001266 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import static_lsp
class static_lsps(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/static-lsps. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: statically configured LSPs, without dynamic
    signaling
    """

    __slots__ = ("_path_helper", "_extmethods", "__static_lsp")

    _yang_name = "static-lsps"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__static_lsp = YANGDynClass(
            base=YANGListType(
                "name",
                static_lsp.static_lsp,
                yang_name="static-lsp",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="name",
                extensions=None,
            ),
            is_container="list",
            yang_name="static-lsp",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # The single positional argument must carry every pyangbind
            # element of this container; copy over only changed members.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances", "network-instance", "mpls", "lsps", "static-lsps"
            ]

    def _get_static_lsp(self):
        """
        Getter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)

        YANG Description: list of defined static LSPs
        """
        return self.__static_lsp

    def _set_static_lsp(self, v, load=False):
        """
        Setter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_static_lsp is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_static_lsp() directly.

        YANG Description: list of defined static LSPs
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "name",
                    static_lsp.static_lsp,
                    yang_name="static-lsp",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="name",
                    extensions=None,
                ),
                is_container="list",
                yang_name="static-lsp",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """static_lsp must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("name",static_lsp.static_lsp, yang_name="static-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="static-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )
        self.__static_lsp = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_static_lsp(self):
        self.__static_lsp = YANGDynClass(
            base=YANGListType(
                "name",
                static_lsp.static_lsp,
                yang_name="static-lsp",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="name",
                extensions=None,
            ),
            is_container="list",
            yang_name="static-lsp",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

    static_lsp = __builtin__.property(_get_static_lsp, _set_static_lsp)

    _pyangbind_elements = OrderedDict([("static_lsp", static_lsp)])
from . import static_lsp
class static_lsps(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/static-lsps. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: statically configured LSPs, without dynamic
signaling
"""
__slots__ = ("_path_helper", "_extmethods", "__static_lsp")
_yang_name = "static-lsps"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__static_lsp = YANGDynClass(
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
|
meigrafd/Sample-Code | serial_input.py | Python | mit | 1,579 | 0.010766 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import serial
import time
import sys
#-------------------------------------------------------------------
# Configure the serial port; it is opened explicitly below.
ser = serial.Serial()
ser.port = "/dev/ttyUSB0"
ser.baudrate = 38400
ser.bytesize = serial.EIGHTBITS     # number of bits per byte
ser.parity = serial.PARITY_NONE     # set parity check: no parity
ser.stopbits = serial.STOPBITS_ONE  # number of stop bits
#ser.timeout = None                 # block read
ser.timeout = 1                     # non-block read (1 s)
#ser.timeout = 2                    # timeout block read
ser.xonxoff = False                 # disable software flow control
ser.rtscts = False                  # disable hardware (RTS/CTS) flow control
ser.dsrdtr = False                  # disable hardware (DSR/DTR) flow control
ser.writeTimeout = 2                # timeout for write
#-------------------------------------------------------------------
try:
    ser.open()
except Exception as e:
    print("Error open serial port: " + str(e))
    sys.exit()

if ser.isOpen():
    try:
        while True:
            # raw_input() does not exist on Python 3 (see shebang);
            # input() is the correct builtin here.
            Eingabe = input("Was soll gesendet werden? > ")
            if not Eingabe:
                print("Bitte irgend etwas eingeben!")
                continue
            response = None
            # Bugfix: the original printed the builtin `input` instead of
            # the text that was actually entered.
            print("Sende: %s" % Eingabe)
            # pyserial on Python 3 expects bytes, not str.
            ser.write((Eingabe + "\n").encode())
            while response is None:
                time.sleep(0.01)
                response = ser.readline().strip()
            print("Antwort: %s" % response.decode(errors="replace"))
    except (KeyboardInterrupt, SystemExit):
        print("\nSchliesse Programm..\n")
        if ser:
            ser.close()
    except Exception as e:
        print("Error...: " + str(e))
rizac/gfzreport | gfzreport/sphinxbuild/core/extensions/setup.py | Python | gpl-3.0 | 9,566 | 0.003868 | '''
Simple sphinx extension which sets up the necessary translators (html, latex) and other stuff
It is also a collection of trial and errors mostly commented and left here as a reminder in order
Created on Apr 4, 2016
@author: riccardo
'''
import re
import sys
import os
from os import path
from gfzreport.sphinxbuild.core.writers.latex import LatexTranslator
from gfzreport.sphinxbuild.core.writers.html import HTMLTranslator
from docutils import nodes
# from docutils import nodes
# the sphinx environment, saved here for access from other extensions
# FIXME!!! if in sphinx config you add:
# sys.path.insert(0, os.path.abspath("../../reportbuild/core"))
# and then extensions = ['extensions.setup', ...]
# GLOBAL VARIABLES BELOW ARE NOT SET! One should inspect if it is a problem with sys.path.insert
# or due to the fact that the package reportbuild is already installed
# However, the problem has been fixed by removing sys.path.insert and by sepcifying the full path
# in extensions dict (see conf.py)
sphinxapp = None
def app_builder_inited(app_):
    """'builder-inited' hook: store the Sphinx app in module-global ``sphinxapp``.

    Other helpers in this module (e.g. ``relfn2path``) read the global.
    """
    global sphinxapp
    sphinxapp = app_
# this keyword is used to replace any | in e.g., |bibname| so that we do not issue a reference
# warning. Later, we will replace any literal bibfieldreplkwd + bibname + bibfieldreplkwd
# PLEASE SUPPORT ONLY CHARS THAT DO NOT NEED TO BE REGEX ESCAPED!!
bibfieldreplkwd = "___bfrt___"
def relfn2path(filename):
    """Resolve *filename* against the Sphinx source directory.

    Paths that are already absolute (per ``os.path.isabs``) are returned
    untouched; relative ones are joined onto ``sphinxapp.env.srcdir`` and
    normalized (``path.abspath`` removes artifacts such as ``..``).

    NOTE: ``sphinxapp.env.relfn2path`` is deliberately not used here — it
    relies on ``app.env.docname``, which is not reliably set when our
    custom directives call this helper.
    """
    if os.path.isabs(filename):
        return filename
    try:
        return path.abspath(path.join(sphinxapp.env.srcdir, filename))
    except UnicodeDecodeError:
        # Source directory is a bytestring with non-ASCII characters:
        # retry with the filename encoded in the filesystem encoding.
        encoded_name = filename.encode(sys.getfilesystemencoding())
        return path.abspath(path.join(sphinxapp.env.srcdir, encoded_name))
def app_source_read(app, docname, source):
    """'source-read' hook: normalize markdown-style headers, then $-math.

    Referenced-bib-field warnings (if any) are printed to stdout here and
    cannot be suppressed; a later hook (``app_doctree_read``) is the place
    where such issues would be fixed up.
    """
    text = source[0]
    text = normalize_sec_headers(text)
    text = replace_math_dollar(text)
    source[0] = text
def replace_math_dollar(source):
    """Convert ``$...$`` spans (with unescaped dollars) into ``:math:`` roles.

    A dollar preceded by a backslash is treated as a literal and left alone.
    """
    dollar_math = re.compile(r"(?<!\\)\$(.*?)(?<!\\)\$")
    return dollar_math.sub(r" :math:`\1` ", source)
def normalize_sec_headers(string, list_of_under_and_overlines=(("#", "#"),
                                                               ("*", "*"),
                                                               "=",
                                                               "-",
                                                               "^",
                                                               "\"")):
    """Normalizes section titles by replacing all github markdown section title symbols (#)
    with the corresponding rst one. Note that contrarily to the github markdown style, rst
    understands the nesting level of sections by means of clustering the same symbols. If not
    otherwise specified in list_of_under_and_overlines, the correspondence is:

    Markdown symbol rst symbol        coventionally assigned for
    =============== ================= ============================================================
    #               # (with overline) parts
    ##              * (with overline) chapters
    ###             =                 sections
    ####            -                 subsections
    #####           ^                 subsubsections
    ######          "                 paragraphs

    * Note: that's a convention, it does not correspond to the latex keyword

    For info see
    - http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#sections
    - http://www.sphinx-doc.org/en/stable/rest.html#sections

    :param: string the string to be searched and substituted
    :param: list_of_under_and_overlines a sequence of 6 elements specifying the rst decorators.
        If missing, defaults to the sequence representing the table above. Each element is
        either a character string, denoting the underline symbol, or a list/tuple of 2
        character strings, denoting under- and overline symbols, respectively.
    """
    # Fixes over the previous revision:
    # - default argument is now an (immutable) tuple, avoiding the
    #   mutable-default-argument pitfall;
    # - an explicit isinstance check replaces hasattr(decorator, "__iter__"),
    #   which treats plain strings differently on Python 2 vs Python 3 and
    #   contradicted the documented contract ("a character string denotes
    #   the underline symbol").
    reg = re.compile("^(#+)\\s(.*?)$", re.MULTILINE)
    # Iterate matches in reverse so earlier match offsets stay valid while
    # the string is being rewritten.
    for matchobj in list(reg.finditer(string))[::-1]:
        grp = matchobj.groups()
        if len(grp) == 2:  # groups count starts from group 0
            indx = len(grp[0])
            decorator = list_of_under_and_overlines[min(6, indx) - 1]
            str_to_decorate = grp[1]
            if isinstance(decorator, (list, tuple)):
                decorator_args = list(decorator)
            else:
                decorator_args = [decorator]
            rst_str = decorate_title(str_to_decorate, *decorator_args)
            string = string[:matchobj.start()] + rst_str + string[matchobj.end():]
    return string
def decorate_title(string, underline_symbol, overline_symbol=None):
    """
    Decorates a string title as in rst format

    :param string: the string representing the title
    :param underline_symbol: A 1-character string, representing the decorator character for
        underline
    :param overline_symbol: A 1-character string, representing the decorator character for
        overline

    :Examples:
    decorate_title("sea", "=") returns:
    sea
    ===

    decorate_title("sea", "=", "-") returns:
    ---
    sea
    ===
    """
    # Note: the decorator lines are exactly as long as the title, which is
    # the minimum length rst accepts for section adornments.
    lens = len(string)
    string = "{0}\n{1}".format(string, lens * underline_symbol)
    if overline_symbol:
        string = "{0}\n{1}".format(lens * overline_symbol, string)
    return string
def app_doctree_read(app, doctree):
    """'doctree-read' hook, kept as a placeholder for doctree fix-ups.

    Bib-field replacement resolution was dropped (highly unmaintainable and
    not yet required); the traversals below are intentionally no-ops that
    show where such modifications would go, e.g.::

        for obj in doctree.traverse(nodes.problematic):
            ...  # your code here
    """
    for _ in doctree.traverse(nodes.problematic):
        pass
    for _ in doctree.traverse(nodes.Element):
        pass
def missing_reference(app, env, node, contnode):
    """No-op handler for Sphinx's 'missing-reference' event.

    This was a test to see if it handled missing text substitution
    references, but it doesn't. Will be removed in the future.
    """
    pass
# About 'missing-reference': it is emitted when a cross-reference to a Python
# module or object cannot be resolved. If the event handler can resolve the
# reference, it should return a new docutils node to be inserted in the
# document tree in place of the node `node`. Usually this node is a reference
# node containing `contnode` as a child.
mattbernst/polyhartree | support/ansible/modules/core/cloud/rackspace/rax_cdb_database.py | Python | gpl-3.0 | 4,837 | 0.000413 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
module: rax_cdb_database |
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
name:
description:
- Name to give to the database
default: null
character_set:
| description:
- Set of symbols and encodings
default: 'utf8'
collate:
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_database(instance, name):
    """Look up database *name* on the given Cloud Databases instance.

    Returns the database object on success, or False when the lookup
    raises (e.g. the database does not exist).
    """
    try:
        found = instance.get_database(name)
    except Exception:
        return False
    return found
def save_database(module, cdb_id, name, character_set, collate):
    """Ensure database *name* exists on instance *cdb_id* (idempotent).

    Exits the Ansible module via ``module.exit_json`` with changed=True
    only when the database had to be created; any pyrax error terminates
    the module via ``module.fail_json``.

    NOTE: Python 2 ``except Exception, e`` syntax is kept on purpose —
    this module targets the legacy Ansible/pyrax stack.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    except Exception, e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    database = find_database(instance, name)
    # Only create when the lookup failed; otherwise report unchanged.
    if not database:
        try:
            database = instance.create_database(name=name,
                                                character_set=character_set,
                                                collate=collate)
        except Exception, e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='create',
                     database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
    """Ensure database *name* is absent on instance *cdb_id* (idempotent).

    Exits the Ansible module via ``module.exit_json`` with changed=True
    only when a database was actually deleted; pyrax errors terminate the
    module via ``module.fail_json``.

    NOTE: Python 2 ``except Exception, e`` syntax is kept on purpose —
    this module targets the legacy Ansible/pyrax stack.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    except Exception, e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    database = find_database(instance, name)
    # Only delete when the database actually exists.
    if database:
        try:
            database.delete()
        except Exception, e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='delete',
                     database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
    """Dispatch on the desired *state* to the create or delete handler."""
    actions = {
        'present': lambda: save_database(module, cdb_id, name,
                                         character_set, collate),
        'absent': lambda: delete_database(module, cdb_id, name),
    }
    action = actions.get(state)
    if action is not None:
        action()
def main():
    """Module entry point: build the argument spec, validate, dispatch."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            cdb_id=dict(type='str', required=True),
            name=dict(type='str', required=True),
            character_set=dict(type='str', default='utf8'),
            collate=dict(type='str', default='utf8_general_ci'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    setup_rax_module(module, pyrax)
    rax_cdb_database(module, params.get('state'), params.get('cdb_id'),
                     params.get('name'), params.get('character_set'),
                     params.get('collate'))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
|
fangxinmiao/projects | Architeture/Backend/Framework/WebBenchmarker/frameworks/Python/django/helloworld/manage.py | Python | gpl-3.0 | 630 | 0 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helloworld.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
lariodiniz/OramaBankTest | OramaBank/pynetbanking/tests/test_views.py | Python | gpl-2.0 | 2,719 | 0.002575 | # coding: utf-8
#--------------//////////----------------------
#Projeto Criado por: Lário Diniz
#Contatos: developer.lario@gmail.com
#data: 29/09/2015
#--------------//////////----------------------
from django.test import TestCase
from django.test import Client
from ..models import User, Cliente_Model
class loginViewTest(TestCase):
    """Checks for the /login/ page."""

    def setUp(self):
        self.resp = self.client.get('/login/')

    def test_get(self):
        'The /login/ page answers with HTTP status 200.'
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        'The expected template is used to render the page.'
        self.assertTemplateUsed(self.resp, 'pynetbanking/login.html')

    def test_html(self):
        'Key markup fragments appear in the rendered page.'
        expected = (
            ('<div class="navbar-wrapper"', None),
            ('<form', None),
            ('<input', 4),
            ('type="text"', 2),
            ('type="submit"', None),
        )
        for fragment, count in expected:
            self.assertContains(self.resp, fragment, count)
class newclintViewTest(TestCase):
    """Checks for the /newclint/ page."""

    def setUp(self):
        self.resp = self.client.get('/newclint/')

    def test_get(self):
        'The /newclint/ page answers with HTTP status 200.'
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        'The expected template is used to render the page.'
        self.assertTemplateUsed(self.resp, 'pynetbanking/newclint.html')

    def test_html(self):
        'Key markup fragments appear in the rendered page.'
        expected = (
            ('<div class="navbar-wrapper"', None),
            ('<form', None),
            ('<input', 9),
            ('type="text"', 4),
            ('type="submit"', None),
        )
        for fragment, count in expected:
            self.assertContains(self.resp, fragment, count)
"""
class clintViewTest(TestCase):
def setUp(self):
obj=User.objects.create(
username='zezinhio',
password='12345abc'
)
obj.save()
cliente=Cliente_Model.objects.create(
user=obj,
codigo='dasdasdasd',
cpf='12345678901',
#slug='zezinhio'
)
cliente.save()
client=Client()
client.login(username=obj.username, password= | obj.password)
self.resp = self.client.get('/clint/%s' %cliente.slug,secure=True )
print self.resp
def test_get(self):
'Verifica o staus code 200 da pagina /clint/'
self.assertEqual(200, self.resp.status_code)
def test_template(self):
'Verifica o Template Usado'
self.assertTemplateUsed(self.resp, 'pynetbanking/clint.html')
def test_html(self):
'Verifica alguns pontos no html do template'
| pass
"""
|
DrXyzzy/smc | src/smc_pyutil/smc_pyutil/smc_close.py | Python | agpl-3.0 | 2,160 | 0.003704 | #!/usr/bin/python
MAX_FILES = 100
import json, os, sys
home = os.environ['HOME']
if 'TMUX' in os.environ:
prefix = '\x1bPtmux;\x1b'
postfix = '\x1b\\'
else:
prefix = ''
postfix = ''
def process(paths):
    """Ask the CoCalc editor to close every path in *paths*.

    Emits a 'close' control message as a terminal escape sequence on stdout
    (wrapped for tmux when running inside one, via the module-level
    prefix/postfix). At most MAX_FILES paths are handled per call.
    """
    v = []
    if len(paths) > MAX_FILES:
        sys.stderr.write(
            "You may close at most %s at once using the open command; truncating list\n"
            % MAX_FILES)
        paths = paths[:MAX_FILES]
    for path in paths:
        if not path:
            continue
        if not os.path.exists(path) and any(c in path for c in '{?*'):
            # If the path doesn't exist and does contain a shell glob character which didn't get
            # expanded, then don't try to just create that file.
            # See https://github.com/sagemathinc/cocalc/issues/1019
            sys.stderr.write("no match for '%s', so not closing\n" % path)
            continue
        if not os.path.exists(path):
            # Doesn't exist, so doesn't matter
            continue
        if not path.startswith('/'):
            # We use `pwd` instead of getcwd or os.path.abspath since we want this to
            # work when used inside a directory that is a symlink! I could find
            # no analogue of pwd directly in Python (getcwd is not it!).
            path = os.path.join(os.popen('pwd').read().strip(), path)
        # Determine name relative to home directory.
        if path.startswith(home):
            name = path[len(home) + 1:]
        else:
            name = path
        # Is it a file or a directory?
        if os.path.isdir(path):
            v.append({'directory': name})
        else:
            v.append({'file': name})
    if v:
        mesg = {'event': 'close', 'paths': v}
        print(prefix + '\x1b]49;%s\x07' % json.dumps(
            mesg, separators=(',', ':')) + postfix)
def main():
    """CLI entry point: print usage without arguments, else close paths."""
    args = sys.argv[1:]
    if not args:
        print("Usage: close [path names] ...")
        print("Closes each file (or directory) in the CoCalc web-based editor from the shell.")
        print("If the named file doesn't exist, it is created.")
        return
    process(args)


if __name__ == "__main__":
    main()
|
idea4bsd/idea4bsd | python/testData/formatter/spaceAfterTrailingCommaIfNoSpaceAfterCommaButWithinBracesOrBrackets.py | Python | apache-2.0 | 126 | 0.031746 | s1 = {1, 2, 3 | ,}
s2 = {1, }
d1 = {'foo': 1, 'bar': 2, 'baz': 3,}
d2 = {'foo': 1,}
d3 = {}
l1 = [1, 2 | , 3,]
l2 = [1, ]
l3 = []
|
rackerlabs/quark | quark/api/extensions/ip_addresses.py | Python | apache-2.0 | 6,441 | 0 | # Copyright (c) 2013 Rackspace Hosting Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api import extensions
from neutron import manager
from neutron import wsgi
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
import webob
RESOURCE_NAME = 'ip_address'
RESOURCE_COLLECTION = RESOURCE_NAME + "es"
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
attr_dict[RESOURCE_NAME] = {'allow_post': True,
'allow_put': True,
'is_visible': True}
SUB_RESOURCE_ATTRIBUTE_MAP = {
'ports': {
'parent': {'collection_name': 'ip_addresses',
'member_name': 'ip_address'}
}
}
LOG = logging.getLogger(__name__)
class IpAddressesController(wsgi.Controller):
    """WSGI controller for the /ip_addresses resource.

    Each action is a thin wrapper around the plugin, translating neutron
    exceptions into the matching webob HTTP errors.
    """

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin

    def index(self, request):
        """List IP addresses; query-string arguments are forwarded as filters."""
        context = request.context
        return {"ip_addresses":
                self._plugin.get_ip_addresses(context, **request.GET)}

    def show(self, request, id):
        """Show a single IP address by id (404 when unknown)."""
        context = request.context
        try:
            return {"ip_address":
                    self._plugin.get_ip_address(context, id)}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)

    def create(self, request, body=None):
        """Create an IP address from the deserialized request body."""
        body = self._deserialize(request.body, request.get_content_type())
        try:
            return {"ip_address": self._plugin.create_ip_address(
                request.context, body)}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)
        except n_exc.Conflict as e:
            raise webob.exc.HTTPConflict(e)
        except n_exc.BadRequest as e:
            raise webob.exc.HTTPBadRequest(e)

    def update(self, request, id, body=None):
        """Update an existing IP address from the deserialized body."""
        body = self._deserialize(request.body, request.get_content_type())
        try:
            return {"ip_address": self._plugin.update_ip_address(
                request.context, id, body)}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)
        except n_exc.BadRequest as e:
            raise webob.exc.HTTPBadRequest(e)

    def delete(self, request, id):
        """Delete an IP address by id (404 when unknown)."""
        context = request.context
        try:
            return self._plugin.delete_ip_address(context, id)
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)
        except n_exc.BadRequest as e:
            raise webob.exc.HTTPBadRequest(e)
class IpAddressPortController(wsgi.Controller):
    """Sub-resource controller for the ports bound to an IP address.

    Handles /ip_addresses/{ip_address_id}/ports; create and delete are not
    supported through this sub-resource.
    """

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin

    def _clean_query_string(self, request, filters):
        """Move the recognized filter keys from the query string into *filters*."""
        clean_list = ['id', 'device_id', 'service']
        for clean in clean_list:
            if clean in request.GET:
                filters[clean] = request.GET[clean]
                del request.GET[clean]

    def index(self, ip_address_id, request):
        """List the ports associated with the given IP address."""
        context = request.context
        filters = {}
        self._clean_query_string(request, filters)
        fx = self._plugin.get_ports_for_ip_address
        try:
            ports = fx(context, ip_address_id, filters=filters, **request.GET)
            return {"ip_addresses_ports": ports}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)

    def create(self, request, **kwargs):
        """Creating a port through this sub-resource is not supported."""
        raise webob.exc.HTTPNotImplemented()

    def show(self, ip_address_id, request, id):
        """Show a single port bound to the given IP address."""
        context = request.context
        # TODO(jlh): need to ensure ip_address_id is used to filter port
        try:
            return {"ip_addresses_port":
                    self._plugin.get_port_for_ip_address(context,
                                                         ip_address_id, id)}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)

    def update(self, ip_address_id, request, id, body=None):
        """Update the port association from the deserialized request body."""
        body = self._deserialize(request.body, request.get_content_type())
        try:
            return {"ip_addresses_port": self._plugin.update_port_for_ip(
                request.context, ip_address_id, id, body)}
        except n_exc.NotFound as e:
            raise webob.exc.HTTPNotFound(e)
        except n_exc.BadRequest as e:
            raise webob.exc.HTTPBadRequest(e)

    def delete(self, request, id, **kwargs):
        """Deleting a port through this sub-resource is not supported."""
        raise webob.exc.HTTPNotImplemented()
class Ip_addresses(extensions.ExtensionDescriptor):
    """IP Addresses support.

    Neutron extension descriptor registering the /ip_addresses resource and
    its /ports sub-resource.
    """

    @classmethod
    def get_name(cls):
        return "IP Addresses for a tenant"

    @classmethod
    def get_alias(cls):
        return RESOURCE_COLLECTION

    @classmethod
    def get_description(cls):
        return "Expose functions for tenant IP Address management"

    @classmethod
    def get_namespace(cls):
        return ("http://docs.openstack.org/network/ext/"
                "ip_addresses/api/v2.0")

    @classmethod
    def get_updated(cls):
        return "2013-02-19T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Attributes are only defined for the v2.0 API.
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        ip_controller = IpAddressesController(
            manager.NeutronManager.get_plugin())
        ip_port_controller = IpAddressPortController(
            manager.NeutronManager.get_plugin())
        resources = []
        resources.append(extensions.ResourceExtension(
            Ip_addresses.get_alias(),
            ip_controller))
        # /ports is registered as a sub-resource of /ip_addresses.
        parent = {'collection_name': 'ip_addresses',
                  'member_name': 'ip_address'}
        resources.append(extensions.ResourceExtension(
            'ports', ip_port_controller, parent=parent))
        return resources
|
antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_MovingAverage/cycle_5/ar_12/test_artificial_32_Quantization_MovingAverage_5_12_100.py | Python | bsd-3-clause | 272 | 0.084559 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset a | s art
art.process_d | ataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 12); |
Canas/kaftools | kaftools/filters.py | Python | mit | 7,408 | 0.00216 | # -*- coding: utf-8 -*-
"""
kaftools.filters
~~~~~~~~~~~~~~~~~~~~~~~~
This module provides adaptive kernel filtering classes.
Currently supports:
- Kernel Least Mean Squeares (KLMS)
- Exogenous Kernel Least Mean Squares (KLMS-X)
- Kernel Recursive Least Squares (KRLS)
Not all filters support all features. Be sure to check the info sheet
for detailed comparisons.
"""
import numpy as np
from kaftools.kernels import GaussianKernel, MultiChannelGaussianKernel
from kaftools.utils.shortcuts import timeit
class Filter:
"""Base class for filter implementations. """
def __init__(self, x, y, **kwargs):
if x.ndim > 1 and x.shape[-1] == 1:
self.x = x.ravel()
else:
self.x = x
if y.ndim > 1 and y.shape[-1] == 1 or y.ndim == 1:
self.y = y.ravel()
else:
raise Exception("Target data must not be multidimensional.")
self.n = len(x)
self.regressor = None
self.support_vectors | = None
self.coefficients = None
self.similarity = None
self.kernel = None
self.error = None
self.learning_rate = None
self.delay = None
self.coefficient_history = None
self.error_history = None
self.param_history = None
self._estimate = np.zeros(y.shape)
@property
def estimate(self):
if self._estimate is None:
raise | AttributeError("Filter has not been applied yet.")
else:
return self._estimate
class KlmsFilter(Filter):
"""Original SISO KLMS filter with optional delayed input. """
def fit(self, kernel=GaussianKernel(sigma=1.0), learning_rate=1.0, delay=1,
kernel_learning_rate=None, sparsifiers=None, **kwargs):
"""Fit data using KLMS algorithm.
:param kernel: Kernel class object
:param learning_rate: float with learning rate (eta)
:param delay: optinonal number of delayed samples to use
:param kernel_learning_rate: float with param learning rate (mu)
:param sparsifiers: list with Sparsifier class objects
:return: None
"""
if delay >= len(self.x):
raise Exception("Delay greater than the length of the input.")
self.learning_rate = learning_rate
self.delay = delay
self.coefficients = kwargs.get('coefs', np.array([self.y[delay]]))
self.support_vectors = kwargs.get('dict', np.array([self.x[0:delay]]))
self.coefficient_history = [self.coefficients]
self.error_history = [0] * delay
self.kernel = kernel
self.param_history = [kernel.params]
freeze_dict = kwargs.get('freeze_dict', False)
for i in range(0, self.n - delay):
self.regressor = self.x[i:i+delay]
self.similarity = kernel(self.support_vectors, self.regressor)
self.estimate[i+delay] = np.dot(self.coefficients, self.similarity)
self.error = self.y[i + delay] - self.estimate[i + delay]
self.error_history.append(self.error)
self.coefficients += self.learning_rate * self.error * self.similarity
if kernel_learning_rate:
previous_regressor = self.x[i-1:i+delay-1]
previous_error = self.error_history[-2]
sigmas = []
for k, sigma in enumerate(kernel.params):
new_sigma = sigma + 2 * learning_rate * kernel_learning_rate * self.error * previous_error * \
(np.linalg.norm(previous_regressor - self.regressor) ** 2) * \
kernel(previous_regressor, self.regressor) / sigma ** 3
kernel.params[k] = new_sigma
sigmas.append(new_sigma)
self.param_history.append(sigmas)
if not freeze_dict:
if sparsifiers:
for sparsifier in sparsifiers:
sparsifier.apply(self)
else:
self.support_vectors = np.append(self.support_vectors, [self.regressor], axis=0)
self.coefficients = np.append(self.coefficients, [learning_rate * self.error])
self.coefficient_history.append(np.array([self.coefficients]))
class KlmsxFilter(KlmsFilter):
"""Exogenous MISO KLMS-X filter. """
def __init__(self, x, y):
if x.ndim < 2 and x.shape[-1] > 1:
raise Exception("KLMS-X requires at least two input sources, otherwise use KLMS.")
else:
super().__init__(x, y)
def fit(self, kernel=MultiChannelGaussianKernel(sigmas=(1.0, 1.0)), learning_rate=1.0, delay=0,
kernel_learning_rate=None, sparsifiers=None, **kwargs):
if len(kernel.params) != self.x.shape[-1]:
raise Exception("There must be at least one Kernel parameter per input channel.")
super().fit(kernel=kernel, learning_rate=learning_rate, delay=delay,
kernel_learning_rate=kernel_learning_rate, sparsifiers=sparsifiers, **kwargs)
class KrlsFilter(Filter):
"""Original SISO KRLS filter. """
def __init__(self, x, y):
super().__init__(x, y)
self.q = None
self.h = None
self.z = None
self.r = None
@timeit
def fit(self, kernel=GaussianKernel(sigma=1.0), regularizer=1.0, sparsifiers=None):
"""Fit data using KRLS algorithm.
:param kernel: Kernel class object
:param regularizer: float with regularization parameter (lambda)
:param sparsifiers: list with Sparsifier class objects
:return: None
"""
self.q = np.array((regularizer + kernel(self.x[0], self.x[0])) ** (-1))
self.coefficients = np.array([self.q * self.y[0]])
self.coefficient_history = [self.coefficients]
self._estimate = np.zeros(self.y.shape)
self.support_vectors = np.array([self.x[0]]).reshape(-1, 1)
self.error_history = [0]
self.kernel = kernel
for i in range(1, self.n):
self.regressor = self.x[i]
self.h = kernel(self.support_vectors, self.regressor).T
self.z = np.dot(self.q, self.h)
self.r = regularizer + kernel(self.x[i], self.x[i]) - np.dot(self.z, self.h)
self.estimate[i] = np.dot(self.h.T, self.coefficients)
self.error = self.y[i] - self.estimate[i]
self.error_history.append(self.error)
self.coefficients -= self.z * self.r ** (-1) * self.error
self.coefficient_history.append(self.coefficients)
if sparsifiers:
for sparsifier in sparsifiers:
sparsifier.apply(self)
else:
self.q = self.q * self.r + np.outer(self.z, self.z.T)
q_row = np.asarray(-self.z).reshape(-1, 1).T
q_col = np.asarray(-self.z).reshape(-1, 1)
q_end = np.array([1]).reshape(-1, 1)
self.q = np.append(self.q, q_row, axis=0)
self.q = np.append(self.q, np.concatenate((q_col, q_end), axis=0), axis=1)
self.q = self.r ** (-1) * self.q
self.support_vectors = np.vstack((self.support_vectors, self.regressor.reshape(1, -1)))
self.coefficients = np.append(self.coefficients, self.r ** (-1) * self.error) |
xiangke/pycopia | mibs/pycopia/mibs/HP_SN_POS_GROUP_MIB_OID.py | Python | lgpl-2.1 | 2,839 | 0.02043 | # python
# This file is generated by a program (mib2py).
import HP_SN_POS_GROUP_MIB
OIDMAP = {
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1': HP_SN_POS_GROUP_MIB.snPO | SInfo,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.1': HP_SN_POS_GROUP_MIB.snPOSInfoPortNum,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.2': HP_SN_POS_GROUP_MIB.snPOSIfIndex,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.3': HP_SN_POS_GROUP_MIB.snPOSDescr,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.4': HP_SN_POS_GROUP_MIB.snPOSName,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.5': HP_SN_POS_GROUP_MIB.snPOSInfoSpee | d,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.6': HP_SN_POS_GROUP_MIB.snPOSInfoAdminStatus,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.7': HP_SN_POS_GROUP_MIB.snPOSInfoLinkStatus,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.8': HP_SN_POS_GROUP_MIB.snPOSInfoClock,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.9': HP_SN_POS_GROUP_MIB.snPOSInfoLoopBack,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.10': HP_SN_POS_GROUP_MIB.snPOSInfoScambleATM,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.11': HP_SN_POS_GROUP_MIB.snPOSInfoFraming,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.12': HP_SN_POS_GROUP_MIB.snPOSInfoCRC,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.13': HP_SN_POS_GROUP_MIB.snPOSInfoKeepAlive,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.14': HP_SN_POS_GROUP_MIB.snPOSInfoFlagC2,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.15': HP_SN_POS_GROUP_MIB.snPOSInfoFlagJ0,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.16': HP_SN_POS_GROUP_MIB.snPOSInfoFlagH1,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.17': HP_SN_POS_GROUP_MIB.snPOSStatsInFrames,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.18': HP_SN_POS_GROUP_MIB.snPOSStatsOutFrames,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.19': HP_SN_POS_GROUP_MIB.snPOSStatsAlignErrors,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.20': HP_SN_POS_GROUP_MIB.snPOSStatsFCSErrors,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.21': HP_SN_POS_GROUP_MIB.snPOSStatsFrameTooLongs,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.22': HP_SN_POS_GROUP_MIB.snPOSStatsFrameTooShorts,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.23': HP_SN_POS_GROUP_MIB.snPOSStatsInDiscard,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.24': HP_SN_POS_GROUP_MIB.snPOSStatsOutDiscard,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.25': HP_SN_POS_GROUP_MIB.snPOSInOctets,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.26': HP_SN_POS_GROUP_MIB.snPOSOutOctets,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.27': HP_SN_POS_GROUP_MIB.snPOSStatsInBitsPerSec,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.28': HP_SN_POS_GROUP_MIB.snPOSStatsOutBitsPerSec,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.29': HP_SN_POS_GROUP_MIB.snPOSStatsInPktsPerSec,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.30': HP_SN_POS_GROUP_MIB.snPOSStatsOutPktsPerSec,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.31': HP_SN_POS_GROUP_MIB.snPOSStatsInUtilization,
'1.3.6.1.4.1.11.2.3.7.11.12.2.14.1.1.1.32': HP_SN_POS_GROUP_MIB.snPOSStatsOutUtilization,
}
|
stratosphereips/Manati | manati/api_manager/migrations/0003_remove_externalmodule_acronym.py | Python | agpl-3.0 | 1,302 | 0.006144 | #
# Copyright (c) 2017 Stratosphere Laboratory.
#
# This file is part of ManaTI Project
# (see <https://stratosphereips.org>). It was created by 'Raul B. Netto <raulbeni@gmail.com>'
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. See the file 'docs/LICENSE' or see <http://www | .gnu.org/licenses/>
# for copying permission.
#
# -*- coding: utf-8 -* | -
# Generated by Django 1.9.7 on 2016-11-09 20:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api_manager', '0002_externalmodule_status'),
]
operations = [
migrations.RemoveField(
model_name='externalmodule',
name='acronym',
),
]
|
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/dataaccess/response/GetGridLatLonResponse.py | Python | bsd-3-clause | 858 | 0.001166 | ##
##
# File auto-generated against equivalent DynamicSerialize Java class
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Oct 10, 2016 5916 bsteffen Generated
class GetGridLatLonResponse(object):
def __init__(self):
self.lats = None
self.lons = None
self.nx = None
self.ny = None
def getLats(self):
return self.lats
def setLats(self, lats):
self.lats = lats
def getLons(self):
return self.lons
def setLons(self, lons):
self.lons = lons
def getNx(self):
return se | lf.nx
def set | Nx(self, nx):
self.nx = nx
def getNy(self):
return self.ny
def setNy(self, ny):
self.ny = ny
|
akiokio/centralfitestoque | src/.pycharm_helpers/python_stubs/-1807332816/_sre.py | Python | bsd-2-clause | 453 | 0.013245 | # e | ncoding: utf-8
# module _sre
# from (built-in)
# by generator 1.130
# no doc
# no imports
# Variables with simple values
CODESIZE = 4
copyright = ' SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB '
MAGIC = 20031017
# functions
def compile(*args, **kwargs): # real signature unknown
pass
def getcode | size(*args, **kwargs): # real signature unknown
pass
def getlower(*args, **kwargs): # real signature unknown
pass
# no classes
|
tiaanwillemse/pythonNotifier | notifier.py | Python | mit | 2,262 | 0.007958 | import utils
class Notifier(object):
def __init__(self, key='', type='slack'):
self.key = key
self.type = type
self.parameters = {
'icon' : ':space_invader:',
'username': 'Notifier Bot',
'channel' : 'general'
}
if self.key:
if self.type == 'slack':
from slackclient import SlackClient
self.slackClient = SlackClient(self.key);
def configure(self, parameters):
self.parameters.update(parameters)
def message(self, object):
if self.type == 'slack':
if isinstance(object, str):
object = {'text': object}
defaults = {
'color' : '#E8E8E8',
'text' : ''
}
attachments = {}
attachments.update(defaults)
attachments.update(object)
if hasattr(attachments, 'fallback') == False:
attachments.update({'fallback' : attachments['text']})
self.slackClient.api_call(
"chat.postMessage", channel=self.parameters['channel'],
username=self.parameters['username'], icon_emoji=self.parameters['icon'], attachments='[' + utils.object_to_string(attachments) + ']'
)
def info(self, object):
if self.type == 'slack':
if isinstance(object, str):
object = {'text': object}
object.update({'color' : '#28D7E5'})
self.message(object)
def good(self, object):
if self.type == 'slack':
if isinstance(object, str):
object = {'text': object}
object.update({'color' : 'good'})
|
self.message(object)
def warning(self, object):
if self.type == 'slack':
if isinstance(object, str):
object = {'text': object}
object.update({'color' : 'warning'})
self.message(object)
def danger(self, object):
if self.type == 'slack':
if isinstance(object, str):
object = { | 'text': object}
object.update({'color' : 'danger'})
self.message(object) |
Etharr/plugin.video.youtube | resources/lib/youtube_plugin/kodion/impl/mock/mock_context_ui.py | Python | gpl-2.0 | 1,400 | 0.000714 | __author__ = 'bromix'
from ..abstract_context_ui import AbstractContextUI
from ...logging import *
from .mock_progress_dialog import MockProgressDialog
class MockContextUI(AbstractContextUI):
def __init__(self):
AbstractContextUI.__init__(self)
self._view_mode = None
def set_view_mode(self, view_mode):
self._view_mode = view_mode
def create_progress_dialog(self, heading, text=None, background=False):
return MockProgressDialog(heading, text)
def get_view_mode(self):
return self._view_mode
def get_skin_id(self):
return 'skin.kodion.dummy'
def on_keyboard_input(self, title, default='', hidden=False):
print('[' + title + ']')
print("Returning 'Hello World'")
# var = raw_input("Please enter something: ")
var = u'Hello World'
if var:
return True, var
return False, ''
| def show_notification(self, message, header='', image_uri='', time_milliseconds=5000):
log('=======NOTIFICATION=======')
log('Message : %s' % message)
log('header : %s' % header)
log('image_uri: %s' % image_uri)
log('Time : %d' % time_milliseconds)
log('==========================')
def open_settings(self):
| log("called 'open_settings'")
def refresh_container(self):
log("called 'refresh_container'")
|
apache/incubator-superset | superset/migrations/versions/f9847149153d_add_certifications_columns_to_slice.py | Python | apache-2.0 | 1,499 | 0.000667 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this f | ile
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses | /LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add_certifications_columns_to_slice
Revision ID: f9847149153d
Revises: 0ca9e5f1dacd
Create Date: 2021-11-03 14:07:09.905194
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
revision = "f9847149153d"
down_revision = "0ca9e5f1dacd"
def upgrade():
with op.batch_alter_table("slices") as batch_op:
batch_op.add_column(sa.Column("certified_by", sa.Text(), nullable=True))
batch_op.add_column(
sa.Column("certification_details", sa.Text(), nullable=True)
)
def downgrade():
with op.batch_alter_table("slices") as batch_op:
batch_op.drop_column("certified_by")
batch_op.drop_column("certification_details")
|
AdamWill/blivet | tests/formats_test/methods_test.py | Python | lgpl-2.1 | 16,986 | 0.003356 | import unittest
from unittest.mock import patch, sentinel, PropertyMock
from blivet.errors import DeviceFormatError
from blivet.formats import DeviceFormat
from blivet.formats.luks import LUKS
from blivet.formats.lvmpv import LVMPhysicalVolume
from blivet.formats.mdraid import MDRaidMember
from blivet.formats.swap import SwapSpace
from blivet.formats.fs import EFIFS, Ext4FS, XFS
class FormatMethodsTestCase(unittest.TestCase):
format_class = DeviceFormat
def __init__(self, methodName='runTest'):
super().__init__(methodName=methodName)
self.patchers = dict()
self.patches = dict()
#
# patch setup
#
def set_patches(self):
# self.patchers["update_sysfs_path"] = patch.object(self.device, "update_sysfs_path")
self.patchers["status"] = patch.object(self.format_class, "status", new=PropertyMock(return_value=False))
self.patchers["os"] = patch("blivet.formats.os")
def start_patches(self):
for target, patcher in self.patchers.items():
self.patches[target] = patcher.start()
def stop_patches(self):
for target, patcher in self.patchers.items():
patcher.stop()
del self.patches[target]
#
# device constructor arguments
#
def _ctor_args(self):
return []
def _ctor_kwargs(self):
return {"device": "/fake/device"}
def setUp(self):
self.format = self.format_class(*self._ctor_args(), **self._ctor_kwargs())
self.set_patches()
self.start_patches()
self.addCleanup(self.stop_patches)
# some formats use os from multiple modules, eg: fs
def set_os_path_exists(self, value):
self.patches["os"].path.exists.return_value = value
#
# tests for format backend usage
#
def _test_create_backend(self):
pass
def _test_destroy_backend(self):
with patch("blivet.formats.run_program") as run_program:
run_program.return_value = 0
self.format.exists = True
self.format.destroy()
self.assertFalse(self.format.exists)
run_program.assert_called_with(["wipefs", "-f", "-a", self.format.device])
def _test_setup_backend(self):
pass
def _test_teardown_backend(self):
pass
#
# format method tests
#
def test_create(self):
| # fmt cannot exist
self.format.exists = True
with patch.object(self.format, "_create"):
self.set_os_path_exists(True)
self.assertRaisesRegex(DeviceFormatError, "format already exists", self.format.create)
self.assertFalse(self.format._create.called) # pylint: disable=no-member
self.format.exists = False
# device | must be accessible
with patch.object(self.format, "_create"):
# device must be accessible
self.set_os_path_exists(False)
self.assertRaisesRegex(DeviceFormatError, "invalid device specification", self.format.create)
self.assertFalse(self.format._create.called) # pylint: disable=no-member
self.set_os_path_exists(True)
# _pre_create raises -> no _create
self.assertFalse(self.format.exists)
# pylint: disable=unused-argument
def _fail(*args, **kwargs):
raise RuntimeError("problems")
with patch.object(self.format, "_create"):
with patch.object(self.format, "_pre_create") as m:
m.side_effect = _fail
self.assertRaisesRegex(RuntimeError, "problems", self.format.create)
self.assertFalse(self.format._create.called) # pylint: disable=no-member
self.assertFalse(self.format.exists)
# _create raises -> no _post_create -> exists == False
with patch.object(self.format, "_create") as m:
m.side_effect = _fail
self.assertRaisesRegex(RuntimeError, "problems", self.format.create)
self.assertTrue(self.format._create.called) # pylint: disable=no-member
self.assertFalse(self.format.exists)
# _create succeeds -> make sure _post_create sets existence
with patch.object(self.format, "_create"):
with patch.object(self.format, "_post_create"):
self.format.create()
self.assertTrue(self.format._create.called) # pylint: disable=no-member
self.assertFalse(self.format.exists)
# _post_create sets exists to True
with patch.object(self.format, "_create"):
self.format.create()
self.assertTrue(self.format._create.called) # pylint: disable=no-member
self.assertTrue(self.format.exists)
self._test_create_backend()
def test_destroy(self):
# fmt must exist
self.format.exists = False
with patch.object(self.format, "_destroy"):
self.patches["os"].access.return_value = True
self.assertRaisesRegex(DeviceFormatError, "has not been created", self.format.destroy)
self.assertFalse(self.format._destroy.called) # pylint: disable=no-member
self.format.exists = True
# format must be inactive
with patch.object(self.format, "_destroy"):
self.patches["status"].return_value = True
self.assertRaisesRegex(DeviceFormatError, "is active", self.format.destroy)
self.assertFalse(self.format._destroy.called) # pylint: disable=no-member
# device must be accessible
with patch.object(self.format, "_destroy"):
self.patches["os"].access.return_value = False
self.patches["status"].return_value = False
self.assertRaisesRegex(DeviceFormatError, "device path does not exist", self.format.destroy)
self.assertFalse(self.format._destroy.called) # pylint: disable=no-member
self.patches["os"].access.return_value = True
# _pre_destroy raises -> no _create
# pylint: disable=unused-argument
def _fail(*args, **kwargs):
raise RuntimeError("problems")
self.assertTrue(self.format.exists)
with patch.object(self.format, "_destroy"):
with patch.object(self.format, "_pre_destroy") as m:
m.side_effect = _fail
self.assertRaisesRegex(RuntimeError, "problems", self.format.destroy)
self.assertFalse(self.format._destroy.called) # pylint: disable=no-member
self.assertTrue(self.format.exists)
# _destroy raises -> no _post_destroy -> exists == True
with patch.object(self.format, "_destroy") as m:
m.side_effect = _fail
self.assertRaisesRegex(RuntimeError, "problems", self.format.destroy)
self.assertTrue(self.format._destroy.called) # pylint: disable=no-member
self.assertTrue(self.format.exists)
# _destroy succeeds -> _post_destroy is what updates existence
with patch.object(self.format, "_destroy"):
with patch.object(self.format, "_post_destroy"):
self.format.destroy()
self.assertTrue(self.format._destroy.called) # pylint: disable=no-member
self.assertTrue(self.format.exists)
# _post_destroy set exists to False
with patch.object(self.format, "_destroy"):
self.format.destroy()
self.assertTrue(self.format._destroy.called) # pylint: disable=no-member
self.assertFalse(self.format.exists)
self._test_destroy_backend()
def test_setup(self):
# fmt must exist
self.format.exists = False
with patch.object(self.format, "_setup"):
self.set_os_path_exists(True)
self.assertRaisesRegex(DeviceFormatError, "has not been created", self.format.setup)
# _pre_setup raises exn -> no _setup
self.assertFalse(self.format._setup.called) # pylint: disable=no-member
self.format.exists = True
# device must be accessible
with patch.object(self.format, "_setup"):
self.set_os_path_exists(False)
self.a |
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/meta/lib/oeqa/selftest/bbtests.py | Python | gpl-2.0 | 14,655 | 0.007711 | import os
import re
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
from oeqa.uti | ls.decorators import testcase
class BitbakeTests(oeSelfTest):
def getline(self, res, line):
for l in res.output.split('\n'):
if line in l:
return l
@testcase(789)
def test_run_bitbake_from_dir_1(self):
os.chdir(os.path.jo | in(self.builddir, 'conf'))
self.assertEqual(bitbake('-e').status, 0, msg = "bitbake couldn't run from \"conf\" dir")
@testcase(790)
def test_run_bitbake_from_dir_2(self):
my_env = os.environ.copy()
my_env['BBPATH'] = my_env['BUILDDIR']
os.chdir(os.path.dirname(os.environ['BUILDDIR']))
self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir")
@testcase(806)
def test_event_handler(self):
self.write_config("INHERIT += \"test_events\"")
result = bitbake('m4-native')
find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Executing RunQueue Tasks", result.output)
find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
self.assertFalse('Test for bb.event.InvalidEvent' in result.output, msg = "\"Test for bb.event.InvalidEvent\" message found during bitbake process. bitbake output: %s" % result.output)
@testcase(103)
def test_local_sstate(self):
bitbake('m4-native')
bitbake('m4-native -cclean')
result = bitbake('m4-native')
find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
self.assertTrue(find_setscene, msg = "No \"m4-native.*do_.*_setscene\" message found during bitbake m4-native. bitbake output: %s" % result.output )
@testcase(105)
def test_bitbake_invalid_recipe(self):
result = bitbake('-b asdf', ignore_status=True)
self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output, msg = "Though asdf recipe doesn't exist, bitbake didn't output any err. message. bitbake output: %s" % result.output)
@testcase(107)
def test_bitbake_invalid_target(self):
result = bitbake('asdf', ignore_status=True)
self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output, msg = "Though no 'asdf' target exists, bitbake didn't output any err. message. bitbake output: %s" % result.output)
@testcase(106)
def test_warnings_errors(self):
result = bitbake('-b asdf', ignore_status=True)
find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
self.assertTrue(find_warnings, msg="Did not find the mumber of warnings at the end of the build:\n" + result.output)
self.assertTrue(find_errors, msg="Did not find the mumber of errors at the end of the build:\n" + result.output)
@testcase(108)
def test_invalid_patch(self):
# This patch already exists in SRC_URI so adding it again will cause the
# patch to fail.
self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
self.write_config("INHERIT_remove = \"report-error\"")
result = bitbake('man -c patch', ignore_status=True)
self.delete_recipeinc('man')
bitbake('-cclean man')
line = self.getline(result, "Function failed: patch_do_patch")
self.assertTrue(line and line.startswith("ERROR:"), msg = "Repeated patch application didn't fail. bitbake output: %s" % result.output)
@testcase(1354)
def test_force_task_1(self):
# test 1 from bug 5875
test_recipe = 'zlib'
test_data = "Microsoft Made No Profit From Anyone's Zunes Yo"
bb_vars = get_bb_vars(['D', 'PKGDEST', 'mandir'], test_recipe)
image_dir = bb_vars['D']
pkgsplit_dir = bb_vars['PKGDEST']
man_dir = bb_vars['mandir']
bitbake('-c clean %s' % test_recipe)
bitbake('-c package -f %s' % test_recipe)
self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
man_file = os.path.join(image_dir + man_dir, 'man3/zlib.3')
ftools.append_file(man_file, test_data)
bitbake('-c package -f %s' % test_recipe)
man_split_file = os.path.join(pkgsplit_dir, 'zlib-doc' + man_dir, 'man3/zlib.3')
man_split_content = ftools.read_file(man_split_file)
self.assertIn(test_data, man_split_content, 'The man file has not changed in packages-split.')
ret = bitbake(test_recipe)
self.assertIn('task do_package_write_rpm:', ret.output, 'Task do_package_write_rpm did not re-executed.')
@testcase(163)
def test_force_task_2(self):
# test 2 from bug 5875
test_recipe = 'zlib'
bitbake(test_recipe)
self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
result = bitbake('-C compile %s' % test_recipe)
look_for_tasks = ['do_compile:', 'do_install:', 'do_populate_sysroot:', 'do_package:']
for task in look_for_tasks:
self.assertIn(task, result.output, msg="Couldn't find %s task.")
@testcase(167)
def test_bitbake_g(self):
result = bitbake('-g core-image-minimal')
for f in ['pn-buildlist', 'recipe-depends.dot', 'task-depends.dot']:
self.addCleanup(os.remove, f)
self.assertTrue('Task dependencies saved to \'task-depends.dot\'' in result.output, msg = "No task dependency \"task-depends.dot\" file was generated for the given task target. bitbake output: %s" % result.output)
self.assertTrue('busybox' in ftools.read_file(os.path.join(self.builddir, 'task-depends.dot')), msg = "No \"busybox\" dependency found in task-depends.dot file.")
@testcase(899)
def test_image_manifest(self):
bitbake('core-image-minimal')
bb_vars = get_bb_vars(["DEPLOY_DIR_IMAGE", "IMAGE_LINK_NAME"], "core-image-minimal")
deploydir = bb_vars["DEPLOY_DIR_IMAGE"]
imagename = bb_vars["IMAGE_LINK_NAME"]
manifest = os.path.join(deploydir, imagename + ".manifest")
self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image. It should have been created in %s" % manifest)
@testcase(168)
def test_invalid_recipe_src_uri(self):
data = 'SRC_URI = "file://invalid"'
self.write_recipeinc('man', data)
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
INHERIT_remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
bitbake('-ccleanall man')
result = bitbake('-c fetch man', ignore_status=True)
bitbake('-ccleanall man')
self.delete_recipeinc('man')
self.assertEqual(result.status, 1, msg="Command succeded when it should have failed. bitbake output: %s" % result.output)
self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output, msg = "\"invalid\" file \
doesn't exist, yet no error message encountered. bitbake output: %s" % result.output)
line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
@testcase(171)
def test_rename_downloaded_file(self):
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
data = 'SRC_URI_append = ";downloadfilename=test-aspe |
bo858585/AbstractBooking | Booking/booking/migrations/0018_auto_20150310_1533.py | Python | mit | 1,427 | 0.000701 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('booking', '0017_auto_20150309_1910'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='date',
field=models.DateTimeField(auto_now_add=True, db_index=True),
preserve_default=True,
),
migrations.AlterField(
model_name='booking',
name='status',
field=models.CharField(default=b'pending', max_length=30, db_index=True, choices=[(b'pending', '\u041e\u0436\u0438\u0434\u0430\u0435\u0442 \u0438\u0441\u043f\u043e\u043b\u043d\u0438\u0442\u0435\u043b\u044f'), (b'waiting_for_approval', '\u041e\u0436\u0438\u0434\u0430\u0435\u0442 \u043f\u043e\u0434\u | 0442\u0432\u0435\u0440\u0436\u0434\u0435\u043d\u0438\u044f \u0437\u0430\u043a\u0430\u0437\u0447\u0438\u043a\u043e\u043c'), (b'running', '\u0412\u0437\u044f\u0442 \u043d\u0430 \u0438\u0441\u043f\u043e\u043b\u043d\u0435\u043d\u0438\u0435'), (b'completed', '\u0417\u0430\u0432\u0435\u0440\u0448\u0435\u043d')]),
preserve_default=True,
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeFie | ld(auto_now_add=True, db_index=True),
preserve_default=True,
),
]
|
bcbnz/python-rebuild | src/rebuild/widgets/results/__init__.py | Python | gpl-3.0 | 196 | 0 | from .base_result import BaseResult
| from .sub import Sub
from .split import Split
from .match import Match
from .search import Search
from .find_all import FindAll
from .find_iter import FindIter | |
aspose-cells/Aspose.Cells-for-Cloud | Examples/Python/Examples/GetAutoshapeFromWorksheet.py | Python | mit | 1,342 | 0.009687 | import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
from asposecellscloud.CellsApi import AutoShapesResponse
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet4"
autoshapeNumber = 1
#uploa | d file to aspose cloud storage
storageApi.PutCreate(Path=filen | ame, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to get autoshape from a worksheet
response = cellsApi.GetWorksheetAutoshape(name=filename, sheetName=sheetName, autoshapeNumber=autoshapeNumber)
if response.Status == "OK":
autoShape = response.AutoShape
print autoShape.HtmlText
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
google/material-design-icons | update/venv/lib/python3.9/site-packages/pip/_internal/resolution/base.py | Python | apache-2.0 | 563 | 0.001776 | from typing import Callable, List
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_set import RequirementSet
InstallRequirementProvider = Callable[[str, InstallRequirement], InstallRequirement]
class BaseResolver:
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
raise NotImplement | edError()
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[Instal | lRequirement]
raise NotImplementedError()
|
lbryio/lbry | torba/torba/client/words/english.py | Python | mit | 19,271 | 0.106274 | words = [
'abandon',
'ability',
'able',
'about',
'above',
'absent',
'absorb',
'abstract',
'absurd',
'abuse',
'access',
'accident',
'account',
'accuse',
'achieve',
'acid',
'acoustic',
'acquire',
'across',
'act',
'action',
'actor',
'actress',
'actual',
'adapt',
'add',
'addict',
'address',
'adjust',
'admit',
'adult',
'advance',
'advice',
'aerobic',
'affair',
'afford',
'afraid',
'again',
'age',
'agent',
'agree',
'ahead',
'aim',
'air',
'airport',
'aisle',
'alarm',
'album',
'alcohol',
'alert',
'alien',
'all',
'alley',
'allow',
'almost',
'alone',
'alpha',
'already',
'also',
'alter',
'always',
'amateur',
'amazing',
'among',
'amount',
'amused',
'analyst',
'anchor',
'ancient',
'anger',
'angle',
'angry',
'animal',
'ankle',
'announce',
'annual',
'another',
'answer',
'antenna',
'antique',
'anxiety',
'any',
'apart',
'apology',
'appear',
'apple',
'approve',
'april',
'arch',
'arctic',
'area',
'arena',
'argue',
'arm',
'armed',
'armor',
'army',
'around',
'arrange',
'arrest',
'arrive',
'arrow',
'art',
'artefact',
'artist',
'artwork',
'ask',
'aspect',
'assault',
'asset',
'assist',
'assume',
'asthma',
'athlete',
'atom',
'attack',
'attend',
'attitude',
'attra | ct',
'auction',
'audit',
'august',
'aunt',
'author',
'auto',
'autumn',
'average',
'avocado',
'avoid',
'awake',
'aware',
'away',
'awesome',
'awful',
' | awkward',
'axis',
'baby',
'bachelor',
'bacon',
'badge',
'bag',
'balance',
'balcony',
'ball',
'bamboo',
'banana',
'banner',
'bar',
'barely',
'bargain',
'barrel',
'base',
'basic',
'basket',
'battle',
'beach',
'bean',
'beauty',
'because',
'become',
'beef',
'before',
'begin',
'behave',
'behind',
'believe',
'below',
'belt',
'bench',
'benefit',
'best',
'betray',
'better',
'between',
'beyond',
'bicycle',
'bid',
'bike',
'bind',
'biology',
'bird',
'birth',
'bitter',
'black',
'blade',
'blame',
'blanket',
'blast',
'bleak',
'bless',
'blind',
'blood',
'blossom',
'blouse',
'blue',
'blur',
'blush',
'board',
'boat',
'body',
'boil',
'bomb',
'bone',
'bonus',
'book',
'boost',
'border',
'boring',
'borrow',
'boss',
'bottom',
'bounce',
'box',
'boy',
'bracket',
'brain',
'brand',
'brass',
'brave',
'bread',
'breeze',
'brick',
'bridge',
'brief',
'bright',
'bring',
'brisk',
'broccoli',
'broken',
'bronze',
'broom',
'brother',
'brown',
'brush',
'bubble',
'buddy',
'budget',
'buffalo',
'build',
'bulb',
'bulk',
'bullet',
'bundle',
'bunker',
'burden',
'burger',
'burst',
'bus',
'business',
'busy',
'butter',
'buyer',
'buzz',
'cabbage',
'cabin',
'cable',
'cactus',
'cage',
'cake',
'call',
'calm',
'camera',
'camp',
'can',
'canal',
'cancel',
'candy',
'cannon',
'canoe',
'canvas',
'canyon',
'capable',
'capital',
'captain',
'car',
'carbon',
'card',
'cargo',
'carpet',
'carry',
'cart',
'case',
'cash',
'casino',
'castle',
'casual',
'cat',
'catalog',
'catch',
'category',
'cattle',
'caught',
'cause',
'caution',
'cave',
'ceiling',
'celery',
'cement',
'census',
'century',
'cereal',
'certain',
'chair',
'chalk',
'champion',
'change',
'chaos',
'chapter',
'charge',
'chase',
'chat',
'cheap',
'check',
'cheese',
'chef',
'cherry',
'chest',
'chicken',
'chief',
'child',
'chimney',
'choice',
'choose',
'chronic',
'chuckle',
'chunk',
'churn',
'cigar',
'cinnamon',
'circle',
'citizen',
'city',
'civil',
'claim',
'clap',
'clarify',
'claw',
'clay',
'clean',
'clerk',
'clever',
'click',
'client',
'cliff',
'climb',
'clinic',
'clip',
'clock',
'clog',
'close',
'cloth',
'cloud',
'clown',
'club',
'clump',
'cluster',
'clutch',
'coach',
'coast',
'coconut',
'code',
'coffee',
'coil',
'coin',
'collect',
'color',
'column',
'combine',
'come',
'comfort',
'comic',
'common',
'company',
'concert',
'conduct',
'confirm',
'congress',
'connect',
'consider',
'control',
'convince',
'cook',
'cool',
'copper',
'copy',
'coral',
'core',
'corn',
'correct',
'cost',
'cotton',
'couch',
'country',
'couple',
'course',
'cousin',
'cover',
'coyote',
'crack',
'cradle',
'craft',
'cram',
'crane',
'crash',
'crater',
'crawl',
'crazy',
'cream',
'credit',
'creek',
'crew',
'cricket',
'crime',
'crisp',
'critic',
'crop',
'cross',
'crouch',
'crowd',
'crucial',
'cruel',
'cruise',
'crumble',
'crunch',
'crush',
'cry',
'crystal',
'cube',
'culture',
'cup',
'cupboard',
'curious',
'current',
'curtain',
'curve',
'cushion',
'custom',
'cute',
'cycle',
'dad',
'damage',
'damp',
'dance',
'danger',
'daring',
'dash',
'daughter',
'dawn',
'day',
'deal',
'debate',
'debris',
'decade',
'december',
'decide',
'decline',
'decorate',
'decrease',
'deer',
'defense',
'define',
'defy',
'degree',
'delay',
'deliver',
'demand',
'demise',
'denial',
'dentist',
'deny',
'depart',
'depend',
'deposit',
'depth',
'deputy',
'derive',
'describe',
'desert',
'design',
'desk',
'despair',
'destroy',
'detail',
'detect',
'develop',
'device',
'devote',
'diagram',
'dial',
'diamond',
'diary',
'dice',
'diesel',
'diet',
'differ',
'digital',
'dignity',
'dilemma',
'dinner',
'dinosaur',
'direct',
'dirt',
'disagree',
'discover',
'disease',
'dish',
'dismiss',
'disorder',
'display',
'distance',
'divert',
'divide',
'divorce',
'dizzy',
'doctor',
'document',
'dog',
'doll',
'dolphin',
'domain',
'donate',
'donkey',
'donor',
'door',
'dose',
'double',
'dove',
'draft',
'dragon',
'drama',
'drastic',
'draw',
'dream',
'dress',
'drift',
'drill',
'drink',
'drip',
'drive',
'drop',
'drum',
'dry',
'duck',
'dumb',
'dune',
'during',
'dust',
'dutch',
'duty',
'dwarf',
'dynamic',
'eager',
'eagle',
'early',
'earn',
'earth',
'easily',
'east',
'easy',
'echo',
'ecology',
'economy',
'edge',
'edit',
'educate',
'effort',
'egg',
'eight',
'either',
'elbow',
'elder',
'electric',
'elegant',
'element',
'elephant',
'elevator',
'elite',
'else',
'embark',
'embody',
'embrace',
'emerge',
'emotion',
'employ',
'empower',
'empty',
'enable',
'enact',
'end',
'endless',
'endorse',
'enemy',
'energy',
'enforce',
'engage',
'engine',
'enhance',
'enjoy',
'enlist',
'enough',
'enrich',
'enroll',
'ensure',
'enter',
'entire',
'entry',
'envelope',
'episode',
'equal',
'equip',
'era',
'erase',
'erode',
'erosion',
'error',
'erupt',
'escape',
'essay',
'essence',
'estate',
'eternal',
'ethics',
'evidence',
'evil',
'evoke',
'evolve',
'exact',
'example',
'excess',
'exchange',
'excite',
'exclude',
'excuse',
'execute',
'exercise',
'exhaust',
'exhibit',
'exile',
'exist',
'exit',
'exotic',
'expand',
'expect',
'expire',
'explain',
'expose',
'express',
'extend',
'extra',
'eye',
'eyebrow',
'fabric',
'face',
'faculty',
'fade',
'faint',
'faith',
'fall',
'false',
'fame',
'family',
'famous',
'fan',
'fancy',
'fantasy',
'farm',
'fashion',
'fat',
'fatal',
'father',
'fatigue',
'fault',
'favorite',
'feature',
'february',
'federal',
'fee',
'feed',
'feel',
'female',
'fence',
'festival',
'fetch',
'fever',
'few',
'fiber',
'fiction',
'field',
'figure',
'file',
'film',
'filter',
'final',
'find',
'fine',
'finger',
'finish',
'fire',
'firm',
'first',
'fiscal',
'fish',
'fit',
'fitness',
'fix',
'flag',
'flame',
'flash',
'flat',
'flavor',
'flee',
'flight',
'flip',
'float',
'flock',
'floor',
'flower',
'fluid',
'flush',
'fly',
'foam',
'focus',
'fog',
'foil',
'fold',
'follow',
'food',
'foot',
'force',
'forest',
'forget',
'fork',
'fortune',
'forum',
'forward',
'fossil',
'foster',
'found',
'fox',
'fragile',
'frame',
'frequent',
'fresh',
'friend',
'fringe',
'frog',
'front',
'frost',
'frown',
'frozen',
'fruit',
'fuel',
'fun',
'funny',
'furnace',
'fury',
'future',
'gadget',
'gain',
'galaxy',
'gallery',
'game',
'gap',
'garage',
'garbage',
'garden',
'garlic',
'garment',
'gas',
'gasp',
'gate',
'gather',
'gauge',
'gaze',
'general',
'genius',
'genre',
'gentle',
'genuine',
'gesture',
'ghost',
'giant',
'gift',
'giggle',
'ginger',
'giraffe',
'girl',
'give',
'glad',
'glance',
'glare',
'glass',
'glide',
'glimpse',
'globe',
'gloom',
'glory',
'glove',
'glow',
'glue',
'goat',
'goddess',
'gold',
'good',
'goose',
'gorilla',
'gospel',
'gossip',
'govern',
'gown',
'grab',
'grace',
'grain',
'grant',
'grape',
'grass',
'gravity',
'great',
'green',
'grid',
'grief',
'grit',
'grocery',
'group',
'grow',
'grunt',
'guard',
'guess',
'guide',
'guilt',
'guitar',
'gun',
'gym',
'habit',
'hair',
'half',
'hammer',
'hamster',
'hand',
'happy',
'harbor',
'hard',
'harsh',
'harvest',
'hat',
'have',
'hawk',
'hazard',
'head',
'health',
'heart',
'heavy',
'hedgehog',
'height',
'hello',
'helmet',
'help',
'hen',
'hero',
'hidden',
'high',
'hill',
'hint',
'hi |
Golker/wttd | eventex/subscriptions/tests/test_view_detail.py | Python | mit | 1,288 | 0.000776 | from django.test import TestCase
from eventex.subscriptions.models import Subscription
from django.shortcuts import resolve_url as r
class SubscriptionDetailGet(TestCase):
def setUp(self):
self.obj = Subscription.objects.create(
name='Luca Bezerra', cpf='12345678901',
email='lucabezerra@gmail.com', phone='987654321'
| )
self.response = self.client.get(r('subscriptions:detail', self.obj.pk))
def test_get(self):
self.assertEqual(200, self.response.status_code)
def test_template(self):
self.assertTemplateUsed(self.response,
'subscriptions/subscription_detail.html')
def test_context(self):
subscription | = self.response.context['subscription']
self.assertIsInstance(subscription, Subscription)
def test_html(self):
contents = (self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone)
with self.subTest():
for expected in contents:
self.assertContains(self.response, expected)
class SubscriptionDetailNotFound(TestCase):
def setUp(self):
self.response = self.client.get(r('subscriptions:detail', 0))
def test_not_found(self):
self.assertEqual(404, self.response.status_code)
|
jiadaizhao/LeetCode | 1501-1600/1579-Remove Max Number of Edges to Keep Graph Fully Traversable/1579-Remove Max Number of Edges to Keep Graph Fully Traversable.py | Python | mit | 1,211 | 0.004955 | class Solution:
def maxNumEdgesToRemove(self, n: int, edges: List[List[int]]) -> int:
parent = list(range(n + 1))
def findParent(i):
while parent[i] != i:
parent[i] = parent[parent[i]]
i = parent[i]
return i
def union(u, v):
| pu = findParent(u)
pv = findParent(v)
if pu != pv:
parent[pv] = pu
return 1
else:
return 0
e1 = e2 = result = 0
for t, u, v in edges:
if t == 3:
if union(u, v):
e1 += 1
| e2 += 1
else:
result += 1
parentOrig = parent[:]
for t, u, v in edges:
if t == 1:
if union(u, v):
e1 += 1
else:
result += 1
parent = parentOrig
for t, u, v in edges:
if t == 2:
if union(u, v):
e2 += 1
else:
result += 1
return result if e1 == e2 == n - 1 else -1
|
uglyboxer/Blackjack | blackjack/packages/card.py | Python | unlicense | 1,787 | 0.001119 | class Card:
""" Class of a single card in a traditional deck of 52 cards.
Cards are 2 - 10 and have a value matching their integer name.
or the are a 'face' card (Jack, Queen, King) and valued at 10.
Ace cards are valued at 1 or 1 | 1 depending on the state of the rest of the
hand.
Parameters
----------
input_card : tuple
A 3-value tuple composed of:
card_name : string
The name of the card from the traditional set of playing cards.
suit : string
One of ('H', 'D', 'C', 'S') representing the four possible suits.
orig_loc : int
The card's | original location in the shoe at "construction" time
Attributes
----------
card_name : string
The "rank" of given card instance
suit : string
The "suit"
orig_loc : int
The integer representing the location in the original shoe, before any
shuffling
value : int
The integer value (from 1 - 11) of a card's worth in the game of
Blackjack.
"""
def __init__(self, input_card):
self.card_name = input_card[0]
self.suit = input_card[1]
self.orig_loc = input_card[2]
self.value = self.assign_value()
def assign_value(self):
val_dict = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
'9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10, 'A': 11}
return val_dict[self.card_name]
def flip_ace(self):
""" For imposing a new vaule if the status of the game requires it. """
if self.card_name != 'A':
return "Not an ace!"
elif self.value == 1:
self.value = 11
else:
self.value = 1
if __name__ == '__main__':
main()
|
rnestler/LibrePCB | tests/funq/libraryeditor/test_copy_package_category.py | Python | gpl-3.0 | 2,109 | 0.003319 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test copying a package category in the library editor
"""
def test(library_editor, helpers):
"""
Copy package category with "New Library Element" wizard
"""
le = library_editor
# Open "New Library Element" wizard
le.action('libraryEditorActionNewElement').trigger(blocking=False)
# Choose "Copy existing element"
le.widget('libraryEditorNewElementWizardChooseTypeCopyRadioButton').set_property('checked', True)
# Choose type of element
le.widget('libraryEditorNewElementWizardChooseTypePkgCatButton').click()
# Choose category
category_tree = le.widget('libraryEditorNewElementWizardCopyFromCategoriesTree')
helpers.wait_for_model_items_count(category_tree, 1)
category | = category_tree.model().items( | ).items[0]
category_tree.select_item(category)
le.widget('libraryEditorNewElementWizardNextButton').click()
# Check metadata
widget_properties = {
('NameEdit', 'text'): 'C-SMT',
('DescriptionEdit', 'plainText'): '',
('KeywordsEdit', 'text'): '',
('VersionEdit', 'text'): '0.1',
}
for (widget, property), value in widget_properties.items():
props = le.widget('libraryEditorNewElementWizardMetadata' + widget).properties()
assert props[property] == value
# Finish
dialog = le.widget('libraryEditorNewElementWizard')
le.widget('libraryEditorNewElementWizardFinishButton').click()
helpers.wait_until_widget_hidden(dialog)
# Check if a new tab is opened (indicates that the element was created)
tab_props = le.widget('libraryEditorStackedWidget').properties()
assert tab_props['count'] == 2
assert tab_props['currentIndex'] == 1
# Check metadata
assert le.widget('libraryEditorPkgCatNameEdit').properties()['text'] == 'C-SMT'
assert le.widget('libraryEditorPkgCatDescriptionEdit').properties()['plainText'] == ''
assert le.widget('libraryEditorPkgCatKeywordsEdit').properties()['text'] == ''
assert le.widget('libraryEditorPkgCatVersionEdit').properties()['text'] == '0.1'
|
smulikHakipod/USB-Emulation | hid.py | Python | bsd-2-clause | 4,982 | 0.005821 | import random
import datetime
from USBIP import BaseStucture, USBDevice, InterfaceDescriptor, DeviceConfigurations, EndPoint, USBContainer
# Emulating USB mouse
# HID Configuration
class HIDClass(BaseStucture):
_fields_ = [
('bLength', 'B', 9),
('bDescriptorType', 'B', 0x21), # HID
('bcdHID', 'H'),
('bCountryCode', 'B'),
('bNumDescriptors', 'B'),
('bDescriptprType2', 'B'),
('wDescriptionLength', 'H'),
]
hid_class = HIDClass(bcdHID=0x0100, # Mouse
bCountryCode=0x0,
bNumDescriptors=0x1,
bDescriptprType2=0x22, # Report
wDescriptionLength=0x3400) # Little endian
interface_d = InterfaceDescriptor(bAlternateSetting=0,
bNumEndpoints=1,
bInterfaceClass=3, # class HID
bInterfaceSubClass=1,
bInterfaceProtocol=2,
iInterface=0)
end_point = EndPoint(bEndpointAddress=0x81,
bmAttributes=0x3,
wMaxPacketSize=8000, # Little endian
bInterval=0xFF) # interval to report
configuration = DeviceConfigurations(wTotalLength=0x2200,
bNumInterfaces=0x1,
bConfigurationValue=0x1,
iConfiguration=0x0, # No string
bmAttributes=0x80, # valid self powered
bMaxPower=50) # 100 mah current
interface_d.descriptions = [hid_class] # Supports only one description
interface_d.endpoints = [end_point] # Supports only one endpoint
configuration.interfaces = [interface_d] # Supports only one interface
class USBHID(USBDevice):
vendorID = 0x0627
productID = 0x0
bcdDevice = 0x0
bcdUSB = 0x0
bNumConfigurations = 0x1
bNumInterfaces = 0x1
bConfigurationValue = 0x1
configurations = []
bDeviceClass = 0x0
bDeviceSubClass = 0x0
bDeviceProtocol = 0x0
configurations = [configuration] # Supports only one configuration
def __init__(self):
USBDevice.__init__(self)
self.start_time = datetime.datetime.now()
def generate_mouse_report(self):
arr = [0x05, 0x01, # Usage Page (Generic Desktop)
0x09, 0x02, # Usage (Mouse)
0xa1, 0x01, # Collection (Application)
0x09, 0x01, # Usage (Pointer)
0xa1, 0x00, # Collection (Physical)
0x05, 0x09, # Usage Page (Button)
0x19, 0x01, # Usage Minimum (1)
0x29, 0x03, # Usage Maximum (3)
0x15, 0x00, # Logical Minimum (0)
0x25, 0x01, # Logical Maximum (1)
0x95, 0x03, # Report Count (3)
0x75, 0x01, # Report Size (1)
0x81, 0x02, # Input (Data, Variable, Absolute)
0x95, 0x01, # Report Count (1)
0x75, 0x05, # Report Size (5)
0x81, 0x01, # Input (Constant)
0x05, 0x01, # Usage Page (Generic Desktop)
0x09, 0x30, # Usage (X)
0x09, 0x31, # Usage (Y)
| 0x09, 0x38, # | Usage (Wheel)
0x15, 0x81, # Logical Minimum (-0x7f)
0x25, 0x7f, # Logical Maximum (0x7f)
0x75, 0x08, # Report Size (8)
0x95, 0x03, # Report Count (3)
0x81, 0x06, # Input (Data, Variable, Relative)
0xc0, # End Collection
0xc0] # End Collection
return_val = ''
for val in arr:
return_val+=chr(val)
return return_val
def handle_data(self, usb_req):
# Sending random mouse data
# Send data only for 5 seconds
if (datetime.datetime.now() - self.start_time).seconds < 5:
return_val = chr(0x0) + chr(random.randint(1, 10)) + chr(random.randint(1, 10)) + chr(random.randint(1, 10))
self.send_usb_req(usb_req, return_val)
def handle_unknown_control(self, control_req, usb_req):
if control_req.bmRequestType == 0x81:
if control_req.bRequest == 0x6: # Get Descriptor
if control_req.wValue == 0x22: # send initial report
print 'send initial report'
self.send_usb_req(usb_req, self.generate_mouse_report())
if control_req.bmRequestType == 0x21: # Host Request
if control_req.bRequest == 0x0a: # set idle
print 'Idle'
# Idle
pass
usb_Dev = USBHID()
usb_container = USBContainer()
usb_container.add_usb_device(usb_Dev) # Supports only one device!
usb_container.run()
# Run in cmd: usbip.exe -a 127.0.0.1 "1-1" |
GccX11/machine-learning | hmm.py | Python | mit | 3,184 | 0.047739 | #author Matt Jacobsen
'''
This program will learn and predict words and sentences using a Hierarchical Hidden Markov Model (HHMM).
Implement a Baum-Welch algorithm (like EM?) to learn parameters
Implement a Viterbi algorithm to learn structure.
Implement a forward-backward algorithm (like BP) to do inference over the evidence.
'''
'''
can do things like adjust sutff to be more like stuff
#probabilities for a single word
#states --> s t u f f
#emisions --> s t u f
'''
import sys, pprint as pp
class HMM(object):
numstates = 2
#prior probabilities
pprob = [0.5, 0.5]
#transition probabilities
aprob = [[0.8, 0.2],
[0.2, 0.8]]
#emission probabilities
bprob = [[0.6, 0.4],
[0.4, 0.6]]
bmap = {
'l': 0,
'r': 1
}
def __init__(self):
pass
#compute forward probabilities
def forward(self, O):
pi = self.pprob
a = self.aprob
b = self.bprob
bmap = self.bmap
#will be used to store alpha_t+1
#initialization
alpha = [[1.0]*len(O) for i in range(self.numstates)]
for t in range(0, len(O)):
for i in range(0, self.numstates):
alpha[i][t] = pi[i] * b[i][bmap[O[t]]]
#recursion
for t in range(1, len(O)):
for j in range(0, self.numstates):
sum_i = 0.0
for i in range(0, self.numstates):
sum_i += alpha[i][t-1] * a[i][j]
alpha[j][t] = sum_i * b[j][bmap[O[t]]]
#normalize alpha to avoid underflow
for t in range(0, len(O)-1):
for n in range(0,len(alpha)):
alpha[n][t] = alpha[n][t] / sum(alpha[n])
return alpha
#compute backward probabilities
def backward(self, O):
pi = self.pprob
a = self.aprob
b = self.bprob
bmap = self.bmap
#initialization
| beta = [[1.0]*len(O) for i in range(self.numstates)]
#r | ecursion
for t in range(len(O)-2, -1, -1):
for i in range(self.numstates-1, -1, -1):
sum_i = 0.0
for j in range(self.numstates-1, -1, -1):
sum_i += a[i][j] * beta[i][t+1]
beta[i][t] = sum_i * b[i][bmap[O[t]]]
#normalize alpha to avoid underflow
for t in range(0, len(O)-1):
for n in range(0,len(beta)):
beta[n][t] = beta[n][t] / sum(beta[n])
return beta
#compute smoother posterior probabilities
def posterior(self, O):
alpha = self.forward(O)
beta = self.backward(O)
p = [0.0]*self.numstates
#dot product between alpha and beta
for i in range(0, len(p)):
p[i] = [0.0] * len(alpha[i])
for j in range(0, len(alpha[i])):
p[i][j] += alpha[i][j] * beta[i][j]
#normalize to be a distribution
sum_p_i = [0.0]*len(p[0])
for i in range(0,len(p)):
for j in range(0, len(p[i])):
sum_p_i[j] += p[i][j]
for i in range(0,len(p)):
for j in range(0, len(p[i])):
p[i][j] = p[i][j] / sum_p_i[j]
return p
#learn HMM parameters (emission and transition probabilities) from a set of observations
def baumwelch():
pass
#learn HMM structure from a set of observations
def viterbi():
pass
if __name__ == "__main__":
if len(sys.argv) < 2:
print 'missing test input'
sys.exit()
hmm = HMM()
'''
print 'forward'
pp.pprint(hmm.forward(sys.argv[1]))
print 'backward'
pp.pprint(hmm.backward(sys.argv[1]))
'''
print 'posterior'
pp.pprint(hmm.posterior(sys.argv[1]))
|
mohittahiliani/tcp-eval-suite-ns3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | Python | gpl-2.0 | 7,439 | 0.008469 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid sid | e number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = | ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
cpcloud/ibis | ibis/backends/clickhouse/client.py | Python | apache-2.0 | 5,153 | 0.000194 | import re
from typing import Any
import numpy as np
import pandas as pd
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
base_typename_re = re.compile(r"(\w+)")
_clickhouse_dtypes = {
'Null': dt.Null,
'Nothing': dt.Null,
'UInt8': dt.UInt8,
'UInt16': dt.UInt16,
'UInt32': dt.UInt32,
'UInt64': dt.UInt64,
'Int8': dt.Int8,
'Int16': dt.Int16,
'Int32': dt.Int32,
'Int64': dt.Int64,
'Float32': dt.Float32,
'Float64': dt.Float64,
'String': dt.String,
'FixedString': dt.String,
'Date': dt.Date,
'DateTime': dt.Timestamp,
'DateTime64': dt.Timestamp,
'Array': dt.Array,
}
_ibis_dtypes = {v: k for k, v in _clickhouse_dtypes.items()}
_ibis_dtypes[dt.String] = 'String'
_ibis_dtypes[dt.Timestamp] = 'DateTime'
class ClickhouseDataType:
__slots__ = 'typename', 'base_typename', 'nullable'
def __init__(self, typename, nullable=False):
| m = base_typename_re.match(typename)
self.base_typename = m.groups()[0]
if self.base_typename not in _clickhouse_dtypes:
raise com.UnsupportedBackendType(typename)
self.typename = self.base_typename
self.nullable = nullable
if self.base_typename == 'Array':
self.typename = typename
def __str__(self):
if self.nullable:
return f'Nullable({se | lf.typename})'
else:
return self.typename
def __repr__(self):
return f'<Clickhouse {str(self)}>'
@classmethod
def parse(cls, spec):
# TODO(kszucs): spare parsing, depends on clickhouse-driver#22
if spec.startswith('Nullable'):
return cls(spec[9:-1], nullable=True)
else:
return cls(spec)
def to_ibis(self):
if self.base_typename != 'Array':
return _clickhouse_dtypes[self.typename](nullable=self.nullable)
sub_type = ClickhouseDataType(
self.get_subname(self.typename)
).to_ibis()
return dt.Array(value_type=sub_type)
@staticmethod
def get_subname(name: str) -> str:
lbracket_pos = name.find('(')
rbracket_pos = name.rfind(')')
if lbracket_pos == -1 or rbracket_pos == -1:
return ''
subname = name[lbracket_pos + 1 : rbracket_pos]
return subname
@staticmethod
def get_typename_from_ibis_dtype(dtype):
if not isinstance(dtype, dt.Array):
return _ibis_dtypes[type(dtype)]
return 'Array({})'.format(
ClickhouseDataType.get_typename_from_ibis_dtype(dtype.value_type)
)
@classmethod
def from_ibis(cls, dtype, nullable=None):
typename = ClickhouseDataType.get_typename_from_ibis_dtype(dtype)
if nullable is None:
nullable = dtype.nullable
return cls(typename, nullable=nullable)
@dt.dtype.register(ClickhouseDataType)
def clickhouse_to_ibis_dtype(clickhouse_dtype):
return clickhouse_dtype.to_ibis()
class ClickhouseTable(ir.TableExpr):
    """References a physical table in Clickhouse"""
    @property
    def _qualified_name(self):
        # The (possibly database-qualified) table name stored as the first
        # argument of the underlying table operation.
        return self.op().args[0]
    @property
    def _unqualified_name(self):
        # Table name with the database qualifier stripped.
        return self._match_name()[1]
    @property
    def _client(self):
        # Backend client is stored as the third op argument.
        return self.op().args[2]
    def _match_name(self):
        # Split the qualified name into (database, table); raises IbisError
        # when the name does not match the expected db.table pattern.
        m = fully_qualified_re.match(self._qualified_name)
        if not m:
            raise com.IbisError(
                'Cannot determine database name from {}'.format(
                    self._qualified_name
                )
            )
        # Exactly one of the quoted/unquoted groups matched.
        db, quoted, unquoted = m.groups()
        return db, quoted or unquoted
    @property
    def _database(self):
        return self._match_name()[0]
    def invalidate_metadata(self):
        # Drop the client-side cached metadata for this table.
        self._client.invalidate_metadata(self._qualified_name)
    def metadata(self) -> Any:
        """Return the parsed results of a `DESCRIBE FORMATTED` statement.
        Returns
        -------
        TableMetadata
            Table metadata
        """
        return self._client.describe_formatted(self._qualified_name)
    describe_formatted = metadata
    @property
    def name(self):
        return self.op().name
    def insert(self, obj, **kwargs):
        # Insert a pandas DataFrame whose columns are a subset of the table
        # schema; extra kwargs pass through to clickhouse-driver execute().
        from .identifiers import quote_identifier
        schema = self.schema()
        assert isinstance(obj, pd.DataFrame)
        assert set(schema.names) >= set(obj.columns)
        columns = ', '.join(map(quote_identifier, obj.columns))
        query = 'INSERT INTO {table} ({columns}) VALUES'.format(
            table=self._qualified_name, columns=columns
        )
        # convert data columns with datetime64 pandas dtype to native date
        # because clickhouse-driver 0.0.10 does arithmetic operations on it
        obj = obj.copy()
        for col in obj.select_dtypes(include=[np.datetime64]):
            if isinstance(schema[col], dt.Date):
                obj[col] = obj[col].dt.date
        data = obj.to_dict('records')
        return self._client.con.execute(query, data, **kwargs)
|
jtsmith1287/gurpscg | main.py | Python | apache-2.0 | 5,971 | 0.011221 | import webapp2
from google.appengine.ext import db
import logging
import charbuilder
import traits
import traceback
import random
import string
# Random 25-character identifier for this server instance, generated once at
# import time and written to the log on each save.
# NOTE(review): uses the non-cryptographic `random` module and Python 2
# `xrange`; fine for diagnostics, not for secrets.
instance_key = "".join(
    (random.choice(string.ascii_uppercase + string.digits) for i in xrange(25)))
def getFile(_file):
    """Read a text file and return its contents with all newlines removed."""
    with open(_file, "r") as handle:
        contents = handle.read()
    return contents.replace("\n", "")
# Page templates loaded once at import time: newline-stripped HTML keyed by
# page name, filled in via %-substitution by the handlers below.
HTML = {"main_page": getFile("main_page.html"),
        "creation_page": getFile("creation_page.html")}
def mergeDicts(master_dict):
    """Flatten a dict of dicts into a single dict.

    Sub-dicts stored under falsy keys are skipped; when sub-dicts share
    keys, entries from later keys win.
    """
    merged = {}
    for key in master_dict:
        if not key:
            continue
        merged.update(master_dict[key])
    return merged
class Parameters(db.Model):
    """Datastore record holding one saved set of generation parameters."""
    # repr() of the form-data dict; eval'd back into a dict on load.
    parameters = db.StringProperty()
    # Auto-updated timestamp; used to fetch the most recent record first.
    order = db.DateTimeProperty(auto_now=True)
    # Identifier of the server instance (see module-level instance_key).
    instance_key = db.StringProperty()
class MainPage(webapp2.RequestHandler):
    """Renders the character-generation form and builds characters on POST."""
    # Template placeholders substituted into main_page.html.
    fields = {"cat_checkboxes": "",
              "spell_checkboxes": ""}
    # Number of checkboxes rendered per table row.
    _COLUMNS = 6
    def get(self):
        """Render the main form page."""
        # tells the page to load as html instead of plain text
        self.response.headers['Content-Type'] = 'text/html'
        try:
            self.configureCatBoxes()
            self.configureSpellCollegeCheckboxes()
            self.response.write(HTML["main_page"] % self.fields)
        except Exception:
            # if there was an error, write the traceback instead of the page
            self.response.write(traceback.format_exc())
    def _buildCheckboxTable(self, input_name, entries):
        """Return an HTML table of checkboxes.

        Args:
          input_name: value for the checkboxes' name attribute.
          entries: sequence of (value, label) pairs, one per checkbox.

        Rows hold up to _COLUMNS cells and are now properly closed with
        </tr> (the previous implementation emitted unterminated rows).
        """
        cells = []
        for value, label in entries:
            box = '<input type="checkbox" name="%s" value="%s"> %s' % (
                input_name, value, label)
            cells.append("<td> %s </td>" % box)
        rows = []
        for start in range(0, len(cells), self._COLUMNS):
            rows.append(
                "<tr>%s</tr>" % "".join(cells[start:start + self._COLUMNS]))
        return "<table>%s</table>" % "".join(rows)
    def configureSpellCollegeCheckboxes(self):
        """Build the spell-college checkbox table into self.fields."""
        spell_colleges = {"MC": "Mind Control",
                          "Meta": "Meta",
                          "L/D": "Light & Darkness",
                          "Move.": "Movement",
                          "BC": "Body Control",
                          "Fire": "Fire",
                          "P/W": "Protection & Warning",
                          "Air": "Air",
                          "Water": "Water",
                          "Ench.": "Enchantment",
                          "C/E": "Communication & Emptahy",
                          "Healing": "Healing",
                          "Know.": "Knowledge",
                          "Earth": "Earth",
                          "Gate": "Gate",
                          "Necro.": "Necromantic"}
        entries = [(abbr, spell_colleges[abbr])
                   for abbr in sorted(spell_colleges.keys())]
        self.fields["spell_checkboxes"] = self._buildCheckboxTable(
            "spell_colleges", entries)
    def configureCatBoxes(self):
        """Build the skill-category checkbox tables into self.fields.

        Psionic power categories are split into their own table under a
        "Psionic Powers" heading, preserving sorted order in both tables.
        """
        psionic_powers = ["Antipsi", "Esp", "Psychic Healing",
                          "Psychokinesis", "Teleportation", "Telepathy"]
        regular = []
        powers = []
        for cat in sorted(traits.traits.SKILL_CATEGORIES):
            (powers if cat in psionic_powers else regular).append((cat, cat))
        html = self._buildCheckboxTable("cat_type", regular)
        html += "<br><b>Psionic Powers</b><br>"
        html += self._buildCheckboxTable("cat_type", powers)
        self.fields["cat_checkboxes"] = html
    def post(self):
        """Build a character from the submitted (or last saved) parameters."""
        self.response.headers['Content-Type'] = 'text/html'
        try:
            try:
                fd = self.getRequests()
                logging.info(fd)
                self.saveParameters(fd)
            except ValueError:
                # Numeric form fields were missing or invalid: fall back to
                # the most recently saved parameter set.
                fd = self.getParameters()
            new_character = charbuilder.CharacterBuilder(fd)
            # Write the generated character to the page after formatting
            nc = mergeDicts(new_character.__dict__)
            self.response.write(HTML["creation_page"] % (nc))
        except Exception:  # was a bare except:; keep SystemExit etc. alive
            self.response.write(traceback.format_exc())
    def getRequests(self):
        """Return all form data from the current request.

        Raises ValueError when the numeric fields are absent or non-numeric.
        """
        return {"points": int(self.request.get("points")),
                "tl": int(self.request.get("Tech Level")),
                "adv_types": self.request.get_all("adv_type"),
                "disadv_types": self.request.get_all("disadv_type"),
                "d_limit": self.request.get("d_limit"),
                "categories": self.request.get_all("cat_type"),
                "pa": self.request.get("pa"),
                "sa": self.request.get("sa"),
                "ta": self.request.get("ta"),
                "spell_colleges": self.request.get_all("spell_colleges")
                }
    def saveParameters(self, data):
        """Persist the parameter dict to the datastore."""
        # convert python dict syntax to a string
        string_data = repr(data)
        new_entity = Parameters()
        new_entity.parameters = string_data
        # save data
        new_entity.put()
        logging.info(instance_key)
    def getParameters(self):
        """Return the most recently saved parameter dict, or None."""
        all_data = Parameters.all()
        all_data.order("-order")
        try:
            # NOTE(review): eval of the stored repr() assumes the datastore
            # contents are trusted; consider json serialization instead.
            fd = eval(all_data.fetch(limit=1)[0].parameters)
        except IndexError:
            fd = None
        return fd
# URL routing table and the WSGI application served by App Engine;
# debug=True echoes tracebacks in responses.
handlers = [("/", MainPage)]
application = webapp2.WSGIApplication(handlers, debug=True)
|
nemonik/Intellect | intellect/examples/testing/Test.py | Python | bsd-3-clause | 2,750 | 0.002182 | """
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
test
Description: Test module
Initial Version: Feb 23, 2011
@author: Michael Joseph Walsh
"""
def helloworld():
    """
    Returns "hello world" and prints "returning 'hello world'" to
    sys.stdout.
    """
    # Function-call form parses (and behaves identically for a single
    # string argument) under both Python 2 and Python 3; the old
    # statement form was Python 2 only.
    print("returning 'hello world'")
    return "hello world"
def greaterThanTen(n):
    """
    Return True when *n* is strictly greater than 10.
    """
    return 10 < n
class MyClass(object):
    """Holds a mutable namespace dict exposed through the `globals` property."""
    def __init__(self):
        self._globals = {}
    @property
    def globals(self):
        """The namespace dict used as exec() globals below."""
        return self._globals
    @globals.setter
    def globals(self, value):
        self._globals = value
# Demonstration of exec() with explicit globals/locals namespaces.
a = MyClass()
# Renamed from `locals` to stop shadowing the builtin of the same name.
local_ns = {}
exec("a = 1", a.globals, local_ns)
print("globals = {0}".format([g for g in a.globals if not g.startswith("__")]))
print("locals = {0}".format(local_ns))
# `a` was bound in the locals dict by the first exec, so += reads it there.
exec("a += 1", a.globals, local_ns)
print("globals = {0}".format([g for g in a.globals if not g.startswith("__")]))
print("locals = {0}".format(local_ns))
a.globals["b"] = 5
print("globals = {0}".format([g for g in a.globals if not g.startswith("__")]))
print("locals = {0}".format(local_ns))
# `global b` makes the increment hit the globals dict, not locals.
exec("global b;b += 1", a.globals, local_ns)
dc3-plaso/dfvfs | tests/vfs/lvm_file_system.py | Python | apache-2.0 | 6,113 | 0.007852 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for a file system implementation using pyvslvm."""
import unittest
from dfvfs.path import lvm_path_spec
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import lvm_file_system
from tests import test_lib as shared_test_lib
@shared_test_lib.skipUnlessHasTestFile([u'lvmtest.qcow2'])
class LVMFileSystemTest(shared_test_lib.BaseTestCase):
  """The unit test for the LVM file system object.

  The test image (see `vslvminfo fuse/qcow1`) contains volume group
  vg_test with one physical volume and two logical volumes:
  lv_test1 (8 MiB) and lv_test2 (4 MiB). Hence volume index 1 and the
  locations /lvm2 resolve to the second volume, while index 9 and the
  locations /lvm0 and /lvm9 do not exist.
  """

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_file = self._GetTestFilePath([u'lvmtest.qcow2'])
    path_spec = os_path_spec.OSPathSpec(location=test_file)
    self._qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
    self._lvm_path_spec = lvm_path_spec.LVMPathSpec(
        location=u'/', parent=self._qcow_path_spec)

  def _MakePathSpec(self, **kwargs):
    """Builds an LVM path spec that shares the QCOW parent."""
    return lvm_path_spec.LVMPathSpec(parent=self._qcow_path_spec, **kwargs)

  def _OpenFileSystem(self):
    """Opens and returns the LVM file system under test."""
    file_system = lvm_file_system.LVMFileSystem(self._resolver_context)
    self.assertIsNotNone(file_system)
    file_system.Open(self._lvm_path_spec)
    return file_system

  def testOpenAndClose(self):
    """Test the open and close functionality."""
    file_system = self._OpenFileSystem()
    file_system.Close()

  def testFileEntryExistsByPathSpec(self):
    """Test the file entry exists by path specification functionality."""
    file_system = self._OpenFileSystem()
    for kwargs in (
        {'location': u'/'},
        {'volume_index': 1},
        {'location': u'/lvm2'}):
      self.assertTrue(file_system.FileEntryExistsByPathSpec(
          self._MakePathSpec(**kwargs)))
    for kwargs in (
        {'volume_index': 9},
        {'location': u'/lvm0'},
        {'location': u'/lvm9'}):
      self.assertFalse(file_system.FileEntryExistsByPathSpec(
          self._MakePathSpec(**kwargs)))
    file_system.Close()

  def testGetFileEntryByPathSpec(self):
    """Tests the GetFileEntryByPathSpec function."""
    file_system = self._OpenFileSystem()
    for kwargs, expected_name in (
        ({'location': u'/'}, u''),
        ({'volume_index': 1}, u'lvm2'),
        ({'location': u'/lvm2'}, u'lvm2')):
      file_entry = file_system.GetFileEntryByPathSpec(
          self._MakePathSpec(**kwargs))
      self.assertIsNotNone(file_entry)
      self.assertEqual(file_entry.name, expected_name)
    for kwargs in (
        {'volume_index': 9},
        {'location': u'/lvm0'},
        {'location': u'/lvm9'}):
      self.assertIsNone(file_system.GetFileEntryByPathSpec(
          self._MakePathSpec(**kwargs)))
    file_system.Close()

  def testGetRootFileEntry(self):
    """Test the get root file entry functionality."""
    file_system = self._OpenFileSystem()
    file_entry = file_system.GetRootFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.name, u'')
    file_system.Close()


if __name__ == '__main__':
  unittest.main()
|
andrewyang96/GutenbergsHorse | mostpopular.py | Python | mit | 7,833 | 0.002043 | MOSTPOPULAR = [
1342, # Pride and Prejudice
11, # Alice's Adventures in Wonderland
76, # Adventures of Huckleberry Finn
74, # The Adventures of Tom Sawyer
1952, # The Yellow Wallpaper
4300, # Ulysses
27827, # The Kama Sutra of Vatsyayana
174, # The Picture of Dorian Gray
98, # A Tale of Two Cities
5200, # Metamorphosis
1661, # The Adventures of Sherlock Holmes
844, # The Importance of Being Earnest: A Trivial Comedy for Serious People
2701, # Moby Dick
1232, # The Prince
2591, # Grimms' Fairy Tales
1322, # Leaves of Grass
158, # Emma
30254, # The Romance of Lust
135, # Les Miserables
345, # Dracula
1400, # Great Expectations
308, # Three Men in a Boat
84, # Frankenstein
16328, # Beowulf
22657, # Steam, Its Generation and Use
1080, # A Modest Proposal
2814, # Dubliners
8800, # The Divine Comedy by Dante
6130, # The Iliad
16, # Peter Pan
2542, # A Doll's House
829, # Gulliver's Travels into Several Remote Nations of the World
2500, # Siddhartha
161, # Sense and Sensibility
1260, # Jane Eyre: An Autobiography
1184, # The Count of Monte Cristo
    23, # Narrative of the Life of Frederick Douglass, an American Slave
120, # Treasure Island
768, # Wuthering Heights
10609, # English Literature
    42, # The Strange Case of Dr. Jekyll and Mr. Hyde
2852, # The Hound of the Baskervilles
27, # Far from the Madding Crowd
#4233, # Novo dicionario da lingua portuguesa
236, # The Jungle Book
2600, # War and Peace
2554, # Crime and Punishment
219, # Heart of Darkness
1497, # The Republic
100, # The Complete Works of William Shakespeare
1934, # Songs of Innocence, and Songs of Experience
22381, # Myths and Legends of Ancient Greece and Rome
244, # A Study in Scarlet
4363, # Beyond Good and Evil
28520, # Forbidden Fruit
20203, # Autobiography of Benjamin Franklin
521, # The Life and Adventures of Robinson Crusoe
#31824, # Josefine Mutzenbacher
39293, # The Gentleman's Book of Etiquette and Manual of Politeness
730, # Oliver Twist
28054, # The Brothers Karamazov
2397, # The Story of My Life (Helen Keller)
3825, # Pygmalion
#5740, # Tractatus Logico-Philosophicus
863, # The Mysterious Affair at Styles
103, # Around the World in Eighty Days
41, # The Legend of Sleepy Hollow
3090, # Complete Original Short Stories of Guy De Maupassant
10, # The King James Version of the Bible
205, # Walden, and on the Duty of Civil Disobedience
30360, # My Secret Life, Volumes I to III
45, # Anne of Green Gables
46, # A Christmas Carol in Prose
35, # The Time Machine
1399, # Anna Karenina
30601, # How to Analyze People On Sight
1155, # The Secret Adversary
1837, # The Prince and the Pauper
12, # Through the Looking-Glass
8789, # The Divine Comedy by Dante
852, # Democracy and Education: An Introduction to the Philosophy of Education
160, # The Awakening, and Selected Short Stories
105, # Persuasion
3600, # Essays of Michel de Montaigne
766, # David Copperfield
20776, # Encyclopedia of Needlework
55, # The Wonderful Wizard of Oz
21700, # Don Juan
132, # The Art of War
20, # Paradise Lost
2147, # The Works of Edgar Allan Poe - Volume 1
25305, # Memoirs of Fanny Hill
#33283, # Calculus Made Easy
25717, # The History of the Decline and Fall of the Roman Empire
4217, # A Portrait of the Artist as a Young Man
36, # War of the Worlds
583, # The Woman in White
1998, # Thus Spake Zarathustra
2148, # The Works of Edgar Allan Poe - Volume 2
245, # Life on the Mississippi
3300, # An Inquiry into the Nature and Causes of the Wealth of Nations
62, # A Princess of Mars
108, # The Return on Sherlock Holmes
2781, # Just So Stories
996, # Don Quixote
19942, # Candide
215, # The Call of the Wild
408, # The Souls of Black Folk
3207, # Leviathan
10007, # Carmilla
141, # Mansfield Park
35123, # The Ladies' Book of Etiquette, a Manual of Politeness
3296, # The Confessions of St. Augustine
2413, # Madame Bovary
514, # Little Women
1257, # The Three Musketeers
2097, # The Sign of the Four
805, # This Side of Paradise
45631, # Twelve Years a Slave
851, # Narrative of the Captivity and Restoration of Mrs. Mary Rowlandson
145, # Middlemarch
121, # Northanger Abbey
8799, # The Divine Comedy by Dante
1951, # The Coming Race
375, # An Occurrence at Owl Creek Bridge
40311, # China and Pottery Marks
#1112, # The Tragedy of Romeo and Juliet
14591, # Faust
#18251, # Latin for Beginners
2680, # Meditations
#2000, # Don Quijote
86, # A Connecticut Yankee in King Arthur's Court
34901, # On Liberty
33, # The Scarlet Letter
3176, # The Innocents Abroad
140, # The Jungle
37134, # The Elements of Style
8492, # The King in Yellow
15399, # The Interesting Narrative of the Life of Olaudah Equiano, or Gustavus Vassa, the African
#786, # Hard Times (Download URI not supported)
147, # Common Sense
1727, # The Odyssey
15250, # Myths and Legends of China
110, # Tess of the d'Urbervilles: A Pure Woman
14838, # The Tale of Peter Rabbit
5827, # The Problems of Philosophy
526, # Heart of Darkness
20480, # Palmistry for All
7178, # Swann's Way
#28233, # Philosophiae Naturalis Principia Mathematica
4705, # A Treatise of Human Nature
1228, # On the Origin of Species
22577, # Practical Grammar and Composition
7849, # The Trial
3177, # Roughing It
209, # The Turn of the Screw
834, # The Memoirs of Sherlock Holmes
1597, # Andersen's Fairy Tales
4280, # The Critique of Pure Reason
31547, # Youth
4517, # Ethan Frome
30142, # Little Brother
21279, # Kurt Vonnegut
1251, # Le Morte d'Arthur: Volume 1
5000, # The Notebooks of Leonardo Da Vinci
19033, # Alice's Adventures in Wonderland
113, # The Secret Garden
972, # The Devil's Dictionary
885, # An Ideal Husband
289, # The Wind in the Willows
779, # The Tragical History of Doctor Faustus
421, # Kidnapped
1023, # Bleak House
139, # The Lost World
7370, # Second Treatise of Government
164, # Twenty Thousand Leagues Under the Sea
#1524, # Hamlet Prince of Denmark
#2264, # Macbeth
2848, # Antiquities of the Jews
203, # Uncle Tom's Cabin
107, # Far from the Madding Crowd
19068, # Household Stories by the Brothers Grimm
14264, # The Practice and Science of Drawing
155, # The Moonstone
12299, # The Mechanical Properties of Wood
2638, # The Idiot
20583, # The Tribes and Castes of the Central Provinces of India
2610, # Notre-Dame de Paris
41617, # A Complete Guide to Heraldry
34206, # The Thousand and One Nights, Vol. I
1250, # Anthem
600, # Notes from the Underground
49104, # The History of the Crusades, Vol. 1 of 3
2130, # Utopia
2428, # An Essay on Man; Moral Essays and Satires
3800, # Ethica
#10625, # A Concise Dictionary of Middle English from A.D. 1150 to 1580
541, # The Age of Innocence
49057, # Tales of King Arthur and the Round Table
]
|
qutip/qutip | qutip/tests/test_random.py | Python | bsd-3-clause | 5,522 | 0.000543 | import numpy as np
from qutip import (
rand_ket, rand_dm, rand_herm, rand_unitary, rand_ket_haar, rand_dm_hs,
rand_super, rand_unitary_haar, rand_dm_ginibre, rand_super_bcsz, qeye,
rand_stochastic,
)
import pytest
@pytest.mark.repeat(5)
@pytest.mark.parametrize('func', [rand_unitary, rand_unitary_haar])
def test_rand_unitary(func):
    """
    Random Qobjs: Tests that unitaries are actually unitary.
    """
    random_qobj = func(5)
    I = qeye(5)
    # U * U^dag must equal the identity for a unitary.
    assert random_qobj * random_qobj.dag() == I
@pytest.mark.repeat(5)
@pytest.mark.parametrize('density', [0.2, 0.8], ids=["sparse", "dense"])
@pytest.mark.parametrize('pos_def', [True, False])
def test_rand_herm(density, pos_def):
    """
    Random Qobjs: Hermitian matrix
    """
    random_qobj = rand_herm(5, density=density, pos_def=pos_def)
    if pos_def:
        # BUG FIX: the tolerance was written as -1e14 which made the
        # positive-definiteness check trivially true for any spectrum;
        # -1e-14 is the intended numerical tolerance around zero.
        assert all(random_qobj.eigenenergies() > -1e-14)
    assert random_qobj.isherm
@pytest.mark.repeat(5)
def test_rand_herm_Eigs():
    """
    Random Qobjs: Hermitian matrix - Eigs given
    """
    # Random normalized, sorted spectrum the generator must reproduce.
    eigs = np.random.random(5)
    eigs /= np.sum(eigs)
    eigs.sort()
    random_qobj = rand_herm(eigs)
    np.testing.assert_allclose(random_qobj.eigenenergies(), eigs)
    # verify hermitian
    assert random_qobj.isherm
@pytest.mark.repeat(5)
@pytest.mark.parametrize('func', [rand_dm, rand_dm_hs])
def test_rand_dm(func):
    """
    Random Qobjs: Density matrix
    """
    random_qobj = func(5)
    # A density matrix has unit trace.
    assert abs(random_qobj.tr() - 1.0) < 1e-14
    # verify all eigvals are >=0
    assert all(random_qobj.eigenenergies() >= -1e-14)
    # verify hermitian
    assert random_qobj.isherm
@pytest.mark.repeat(5)
def test_rand_dm_Eigs():
    """
    Random Qobjs: Density matrix - Eigs given
    """
    # Random normalized, sorted spectrum the generator must reproduce.
    spectrum = np.random.random(5)
    spectrum /= np.sum(spectrum)
    spectrum.sort()
    random_qobj = rand_dm(spectrum)
    # Unit trace, requested spectrum reproduced, Hermitian.
    assert abs(random_qobj.tr() - 1.0) < 1e-14
    np.testing.assert_allclose(random_qobj.eigenenergies(), spectrum)
    assert random_qobj.isherm
@pytest.mark.repeat(5)
def test_rand_dm_ginibre_rank():
    """
    Random Qobjs: Ginibre-random density ops have correct rank.
    """
    random_qobj = rand_dm_ginibre(5, rank=3)
    # Count eigenvalues that are nonzero up to numerical tolerance.
    rank = sum([abs(E) >= 1e-10 for E in random_qobj.eigenenergies()])
    assert rank == 3
@pytest.mark.repeat(5)
@pytest.mark.parametrize('kind', ["left", "right"])
def test_rand_stochastic(kind):
    """
    Random Qobjs: Test random stochastic
    """
    matrix = rand_stochastic(5, kind=kind).full()
    # Left-stochastic columns (axis 0) or right-stochastic rows (axis 1)
    # must each sum to one.
    sum_axis = 0 if kind == "left" else 1
    np.testing.assert_allclose(np.sum(matrix, axis=sum_axis), 1,
                               atol=1e-14)
@pytest.mark.repeat(5)
@pytest.mark.parametrize('func', [rand_ket, rand_ket_haar])
def test_rand_ket(func):
    """
    Random Qobjs: Test random ket type and norm.
    """
    random_qobj = func(5)
    assert random_qobj.type == 'ket'
    # Kets are generated normalized.
    assert abs(random_qobj.norm() - 1) < 1e-14
@pytest.mark.repeat(5)
def test_rand_super():
    """
    Random Qobjs: Super operator.
    """
    random_qobj = rand_super(5)
    assert random_qobj.issuper
@pytest.mark.repeat(5)
def test_rand_super_bcsz_cptp():
    """
    Random Qobjs: Tests that BCSZ-random superoperators are CPTP.
    """
    random_qobj = rand_super_bcsz(5)
    assert random_qobj.issuper
    assert random_qobj.iscptp
@pytest.mark.parametrize('func', [
    rand_unitary, rand_unitary_haar, rand_herm,
    rand_dm, rand_dm_hs, rand_dm_ginibre,
    rand_ket, rand_ket_haar,
    rand_super, rand_super_bcsz
])
def test_random_seeds(func):
    """
    Random Qobjs: Random number generator seed
    """
    seed = 12345
    U0 = func(5, seed=seed)
    U1 = func(5, seed=None)
    U2 = func(5, seed=seed)
    # Same seed must reproduce the same object; no seed must differ.
    assert U0 != U1
    assert U0 == U2
@pytest.mark.parametrize('func', [rand_ket, rand_ket_haar])
@pytest.mark.parametrize(('args', 'kwargs', 'dims'), [
    pytest.param((6,), {}, [[6], [1]], id="N"),
    pytest.param((), {'dims': [[2, 3], [1, 1]]}, [[2, 3], [1, 1]], id="dims"),
    pytest.param((6,), {'dims': [[2, 3], [1, 1]]}, [[2, 3], [1, 1]],
                 id="both"),
])
def test_rand_vector_dims(func, args, kwargs, dims):
    # The flat shape is the product of the tensor-factor dimensions.
    shape = np.prod(dims[0]), np.prod(dims[1])
    output = func(*args, **kwargs)
    assert output.shape == shape
    assert output.dims == dims
@pytest.mark.parametrize('func', [rand_ket, rand_ket_haar])
def test_rand_ket_raises_if_no_args(func):
    # Either a dimension N or explicit dims must be supplied.
    with pytest.raises(ValueError):
        func()
@pytest.mark.parametrize('func', [
    rand_unitary, rand_herm, rand_dm, rand_unitary_haar, rand_dm_ginibre,
    rand_dm_hs, rand_stochastic,
])
@pytest.mark.parametrize(('args', 'kwargs', 'dims'), [
    pytest.param((6,), {}, [[6], [6]], id="N"),
    pytest.param((6,), {'dims': [[2, 3], [2, 3]]}, [[2, 3], [2, 3]],
                 id="both"),
])
def test_rand_oper_dims(func, args, kwargs, dims):
    # Operators are square: both factors multiply out to N.
    shape = np.prod(dims[0]), np.prod(dims[1])
    output = func(*args, **kwargs)
    assert output.shape == shape
    assert output.dims == dims
# Superoperator dims for a 2x3 composite system, shared by the cases below.
_super_dims = [[[2, 3], [2, 3]], [[2, 3], [2, 3]]]
@pytest.mark.parametrize('func', [rand_super, rand_super_bcsz])
@pytest.mark.parametrize(('args', 'kwargs', 'dims'), [
    pytest.param((6,), {}, [[[6]]*2]*2, id="N"),
    pytest.param((6,), {'dims': _super_dims}, _super_dims,
                 id="both"),
])
def test_rand_super_dims(func, args, kwargs, dims):
    # Flat shape follows from the product of the nested dims.
    shape = np.prod(dims[0]), np.prod(dims[1])
    output = func(*args, **kwargs)
    assert output.shape == shape
    assert output.dims == dims
|
ricotabor/opendrop | opendrop/vendor/harvesters/__init__.py | Python | gpl-2.0 | 22 | 0 | __ve | rsion__ = '1.2.8' | |
heddle317/moto | tests/test_awslambda/test_lambda.py | Python | apache-2.0 | 14,853 | 0.000404 | from __future__ import unicode_literals
import base64
import botocore.client
import boto3
import hashlib
import io
import json
import zipfile
import sure # noqa
from freezegun import freeze_time
from moto import mock_lambda, mock_s3, mock_ec2, settings
def _process_lamda(pfunc):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
zip_file.writestr('lambda_function.zip', pfunc)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def get_test_zip_file1():
pfunc = """
def lambda_handler(event, context):
return event
"""
return _process_lamda(pfunc)
def get_test_zip_file2():
    """Return a zipped handler that inspects an EC2 volume via boto3.

    The EC2 endpoint baked into the handler source depends on whether the
    tests run against a live moto server or in-process mocks.
    """
    if settings.TEST_SERVER_MODE:
        endpoint = "localhost:5000"
    else:
        endpoint = "ec2.us-west-2.amazonaws.com"
    source = """
def lambda_handler(event, context):
    volume_id = event.get('volume_id')
    print('get volume details for %s' % volume_id)
    import boto3
    ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url="http://{base_url}")
    vol = ec2.Volume(volume_id)
    print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size))
    return event
""".format(base_url=endpoint)
    return _process_lamda(source)
@mock_lambda
def test_list_functions():
    # A fresh mocked account starts with no lambda functions.
    conn = boto3.client('lambda', 'us-west-2')
    result = conn.list_functions()
    result['Functions'].should.have.length_of(0)
@mock_lambda
def test_invoke_requestresponse_function():
    # Synchronous invocation must echo the payload in both the log tail
    # and the response payload.
    conn = boto3.client('lambda', 'us-west-2')
    conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'ZipFile': get_test_zip_file1(),
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    in_data = {'msg': 'So long and thanks for all the fish'}
    success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse',
                                 Payload=json.dumps(in_data))
    success_result["StatusCode"].should.equal(202)
    # LogResult is base64-encoded; the echo handler prints the event.
    base64.b64decode(success_result["LogResult"]).decode(
        'utf-8').should.equal(json.dumps(in_data))
    json.loads(success_result["Payload"].read().decode(
        'utf-8')).should.equal(in_data)
@mock_lambda
def test_invoke_event_function():
    # Async ('Event') invocation: unknown functions raise, known ones
    # return an empty payload.
    conn = boto3.client('lambda', 'us-west-2')
    conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'ZipFile': get_test_zip_file1(),
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.invoke.when.called_with(
        FunctionName='notAFunction',
        InvocationType='Event',
        Payload='{}'
    ).should.throw(botocore.client.ClientError)
    in_data = {'msg': 'So long and thanks for all the fish'}
    success_result = conn.invoke(
        FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data))
    success_result["StatusCode"].should.equal(202)
    json.loads(success_result['Payload'].read().decode(
        'utf-8')).should.equal({})
@mock_ec2
@mock_lambda
def test_invoke_function_get_ec2_volume():
    # The lambda body (get_test_zip_file2) looks up a mocked EC2 volume
    # and prints its state/size; verify the captured log and payload.
    conn = boto3.resource("ec2", "us-west-2")
    vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2')
    vol = conn.Volume(vol.id)
    conn = boto3.client('lambda', 'us-west-2')
    conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'ZipFile': get_test_zip_file2(),
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    in_data = {'volume_id': vol.id}
    result = conn.invoke(FunctionName='testFunction',
                         InvocationType='RequestResponse', Payload=json.dumps(in_data))
    result["StatusCode"].should.equal(202)
    # Expected output mirrors the two print() lines plus the echoed event.
    msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (
        vol.id, vol.id, vol.state, vol.size, json.dumps(in_data))
    base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg)
    result['Payload'].read().decode('utf-8').should.equal(msg)
@mock_lambda
def test_create_based_on_s3_with_missing_bucket():
    # Creating a function whose code points at a nonexistent S3 bucket
    # must fail with a ClientError.
    conn = boto3.client('lambda', 'us-west-2')
    conn.create_function.when.called_with(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'S3Bucket': 'this-bucket-does-not-exist',
            'S3Key': 'test.zip',
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
        VpcConfig={
            "SecurityGroupIds": ["sg-123abc"],
            "SubnetIds": ["subnet-123abc"],
        },
    ).should.throw(botocore.client.ClientError)
@mock_lambda
@mock_s3
@freeze_time('2015-01-01 00:00:00')
def test_create_function_from_aws_bucket():
    # Upload the zip to mocked S3 first, then create the function from it
    # and compare the full API response against the expected document.
    s3_conn = boto3.client('s3', 'us-west-2')
    s3_conn.create_bucket(Bucket='test-bucket')
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
    conn = boto3.client('lambda', 'us-west-2')
    result = conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'S3Bucket': 'test-bucket',
            'S3Key': 'test.zip',
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
        VpcConfig={
            "SecurityGroupIds": ["sg-123abc"],
            "SubnetIds": ["subnet-123abc"],
        },
    )
    # this is hard to match against, so remove it
    result['ResponseMetadata'].pop('HTTPHeaders', None)
    # Botocore inserts retry attempts not seen in Python27
    result['ResponseMetadata'].pop('RetryAttempts', None)
    result.pop('LastModified')
    result.should.equal({
        'FunctionName': 'testFunction',
        'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction',
        'Runtime': 'python2.7',
        'Role': 'test-iam-role',
        'Handler': 'lambda_function.handler',
        "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
        "CodeSize": len(zip_content),
        'Description': 'test lambda function',
        'Timeout': 3,
        'MemorySize': 128,
        'Version': '$LATEST',
        'VpcConfig': {
            "SecurityGroupIds": ["sg-123abc"],
            "SubnetIds": ["subnet-123abc"],
            "VpcId": "vpc-123abc"
        },
        'ResponseMetadata': {'HTTPStatusCode': 201},
    })
@mock_lambda
@freeze_time('2015-01-01 00:00:00')
def test_create_function_from_zipfile():
conn = boto3.client('lambda', 'us-west-2')
zip_content = get_test_zip_file1()
result = conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.handler',
Code={
'ZipFile': zip_content,
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
# this is hard to match against, so remove it
result['ResponseMetadata'].pop('HTTPHeaders', None)
# Botocore inserts retry attempts not seen in Python27
result['ResponseMetadata'].pop('RetryAttempts', None)
result.pop('LastModified')
result.should.equal({
'FunctionName': 'testFunction',
'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction',
'Runtime': 'python2.7',
'Role': 'test-iam-role',
'Handler': 'lambda_function.handler',
'CodeSize': len(zip_content),
'Description': 'test lambda function',
'Timeout': 3,
'MemorySize': 128,
'CodeSha256': hashlib.sha256(zip_content).hexdigest(),
'Version': '$LATEST',
'VpcConfig': {
"SecurityG |
nttcom/eclcli | eclcli/compute/v2/host.py | Python | apache-2.0 | 2,150 | 0 | # Copyright 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Host action implementations"""
from eclcli.common import command
from eclcli.common import utils
class ListHost(command.Lister):
"""List host command"""
def get_parser(self, prog_name):
parser = super(ListHost, self).get_parser(pr | og_name)
parser.add_argument(
"--zone",
metavar="<zone>",
help="Only return hosts in the availability zone.")
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host Name",
"Service",
"Zone"
)
data = compute_client.hosts.list_all(parsed_args.zone)
return (columns,
(utils.get_item_pr | operties(
s, columns,
) for s in data))
class ShowHost(command.Lister):
"""Show host command"""
def get_parser(self, prog_name):
parser = super(ShowHost, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help="Name of host")
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host",
"Project",
"CPU",
"Memory MB",
"Disk GB"
)
data = compute_client.hosts.get(parsed_args.host)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
|
PritishC/nereid | trytond_nereid/tests/test_country.py | Python | gpl-3.0 | 6,084 | 0 | # -*- coding: utf-8 -*-
"""
Test Country
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import json
import unittest
from decimal import Decimal
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT
from nereid.testing import NereidTestCase
from trytond.transaction import Transaction
class TestCountry(NereidTestCase):
"""
Test Country
"""
def setUp(self):
trytond.tests.test_tryton.install_module('nereid')
self.nereid_website_obj = POOL.get('nereid.website')
self.nereid_website_locale_obj = POOL.get('nereid.website.locale')
self.nereid_permission_obj = POOL.get('nereid.permission')
self.nereid_user_obj = POOL.get('nereid.user')
self.url_map_obj = POOL.get('nereid.url_map')
self.company_obj = POOL.get('company.company')
self.currency_obj = POOL.get('currency.currency')
self.language_obj = POOL.get('ir.lang')
self.party_obj = POOL.get('party.party')
self.Country = POOL.get('country.country')
self.Subdivision = POOL.get('country.subdivision')
def setup_defaults(self):
"""
Setup the defaults
"""
usd, = self.currency_obj.create([{
'name': 'US Dollar',
'code': 'USD',
'symbol': '$',
'rates': [('create', [{'rate': Decimal('1')}])],
}])
eur, = self.currency_obj.create([{
'name': 'Euro',
'code': 'EUR',
'symbol': 'E',
'rates': [('create', [{'rate': Decimal('2')}])],
}])
self.party, = self.party_obj.create([{
'name': 'Openlabs',
}])
self.company, = self.company_obj.create([{
'currency': usd,
'party': self.party,
}])
c1, = self.currency_obj.create([{
'code': 'C1',
'symbol': 'C1',
'name': 'Currency 1',
'rates': [('create', [{'rate': Decimal('10')}])],
}])
c2, = self.currency_obj.create([{
'code': 'C2',
'symbol': 'C2',
'name': 'Currency 2',
'rates': [('create', [{'rate': Decimal('20')}])],
}])
self.lang_currency, = self.currency_obj.create([{
'code': 'C3',
'symbol': 'C3',
'name': 'Currency 3',
'rates': [('create', [{'rate': Decimal('30')}])],
}])
self.currency_obj.create([{
'code': 'C4',
'symbol': 'C4',
'name': 'Currency 4',
'rates': [('create', [{'rate': Decimal('40')}])],
}])
self.website_currencies = [c1, c2]
url_map, = self.url_map_obj.search([], limit=1)
self.en_us, = self.language_obj.search([('code', '=', 'en_US')])
self.es_es, = self.language_obj.search([('code', '=', 'es_ES')])
self.usd, = self.currency_obj.search([('code', '=', 'USD')])
self.eur, = self.currency_obj.search([('code', '=', 'EUR')])
locale_en_us, locale_es_es = self.nereid_website_locale_obj.create([{
'code': 'en_US',
'language': self.en_us,
'currency': self.usd,
}, {
'code': 'es_ES',
'language': self.es_es,
'currency': self.eur,
}])
self.nereid_website_obj.create([{
'name': 'localhost',
'url_map': url_map,
'company': self.company,
'application_user': USER,
'default_locale': locale_en_us.id,
'currencies': [('add', self.website_currencies)],
}])
self.templates = {
'home.jinja': '{{ "hell" }}',
}
def test_0010_all_countries(self):
"""
Check list of json serialized countries
"""
with Transaction().start(DB_NAME, USER, CONTEXT):
self.setup_defaults()
app = self.get_app()
self.Country.create([{
'name': 'India',
'code': 'IN'
}, {
'name': 'Australia',
'code': 'AU',
}])
with app.test_client() as c:
rv = c.get('/all-countries')
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data)
self.assertEqual(len(data['countries']), 2)
def test_0010_subdivisions(self):
"""
Check subdivisons for given country
"""
with Transaction().start(DB_NAME, USER, CONTEXT):
self.setup_defaults()
app = self.get_app()
| country1, country2, = self.Country.create([{
'name': 'India',
'code': 'IN'
}, {
'name': 'Australia',
'code': 'AU',
}])
# Create subdivision | only for country1
self.Subdivision.create([{
'country': country1.id,
'code': 'IN-OR',
'name': 'Orissa',
'type': 'state',
}])
with app.test_client() as c:
rv = c.get('/countries/%d/subdivisions' % country1.id)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data)
self.assertEqual(len(data['result']), 1)
self.assertTrue(data['result'][0]['name'] == 'Orissa')
self.assertTrue(data['result'][0]['code'] == 'IN-OR')
rv = c.get('/countries/%d/subdivisions' % country2.id)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data)
self.assertEqual(len(data['result']), 0)
def suite():
"Country test suite"
test_suite = unittest.TestSuite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestCountry)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
imp/transitions | tests/test_reuse.py | Python | mit | 7,652 | 0.001568 | try:
from builtins import object
except ImportError:
pass
from transitions import MachineError
from transitions.extensions import HierarchicalMachine as Machine
from transitions.extensions import NestedState
from .utils import Stuff
from unittest import TestCase
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class TestTransitions(TestCase):
def setUp(self):
states = ['A', 'B',
{'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]},
'D', 'E', 'F']
self.stuff = Stuff(states, Machine)
def tearDown | (self):
pass
def test_blueprint_simple(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
| {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, before_state_change='before_state_change',
after_state_change='after_state_change', initial='A')
self.assertEqual(len(m.blueprints['states']), 4)
self.assertEqual(m.blueprints['states'][3], 'D')
self.assertEqual(len(m.blueprints['transitions']), 3)
self.assertEqual(m.blueprints['transitions'][2]['trigger'], 'sprint')
m.add_transition('fly', 'D', 'A')
self.assertEqual(len(m.blueprints['transitions']), 4)
self.assertEqual(m.blueprints['transitions'][3]['source'], 'D')
def test_blueprint_nested(self):
c1 = NestedState('C_1', parent='C')
c2 = NestedState('C_2', parent='C')
c3 = NestedState('C_3', parent='C')
c = NestedState('C', children=[c1, c2, c3])
states = ['A', {'name': 'B', 'on_enter': 'chirp', 'children': ['1', '2', '3']},
c, 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B','before': 'before_state_change',
'after': 'after_state_change' },
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, before_state_change='before_state_change',
after_state_change='after_state_change', initial='A')
m.before_state_change = MagicMock()
m.after_state_change = MagicMock()
self.assertEqual(len(m.blueprints['states']), 4)
self.assertEqual(m.blueprints['states'][3], 'D')
self.assertEqual(len(m.blueprints['transitions']), 3)
# transition 'walk' before should contain two calls of the same method
self.assertEqual(len(m.blueprints['transitions'][0]['before']), 2)
self.assertEqual(len(m.blueprints['transitions'][0]['after']), 2)
self.assertEqual(len(m.blueprints['transitions'][1]['before']), 1)
self.assertEqual(m.blueprints['transitions'][2]['trigger'], 'sprint')
m.add_transition('fly', 'D', 'A')
self.assertEqual(len(m.blueprints['transitions']), 4)
self.assertEqual(m.blueprints['transitions'][3]['source'], 'D')
def test_blueprint_reuse(self):
states = ['1', '2', '3']
transitions = [
{'trigger': 'increase', 'source': '1', 'dest': '2'},
{'trigger': 'increase', 'source': '2', 'dest': '3'},
{'trigger': 'decrease', 'source': '3', 'dest': '2'},
{'trigger': 'decrease', 'source': '1', 'dest': '1'},
{'trigger': 'reset', 'source': '*', 'dest': '1'},
]
counter = Machine(states=states, transitions=transitions, before_state_change='check',
after_state_change='clear', initial='1')
new_states = ['A', 'B', {'name':'C', 'children': counter}]
new_transitions = [
{'trigger': 'forward', 'source': 'A', 'dest': 'B'},
{'trigger': 'forward', 'source': 'B', 'dest': 'C'},
{'trigger': 'backward', 'source': 'C', 'dest': 'B'},
{'trigger': 'backward', 'source': 'B', 'dest': 'A'},
{'trigger': 'calc', 'source': '*', 'dest': 'C'},
]
walker = Machine(states=new_states, transitions=new_transitions, before_state_change='watch',
after_state_change='look_back', initial='A')
walker.watch = lambda: 'walk'
walker.look_back = lambda: 'look_back'
walker.check = lambda: 'check'
walker.clear = lambda: 'clear'
with self.assertRaises(MachineError):
walker.increase()
self.assertEqual(walker.state, 'A')
walker.forward()
walker.forward()
self.assertEqual(walker.state, 'C_1')
walker.increase()
self.assertEqual(walker.state, 'C_2')
walker.reset()
self.assertEqual(walker.state, 'C_1')
walker.to_A()
self.assertEqual(walker.state, 'A')
walker.calc()
self.assertEqual(walker.state, 'C_1')
def test_blueprint_remap(self):
states = ['1', '2', '3', 'finished']
transitions = [
{'trigger': 'increase', 'source': '1', 'dest': '2'},
{'trigger': 'increase', 'source': '2', 'dest': '3'},
{'trigger': 'decrease', 'source': '3', 'dest': '2'},
{'trigger': 'decrease', 'source': '1', 'dest': '1'},
{'trigger': 'reset', 'source': '*', 'dest': '1'},
{'trigger': 'done', 'source': '3', 'dest': 'finished'}
]
counter = Machine(states=states, transitions=transitions, initial='1')
new_states = ['A', 'B', {'name': 'C', 'children': counter, 'remap': {'finished': 'A'}}]
new_transitions = [
{'trigger': 'forward', 'source': 'A', 'dest': 'B'},
{'trigger': 'forward', 'source': 'B', 'dest': 'C'},
{'trigger': 'backward', 'source': 'C', 'dest': 'B'},
{'trigger': 'backward', 'source': 'B', 'dest': 'A'},
{'trigger': 'calc', 'source': '*', 'dest': 'C'},
]
walker = Machine(states=new_states, transitions=new_transitions, before_state_change='watch',
after_state_change='look_back', initial='A')
walker.watch = lambda: 'walk'
walker.look_back = lambda: 'look_back'
counter.increase()
counter.increase()
counter.done()
self.assertEqual(counter.state, 'finished')
with self.assertRaises(MachineError):
walker.increase()
self.assertEqual(walker.state, 'A')
walker.forward()
walker.forward()
self.assertEqual(walker.state, 'C_1')
walker.increase()
self.assertEqual(walker.state, 'C_2')
walker.reset()
self.assertEqual(walker.state, 'C_1')
walker.to_A()
self.assertEqual(walker.state, 'A')
walker.calc()
self.assertEqual(walker.state, 'C_1')
walker.increase()
walker.increase()
walker.done()
self.assertEqual(walker.state, 'A')
def test_wrong_nesting(self):
correct = ['A', {'name': 'B', 'children': self.stuff.machine}]
wrong_type = ['A', {'name': 'B', 'children': self.stuff}]
siblings = ['A', {'name': 'B', 'children': ['1', self.stuff.machine]}]
m = Machine(None, states=correct)
with self.assertRaises(ValueError):
m = Machine(None, states=wrong_type)
with self.assertRaises(ValueError):
m = Machine(None, states=siblings)
|
google/tink | python/tink/_keyset_reader_test.py | Python | apache-2.0 | 4,878 | 0.005535 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink._keyset_reader."""
from typing import cast
from absl.testing import absltest
from tink.proto import tink_pb2
import tink
from tink import core
class JsonKeysetReaderTest(absltest.TestCase):
def test_read(self):
json_keyset = """
{
"primaryKeyId": 42,
"key": [
{
"keyData": {
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"keyMaterialType": "SYMMETRIC",
"value": "GhCS/1+ejWpx68NfGt6ziYHd"
},
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}"""
reader = tink.JsonKeysetReader(json_keyset)
keyset = reader.read()
self.assertEqual(keyset.primary_key_id, 42)
self.assertLen(keyset.key, 1)
def test_read_invalid(self):
reader = tink.JsonKeysetRead | er('not json')
with self.assertRaises(core.TinkError):
reader.read()
def test_read_encrypted(self):
# encryptedKeyset is a base64-encoding of 'some ciphertext wi | th keyset'
json_encrypted_keyset = """
{
"encryptedKeyset": "c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0",
"keysetInfo": {
"primaryKeyId": 42,
"keyInfo": [
{
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}
}"""
reader = tink.JsonKeysetReader(json_encrypted_keyset)
enc_keyset = reader.read_encrypted()
self.assertEqual(enc_keyset.encrypted_keyset,
b'some ciphertext with keyset')
self.assertLen(enc_keyset.keyset_info.key_info, 1)
self.assertEqual(enc_keyset.keyset_info.key_info[0].type_url,
'type.googleapis.com/google.crypto.tink.AesGcmKey')
def test_read_encrypted_invalid(self):
reader = tink.JsonKeysetReader('not json')
with self.assertRaises(core.TinkError):
reader.read_encrypted()
class BinaryKeysetReaderTest(absltest.TestCase):
def test_read(self):
keyset = tink_pb2.Keyset()
keyset.primary_key_id = 42
key = keyset.key.add()
key.key_data.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key.key_data.key_material_type = tink_pb2.KeyData.SYMMETRIC
key.key_data.value = b'GhCS/1+ejWpx68NfGt6ziYHd'
key.output_prefix_type = tink_pb2.TINK
key.key_id = 42
key.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(keyset.SerializeToString())
self.assertEqual(keyset, reader.read())
def test_read_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read()
def test_read_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read()
def test_read_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read()
def test_read_encrypted(self):
encrypted_keyset = tink_pb2.EncryptedKeyset()
encrypted_keyset.encrypted_keyset = b'c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0'
encrypted_keyset.keyset_info.primary_key_id = 42
key_info = encrypted_keyset.keyset_info.key_info.add()
key_info.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key_info.output_prefix_type = tink_pb2.TINK
key_info.key_id = 42
key_info.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(
encrypted_keyset.SerializeToString())
self.assertEqual(encrypted_keyset, reader.read_encrypted())
def test_read_encrypted_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read_encrypted()
def test_read_encrypted_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read_encrypted()
def test_read_encrypted_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read_encrypted()
if __name__ == '__main__':
absltest.main()
|
kayhayen/Nuitka | nuitka/nodes/TryNodes.py | Python | apache-2.0 | 17,486 | 0.000972 | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for try/except/finally handling.
This is the unified low level solution to trying a block, and executing code
when it returns, break, continues, or raises an exception. See Developer
Manual for how this maps to try/finally and try/except as in Python.
"""
from nuitka.Errors import NuitkaOptimizationError
from nuitka.optimizations.TraceCollections import TraceCollectionBranch
from .Checkers import checkStatementsSequence, checkStatementsSequenceOrNone
from .NodeBases import StatementChildrenHavingBase
from .StatementNodes import StatementsSequence
class StatementTry(StatementChildrenHavingBase):
kind = "STATEMENT_TRY"
named_children = (
"tried",
"except_handler",
"break_handler",
"continue_handler",
"return_handler",
)
checkers = {
"tried": checkStatementsSequence,
"except_handler": checkStatementsSequenceOrNone,
"break_handler": checkStatementsSequenceOrNone,
"continue_handler": checkStatementsSequenceOrNone,
"return_handler": checkStatementsSequenceOrNone,
}
def __init__(
self,
tried,
except_handler,
break_handler,
continue_handler,
return_handler,
source_ref,
):
StatementChildrenHavingBase.__init__(
self,
values={
"tried": tried,
"except_handler": except_handler,
"break_handler": break_handler,
"continue_handler": continue_handler,
"return_handler": return_handler,
},
source_ref=source_ref,
)
def computeStatement(self, trace_collection):
# This node has many children to handle, pylint: disable=I0021,too-many-branches,too-many-locals,too-many-statements
tried = self.subnode_tried
except_handler = self.subnode_except_handler
break_handler = self.subnode_break_handler
continue_handler = self.subnode_continue_handler
return_handler = self.subnode_return_handler
# The tried block must be considered as a branch, if it is not empty
# already.
collection_start = TraceCollectionBranch(
parent=trace_collection, name="try start"
)
abort_context = trace_collection.makeAbortStackContext(
catch_breaks=break_handler is not None,
catch_continues=continue_handler is not None,
catch_returns=return_handler is not None,
catch_exceptions=True,
)
with abort_context:
# As a branch point for the many types of handlers.
result = tried.computeStatementsSequence(trace_collection=trace_collection)
# We might be done entirely already.
if result is None:
return None, "new_statements", "Removed now empty try statement."
# Might be changed.
if result is not tried:
self.setChild("tried", result)
tried = result
break_collections = trace_collection.getLoopBreakCollections()
continue_collections = trace_collection.getLoopContinueCollections()
return_collections = trace_collection.getFunctionReturnCollections()
exception_collections = trace_collection.getExceptionRaiseCollections()
tried_may_raise = tried.mayRaiseException(BaseException)
# Exception handling is useless if no exception is to be raised.
if not tried_may_raise:
if except_handler is not None:
except_handler.finalize()
self.clearChild("except_handler")
trace_collection.signalChange(
tags="new_statements",
message="Removed useless exception handler.",
source_r | ef=except_handler.source_ref,
)
except_handler = None
# If tried may raise, even empty exception handler has a meaning to
# ignore that exception.
if tried_may_raise:
collection_exception_handling = TraceCollectionBranch(
parent=collection_start, name="except handler"
)
# When no exception exits are the | re, this is a problem, we just
# found an inconsistency that is a bug.
if not exception_collections:
for statement in tried.subnode_statements:
if statement.mayRaiseException(BaseException):
raise NuitkaOptimizationError(
"This statement does raise but didn't annotate an exception exit.",
statement,
)
raise NuitkaOptimizationError(
"Falsely assuming tried block may raise, but no statement says so.",
tried,
)
collection_exception_handling.mergeMultipleBranches(exception_collections)
if except_handler is not None:
result = except_handler.computeStatementsSequence(
trace_collection=collection_exception_handling
)
# Might be changed.
if result is not except_handler:
self.setChild("except_handler", result)
except_handler = result
if break_handler is not None:
if not tried.mayBreak():
break_handler.finalize()
self.clearChild("break_handler")
break_handler = None
if break_handler is not None:
collection_break = TraceCollectionBranch(
parent=collection_start, name="break handler"
)
collection_break.mergeMultipleBranches(break_collections)
result = break_handler.computeStatementsSequence(
trace_collection=collection_break
)
# Might be changed.
if result is not break_handler:
self.setChild("break_handler", result)
break_handler = result
if continue_handler is not None:
if not tried.mayContinue():
continue_handler.finalize()
self.clearChild("continue_handler")
continue_handler = None
if continue_handler is not None:
collection_continue = TraceCollectionBranch(
parent=collection_start, name="continue handler"
)
collection_continue.mergeMultipleBranches(continue_collections)
result = continue_handler.computeStatementsSequence(
trace_collection=collection_continue
)
# Might be changed.
if result is not continue_handler:
self.setChild("continue_handler", result)
continue_handler = result
if return_handler is not None:
if not tried.mayReturn():
return_handler.finalize()
self.clearChild("return_handler")
return_handler = None
if return_handler is not None:
collection_return = TraceCollectionBranch(
parent=collection_start, name="return handler"
)
collection_return.mergeMultipleBranches(return_collections)
result = |
SINGROUP/pycp2k | pycp2k/classes/_hf4.py | Python | lgpl-3.0 | 1,138 | 0.002636 | from pycp2k.inputsection import InputSection
from ._hf_info4 import _hf_info4
from ._periodic5 import _periodic5
from ._screening5 import _screening5
from ._interaction_potential5 import _interaction_potential5
from ._load_balance4 import _load_balance4
from ._memory5 import _memory5
class _hf4(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Fraction = None
self.Treat_lsd_in_core = None
self.Pw_hfx = None
self.Pw_hfx_blocksize = None
self.HF_INFO = _hf_info4()
self.PERIODIC = _periodic5()
self.SCREENING = _screening5()
| self.INTERACTION_POTENTIAL = _interaction_potential5()
self.LOAD_BALANCE = _load_balance4()
self.MEMORY = _memory5()
self._name = "HF"
self._keywords = {'Treat_lsd_in_core': 'TREAT_LSD_IN_CORE', 'Pw_hfx_blocksize': 'PW_HFX_BLOCKSIZE', 'Fraction': 'FRACTION', 'Pw_hfx': 'PW_HFX'}
self._subsections = {'SCREENING': 'SCREENING', 'LOAD_BALANC | E': 'LOAD_BALANCE', 'PERIODIC': 'PERIODIC', 'MEMORY': 'MEMORY', 'INTERACTION_POTENTIAL': 'INTERACTION_POTENTIAL', 'HF_INFO': 'HF_INFO'}
|
lubosz/gst-plugins-vr | valgrind_helpers/valgrind-make-fix-list.py | Python | lgpl-2.1 | 628 | 0.004777 | #!/usr/bin/env python
import os, sys
usage = "usage | : %s [infile [outfile]]" % os.path.basename(sys.argv[0])
if len(sys.argv) < 1:
print (usage)
else:
stext = "<insert_a_suppression_name_here>"
rtext = "memcheck problem #"
input = sys.stdin
output = sys.stdout
hit = 0
if len(sys.argv) > 1:
input = open(sys.argv[1])
if len(sys.argv) > 2:
output = open(sys.argv[2], 'w')
for s in input.readlines():
if s.replace(stext, "") != s:
| hit = hit + 1
output.write(s.replace(stext, "memcheck problem #%d" % hit))
else:
output.write(s)
|
ctrlaltdel/neutrinator | vendor/openstack/identity/version.py | Python | gpl-3.0 | 1,236 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Version(resource.Resource):
resource_key = 'version'
resources_key = 'versions'
base_path = '/'
# capabilities
allow_list = True
# Properties
media_ty | pes = resource.Body('media-types')
status = resource.Body('status')
updated = resource.Body('updated')
@classmethod
def list(cls, session, paginated=False, base_path=None, **params):
if base_path is None:
base_path = cls.base_path
resp = session. | get(base_path,
params=params)
resp = resp.json()
for data in resp[cls.resources_key]['values']:
yield cls.existing(**data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.