| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| tudelft3d/geovalidation.server | setup.py | Python | gpl-3.0 | 590 | 0.064407 |
from setuptools import setup
setup(
name='geovalidation.server',
version='0.5',
    long_description="Flask-based server to validate GIS datasets (with prepair and val3dity).",
packages=['geovalidation'],
include_package_data=True,
zip_safe=False,
install_requires=[ 'Flask>=1.1'
,'Jinja2>=2.7.2'
,'Werkzeug>=0.9.4'
,'celery>=3.1.11'
,'redis>=2.9.1'
,'lxml>=3.3.3'
,'subprocess32>=3.2.6'
,'cjio>=0.5'
    ],
    author='Hugo Ledoux',
author_email='h.ledoux@tudelft.nl'
)
| sserrot/champion_relationships | venv/Lib/site-packages/ipywidgets/__init__.py | Python | mit | 1,536 | 0.003906 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with IPython's display machinery::
from ipywidgets import IntSlider
from IPython.display import display
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
import os
from IPython import get_ipython
from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
from .widgets import *
from traitlets import link, dlink
def load_ipython_extension(ip):
"""Set up IPython to work with widgets"""
if not hasattr(ip, 'kernel'):
return
register_comm_target(ip.kernel)
def register_comm_target(kernel=None):
"""Register the jupyter.widget comm target"""
    if kernel is None:
kernel = get_ipython().kernel
kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened)
# deprecated alias
handle_kernel = register_comm_target
def _handle_ipython():
"""Register with the comm target at import if running in IPython"""
ip = get_ipython()
if ip is None:
        return
load_ipython_extension(ip)
_handle_ipython()
| takeflight/wagtailnews | wagtailnews/deprecation.py | Python | bsd-2-clause | 609 | 0 |
import warnings
class DeprecatedCallableStr(str):
do_no_call_in_templates = True
def __new__(cls, value, *args, **kwargs):
return super(DeprecatedCallableStr, cls).__new__(cls, value)
def __init__(self, value, warning, warning_cls):
self.warning, self.warning_cls = warning, warning_cls
def __call__(self, *args, **kwargs):
warnings.warn(self.warning, self.warning_cls, stacklevel=2)
return str(self)
def __repr__(self):
        super_repr = super(DeprecatedCallableStr, self).__repr__()
return '<DeprecatedCallableStr {}>'.format(super_repr)
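A minimal usage sketch (hypothetical names, not from the wagtailnews source): the class lets a renamed string constant keep working at old call sites while warning when it is called like the function it used to be.

OLD_INDEX_URL = DeprecatedCallableStr(
    '/news/', 'OLD_INDEX_URL is no longer callable; use it as a plain string.',
    DeprecationWarning)
assert OLD_INDEX_URL.startswith('/news')  # behaves like an ordinary str
url = OLD_INDEX_URL()                     # emits a DeprecationWarning, returns '/news/'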
| anilveeramalli/cloudify-azure-plugin | blueprints/clustered-dns/dns/dns_remove_reverse_record.py | Python | apache-2.0 | 927 | 0.036677 |
import subprocess, os, sys
from reverseZone_naming import reverseZone_name
from netaddr import *
zone_files_path="/etc/bind/
|
zones"
def remove_reverse_record():
host_name_to_be_removed= sys.argv[1]
reverse_zone_file_name,reverse_zone_name=reverseZone_name()
os.chdir(zone_files_path)
readFiles = open(reverse_zone_file_name,'r')
    reverse_zone_file_content = readFiles.read()
readFiles.close()
readFiles = open(reverse_zone_file_name,'r')
lines = readFiles.readlines()
readFiles.close()
if host_name_to_be_removed in reverse_zone_file_content:
file_content = open(reverse_zone_file_name,'w')
for line in lines:
if not host_name_to_be_removed in line:
file_content.write(line)
file_content.close()
print "\nThe reverse record that you entered has been removed!\n"
else:
print "\nThe record you wanted to remove is already absent in the database!\n"
def main():
remove_reverse_record()
main()
| Ecogenomics/GTDBNCBI | scripts_dev/ncbi_assembly_file_metadata.py | Python | gpl-3.0 | 7,410 | 0.001484 |
#!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'ncbi_assembly_file_metadata.py'
__prog_desc__ = 'Produce filtered metadata file from NCBI assembly metadata file.'
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2015'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.3'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import os
import sys
import argparse
from collections import defaultdict
from numpy import (zeros as np_zeros)
class Metadata(object):
"""Create metadata file from the assembly stats file of each NCBI assembly."""
def __init__(self):
self.fields = {'bioproject': 'ncbi_bioproject',
'wgs_master': 'ncbi_wgs_master',
'refseq_category': 'ncbi_refseq_category',
'species_taxid': 'ncbi_species_taxid',
'isolate': 'ncbi_isolate',
'version_status': 'ncbi_version_status',
                       'seq_rel_date': 'ncbi_seq_rel_date',
'asm_name': 'ncbi_asm_name',
'gbrs_paired_asm': 'ncbi_gbrs_paired_asm',
'paired_asm_comp': 'ncbi_paired_asm_comp',
'relation_to_type_material': 'ncbi_type_material_designation'}
def run(self, refseq_bacteria_assembly_summary_file,
refseq_archaea_assembly_summary_file,
            genbank_bacteria_assembly_summary_file,
genbank_archaea_assembly_summary_file, genome_id_file, output_file):
"""Create metadata by parsing NCBI assembly metadata file."""
# get identifier of genomes in GTDB
genome_ids = set()
for line in open(genome_id_file):
if line[0] == '#':
continue
if '\t' in line:
genome_id = line.strip().split('\t')[0]
else:
genome_id = line.strip().split(',')[0]
if genome_id.startswith('GCA_'):
genome_id = 'GB_' + genome_id
elif genome_id.startswith('GCF_'):
genome_id = 'RS_' + genome_id
genome_ids.add(genome_id)
# write out metadata
fout = open(output_file, 'w')
fout.write('genome_id')
write_header = True
indice_wgs = None
for assembly_file in [refseq_bacteria_assembly_summary_file,
refseq_archaea_assembly_summary_file,
genbank_bacteria_assembly_summary_file,
genbank_archaea_assembly_summary_file]:
with open(assembly_file) as f:
f.readline() # first comment line
headers = f.readline().rstrip().split('\t')
indices = []
for i, header in enumerate(headers):
if header in self.fields:
if write_header:
fout.write('\t' + self.fields[header])
indices.append(i)
if write_header and header == 'wgs_master':
fout.write('\t' + 'ncbi_wgs_formatted')
indice_wgs = i
if write_header:
fout.write('\n')
write_header = False
print indice_wgs
for line in f:
line_split = line.rstrip('\n').split('\t')
genome_id = line_split[0]
if genome_id.startswith('GCA_'):
genome_id = 'GB_' + genome_id
elif genome_id.startswith('GCF_'):
genome_id = 'RS_' + genome_id
if genome_id in genome_ids:
fout.write(genome_id)
for i in indices:
if indice_wgs == i:
fout.write('\t' + line_split[i])
fout.write(
'\t' + self.format_wgs(line_split[i]))
else:
fout.write('\t' + line_split[i])
fout.write('\n')
fout.close()
def format_wgs(self, wgs_accession):
if not wgs_accession:
return ""
wgs_acc, version = wgs_accession.split('.')
idx = [ch.isdigit() for ch in wgs_acc].index(True)
wgs_id = wgs_acc[0:idx] + str(version).zfill(2)
return wgs_id
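# Illustrative behaviour of Metadata.format_wgs (examples added for clarity,
# not part of the original script):
#   format_wgs('AAAB00000000.2') -> 'AAAB02'  (letter prefix + zero-padded version)
#   format_wgs('') -> ''                      (empty accessions pass through)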
if __name__ == '__main__':
print __prog_name__ + ' v' + __version__ + ': ' + __prog_desc__
print ' by ' + __author__ + ' (' + __email__ + ')' + '\n'
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('refseq_bacteria_assembly_summary_file',
help='RefSeq assembly summary file (assembly_summary_refseq.txt)')
parser.add_argument('refseq_archaea_assembly_summary_file',
help='RefSeq assembly summary file (assembly_summary_refseq.txt)')
parser.add_argument('genbank_bacteria_assembly_summary_file',
help='GenBank assembly summary file (assembly_summary_genbank.txt)')
parser.add_argument('genbank_archaea_assembly_summary_file',
help='GenBank assembly summary file (assembly_summary_genbank.txt)')
parser.add_argument(
'genome_id_file', help='genome identifiers for genomes in GTDB')
parser.add_argument('output_file', help='output metadata file')
args = parser.parse_args()
try:
p = Metadata()
p.run(args.refseq_bacteria_assembly_summary_file,
args.refseq_archaea_assembly_summary_file,
args.genbank_bacteria_assembly_summary_file,
args.genbank_archaea_assembly_summary_file, args.genome_id_file, args.output_file)
except SystemExit:
print "\nControlled exit resulting from an unrecoverable error or warning."
except:
print "\nUnexpected error:", sys.exc_info()[0]
raise
| maleficarium/youtube-dl | youtube_dl/extractor/ruutu.py | Python | unlicense | 4,297 | 0.003495 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 114,
'age_limit': 0,
},
},
{
            'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video_xml = self._download_xml(
'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
def extract_formats(node):
for child in node:
if child.tag.endswith('Files'):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
if (not video_url or video_url in processed_urls or
any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
return
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
continue
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
if not self._is_valid_url(video_url, video_id, format_id):
continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
'tbr': tbr,
'preference': preference,
})
extract_formats(video_xml.find('./Clip'))
self._sort_formats(formats)
return {
'id': video_id,
'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
}
| Kbman99/NetSecShare | app/logger_setup.py | Python | mit | 2,739 | 0.004381 |
'''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occurred, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\
(pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
            event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
    file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
)
| firmlyjin/brython | www/tests/unittests/test/gdb_sample.py | Python | bsd-3-clause | 153 | 0.019608 |
# Sample script for use by test_gdb.py
def foo(a, b, c):
bar(a, b, c)
def bar(a, b, c):
baz(a, b, c)
def baz(*args):
id(42)
foo(1, 2, 3)
| ddico/odoo | addons/website_blog/models/website_blog.py | Python | agpl-3.0 | 11,694 | 0.002993 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
import random
import json
from odoo import api, models, fields, _
from odoo.addons.http_routing.models.ir_http import slug
from odoo.tools.translate import html_translate
from odoo.tools import html2plaintext
class Blog(models.Model):
_name = 'blog.blog'
_description = 'Blog'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.multi.mixin', 'website.cover_properties.mixin']
_order = 'name'
name = fields.Char('Blog Name', required=True, translate=True)
subtitle = fields.Char('Blog Subtitle', translate=True)
active = fields.Boolean('Active', default=True)
content = fields.Html('Content', translate=html_translate, sanitize=False)
blog_post_ids = fields.One2many('blog.post', 'blog_id', 'Blog Posts')
blog_post_count = fields.Integer("Posts", compute='_compute_blog_post_count')
@api.depends('blog_post_ids')
def _compute_blog_post_count(self):
for record in self:
record.blog_post_count = len(record.blog_post_ids)
def write(self, vals):
res = super(Blog, self).write(vals)
if 'active' in vals:
# archiving/unarchiving a blog does it on its posts, too
post_ids = self.env['blog.post'].with_context(active_test=False).search([
('blog_id', 'in', self.ids)
])
for blog_post in post_ids:
blog_post.active = vals['active']
return res
@api.returns('mail.message', lambda value: value.id)
def message_post(self, *, parent_id=False, subtype_id=False, **kwargs):
""" Temporary workaround to avoid spam. If someone replies on a channel
through the 'Presentation Published' email, it should be considered as a
note as we don't want all channel followers to be notified of this answer. """
self.ensure_one()
if parent_id:
parent_message = self.env['mail.message'].sudo().browse(parent_id)
if parent_message.subtype_id and parent_message.subtype_id == self.env.ref('website_blog.mt_blog_blog_published'):
subtype_id = self.env.ref('mail.mt_note').id
return super(Blog, self).message_post(parent_id=parent_id, subtype_id=subtype_id, **kwargs)
def all_tags(self, join=False, min_limit=1):
BlogTag = self.env['blog.tag']
req = """
SELECT
p.blog_id, count(*), r.blog_tag_id
FROM
blog_post_blog_tag_rel r
join blog_post p on r.blog_post_id=p.id
WHERE
p.blog_id in %s
GROUP BY
p.blog_id,
r.blog_tag_id
ORDER BY
count(*) DESC
"""
self._cr.execute(req, [tuple(self.ids)])
tag_by_blog = {i.id: [] for i in self}
all_tags = set()
for blog_id, freq, tag_id in self._cr.fetchall():
if freq >= min_limit:
if join:
all_tags.add(tag_id)
else:
tag_by_blog[blog_id].append(tag_id)
if join:
return BlogTag.browse(all_tags)
for blog_id in tag_by_blog:
tag_by_blog[blog_id] = BlogTag.browse(tag_by_blog[blog_id])
return tag_by_blog
class BlogTagCategory(models.Model):
_name = 'blog.tag.category'
_description = 'Blog Tag Category'
_order = 'name'
name = fields.Char('Name', required=True, translate=True)
tag_ids = fields.One2many('blog.tag', 'category_id', string='Tags')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag category already exists !"),
]
class BlogTag(models.Model):
_name = 'blog.tag'
_description = 'Blog Tag'
_inherit = ['website.seo.metadata']
_order = 'name'
name = fields.Char('Name', required=True, translate=True)
category_id = fields.Many2one('blog.tag.category', 'Category', index=True)
post_ids = fields.Many2many('blog.post', string='Posts')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class BlogPost(models.Model):
_name = "blog.post"
_description = "Blog Post"
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.multi.mixin', 'website.cover_properties.mixin']
_order = 'id DESC'
_mail_post_access = 'read'
def _compute_website_url(self):
super(BlogPost, self)._compute_website_url()
for blog_post in self:
blog_post.website_url = "/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post))
def _default_content(self):
return '''
<p class="o_default_snippet_text">''' + _("Start writing here...") + '''</p>
'''
name = fields.Char('Title', required=True, translate=True, default='')
subtitle = fields.Char('Sub Title', translate=True)
author_id = fields.Many2one('res.partner', 'Author', default=lambda self: self.env.user.partner_id)
author_avatar = fields.Binary(related='author_id.image_128', string="Avatar", readonly=False)
author_name = fields.Char(related='author_id.display_name', string="Author Name", readonly=False, store=True)
active = fields.Boolean('Active', default=True)
blog_id = fields.Many2one('blog.blog', 'Blog', required=True, ondelete='cascade')
tag_ids = fields.Many2many('blog.tag', string='Tags')
content = fields.Html('Content', default=_default_content, translate=html_translate, sanitize=False)
teaser = fields.Text('Teaser', compute='_compute_teaser', inverse='_set_teaser')
teaser_manual = fields.Text(string='Teaser Content')
website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', '=', 'comment')])
# creation / update stuff
create_date = fields.Datetime('Created on', index=True, readonly=True)
published_date = fields.Datetime('Published Date')
post_date = fields.Datetime('Publishing date', compute='_compute_post_date', inverse='_set_post_date', store=True,
help="The blog post will be visible for your visitors as of this date on the website if it is set as published.")
create_uid = fields.Many2one('res.users', 'Created by', index=True, readonly=True)
    write_date = fields.Datetime('Last Updated on', index=True, readonly=True)
write_uid = fields.Many2one('res.users', 'Last Contributor', index=True, readonly=True)
visits = fields.Integer('No of Views', copy=False, default=0)
website_id = fields.Many2one(related='blog_id.website_id', readonly=True, store=True)
    @api.depends('content', 'teaser_manual')
def _compute_teaser(self):
for blog_post in self:
if blog_post.teaser_manual:
blog_post.teaser = blog_post.teaser_manual
else:
content = html2plaintext(blog_post.content).replace('\n', ' ')
blog_post.teaser = content[:200] + '...'
def _set_teaser(self):
for blog_post in self:
blog_post.teaser_manual = blog_post.teaser
@api.depends('create_date', 'published_date')
def _compute_post_date(self):
for blog_post in self:
if blog_post.published_date:
blog_post.post_date = blog_post.published_date
else:
blog_post.post_date = blog_post.create_date
def _set_post_date(self):
for blog_post in self:
blog_post.published_date = blog_post.post_date
if not blog_post.published_date:
blog_post._write(dict(post_date=blog_post.create_date)) # dont trigger inverse function
def _check_for_publication(self, vals):
if vals.get('is_published'):
for post in self:
post.blog_id.message_post_with_view(
'website_blog.blog_post_template_new_post',
subject=post.name,
values={'post': post},
subtype_id=self.env['ir.model.data'].xmlid_to_res_id('website_blog.mt_blog_blog_published'))
| Drowrin/Weeabot | cogs/moderation.py | Python | mit | 5,452 | 0.002018 |
import asyncio
import datetime
import discord
from discord.ext import commands
import utils
import checks
class Moderation:
"""Moderation commands."""
def __init__(self, bot: commands.Bot):
self.bot = bot
if 'jails' not in self.bot.status:
self.bot.status['jails'] = {}
self.bot.dump_status()
self.bot.loop.create_task(self.check_jails())
self.jail_events = {}
async def get_jail(self, server: discord.Server) -> (discord.Role, discord.Channel):
"""Get the jail role and channel of a server. If it doesn't exist, add it."""
s = self.bot.server_configs[server.id]
if 'jails' in s:
role = discord.utils.get(server.roles, id=s['jails']['role'])
channel = server.get_channel(s['jails']['channel'])
if role is not None and channel is not None:
return role, channel
server_perms = discord.Permissions()
        server_perms.read_messages = False
server_perms.send_messages = False
role = await self.bot.create_role(server, name="prisoner", hoist=True, permissions=server_perms)
po = discord.PermissionOverwrite(read_messages=True)
prisoner = discord.ChannelPermissions(target=role, overwrite=po)
eo = discord.PermissionOverwrite(read_messages=False)
everyone = discord.ChannelPermissions(target=server.default_role, overwrite=eo)
channel = await self.bot.create_channel(server, "jail", prisoner, everyone)
s['jails'] = {
'role': role.id,
'channel': channel.id
}
self.bot.dump_server_configs()
return role, channel
async def arrest(self, mid: str):
"""Make an arrest based on member id key in the jails dict.
Creates an event.
Creates the channel and role if they don't exist."""
async def a():
try:
j = self.bot.status['jails'][mid]
except KeyError:
print(f'Jail keyerror {mid}')
return
finished = discord.utils.parse_time(j['finished']) - datetime.datetime.now()
server = self.bot.get_server(j['server'])
if server is None:
print(f"Could not arrest, couldn't get server. {j}")
return
user: discord.Member = server.get_member(j['user'])
role, channel = await self.get_jail(server)
if role not in user.roles:
# arrest them
await self.bot.add_roles(user, role)
await self.bot.send_message(channel, f"{user.mention} has been arrested! Time remaining: {utils.down_to_seconds(finished)}")
# handle freeing after duration, or freed by command.
self.jail_events[mid] = asyncio.Event()
async def auto_free():
await asyncio.sleep(finished.seconds)
self.jail_events[mid].set()
self.bot.loop.create_task(auto_free())
await self.jail_events[mid].wait()
# free user
await self.bot.remove_roles(user, role)
await self.bot.send_message(server, f"{user.mention} is free!")
del self.bot.status['jails'][mid]
self.bot.dump_status()
self.bot.loop.create_task(a())
async def check_jails(self):
await self.bot.init.wait()
for mid in self.bot.status['jails']:
await self.arrest(mid)
@commands.command(pass_context=True, aliases=('arrest',), no_pm=True)
async def jail(self, ctx, user: str, *, duration: str="1h"):
"""Jail a user for a specified amount of time. Accepts a user or "me".
The format for the duration uses units. For example, something like 3 hours and 20 minutes or 4m 15s.
Without permissions, you can only jail yourself.
Will create a jail channel and role if they don't already exist."""
# Get user, and check permissions
if user == 'me':
user = ctx.message.author
elif checks.moderator(ctx):
user = commands.MemberConverter(ctx, user).convert()
else:
raise utils.CheckMsg("You do not have permission to do that.")
td = utils.duration(duration)
current_time = datetime.datetime.now()
# create jail
self.bot.status['jails'][ctx.message.id] = {
'finished': str(current_time + td),
'server': ctx.message.server.id,
'user': user.id
}
await self.arrest(ctx.message.id)
self.bot.dump_status()
await self.bot.affirmative()
async def unjail(self, server: discord.Server, user: discord.Member):
def pred(jdata):
_, j = jdata
return j['server'] == server.id and j['user'] == user.id
jid, _ = discord.utils.find(pred, self.bot.status['jails'].items())
self.jail_events[jid].set()
@commands.command(pass_context=True, aliases=('unjail',), no_pm=True)
@checks.is_moderator()
async def free(self, ctx, user: str):
"""Free the user from jail. Accepts a user or "me"."""
# Get user, and check permissions
if user == 'me':
user = ctx.message.author
else:
user = commands.MemberConverter(ctx, user).convert()
await self.unjail(ctx.message.server, user)
def setup(bot):
bot.add_cog(Moderation(bot))
| mylxiaoyi/caffe-with-spearmint | cwsm/cafferun.py | Python | mit | 3,964 | 0.00555 |
import numpy as np
import cPickle
import math
import string
import re
import subprocess
from datetime import datetime
from cwsm.performance import Performance
def cafferun(params):
# load general and optimization parameters
with open('../tmp/optparams.pkl', 'rb') as f:
paramdescr = cPickle.load(f)
with open('../tmp/genparams.pkl', 'rb') as f:
genparams = cPickle.load(f)
CAFFE_ROOT = genparams['CAFFE_ROOT']
optimize = genparams['optimize']
    # transform parameters according to the transformation specified in the model file
print params
for p in params:
if paramdescr[p].get('transform', None) is not None:
# X<>: multiplier where <> stands for any number (examples: X10, X100, X22)
if paramdescr[p]['transform'][0] == 'X':
multiplier = int(paramdescr[p]['transform'][1:])
params[p][0] *= multiplier
# LOG<>: number which goes to Spearmint corresponds to log with base <> of an actual
# number (example: value 2 of LOG10 corresponds to 100)
if paramdescr[p]['transform'][0:3] == 'LOG':
base = int(paramdescr[p]['transform'][3:])
params[p][0] = math.log(params[p][0], base)
# NEGEXP<>: where <> is the base, the number which goes to Spearmint is negative of the
# exponent (example: value 3 with NEGEXP10 means 10^-3 and correpsonds to 0.001)
if paramdescr[p]['transform'][0:6] == 'NEGEXP':
negexp = float(paramdescr[p]['transform'][6:])
params[p] = [negexp ** float(-params[p][0])]
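    # Worked examples of the transforms above (illustrative, mirroring the code):
    #   X10      with Spearmint value 2   -> 2 * 10      = 20
    #   NEGEXP10 with Spearmint value 3   -> 10 ** -3    = 0.001
    #   LOG10    with Spearmint value 100 -> log(100,10) = 2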
# unique prefix for this run
prefix = datetime.now().strftime('%Y-%d-%m-%H-%M-%S')
    # generate .prototxt files with the current set of parameters
trainnet = open('../tmp/template_trainval.prototxt', 'r').read()
solver = open('../tmp/template_solver.prototxt', 'r').read()
for p in params:
trainnet = string.replace(trainnet, 'OPTIMIZE_' + p, str(params[p][0]), 1)
solver = string.replace(solver, 'OPTIMIZE_' + p, str(params[p][0]), 1)
# kappa optimizer has a special treatment
if optimize == 'kappa':
valnet = open('../tmp/template_val.prototxt', 'r').read()
for p in params:
valnet = string.replace(valnet, 'OPTIMIZE_' + p, str(params[p][0]), 1)
# update paths for this run
solver = string.replace(solver, 'PLACEHOLDER_NET', '../tmp/%s_trainval.prototxt' % prefix, 1)
    solver = string.replace(solver, 'PLACEHOLDER_MODEL_STORE', '../caffeout/%s' % prefix, 1)
# store .prototxt for this run
with open('../tmp/%s_trainval.prototxt' % prefix, 'w') as f:
f.write(trainnet)
if optimize == 'kappa':
with open('../tmp/%s_val.prototxt' % prefix, 'w') as f:
f.write(valnet)
with open('../tmp/%s_solver.prototxt' % prefix, 'w') as f:
f.write(solver)
# run caffe training procedure
caffe_return_code = subprocess.call(CAFFE_ROOT + '/build/tools/caffe train --solver ../tmp/%s_solver.prototxt 2> ../caffeout/%s_log.txt' % (prefix, prefix), shell=True)
print 'CAFFE RETURN CODE ' + str(caffe_return_code)
# set result to None by default
result = None
# if Caffe ran successfully update the result
if int(caffe_return_code) == 0:
        # run the performance measure estimator
if optimize == 'loss':
result = Performance.loss(prefix)
elif optimize == 'accuracy':
result = Performance.accuracy(prefix)
elif optimize == 'kappa':
result = Performance.kappasq(prefix, CAFFE_ROOT)
else:
            print 'ERROR: Unknown performance measure %s' % optimize
print '-----------------------------'
print prefix, result
print '-----------------------------'
return result
# Write a function like this called 'main'
def main(job_id, params):
return cafferun(params)
| zhangvs1988/zhangyl-Djangodemo | article/migrations/0003_auto_20160810_1219.py | Python | gpl-3.0 | 806 | 0.001253 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-10 04:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0002_auto_20160810_0134'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='content_1',
),
migrations.AddField(
model_name='article',
name='content',
field=models.CharField(default=0, max_length=10000, verbose_name='内容'),
preserve_default=False,
),
migrations.AlterField(
model_name='article',
            name='title',
field=models.CharField(max_length=100, verbose_name='标题'),
),
]
| KorayAgaya/ftpmap | tools/proftpd_versions.py | Python | gpl-3.0 | 1,228 | 0.012215 |
#!/usr/bin/env python2.7
#
# ProFTPD versions - create proftpd versions and dump them into versions.h
#
# Copyright (c) 2015 by Hypsurus
#
#
import sys
# The proftpd versions cycle:
# proftpd-1.3.2rc1
# proftpd-1.3.2rc2
# proftpd-1.3.2rc3
# proftpd-1.3.2rc4
# proftpd-1.3.2
# proftpd-1.3.2a
# proftpd-1.3.2b
# proftpd-1.3.2c
# proftpd-1.3.2d
# proftpd-1.3.2
# Versions
versions = []
VERSION=1
for version_mi in xrange(1, 4):
    # Just in case they release 1.x.20
for version_mic in xrange(0, 21):
fixed = "ProFTPD%d.%d.%d" %(VERSION,version_mi,version_mi
|
c)
versions.append(fixed)
versions.append(fixed+"rc1")
versions.append(fixed+"rc2")
versions.append(fixed+"rc3")
versions.append(fixed+"rc4")
versions.append(fixed+"a")
versions.append(fixed+"b")
versions.append(fixed+"c")
versions.append(fixed+"d")
versions.append(fixed+"e")
versions.append(fixed+"f")
versions.append(fixed+"g")
# Print the versions to the versions.h file
print("/* version.h - created by the proftpd_versions.py script by Hypsurus */\n\n")
print("const char * versions[] = {")
for version in versions:
print("\t\t\"%s\"," %version)
print("};")
| quaquel/EMAworkbench | ema_workbench/analysis/__init__.py | Python | bsd-3-clause | 682 | 0 |
# importing anything from analysis segfaults java with netlogo on a mac
# for now no clue why
#
from . import pairs_plotting
from .b_and_w_plotting import set_fig_to_bw
from .cart import setup_cart, CART
from .feature_scoring import (get_ex_feature_scores, get_feature_scores_all,
get_rf_feature_scores,
get_univariate_feature_scores)
from .logistic_regression import Logit
from .plotting import lines, envelopes, kde_over_time, multiple_densities
from .plotting_util import Density, PlotType
from .prim import Prim, run_constrained_prim, pca_preprocess, setup_prim
from .scenario_discovery_util import RuleInductionType
| thaim/ansible | lib/ansible/utils/path.py | Python | mit | 5,225 | 0.002679 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True, basedir=None):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
    :arg follow: A boolean to indicate whether symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')
if b_basedir is None:
b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
elif os.path.isfile(b_basedir):
b_basedir = os.path.dirname(b_basedir)
b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
if not os.path.isabs(b_final_path):
b_final_path = os.path.join(b_basedir, b_final_path)
if follow:
b_final_path = os.path.realpath(b_final_path)
return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
'''
A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
the directory chain. This function is not safe to use under world-writable locations when the first level of the
path to be created contains a predictable component. Always create a randomly-named element first if there is any
chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
disclosure or modification of sensitive file contents.
:arg path: A byte or text string representing a directory chain to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
def basedir(source):
""" returns directory for inventory or playbook """
    source = to_bytes(source, errors='surrogate_or_strict')
dname = None
if os.path.isdir(source):
dname = source
elif source in [None, '', '.']:
dname = os.getcwd()
elif os.path.isfile(source):
dname = os.path.dirname(source)
if dname:
# don't follow symlinks for basedir, enables source re-use
dname = os.path.abspath(dname)
return to_text(dname, errors='surrogate_or_strict')
def cleanup_tmp_file(path, warn=False):
"""
Removes temporary file or directory. Optionally display a warning if unable
to remove the file or directory.
:arg path: Path to file or directory to be removed
:kwarg warn: Whether or not to display a warning when the file or directory
cannot be removed
"""
try:
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.unlink(path)
except Exception as e:
if warn:
# Importing here to avoid circular import
from ansible.utils.display import Display
display = Display()
display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
except Exception:
pass
| NikolaYolov/invenio_backup | modules/bibformat/lib/elements/bfe_editors.py | Python | gpl-2.0 | 2,179 | 0.010555 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints editors
"""
__revision__ = "$Id$"
def format_element(bfo, limit, separator=' ; ', extension='[...]', print_links="yes"):
"""
Prints the list of editors of a record.
@param limit: the maximum number of editors to display
@param separator: the separator between editors.
@param extension: a text printed if more editors than 'limit' exist
@param print_links: if yes, print the editors as HTML link to their publications
"""
from urllib import quote
from invenio.config import CFG_SITE_URL
from invenio import bibrecord
authors = bibrecord.record_get_field_instances(bfo.get_record(), '100')
editors = [bibrecord.field_get_subfield_values(author, 'a')[0]
for author in authors if len(bibrecord.field_get_subfield_values(author, "e")) > 0 and bibrecord.field_get_subfield_values(author, "e")[0]=="ed." ]
if print_links.lower() == "yes":
editors = ['<a href="' + CFG_SITE_URL + '/search?f=author&p=' + \
quote(editor) + \
'&ln='+ bfo.lang + \
'">' + editor + '</a>'
for editor in editors]
if limit.isdigit() and len(editors) > int(limit):
return separator.join(editors[:int(limit)]) + extension
elif len(editors) > 0:
return separator.join(editors)
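A hedged call sketch (bfo is supplied by the BibFormat engine; limit arrives as a string, per the limit.isdigit() check above):

# format_element(bfo, limit='3', separator='; ', print_links='no')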
| rescale/django-money | djmoney/models/managers.py | Python | bsd-3-clause | 9,316 | 0.001073 |
# -*- coding: utf-8 -*-
from django import VERSION
from django.db.models import F
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import Q
from django.db.models.sql.constants import QUERY_TERMS
from django.db.models.sql.query import Query
from moneyed import Money
from .._compat import (
LOOKUP_SEP,
BaseExpression,
Case,
resolve_field,
smart_unicode,
wraps,
)
from ..utils import get_currency_field_name, prepare_expression
from .fields import CurrencyField, MoneyField
def _get_clean_name(name):
# Get rid of __lt, __gt etc for the currency lookup
path = name.split(LOOKUP_SEP)
if path[-1] in QUERY_TERMS:
return LOOKUP_SEP.join(path[:-1])
else:
return name
def _get_field(model, name):
# Create a fake query object so we can easily work out what field
# type we are dealing with
qs = Query(model)
opts = qs.get_meta()
alias = qs.get_initial_alias()
parts = name.split(LOOKUP_SEP)
# The following is borrowed from the innards of Query.add_filter - it strips out __gt, __exact et al.
num_parts = len(parts)
if num_parts > 1 and parts[-1] in QUERY_TERMS:
# Traverse the lookup query to distinguish related fields from
# lookup types.
for counter, field_name in enumerate(parts, 1):
try:
lookup_field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# Not a field. Bail out.
parts.pop()
break
# Unless we're at the end of the list of lookups, let's attempt
# to continue traversing relations.
if counter < num_parts:
try:
model = lookup_field.rel.to
except AttributeError:
# Not a related field. Bail out.
parts.pop()
break
return resolve_field(qs, parts, opts, alias)
def is_in_lookup(name, value):
return hasattr(value, '__iter__') & (name.split(LOOKUP_SEP)[-1] == 'in')
def _convert_in_lookup(model, field_name, options):
"""
``in`` lookup can not be represented as keyword lookup.
It requires transformation to combination of ``Q`` objects.
Example:
        amount__in=[Money(10, 'EUR'), Money(5, 'USD')]
is equivalent to:
Q(amount=10, amount_currency='EUR') or Q(amount=5, amount_currency='USD')
"""
field = _get_field(model, field_name)
new_query = Q()
for value in options:
if isinstance(value, Money):
option = Q(**{
field.name: value.amount,
get_currency_field_name(field.name): value.currency
})
else:
option = Q(**{field.name: value})
new_query |= option
return new_query
def _expand_money_args(model, args):
"""
Augments args so that they contain _currency lookups - ie.. Q() | Q()
"""
for arg in args:
if isinstance(arg, Q):
_expand_arg(model, arg)
return args
def _expand_arg(model, arg):
for i, child in enumerate(arg.children):
if isinstance(child, Q):
_expand_arg(model, child)
elif isinstance(child, (list, tuple)):
name, value = child
if isinstance(value, Money):
clean_name = _get_clean_name(name)
arg.children[i] = Q(*[
child,
(get_currency_field_name(clean_name), smart_unicode(value.currency))
])
field = _get_field(model, name)
if isinstance(field, MoneyField):
if isinstance(value, (BaseExpression, F)):
clean_name = _get_clean_name(name)
if not isinstance(value, F):
value = prepare_expression(value)
if not _is_money_field(model, value, name):
continue
arg.children[i] = Q(*[
child,
(get_currency_field_name(clean_name), F(get_currency_field_name(value.name)))
])
if is_in_lookup(name, value):
arg.children[i] = _convert_in_lookup(model, name, value)
def _is_money_field(model, rhs, lhs_name):
"""
Checks if the target field from the expression is instance of MoneyField.
"""
# If the right side is the same field, then no reason to check
if rhs.name == lhs_name:
return True
target_field = _get_field(model, rhs.name)
return isinstance(target_field, MoneyField)
def _expand_money_kwargs(model, args=(), kwargs=None, exclusions=()):
"""
Augments kwargs so that they contain _currency lookups.
"""
for name, value in list(kwargs.items()):
if name in exclusions:
continue
if isinstance(value, Money):
clean_name = _get_clean_name(name)
kwargs[name] = value.amount
kwargs[get_currency_field_name(clean_name)] = smart_unicode(value.currency)
else:
field = _get_field(model, name)
if isinstance(field, MoneyField):
if isinstance(value, (BaseExpression, F)) and not (Case and isinstance(value, Case)):
clean_name = _get_clean_name(name)
if not isinstance(value, F):
value = prepare_expression(value)
if not _is_money_field(model, value, name):
continue
kwargs[get_currency_field_name(clean_name)] = F(get_currency_field_name(value.name))
if is_in_lookup(name, value):
args += (_convert_in_lookup(model, name, value), )
del kwargs[name]
elif isinstance(field, CurrencyField) and 'defaults' in exclusions:
_handle_currency_field(model, name, kwargs)
return args, kwargs
def _handle_currency_field(model, name, kwargs):
name = _get_clean_name(name)
money_field_name = name[:-9] # Remove '_currency'
money_field = _get_field(model, money_field_name)
if money_field.default is not None:
kwargs['defaults'] = kwargs.get('defaults', {})
kwargs['defaults'][money_field_name] = money_field.default.amount
def _get_model(args, func):
"""
Returns the model class for given function.
Note, that ``self`` is not available for proxy models.
"""
if hasattr(func, '__self__'):
# Bound method
model = func.__self__.model
elif hasattr(func, '__wrapped__'):
# Proxy model
model = func.__wrapped__.__self__.model
else:
# Custom method on user-defined model manager.
model = args[0].model
return model
def understands_money(func):
"""
    Used to wrap a queryset method with logic to expand
a query from something like:
mymodel.objects.filter(money=Money(100, "USD"))
To something equivalent to:
mymodel.objects.filter(money=Decimal("100.0"), money_currency="USD")
"""
@wraps(func)
def wrapper(*args, **kwargs):
model = _get_model(args, func)
args = _expand_money_args(model, args)
exclusions = EXPAND_EXCLUSIONS.get(func.__name__, ())
args, kwargs = _expand_money_kwargs(model, args, kwargs, exclusions)
return func(*args, **kwargs)
return wrapper
RELEVANT_QUERYSET_METHODS = ('distinct', 'get', 'get_or_create', 'filter', 'exclude', 'update')
EXPAND_EXCLUSIONS = {
'get_or_create': ('defaults', )
}
def add_money_comprehension_to_queryset(qs):
# Decorate each relevant method with understands_money in the queryset given
for attr in RELEVANT_QUERYSET_METHODS:
setattr(qs, attr, understands_money(getattr(qs, attr)))
return qs
def money_manager(manager):
"""
Patches a model manager's get_queryset method so that each QuerySet it returns
is able to work on money fields.
    This allows users of django-money to use other managers while still doing
money queries.
"""
# Need to dynamically subclass to add
| SUSE/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/shared_public_ip_address_configuration_fragment.py | Python | mit | 1,084 | 0.000923 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SharedPublicIpAddressConfigurationFragment(Model):
"""Properti
|
es of a virtual machine that determine how it is connected to a
load balancer.
:param inbound_nat_rules: The incoming NAT rules
:type inbound_nat_rules: list of :class:`InboundNatRuleFragment
<azure.mgmt.devtestlabs.models.InboundNatRuleFragment>`
"""
_attribute_map = {
'inbound_nat_rules': {'key': 'inboundNatRules', 'type': '[InboundNatRuleFragment]'},
}
def __init__(self, inbound_nat_rules=None):
self.inbound_nat_rules = inbound_nat_rules
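A minimal instantiation sketch (illustrative; InboundNatRuleFragment is the element type named in the docstring, and these generated models take optional keyword arguments):

# from azure.mgmt.devtestlabs.models import InboundNatRuleFragment
# frag = SharedPublicIpAddressConfigurationFragment(
#     inbound_nat_rules=[InboundNatRuleFragment()])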
| clreinki/GalaxyHarvester | waypointMaps.py | Python | agpl-3.0 | 2,719 | 0.013608 |
#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
import Cookie
import dbSession
import dbShared
import MySQLdb
import ghShared
import ghLists
from jinja2 import Environment, FileSystemLoader
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
uiTheme = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
try:
uiTheme = cookies['uiTheme'].value
except KeyError:
uiTheme = ''
else:
currentUser = ''
loginResult = form.getfirst('loginAttempt', '')
sid = form.getfirst('gh_sid', '')
# Get a session
logged_state = 0
linkappend = ''
disableStr = ''
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
if loginResult == None:
loginResult = 'success'
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
if (uiTheme == ''):
uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
if (useCookies == 0):
        linkappend = 'gh_sid=' + sid
else:
disableStr = ' disabled="disabled"'
if (uiTheme == ''):
uiTheme = 'crafter'
pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
print 'Content-type: text/html\n'
env = Environment(loader=FileSystemLoader('templates'))
env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
template = env.get_template('waypointmaps.html')
print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), planetList=ghLists.getPlanetList())
| suutari/shoop | shuup_tests/admin/test_home.py | Python | agpl-3.0 | 3,871 | 0.001033 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.core.urlresolvers import reverse
from shuup.admin.views.dashboard import DashboardView
from shuup.admin.views.home import HomeView
from shuup.admin.views.wizard import WizardView
from shuup.apps.provides import override_provides
from shuup.testing.factories import get_default_shop
from shuup.testing.utils import apply_request_middleware
def get_blocks(rf, admin_user):
request = apply_request_middleware(rf.get("/"), user=admin_user)
response = HomeView.as_view()(request)
assert response.status_code == 200
return response.context_data.get("blocks", [])
def has_block_with_text(text, rf, admin_user):
return any(text in b.text for b in get_blocks(rf, admin_user))
def has_done_block_with_text(text, rf, admin_user):
return any(text in b.text for b in get_blocks(rf, admin_user) if b.done)
@pytest.mark.django_db
def test_home_wizard_block(rf, admin_user, settings):
# wizard completion block should be present
get_default_shop()
assert has_block_with_text("wizard", rf, admin_user)
# no wizard spec defined so we shouldn't see the wizard block
settings.SHUUP_SETUP_WIZARD_PANE_SPEC = []
    assert has_done_block_with_text("wizard", rf, admin_user)
@pytest.mark.django_db
def test_wizard_redirect(rf, admin_user, settings):
settings.SHUUP_SETUP_WIZARD_PANE_SPEC = []
shop = get_default_shop()
shop.maintenance_mode = True
shop.save()
request = apply_request_middleware(rf.get("/"), user=admin_user)
response = WizardView.as_view()(request)
assert response.status_code == 302
assert response["Location"] == reverse("shuup_admin:home")
@pytest.mark.django_db
def test_dashboard_redirect(rf, admin_user, settings):
settings.SHUUP_SETUP_WIZARD_PANE_SPEC = []
shop = get_default_shop()
shop.maintenance_mode = True
shop.save()
request = apply_request_middleware(rf.get("/"), user=admin_user)
response = DashboardView.as_view()(request)
assert response.status_code == 302
assert response["Location"] == reverse("shuup_admin:home")
shop.maintenance_mode = False
shop.save()
request = apply_request_middleware(rf.get("/"), user=admin_user)
response = DashboardView.as_view()(request)
assert response.status_code == 200
@pytest.mark.django_db
def test_product_blocks(rf, admin_user, settings):
shop = get_default_shop()
blocks = get_blocks(rf, admin_user)
assert any(["New product" in action["text"] for b in blocks for action in b.actions])
@pytest.mark.django_db
def test_product_category_block(rf, admin_user):
shop = get_default_shop()
blocks = get_blocks(rf, admin_user)
new_category_url = reverse("shuup_admin:category.new")
assert any([new_category_url in action["url"] for b in blocks for action in b.actions])
@pytest.mark.django_db
def test_campaign_block(rf, admin_user):
shop = get_default_shop()
assert not has_block_with_text("campaign", rf, admin_user)
@pytest.mark.django_db
def test_users_block(rf, admin_user):
shop = get_default_shop()
assert not has_block_with_text("users", rf, admin_user)
@pytest.mark.django_db
def test_cms_block(rf, admin_user):
shop = get_default_shop()
request = apply_request_middleware(rf.get("/"), user=admin_user)
response = HomeView.as_view()(request)
assert not any("web page" in b.text for b in response.context_data["blocks"])
@pytest.mark.django_db
def test_xtheme_block(rf, admin_user):
shop = get_default_shop()
blocks = get_blocks(rf, admin_user)
assert not has_done_block_with_text("look and feel", rf, admin_user)
| thousandparsec/daneel-ai | daneel-ai.py | Python | gpl-2.0 | 9,338 | 0.019383 |
#! /usr/bin/python
try:
import requirements
except ImportError:
pass
import time
import random
import logging
import sys
import os
import inspect
from optparse import OptionParser
import tp.client.threads
from tp.netlib.client import url2bits
from tp.netlib import Connection
from tp.netlib import failed, constants, objects
from tp.client.cache import Cache
import daneel
from daneel.rulesystem import RuleSystem, BoundConstraint
import picklegamestate
import cPickle
version = (0, 0, 3)
mods = []
if hasattr(sys, "frozen"):
installpath = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding( )))
else:
installpath = os.path.realpath(os.path.dirname(__file__))
def callback(mode, state, message="", todownload=None, total=None, amount=None):
logging.getLogger("daneel").debug("Downloading %s %s Message:%s", mode, state, message)
def connect(uri='tp://daneel-ai:cannonfodder@localhost/tp'):
debug = False
host, username, game, password = url2bits(uri)
print host, username, game, password
if not game is None:
username = "%s@%s" % (username, game)
connection = Connection()
# Download the entire universe
try:
connection.setup(host=host, debug=debug)
except Exception,e: #TODO make the exception more specific
print "Unable to connect to the host."
return
if failed(connection.connect("daneel-ai/%i.%i.%i" % version)):
print "Unable to connect to the host."
return
if failed(connection.login(username, password)):
# Try creating the user..
print "User did not exist, trying to create user."
if failed(connection.account(username, password, "", "daneel-ai bot")):
print "Username / Password incorrect."
return
if failed(connection.login(username, password)):
print "Created username, but still couldn't login :/"
return
games = connection.games()
if failed(games):
print "Getting the game object failed!"
return
cache = Cache(Cache.key(host, games[0], username))
return connection, cache
def getDataDir():
if hasattr(sys, "frozen"):
return os.path.join(installpath, "share", "daneel-ai")
if "site-packages" in daneel.__file__:
datadir = os.path.join(os.path.dirname(daneel.__file__), "..", "..", "..", "..", "share", "daneel-ai")
else:
datadir = os.path.join(os.path.dirname(daneel.__file__), "..")
return datadir
def createRuleSystem(rulesfile,verbosity,cache,connection):
global mods
cons,rules = [],[]
funcs = {}
rf = open(os.path.join(getDataDir(), 'rules', rulesfile))
l = stripline(rf.readline())
while l != "[Modules]":
l = stripline(rf.readline())
l = stripline(rf.readline())
while l != "[Constraints]":
if l != "":
m = getattr(__import__("daneel."+l), l)
print l, m
mods.append(m)
try:
cons.extend(m.constraints)
except AttributeError:
pass
try:
rules.extend(m.rules)
except AttributeError:
pass
try:
exec("".join(m.functions),funcs)
except AttributeError:
pass
l = stripline(rf.readline())
l = stripline(rf.readline())
while l != "[Rules]":
if l != "": cons.append(l)
l = stripline(rf.readline())
l = stripline(rf.readline())
while l != "[Functions]":
if l != "": rules.append(l)
l = stripline(rf.readline())
exec("".join(rf.readlines()),funcs)
funcs['cache'] = cache
if connection != None:
funcs['connection'] = connection
return RuleSystem(cons,rules,funcs,verbosity)
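# Illustrative layout of a rules file as consumed above (hypothetical content;
# the section names come from the parser, the entries here are made up):
#
#   [Modules]
#   mod_standard
#   [Constraints]
#   myfact(int)
#   [Rules]
#   myfact(X) => react(X)
#   [Functions]
#   def react(x): pass
#
# Everything after [Functions] is exec'd verbatim; elsewhere, lines starting
# with "#" are dropped by stripline() below.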
def stripline(line):
    if not line or line[0] == "#": return ""
return line.strip()
def startTurn(cache,store, delta):
for m in mods:
#call startTurn if it exists in m
if "startTurn" in [x[0] for x in inspect.getmembers(m)]:
m.startTurn(cache,store, delta)
def endTurn(cache,rulesystem,connection):
for m in mods:
#call endTurn if it exists in m
if "endTurn" in [x[0] for x in inspect.getmembers(m)]:
m.endTurn(cache,rulesystem,connection)
def saveGame(cache):
root_dir = getDataDir()
save_dir = root_dir + "/states/"
writeable = checkSaveFolderWriteable(root_dir, save_dir)
# NB assumes there is enough space to write
if not writeable:
logging.getLogger("daneel").error("Cannot save information")
else:
cache.file = save_dir + time.time().__str__() + ".gamestate"
cache.save()
def checkSaveFolderWriteable(root_dir, save_dir):
    dir_exists = os.access(save_dir, os.F_OK)
dir_writeable = os.access(save_dir, os.W_OK)
dir_root_writeable = os.access(root_dir, os.W_OK)
if dir_exists and dir_writeable:
return True
    if dir_exists and not dir_writeable:
return False
if dir_root_writeable:
os.mkdir(save_dir)
return True
else:
return False
def init(cache,rulesystem,connection):
for m in mods:
#call init if it exists in m
if "init" in [x[0] for x in inspect.getmembers(m)]:
m.init(cache,rulesystem,connection)
#this is for optimisation
if "optimisationValues" in [x[0] for x in inspect.getmembers(m)]:
m.optimisationValues(optimiseValue)
def pickle(variable, file_name):
file = open(file_name, 'wb')
cPickle.dump(variable, file)
file.close()
return
def gameLoop(rulesfile,turns=-1,uri='tp://daneel-ai:cannonfodder@localhost/tp',verbosity=0,benchmark=0):
try:
level = {0:logging.WARNING,1:logging.INFO,2:logging.DEBUG}[verbosity]
except KeyError:
level = 1
fmt = "%(asctime)s [%(levelname)s] %(name)s:%(message)s"
logging.basicConfig(level=level,stream=sys.stdout,format=fmt)
try:
connection, cache = connect(uri)
except Exception, e: #TODO Null make the exception more specific
import traceback
traceback.print_exc()
print "Connection failed."
print e
return
# state = picklegamestate.GameState(rulesfile,turns,None,None,verbosity)
# state.pickle("./states/" + time.time().__str__() + ".gamestate")
gameLoopWrapped(rulesfile,turns,connection,cache,verbosity,benchmark)
def gameLoopWrapped(rulesfile,turns,connection,cache,verbosity,benchmark):
rulesystem = createRuleSystem(rulesfile,verbosity,cache,connection)
logging.getLogger("daneel").info("Downloading all data")
cache.update(connection,callback)
# state = picklegamestate.GameState(rulesfile,turns,None,cache,verbosity)
# state.pickle("./states/" + time.time().__str__() + ".gamestate")
init(cache,rulesystem,connection)
delta = True
while turns != 0:
turns = turns - 1
logging.getLogger("daneel").info("Downloading updates")
cache.update(connection,callback)
# store the cache
#saveGame(cache)
lastturn = connection.time().turn_num
startTurn(cache,rulesystem,delta)
rulesystem.addConstraint("cacheentered")
endTurn(cache,rulesystem,connection)
rulesystem.clearStore()
connection.turnfinished()
waitfor = connection.time()
logging.getLogger("daneel").info("Awaiting end of turn %s est: (%s s)..." % (lastturn,waitfor.time))
try:
while lastturn == connection.get_objects(0)[0].Informational[0][0]:
waitfor = connection.time()
time.sleep(max(1, min(10, waitfor.time / 100)))
except IOError:
print "Connection lost"
exit(2)
def gameLoopBenchMark(rulesfile,turns,connection,cache,verbosity):
rulesystem = createRuleSystem(rulesfile,verbosity,cache,connection)
logging.getLogger("daneel").info("Downloading all data")
init(cache,rulesystem,connection)
delta = False
startTurn(cache,rulesystem,delta)
rulesystem.addConstraint("cacheentered")
endTurn(cache,rulesystem,None)
rulesystem.clearStore()
return
optimiseValue = None
if __name__ == "__main__":
parser = OptionParser(version
|
lexsos/heligate
|
dj-server/apps/accounts_web/auth_ldap.py
|
Python
|
gpl-3.0
| 2,203
| 0.00227
|
import ldap
def extruct_group(fqdn):
return fqdn.split(',')[0].replace('CN=', '')
def extruct_group_list(data):
groups = []
for fqdn in data:
groups.append(extruct_group(fqdn).lower())
return groups
def get_user_info(
ldap_domain,
ldap_tree_scoupe,
user_name,
bind_user_name,
bind_password,
):
l = ldap.initialize("ldap://" + ldap_domain)
l.set_option(ldap.OPT_REFERRALS, 0)
l.protocol_version = 3
try:
l.simple_bind_s(bind_user_name + "@" + ldap_domain, bind_password)
    except ldap.LDAPError:
return None
r = l.search(
ldap_tree_scoupe,
ldap.SCOPE_SUBTREE,
'(&(objectCategory=person)(objectClass=user)(sAMAccountName={0}))'.format(user_name),
['sAMAccountName', 'memberOf', 'displayName']
)
Type, Rez = l.result(r, 1, 10)
params = {}
params['full_name'] = Rez[0][1]['displayName'][0]
params['user_name'] = Rez[0][1]['sAMAccountName'][0].lower()
params['groups'] = extruct_group_list(Rez[0][1]['memberOf'])
return params
class LdapAuthHelper(object):
def __init__(self, config):
super(LdapAuthHelper, self).__init__()
self.ldap_domain = config['LDAP_DOMAIN']
self.ldap_tree_scoupe = config['LDAP_TREE']
self.ldap_inet_group = config['LDAP_INET_GROUPT']
self.ldap_bind_user = config['LDAP_BIND_USER']
self.ldap_bind_password = config['LDAP_BIND_PASSWORD']
def get_user_info(self, user_name, password):
bind_user_name = user_name
bind_password = password
        if (self.ldap_bind_user is not None) and (self.ldap_bind_password is not None):
bind_user_name = self.ldap_bind_user
bind_password = self.ldap_bind_password
return get_user_info(
self.ldap_domain,
self.ldap_tree_scoupe,
user_name,
bind_user_name,
bind_password,
)
def auth(self, user_name, password):
info = self.get_user_info(user_name, password)
if info is None:
return False
if self.ldap_inet_group in info['groups']:
return True
return False
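# Minimal usage sketch (added for illustration; config values below are
# placeholders, not taken from a real deployment):
#
# config = {
#     'LDAP_DOMAIN': 'example.local',
#     'LDAP_TREE': 'OU=Users,DC=example,DC=local',
#     'LDAP_INET_GROUPT': 'internet-users',  # group checked by auth()
#     'LDAP_BIND_USER': 'svc-bind',          # set both bind values to None
#     'LDAP_BIND_PASSWORD': 'secret',        # to bind as the user instead
# }
# helper = LdapAuthHelper(config)
# if helper.auth('jdoe', 'password'):
#     print('jdoe may use the internet gateway')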
|
clinton-hall/nzbToMedia
|
core/utils/processes.py
|
Python
|
gpl-3.0
| 3,534
| 0.001132
|
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import socket
import subprocess
import sys
import core
from core import logger, version_check, APP_FILENAME, SYS_ARGV
if os.name == 'nt':
from win32event import CreateMutex
    from win32api import CloseHandle, GetLastError
from winerror import ERROR_ALREADY_EXISTS
class WindowsProcess(object):
def __init__(self):
self.mutex = None
        self.mutexname = 'nzbtomedia_{pid}'.format(pid=core.PID_FILE.replace('\\', '/'))  # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}'
self.CreateMutex = CreateMutex
self.CloseHandle = CloseHandle
self.GetLastError = GetLastError
self.ERROR_ALREADY_EXISTS = ERROR_ALREADY_EXISTS
def alreadyrunning(self):
self.mutex = self.CreateMutex(None, 0, self.mutexname)
self.lasterror = self.GetLastError()
if self.lasterror == self.ERROR_ALREADY_EXISTS:
self.CloseHandle(self.mutex)
return True
else:
return False
def __del__(self):
if self.mutex:
self.CloseHandle(self.mutex)
class PosixProcess(object):
def __init__(self):
self.pidpath = core.PID_FILE
self.lock_socket = None
def alreadyrunning(self):
try:
self.lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.lock_socket.bind('\0{path}'.format(path=self.pidpath))
self.lasterror = False
return self.lasterror
except socket.error as e:
if 'Address already in use' in str(e):
self.lasterror = True
return self.lasterror
except AttributeError:
pass
if os.path.exists(self.pidpath):
# Make sure it is not a 'stale' pidFile
try:
pid = int(open(self.pidpath, 'r').read().strip())
except Exception:
pid = None
# Check list of running pids, if not running it is stale so overwrite
if isinstance(pid, int):
try:
os.kill(pid, 0)
self.lasterror = True
except OSError:
self.lasterror = False
else:
self.lasterror = False
else:
self.lasterror = False
if not self.lasterror:
# Write my pid into pidFile to keep multiple copies of program from running
try:
fp = open(self.pidpath, 'w')
fp.write(str(os.getpid()))
fp.close()
except Exception:
pass
return self.lasterror
def __del__(self):
if not self.lasterror:
if self.lock_socket:
self.lock_socket.close()
if os.path.isfile(self.pidpath):
os.unlink(self.pidpath)
if os.name == 'nt':
RunningProcess = WindowsProcess
else:
RunningProcess = PosixProcess
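# Illustrative single-instance guard (a sketch, not original code); both
# classes expose the same alreadyrunning() interface, so callers stay
# platform-agnostic:
#
# process = RunningProcess()
# if process.alreadyrunning():
#     logger.log('nzbToMedia is already running, exiting this instance.')
#     sys.exit(1)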
def restart():
install_type = version_check.CheckVersion().install_type
status = 0
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, APP_FILENAME]
if popen_list:
popen_list += SYS_ARGV
logger.log(u'Restarting nzbToMedia with {args}'.format(args=popen_list))
logger.close()
p = subprocess.Popen(popen_list, cwd=os.getcwd())
p.wait()
status = p.returncode
os._exit(status)
|
ziplokk1/python-amazon-mws
|
mws/parsers/fulfillment/listinboundshipmentitems.py
|
Python
|
unlicense
| 2,481
| 0.006046
|
from mws.parsers.base import first_element, BaseElementWrapper, BaseResponseMixin
from mws._mws import InboundShipments
namespaces = {
'a': 'http://mws.amazonaws.com/FulfillmentInboundShipment/2010-10-01/'
}
class Member(BaseElementWrapper):
def __init__(self, element):
BaseElementWrapper.__init__(self, element)
@property
@first_element
def quantity_shipped(self):
return self.element.xpath('./a:QuantityShipped/text()', namespaces=namespaces)
@property
@first_element
def shipment_id(self):
return self.element.xpath('./a:ShipmentId/text()', namespaces=namespaces)
@property
@first_element
def fulfillment_network_sku(self):
return self.element.xpath('./a:FulfillmentNetworkSKU/text()', namespaces=namespaces)
@property
def asin(self):
return self.fulfillment_network_sku
@property
@first_element
def seller_sku(self):
return self.element.xpath('./a:SellerSKU/text()', namespaces=namespaces)
@property
@first_element
def quantity_received(self):
return self.element.xpath('./a:QuantityReceived/text()', namespaces=namespaces)
@property
@first_element
def quantity_in_case(self):
return self.element.xpath('./a:QuantityInCase/text()', namespaces=namespaces)
class ListInboundShipmentItemsResponse(BaseElementWrapper, BaseResponseMixin):
@property
def shipment_items(self):
        return [Member(x) for x in self.element.xpath('//a:member', namespaces=namespaces)]
@property
@first_element
def next_token(self):
return self.element.xpath('//a:NextToken/text()', namespaces=namespaces)
@classmethod
def from_next_token(cls, mws_access_key, mws_secret_key, mws_account_id, next_token, mws_auth_token=None):
api = InboundShipments(mws_access_key, mws_secret_key, mws_account_id, auth_token=mws_auth_token)
response = api.list_inbound_shipment_items_by_next_token(next_token)
return cls.load(response.original)
@classmethod
def request(cls, mws_access_key, mws_secret_key, mws_account_id, shipment_id,
mws_auth_token=None, last_updated_after=None, last_updated_before=None):
api = InboundShipments(mws_access_key, mws_secret_key, mws_account_id, auth_token=mws_auth_token)
response = api.list_inbound_shipment_items(shipment_id, last_updated_after, last_updated_before)
return cls.load(response.original)
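# Hedged usage sketch (keys and the shipment id below are placeholders):
# request() wraps ListInboundShipmentItems, and from_next_token() follows
# the pagination token until the listing is exhausted.
#
# response = ListInboundShipmentItemsResponse.request(
#     'ACCESS_KEY', 'SECRET_KEY', 'ACCOUNT_ID', 'FBA12345')
# while True:
#     for member in response.shipment_items:
#         print(member.seller_sku, member.quantity_shipped)
#     if not response.next_token:
#         break
#     response = ListInboundShipmentItemsResponse.from_next_token(
#         'ACCESS_KEY', 'SECRET_KEY', 'ACCOUNT_ID', response.next_token)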
|
luci/luci-py
|
appengine/auth_service/realms/permissions_test.py
|
Python
|
apache-2.0
| 3,254
| 0.008297
|
#!/usr/bin/env vpython
# Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import logging
import sys
import unittest
import test_env
test_env.setup_test_env()
from proto import realms_config_pb2
from realms import permissions
from test_support import test_case
class BuilderTest(test_case.TestCase):
def setUp(self):
super(BuilderTest, self).setUp()
self.builder = permissions.Builder('rev')
self.permission = self.builder.permission
self.include = self.builder.include
self.role = self.builder.role
def check(self, perms=None, roles=None):
db = self.builder.finish()
self.assertEquals(db.revision, 'rev')
if perms is not None:
self.assertEquals(sorted(db.permissions), perms)
if roles is not None:
self.assertEquals(
db.roles,
{n: permissions.Role(n, perms) for n, perms in roles.items()})
def test_empty(self):
self.check([], {})
def test_permissions_only(self):
self.permission('luci.dev.p1')
self.permission('luci.dev.p2')
self.permission('luci.dev.p1') # redeclaration is ok
self.check(perms=['luci.dev.p1', 'luci.dev.p2'])
def test_bad_permission_name(self):
with self.assertRaises(ValueError):
self.permission('luci.dev')
with self.assertRaises(ValueError):
self.permission('luci.dev.something.something')
def test_simple_role(self):
self.role('role/dev.a', [
self.permission('luci.dev.p1'),
self.permission('luci.dev.p2'),
])
self.check(
perms=['luci.dev.p1', 'luci.dev.p2'],
roles={'role/dev.a': ('luci.dev.p1', 'luci.dev.p2')})
def test_complex_role(self):
self.role('role/dev.a', [
self.permission('luci.dev.p1'),
self.permission('luci.dev.p2'),
])
self.role('role/dev.b', [
self.permission('luci.dev.p2'),
self.permission('luci.dev.p3'),
self.include('role/dev.a'),
])
self.check(
        perms=['luci.dev.p1', 'luci.dev.p2', 'luci.dev.p3'],
roles={
'role/dev.a': ('luci.dev.p1', 'luci.dev.p2'),
'role/dev.b': ('luci.dev.p1', 'luci.dev.p2', 'luci.dev.p3'),
})
def test_role_redeclaration(self):
self.role('role/dev.a', [])
with self.assertRaises(ValueError):
      self.role('role/dev.a', [])
def test_bad_role_name(self):
with self.assertRaises(ValueError):
self.role('zzz/role', [])
def test_referencing_undeclared_role(self):
with self.assertRaises(ValueError):
self.include('role/zzz')
def test_non_idempotent_perm(self):
self.permission('luci.dev.p1')
self.permission('luci.dev.p1')
with self.assertRaises(ValueError):
self.permission('luci.dev.p1', internal=True)
class HardcodedDBTest(test_case.TestCase):
def test_can_be_built(self):
db = permissions.db()
for b in db.implicit_root_bindings('proj'):
self.assertIsInstance(b, realms_config_pb2.Binding)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.FATAL)
unittest.main()
|
kyubifire/softlayer-python
|
SoftLayer/CLI/user/permissions.py
|
Python
|
mit
| 1,893
| 0.001585
|
"""List A users permissions."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""User Permissions. TODO change to list all permissions, and which users have them"""
mgr = SoftLayer.UserManager(env.client)
user_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'username')
    object_mask = "mask[id, permissions, isMasterUserFlag, roles]"
user = mgr.get_user(user_id, object_mask)
all_permissions = mgr.get_all_permissions()
user_permissions = perms_to_dict(user['permissions'])
if user['isMasterUserFlag']:
click.secho('This account is the Master User and has all permissions enabled', fg='green')
env.fout(roles_table(user))
env.fout(permission_table(user_permissions, all_permissions))
def perms_to_dict(perms):
"""Takes a list of permissions and transforms it into a dictionary for better searching"""
permission_dict = {}
for perm in perms:
permission_dict[perm['keyName']] = True
return permission_dict
def permission_table(user_permissions, all_permissions):
"""Creates a table of available permissions"""
table = formatting.Table(['Description', 'KeyName', 'Assigned'])
table.align['KeyName'] = 'l'
table.align['Description'] = 'l'
table.align['Assigned'] = 'l'
for perm in all_permissions:
assigned = user_permissions.get(perm['keyName'], False)
table.add_row([perm['name'], perm['keyName'], assigned])
return table
def roles_table(user):
"""Creates a table for a users roles"""
table = formatting.Table(['id', 'Role Name', 'Description'])
for role in user['roles']:
table.add_row([role['id'], role['name'], role['description']])
return table
|
factorlibre/l10n-spain
|
l10n_es_ticketbai_api_batuz/__init__.py
|
Python
|
agpl-3.0
| 40
| 0
|
from . import models
from . import lroe
|
DataDog/sensei
|
clients/python/sensei/sensei_components.py
|
Python
|
apache-2.0
| 35,124
| 0.017082
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import urllib2
import json
import sys
import logging
from datetime import datetime
import time
import re
logger = logging.getLogger("sensei_components")
#
# REST API parameter constants
#
PARAM_OFFSET = "start"
PARAM_COUNT = "rows"
PARAM_QUERY = "q"
PARAM_QUERY_PARAM = "qparam"
PARAM_SORT = "sort"
PARAM_SORT_ASC = "asc"
PARAM_SORT_DESC = "desc"
PARAM_SORT_SCORE = "relevance"
PARAM_SORT_SCORE_REVERSE = "relrev"
PARAM_SORT_DOC = "doc"
PARAM_SORT_DOC_REVERSE = "docrev"
PARAM_FETCH_STORED = "fetchstored"
PARAM_SHOW_EXPLAIN = "showexplain"
PARAM_ROUTE_PARAM = "routeparam"
PARAM_GROUP_BY = "groupby"
PARAM_MAX_PER_GROUP = "maxpergroup"
PARAM_SELECT = "select"
PARAM_SELECT_VAL = "val"
PARAM_SELECT_NOT = "not"
PARAM_SELECT_OP = "op"
PARAM_SELECT_OP_AND = "and"
PARAM_SELECT_OP_OR = "or"
PARAM_SELECT_PROP = "prop"
PARAM_FACET = "facet"
PARAM_DYNAMIC_INIT = "dyn"
PARAM_PARTITIONS = "partitions"
PARAM_FACET_EXPAND = "expand"
PARAM_FACET_MAX = "max"
PARAM_FACET_MINHIT = "minhit"
PARAM_FACET_ORDER = "order"
PARAM_FACET_ORDER_HITS = "hits"
PARAM_FACET_ORDER_VAL = "val"
PARAM_DYNAMIC_TYPE = "type"
PARAM_DYNAMIC_TYPE_STRING = "string"
PARAM_DYNAMIC_TYPE_BYTEARRAY = "bytearray"
PARAM_DYNAMIC_TYPE_BOOL = "boolean"
PARAM_DYNAMIC_TYPE_INT = "int"
PARAM_DYNAMIC_TYPE_LONG = "long"
PARAM_DYNAMIC_TYPE_DOUBLE = "double"
PARAM_DYNAMIC_VAL = "vals"
PARAM_RESULT_PARSEDQUERY = "par
|
sedquery"
PARAM_RESULT_HIT_STORED_FIELDS = "stored"
PARAM_RESULT_HIT_STORED_FIELDS_NAME = "name"
PARAM_RESULT_HIT_STORED_FIELDS_VALUE = "val"
PARAM_RESULT_HIT_EXPLANATION = "explanation"
PARAM_RESULT_FACETS = "facets"
PARAM_RESULT_TID = "tid"
PARAM_RESULT_TOTALDOCS = "totaldocs"
PARAM_RESULT_NUMHITS = "numhits"
PARAM_RESULT_HITS = "hits"
PARAM_RESULT_HIT_UID = "uid"
PARAM_RESULT_HIT_DOCID = "docid"
PARAM_RESULT_HIT_SCORE = "score"
PARAM_RESULT_HIT_SRC_DATA = "srcdata"
PARAM_RESULT_TIME = "time"
PARAM_RESULT_SELECT_LIST = "select_list"
PARAM_SYSINFO_NUMDOCS = "numdocs"
PARAM_SYSINFO_LASTMODIFIED = "lastmodified"
PARAM_SYSINFO_VERSION = "version"
PARAM_SYSINFO_FACETS = "facets"
PARAM_SYSINFO_FACETS_NAME = "name"
PARAM_SYSINFO_FACETS_RUNTIME = "runtime"
PARAM_SYSINFO_FACETS_PROPS = "props"
PARAM_SYSINFO_CLUSTERINFO = "clusterinfo"
PARAM_SYSINFO_CLUSTERINFO_ID = "id"
PARAM_SYSINFO_CLUSTERINFO_PARTITIONS = "partitions"
PARAM_SYSINFO_CLUSTERINFO_NODELINK = "nodelink"
PARAM_SYSINFO_CLUSTERINFO_ADMINLINK = "adminlink"
PARAM_RESULT_HITS_EXPL_VALUE = "value"
PARAM_RESULT_HITS_EXPL_DESC = "description"
PARAM_RESULT_HITS_EXPL_DETAILS = "details"
PARAM_RESULT_FACET_INFO_VALUE = "value"
PARAM_RESULT_FACET_INFO_COUNT = "count"
PARAM_RESULT_FACET_INFO_SELECTED = "selected"
#
# JSON API parameter constants
#
JSON_PARAM_COLUMNS = "columns"
JSON_PARAM_EXPLAIN = "explain"
JSON_PARAM_FACETS = "facets"
JSON_PARAM_FACET_INIT = "facetInit"
JSON_PARAM_FETCH_STORED = "fetchStored"
JSON_PARAM_FETCH_TERM_VECTORS = "fetchTermVectors"
JSON_PARAM_FILTER = "filter"
JSON_PARAM_FROM = "from"
JSON_PARAM_GROUPBY = "groupBy"
JSON_PARAM_PARTITIONS = "partitions"
JSON_PARAM_QUERY = "query"
JSON_PARAM_QUERY_STRING = "query_string"
JSON_PARAM_ROUTEPARAM = "routeParam"
JSON_PARAM_SELECTIONS = "selections"
JSON_PARAM_SIZE = "size"
JSON_PARAM_SORT = "sort"
JSON_PARAM_TOP = "top"
JSON_PARAM_VALUES = "values"
JSON_PARAM_EXCLUDES = "excludes"
JSON_PARAM_OPERATOR = "operator"
JSON_PARAM_NO_OPTIMIZE = "_noOptimize"
# Group by related column names
GROUP_VALUE = "groupvalue"
GROUP_HITS = "grouphits"
# Default constants
DEFAULT_REQUEST_OFFSET = 0
DEFAULT_REQUEST_COUNT = 10
DEFAULT_REQUEST_MAX_PER_GROUP = 10
DEFAULT_FACET_MINHIT = 1
DEFAULT_FACET_MAXHIT = 10
DEFAULT_FACET_ORDER = PARAM_FACET_ORDER_HITS
#
# Utilities for result display
#
def print_line(keys, max_lens, char='-', sep_char='+'):
sys.stdout.write(sep_char)
for key in keys:
sys.stdout.write(char * (max_lens[key] + 2) + sep_char)
sys.stdout.write('\n')
def print_header(keys, max_lens, char='-', sep_char='+'):
print_line(keys, max_lens, char=char, sep_char=sep_char)
sys.stdout.write('|')
for key in keys:
sys.stdout.write(' %s%s |' % (key, ' ' * (max_lens[key] - len(key))))
sys.stdout.write('\n')
print_line(keys, max_lens, char=char, sep_char=sep_char)
def print_footer(keys, max_lens, char='-', sep_char='+'):
print_line(keys, max_lens, char=char, sep_char=sep_char)
def safe_str(obj):
"""Return the byte string representation of obj."""
try:
return str(obj)
except UnicodeEncodeError:
# obj is unicode
return unicode(obj).encode("unicode_escape")
class SenseiClientError(Exception):
"""Exception raised for all errors related to Sensei client."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SenseiFacet:
def __init__(self,expand=False,minHits=1,maxCounts=10,orderBy=PARAM_FACET_ORDER_HITS):
self.expand = expand
self.minHits = minHits
self.maxCounts = maxCounts
self.orderBy = orderBy
class SenseiSelections:
def __init__(self, type):
        self.type = type
self.selection = {}
def get_type(self):
return self.type
def get_selection(self):
return self.selection
class SenseiQuery:
def __init__(self, type):
self.type = type
self.query = {}
def get_type(self):
return self.type
def get_query(self):
return self.query
class SenseiQueryMatchAll(SenseiQuery):
def __init__(self):
SenseiQuery.__init__(self, "match_all")
self.query={"match_all":{"boost":1.0}}
def set_boost(self, boost):
target = (self.query)["match_all"]
target["boost"]=boost
class SenseiQueryIDs(SenseiQuery):
def __init__(self, values, excludes):
SenseiQuery.__init__(self, "ids")
self.query={"ids" : {"values" : [], "excludes":[], "boost":1.0}}
if isinstance(values, list) and isinstance(excludes, list):
self.query = {"ids" : {"values" : values, "excludes":excludes, "boost":1.0}}
def add_values(self, values):
if self.query.has_key("ids"):
values_excludes = self.query["ids"]
if values_excludes.has_key("values"):
orig_values = values_excludes["values"]
orig_set = set(orig_values)
for new_value in values:
if new_value not in orig_set:
orig_values.append(new_value)
def add_excludes(self, excludes):
if self.query.has_key("ids"):
values_excludes = self.query["ids"]
if values_excludes.has_key("excludes"):
orig_excludes = values_excludes["excludes"]
orig_set = set(orig_excludes)
for new_value in excludes:
if new_value not in orig_set:
orig_excludes.append(new_value)
def set_boost(self, boost):
target = (self.query)["ids"]
target["boost"]=boost
class SenseiQueryString(SenseiQuery):
def __init__(self, query):
SenseiQuery.__init__(self, "query_string")
self.query={"query_string":{"query":query,
"default_field":"contents",
"default_operator":"OR",
"allow_leading_wildcard":True,
"lowercase_expanded_terms":True,
"enable_position_increments":True,
"fuzzy_prefix_length":0,
"fuzzy_min_sim":0.5,
"phrase_slop":0,
"boost":1.0,
"auto_generate_phrase_queries":False,
"fields":[],
|
access-missouri/am-django-project
|
am/legislative/migrations/0002_auto_20170705_2126.py
|
Python
|
bsd-2-clause
| 1,234
| 0.003241
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-05 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('legislative', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, help_text='Date when the last action on bill happened.', null=True),
),
migrations.AlterField(
model_name='bill',
name='last_action_description',
field=models.CharField(blank=True, help_text="Description of the bill's last action.", max_length=300, null=True),
),
migrations.AlterField(
model_name='bill',
name='lr_number',
            field=models.CharField(blank=True, help_text='Legislative Research (?) number.', max_length=100, null=True),
),
migrations.AlterField(
model_name='bill',
name='proposed_effective_date',
            field=models.DateField(blank=True, help_text='Proposed date when the bill, if passed, would go into effect.', null=True),
),
]
|
weddige/moneypenny
|
pywcl/scheduler/__init__.py
|
Python
|
mit
| 3,088
| 0.003886
|
# -*- coding: UTF-8 -*-
from datetime import datetime
from threading import Timer
from queue import Queue
import uuid
import logging
# Fallback for Python < 3.3
try:
from time import perf_counter
except ImportError:
from time import clock as perf_counter
log = logging.getLogger(__name__)
class _Task:
_processing_time = 10
_scheduler = None
    def __init__(self, function, due=None, interval=None, repeat=0):
self._function = function
if hasattr(due, '__iter__'):
self._due_iter = iter(due)
self._due = self._due_iter.__next__()
else:
self._due_iter = None
self._due = due
self._interval = interval
self._repeat = repeat
if not (self._due or self._interval):
raise ValueError
def __call__(self, *args, job_uuid=None, **kwargs):
start = perf_counter()
result = self._function(*args, **kwargs)
self._processing_time = perf_counter() - start
if self._scheduler:
del self._scheduler._scheduled[job_uuid]
if self._interval and self._repeat != 1:
if self._repeat > 0:
self._repeat -= 1
self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
if self._due_iter:
self._due = self._due_iter.__next__()
if self._due:
self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
return result
def __get__(self, obj, type=None):
if obj is None:
return self
new_func = self._function.__get__(obj, type)
return self.__class__(new_func, self._due_iter or self._due, self._interval, self._repeat)
class Task:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, function):
return _Task(function, *self.args, **self.kwargs)
class Scheduler:
_queue = Queue()
_scheduled = dict()
def __init__(self):
pass
def schedule(self, function, *args, job_uuid=None, **kwargs):
if isinstance(function, _Task):
if not job_uuid:
job_uuid = uuid.uuid4()
kwargs['job_uuid'] = job_uuid
function._scheduler = self
if function._interval:
timer = Timer(function._interval, function, args, kwargs)
else:
remainder = (function._due - datetime.now()).total_seconds()
timer = Timer(remainder - function._processing_time, function, args, kwargs)
self._scheduled[job_uuid] = timer
timer.start()
return job_uuid
else:
            self._queue.put((function, args, kwargs))
def cancel(self, job_uuid=None):
if job_uuid:
self._scheduled[job_uuid].cancel()
del self._scheduled[job_uuid]
else:
            for job_uuid in list(self._scheduled):
                self._scheduled[job_uuid].cancel()
                del self._scheduled[job_uuid]
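if __name__ == '__main__':
    # Illustrative demo (added; not part of the original module): run a
    # function three times at a one-second interval, then clean up. The
    # Task decorator turns tick into a _Task, and Scheduler.schedule()
    # drives the repeated Timer-based rescheduling.
    import time

    @Task(interval=1, repeat=3)
    def tick():
        print('tick', datetime.now())

    scheduler = Scheduler()
    scheduler.schedule(tick)
    time.sleep(4)
    scheduler.cancel()  # cancel anything still pending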
|
leifurhauks/django-mailbox
|
django_mailbox/south_migrations/0005_rename_fields.py
|
Python
|
mit
| 1,958
| 0.005618
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('django_mailbox_message', 'from_address', 'address')
db.rename_column('django_mailbox_message', 'received', 'processed')
def backwards(self, orm):
db.rename_column('django_mailbox_message', 'address', 'from_address')
db.rename_column('django_mailbox_message', 'processed', 'received')
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['django_mailbox']
|
mivade/qCamera
|
qcamera/thorlabs_dcx.py
|
Python
|
bsd-2-clause
| 8,844
| 0.004636
|
"""Thorlabs DCx series cameras
Drivers for Windows and Linux can be downloaded from Thorlabs__.
__ http://www.thorlabs.de/software_pages/viewsoftwarepage.cfm?code=DCx
Python implementation of ueye interface: https://github.com/bernardokyotoku/pydcu
"""
from __future__ import print_function
import sys
import ctypes
from ctypes import byref, c_double, c_int
import numpy as np
from .camera import Camera
from .exceptions import ThorlabsDCxError
class CamInfo(ctypes.Structure):
_fields_ = [
("SerNo", ctypes.c_char*12),
("ID", ctypes.c_char*20),
("Version", ctypes.c_char*10),
("Date", ctypes.c_char*12),
("Select", ctypes.c_byte),
("Type", ctypes.c_byte),
("Reserved", ctypes.c_char)
]
class ThorlabsDCx(Camera):
"""Class for Th
|
orlabs DCx series cameras."""
# Setup and shutdown
# -------------------------------------------------------------------------
# TODO: Change to use a logger!
def _chk(self, msg):
"""Check for errors from the C library."""
if msg:
if msg == 127:
print("Out of memory, probably because of a memory leak!!!")
if msg == 125:
print(
"125: IS_INVALID_PARAMETER: One of the submitted " + \
"parameters is outside the valid range or is not " + \
"supported for this sensor or is not available in this mode.")
print("msg:", msg)
def _initialize(self, **kwargs):
"""Initialize the camera."""
# Load the library.
if 'win' in sys.platform:
try:
self.clib = ctypes.cdll.uc480_64
            except OSError:
self.clib = ctypes.cdll.uc480
else:
self.clib = ctypes.cdll.LoadLibrary('libueye_api.so')
# Initialize the camera. The filehandle being 0 initially
# means that the first available camera will be used. This is
# not really the right way of doing things if there are
# multiple cameras installed, but it's good enough for a lot
# of cases.
number_of_cameras = ctypes.c_int(0)
self._chk(self.clib.is_GetNumberOfCameras(byref(number_of_cameras)))
if number_of_cameras.value < 1:
raise ThorlabsDCxError("No camera detected!")
self.filehandle = ctypes.c_int(0)
self._chk(self.clib.is_InitCamera(
ctypes.pointer(self.filehandle)))
# Resolution of camera. (height, width)
self.shape = (1024, 1280)
# Allocate memory. Declare variables for storing memory ID and
# memory start location:
self.pid = ctypes.c_int()
self.ppcImgMem = ctypes.c_char_p()
# Allocate the right amount of memory:
bitdepth = 8 # Camera is 8 bit.
self._chk(self.clib.is_AllocImageMem(
self.filehandle, self.shape[1], self.shape[0],
bitdepth, byref(self.ppcImgMem), byref(self.pid)))
# Tell the driver to use the newly allocated memory:
self._chk(self.clib.is_SetImageMem(self.filehandle, self.ppcImgMem , self.pid))
# Enable autoclosing. This allows for safely closing the
# camera if it is disconnected.
self._chk(self.clib.is_EnableAutoExit(self.filehandle, 1))
def get_camera_properties(self):
filename = 'thorlabs_dcx.json'
self.logger.warning("Warning: Warnings do not work!")
self.props.load(filename)
def close(self):
"""Close the camera safely."""
self._chk(self.clib.is_ExitCamera(self.filehandle))
# Image acquisition
# -------------------------------------------------------------------------
def set_acquisition_mode(self, mode):
"""Set the image acquisition mode."""
def _acquire_image_data(self):
"""Code for getting image data from the camera should be
placed here.
"""
# Allocate memory for image:
        img_size = self.shape[0]*self.shape[1]//self.bins**2
c_array = ctypes.c_char*img_size
c_img = c_array()
# Take one picture: wait time is waittime * 10 ms:
waittime = c_int(20)
self._chk(self.clib.is_FreezeVideo(self.filehandle, waittime))
# Copy image data from the driver allocated memory to the
# memory that we allocated.
self._chk(self.clib.is_CopyImageMem(self.filehandle, self.ppcImgMem, self.pid, c_img))
# Pythonize and return
img_array = np.frombuffer(c_img, dtype=ctypes.c_ubyte)
img_array.shape = np.array(self.shape)
return img_array
# Triggering
# -------------------------------------------------------------------------
def get_trigger_mode(self):
"""Query the current trigger mode."""
def set_trigger_mode(self, mode):
"""Setup trigger mode."""
def trigger(self):
"""Send a software trigger to take an image immediately."""
def start(self):
"""Do nothing for this camera."""
pass
def stop(self):
"""Do nothing for this camera."""
pass
# Gain and exposure time
# -------------------------------------------------------------------------
def _update_exposure_time(self, t):
"""Set the exposure time."""
IS_EXPOSURE_CMD_SET_EXPOSURE = 12
nCommand = IS_EXPOSURE_CMD_SET_EXPOSURE
Param = c_double(t)
SizeOfParam = 8
self._chk(self.clib.is_Exposure(self.filehandle, nCommand, byref(Param), SizeOfParam))
def get_gain(self):
"""Query the current gain settings."""
def set_gain(self, gain, **kwargs):
"""Set the camera gain."""
if __name__ == "__main__":
with ThorlabsDCx() as cam:
pass
"""This was in the class. It shouldn't have been!
def test(self):
print("Testing camera:")
os_version = self.clib.is_GetOsVersion()
print("OS version:",os_version)
if os_version == 12:
print("Windows 7")
number_of_cameras = ctypes.c_int(0)
mypoint = ctypes.pointer(number_of_cameras)
self.clib.is_GetNumberOfCameras(mypoint)
print("Number of cameras:",number_of_cameras.value)
if number_of_cameras >= 1:
return_value = ctypes.c_int()
caminfo = CamInfo()
return_value = self.clib.is_GetCameraInfo(self.filehandle, ctypes.pointer(caminfo))
if return_value == 0:
print("SerNo: ",caminfo.SerNo)
print("ID: ",caminfo.ID)
print("Version: ",caminfo.Version)
print("Date: ",caminfo.Date)
print("Select: ",caminfo.Select)
print("Type: ",caminfo.Type)
print("Reserved: ", caminfo.Reserved)
else:
print("No camera detected!")
print("returned:",return_value)
# Allocate image storage
img_size = self.shape[0]*self.shape[1]/self.bins**2
c_array = ctypes.c_char*img_size
c_img = c_array()
print(self.shape)
pid = ctypes.c_int()
mem = ctypes.c_char_p
ppcImgMem = mem()
self._chk(self.clib.is_AllocImageMem(self.filehandle, 1280, 1024, 8, byref(ppcImgMem), byref(pid)))
print("Inquiring about memory:")
print(pid)
width = c_int()
height = c_int()
bitdepth = c_int()
self._chk(self.clib.is_InquireImageMem(self.filehandle, ppcImgMem, pid, byref(width), byref(height), byref(bitdepth), None))
print("width:",width)
print("height:",height)
print("depth:",bitdepth)
print("SetImageMem")
self._chk(self.clib.is_SetImageMem(self.filehandle, ppcImgMem , pid))
# print("Set display mode")
# self._chk(self.clib.is_SetDisplayMode(self.filehandle, 1))
#pMem = ctypes.pointer(ctypes.c_void_p())
#s
|
sebrandon1/tempest
|
tempest/api/compute/admin/test_security_groups.py
|
Python
|
apache-2.0
| 3,588
| 0
|
# Copyright 2013 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(SecurityGroupsTestAdminJSON, cls).setup_clients()
cls.adm_client = cls.os_adm.compute_security_groups_client
cls.client = cls.security_groups_client
def _delete_security_group(self, securitygroup_id, admin=True):
if admin:
self.adm_client.delete_security_group(securitygroup_id)
else:
self.client.delete_security_group(securitygroup_id)
@test.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
@test.services('network')
def test_list_security_groups_list_all_tenants_filter(self):
# Admin can list security groups of all tenants
# List of all security groups created
security_group_list = []
# Create two security groups for a non-admin tenant
for i in range(2):
name = data_utils.rand_name('securitygroup')
description = data_utils.rand_name('description')
securitygroup = self.client.create_security_group(
name=name, description=description)['security_group']
self.addCleanup(self._delete_security_group,
securitygroup['id'], admin=False)
security_group_list.append(securitygroup)
client_tenant_id = securitygroup['tenant_id']
# Create two security groups for admin tenant
for i in range(2):
name = data_utils.rand_name('securitygroup')
description = data_utils.rand_name('description')
adm_securitygroup = self.adm_client.create_security_group(
name=name, description=description)['security_group']
self.addCleanup(self._delete_security_group,
adm_securitygroup['id'])
security_group_list.append(adm_securitygroup)
# Fetch all security groups based on 'all_tenants' search filter
fetched_list = self.adm_client.list_security_groups(
all_tenants='true')['security_groups']
sec_group_id_list = [sg['id'] for sg in fetched_list]
        # Now check if all created Security Groups are present in fetched list
for sec_group in security_group_list:
self.assertIn(sec_group['id'],
sec_group_id_list)
# Fetch all security groups for non-admin user with 'all_tenants'
# search filter
fetched_list = (self.client.list_security_groups(all_tenants='true')
['security_groups'])
# Now check if all created Security Groups are present in fetched list
for sec_group in fetched_list:
self.assertEqual(sec_group['tenant_id'], client_tenant_id,
"Failed to get all security groups for "
"non admin user.")
|
GbalsaC/bitnamiP
|
venv/src/edx-milestones/milestones/models.py
|
Python
|
agpl-3.0
| 5,901
| 0.000508
|
# pylint: disable=no-init
# pylint: disable=old-style-class
# pylint: disable=too-few-public-methods
"""
Database ORM models managed by this Django app
Please do not integrate directly with these models!!! This app currently
offers two APIs -- api.py for direct Python integration and receivers.py,
which leverages Django's signal framework.
"""
from django.db import models
from model_utils.models import TimeStampedModel
class Milestone(TimeStampedModel):
"""
A Milestone is a representation of an accomplishment which can be
attained by a user. Milestones have a base set of meta data
describing the milestone, including id, name, and description.
Milestones can be used to drive functionality and behavior 'behind
the scenes' in Open edX, such as with the Pre-Requisite Course and
Course Entrance Exam use cases.
"""
namespace = models.CharField(max_length=255, db_index=True)
name = models.CharField(max_length=255, db_index=True)
display_name = models.CharField(max_length=255)
description = models.TextField()
active = models.BooleanField(default=True)
class Meta:
""" Meta class for this Django model """
unique_together = (("namespace", "name"),)
class MilestoneRelationshipType(TimeStampedModel):
"""
A MilestoneRelationshipType represents a category of link available
between a Milestone and a particular learning object (such as a
Course). In addition to learning objects, a MilestoneRelationshipType
can also represent the link between a Milestone and other platform
entities (such as a User). For example, a Course Author may
indicate that Course 101 "fulfills" Milestone A, creating a new
CourseMilestone record in the process. When a User completes
Course 101, a new UserMilestone record is created reflecting the
newly-attained Milestone A. The Course Author may also indicate
that Course 102 "requires" Milestone A, yielding a second
CourseMilestone record. Because the User has gained Milestone A
(via Course 101), they can access Course 102.
This same process of indicating MilestoneRelationshipTypes can be
applied to other learning objects as well, such as course content
(XBlocks/modules).
"""
# name = models.CharField(max_length=255, db_index=True, unique=True)
name = models.CharField(max_length=25, db_index=True, unique=True)
description = models.TextField(blank=True)
active = models.BooleanField(default=True)
@classmethod
# pylint: disable=invalid-name
def get_supported_milestone_relationship_types(cls):
""" The set of currently-allowed m
|
ilestone relationship types (names) """
RELATIONSHIP_TYPE_CHOICES = {
'REQUIRES': 'requires',
'FULFILLS': 'fulfills',
}
return RELATIONSHIP_TYPE_CHOICES
class CourseMilestone(TimeStampedModel):
"""
A CourseMilestone represents the link between a Course and a
Milestone. Because Courses are not true Open edX entities (in the
    Django/ORM sense) the modeling and integrity will be limited to that
of specifying CourseKeyFields in this model, as well as related ones
below. In addition, a MilestoneRelationshipType specifies the
particular sort of relationship that exists between the Course and
the Milestone, such as "requires".
"""
course_id = models.CharField(max_length=255, db_index=True)
milestone = models.ForeignKey(Milestone, db_index=True)
milestone_relationship_type = models.ForeignKey(MilestoneRelationshipType, db_index=True)
active = models.BooleanField(default=True)
class Meta:
""" Meta class for this Django model """
unique_together = (("course_id", "milestone"),)
class CourseContentMilestone(TimeStampedModel):
"""
A CourseContentMilestone represents the link between a specific
Course module (such as an XBlock) and a Milestone. Because
CourseContent objects are not true Open edX entities (in the
Django/ORM sense) the modeling and integrity will be limited to that
of specifying LocationKeyFields in this model, as well as related
ones. In addition, a MilestoneRelationshipType specifies the
particular sort of relationship that exists between the Milestone
and the CourseContent, such as "requires" or "fulfills".
"""
course_id = models.CharField(max_length=255, db_index=True)
content_id = models.CharField(max_length=255, db_index=True)
milestone = models.ForeignKey(Milestone, db_index=True)
milestone_relationship_type = models.ForeignKey(MilestoneRelationshipType, db_index=True)
active = models.BooleanField(default=True)
class Meta:
""" Meta class for this Django model """
unique_together = (("course_id", "content_id", "milestone"),)
class UserMilestone(TimeStampedModel):
"""
    A UserMilestone represents a stage reached or event experienced
by a User during their interactions with the Open edX platform.
The use of the 'collected' field in this model could support future
use cases such as "Goals", in which a User might keep a list of
Milestones they are interested in attaining. Side Note: In the
Mozilla Open Badges world, this collection concept is referred
to as the user's "backpack".
The 'source' field was originally introduced as a free-form auditing
field to document the method, location, or event which triggered the
collection of the milestone by this user.
"""
user_id = models.IntegerField(db_index=True)
milestone = models.ForeignKey(Milestone, db_index=True)
source = models.TextField(blank=True)
collected = models.DateTimeField(blank=True, null=True)
active = models.BooleanField(default=True)
class Meta:
""" Meta class for this Django model """
unique_together = ("user_id", "milestone")
|
ak110/pytoolkit
|
pytoolkit/evaluations/classification_test.py
|
Python
|
mit
| 808
| 0.001238
|
import numpy as np
import pytoolkit as tk
def test_print_classification_multi():
y_true = np.array([0, 1, 1, 1, 2])
prob_pred = np.array(
[
[0.75, 0.00, 0.25],
[0.25, 0.75, 0.00],
[0.25, 0.75, 0.00],
[0.25, 0.00, 0.75],
[0.25, 0.75, 0.00],
]
)
tk.evaluations.print_classification(y_true, prob_pred)
def test_print_classification_binary():
y_true = np.array([0, 1, 1, 0])
prob_pred = np.array([0.25, 0.25, 0.75, 0.25])
tk.evaluations.print_classification(y_true, prob_pred)
def test_print_classification_binary_multi():
y_true = np.array([0, 1, 1, 0])
prob_pred = np.array([[0.25, 0.75], [0.25, 0.75], [0.75, 0.25], [0.25, 0.75]])
    tk.evaluations.print_classification(y_true, prob_pred)
|
cghall/salesforce-reporting
|
test/common.py
|
Python
|
mit
| 304
| 0.003289
|
import json
import os
import unittest
class ParserTest(unittest.TestCase):
def build_mock_report(self, report):
path = os.path.join(os.path.dirname(__file__), 'test_data', report) + '.json'
path = os.path.abspath(path)
with open(path) as f:
return json.load(f)
|
justanr/py3traits
|
src/pytraits/core/singleton.py
|
Python
|
apache-2.0
| 2,036
| 0.000491
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from pytraits.core.errors import SingletonError
class Singleton(type):
"""
Turn the class to immutable singleton.
>>> class Example(object, metaclass=Singleton):
... pass
...
>>> a = Example()
>>> b = Example()
>>> id(a) == id(b)
True
    Having your instance as a singleton is faster than creating from scratch
>>> import timeit
>>> class MySingleton(object, metaclass=Singleton):
    ...     def __init__(self):
... self._store = dict(one=1, two=2, three=3, four=4)
...
>>> class NonSingleton(object):
... def __init__(self):
... self._store = dict(one=1, two=2, three=3, four=4)
...
>>> #timeit.timeit(NonSingleton) > timeit.timeit(MySingleton)
True
>>> MySingleton().new_item = False
Traceback (most recent call last):
...
pytraits.core.errors.SingletonError: Singletons are immutable!
"""
def __call__(self, *args, **kwargs):
try:
return self.__instance
except AttributeError:
def immutable_object(*args):
raise SingletonError()
self.__instance = super(Singleton, self).__call__(*args, **kwargs)
self.__setitem__ = immutable_object
self.__setattr__ = immutable_object
return self.__instance
if __name__ == "__main__":
import doctest
doctest.testmod()
|
JuBra/GEMEditor
|
GEMEditor/base/functions.py
|
Python
|
gpl-3.0
| 8,494
| 0.000706
|
from collections import defaultdict
from six import iteritems
def invert_mapping(mapping):
""" Invert a mapping dictionary
Parameters
----------
mapping: dict
Returns
-------
"""
inverted_mapping = defaultdict(list)
for key, value in mapping.items():
if isinstance(value, (list, set)):
for element in value:
inverted_mapping[element].append(key)
else:
inverted_mapping[value].append(key)
return inverted_mapping
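# Illustrative behaviour (added comment, not original code):
# invert_mapping({"r1": ["m1", "m2"], "r2": "m2"}) yields
# {"m1": ["r1"], "m2": ["r1", "r2"]} (as a defaultdict of lists).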
def generate_copy_id(base_id, collection, suffix="_copy"):
""" Generate a new id that is not present in collection
Parameters
----------
base_id: str, Original id while copying or New for new entries
collection: dict or list
suffix: str, Suffix that is added to the base id
Returns
-------
"""
composite_id = str(base_id) + suffix
new_id = composite_id
n = 0
# Make sure there is no metabolite with the same id
while new_id in collection:
# Add number to end of id
n += 1
new_id = composite_id + str(n)
return new_id
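# Illustrative behaviour (added comment): with collection = {"m1", "m1_copy"},
# generate_copy_id("m1", collection) returns "m1_copy1".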
def get_annotation_to_item_map(list_of_items):
""" Find model items with overlapping annotations
Parameters
----------
item
list_of_items
Returns
-------
"""
annotation_to_item = defaultdict(list)
for item in list_of_items:
for annotation in item.annotation:
annotation_to_item[annotation].append(item)
return annotation_to_item
def convert_to_bool(input_str):
""" Convert string of boolean value to actual bolean
PyQt5 stores boolean values as strings 'true' and 'false
in the settings. In order to use those stored values
they need to be converted back to the boolean values.
Parameters
----------
input_str: str
Returns
-------
bool
"""
mapping = {"true": True,
"false": False,
"none": None}
if isinstance(input_str, bool):
return input_str
elif not isinstance(input_str, str):
raise TypeError("Input should be a string or boolean")
else:
return mapping[input_str.lower()]
def check_charge_balance(metabolites):
""" Check charge balance of the reaction """
# Check that charge is set for all metabolites
    if not all(x.charge is not None for x in metabolites.keys()):
return None
else:
return sum([metabolite.charge * coefficient for metabolite, coefficient in iteritems(metabolites)])
def check_element_balance(metabolites):
""" Check that the reaction is elementally balanced """
metabolite_elements = defaultdict(int)
for metabolite, coefficient in iteritems(metabolites):
for element, count in iteritems(metabolite.elements):
            metabolite_elements[element] += coefficient * count
return {k: v for k, v in iteritems(metabolite_elements) if v != 0}
def reaction_string(stoichiometry, use_metabolite_names=True):
"""Generate the reaction string """
attrib = "id"
if use_metabolite_names:
attrib = "name"
educts = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value < 0.]
products = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value > 0.]
return " + ".join([" ".join(x) for x in educts])+" --> "+" + ".join([" ".join(x) for x in products])
def unbalanced_metabolites_to_string(in_dict):
substrings = ['{0}: {1:.1f}'.format(*x) for x in in_dict.items()]
return "<br>".join(substrings)
def reaction_balance(metabolites):
""" Check the balancing status of the stoichiometry
Parameters
----------
    metabolites : dict - Dictionary of metabolites with stoichiometric coefficients
Returns
-------
charge_str : str or bool
element_str : str or bool
balanced : str or bool
"""
element_result = check_element_balance(metabolites)
charge_result = check_charge_balance(metabolites)
if charge_result is None:
charge_str = "Unknown"
elif charge_result == 0:
charge_str = "OK"
else:
charge_str = str(charge_result)
if not all(x.formula for x in metabolites.keys()):
element_str = "Unknown"
elif element_result == {}:
element_str = "OK"
else:
element_str = unbalanced_metabolites_to_string(element_result)
if len(metabolites) < 2:
balanced = None
elif element_str == "OK" and charge_str == "OK":
balanced = True
elif element_str not in ("OK", "Unknown") or charge_str not in ("OK", "Unknown"):
balanced = False
else:
balanced = "Unknown"
return charge_str, element_str, balanced
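# Minimal sketch of the expected input (the _Met stub is hypothetical; real
# callers pass model metabolite objects exposing charge/formula/elements):
#
# class _Met(object):
#     def __init__(self, charge, formula, elements):
#         self.charge, self.formula, self.elements = charge, formula, elements
#
# h2 = _Met(0, "H2", {"H": 2})
# o2 = _Met(0, "O2", {"O": 2})
# h2o = _Met(0, "H2O", {"H": 2, "O": 1})
# reaction_balance({h2: -2.0, o2: -1.0, h2o: 2.0})
# # -> ('OK', 'OK', True) for 2 H2 + O2 --> 2 H2O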
def merge_groups_by_overlap(data):
""" Merge sets
Parameters
----------
data: list
Returns
-------
"""
new_index = list(range(len(data)))
mapping = dict()
data = [set(m) for m in data]
# Iterate over groups in data and merge groups
# to the one with the lowest index
for i, group in enumerate(data):
for element in group:
if element not in mapping:
# Element has not been seen before
# point element to current index
mapping[element] = i
continue
else:
                # Get the new location of the group
# to which the element mapping points
destination = new_location(new_index, mapping[element])
if destination == i:
# Group has already been merged
continue
elif destination > i:
# Merge to lowest index always
destination, i = i, destination
# Merge current group with the one
# the item has been found in before
data[destination].update(data[i])
data[i] = None
# Store new index of group
new_index[i] = destination
i = destination
# Filter out the empty groups
return [g for g in data if g]
def new_location(new_index, n):
""" Find new location
Iteratively follow pointers to new location.
Parameters
----------
new_index: list, Should be initialized from range
n: int
Returns
-------
int
"""
while new_index[n] != n:
n = new_index[n]
return n
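# Illustrative behaviour (added comment): overlapping groups are merged into
# the member with the lowest index, disjoint groups survive unchanged, e.g.
# merge_groups_by_overlap([{1, 2}, {3, 4}, {2, 3}, {5}]) -> [{1, 2, 3, 4}, {5}]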
def unpack(iterable, cls):
""" Unpack the value
Parameters
----------
iterable
cls
Returns
-------
"""
if len(iterable) == 1:
return iterable.pop()
else:
return cls(iterable)
def text_is_different(input, state):
""" Check if the input is different from output
Test if the input is different to the output
while ignoring the difference between None
and empty string.
Parameters
----------
input: str or None
state: str
Returns
-------
bool
"""
if not input and not state:
return False
else:
return input != state
def restore_state(object, state):
""" Restore the state of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreState(state)
def restore_geometry(object, state):
""" Restore the geometry of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreGeometry(state)
def split_dict_by_value(dictionary):
""" Split dictionary by values
This functions splits dictionary entries based
on the value into positive, negative and zero
dictionaries.
Parameters
----------
dictionary: dict,
Input dictionary
Returns
-------
positive: dict,
        Dictionary containing all items with positive value
    negative: dict,
        Dictionary containing all items with negative value
zero: dict,
|
plotly/python-api
|
packages/python/plotly/plotly/validators/ohlc/_lowsrc.py
|
Python
|
mit
| 432
| 0
|
import _plotly_utils.basevalidators
class LowsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="lowsrc", parent_name="ohlc", **kwargs):
super(LowsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
rezoo/chainer
|
chainer/testing/serializer.py
|
Python
|
mit
| 1,562
| 0
|
import os
from chainer import serializers
from chainer import utils
def save_and_load(src, dst, filename, saver, loader):
"""Saves ``src`` and loads it to ``dst`` using a de/serializer.
This function simply runs a serialization and deserialization to check if
the serialization code is correctly implemented. The save and load are
done within a temporary directory.
Args:
src: An object to save from.
dst: An object to load into.
filename (str): File name used during the save/load.
saver (callable): Function that saves the source object.
loader (callable): Function that loads the file into the destination
object.
"""
with utils.tempdir() as tempdir:
path = os.path.join(tempdir, filename)
saver(path, src)
loader(path, dst)
def save_and_load_npz(src, dst):
"""Saves ``src`` to an NPZ file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using NPZ de/serializers.
Args:
src: An object to save.
dst: An object to load to.
"""
save_and_load(src, dst, 'tmp.npz',
serializers.save_npz, serializers.load_npz)
def save_and_load_hdf5(src, dst):
"""Saves ``src`` to an HDF5 file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using HDF5 de/serializers.
Args:
src: An object to save.
dst: An object to load to.
"""
    save_and_load(src, dst, 'tmp.h5',
serializers.save_hdf5, serializers.load_hdf5)
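if __name__ == '__main__':
    # Minimal self-check sketch (added for illustration; assumes chainer and
    # numpy are importable, which holds inside this repository). Round-trips
    # a Link through NPZ serialization and compares the copied parameter.
    import numpy
    import chainer.links as L
    src = L.Linear(3, 2)
    dst = L.Linear(3, 2)
    save_and_load_npz(src, dst)
    assert numpy.array_equal(src.W.data, dst.W.data)
    print('NPZ round-trip OK')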
|
tedelhourani/ansible
|
test/units/plugins/inventory/test_script.py
|
Python
|
gpl-3.0
| 4,169
| 0.002883
|
# -*- coding: utf-8 -*-
# Copyright 2017 Chris Meyers <cmeyers@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.errors import AnsibleError
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.inventory.script import InventoryModule
class TestInventoryModule(unittest.TestCase):
def setUp(self):
class Inventory():
cache = dict()
class PopenResult():
returncode = 0
stdout = b""
stderr = b""
def communicate(self):
return (self.stdout, self.stderr)
self.popen_result = PopenResult()
self.inventory = Inventory()
self.loader = mock.MagicMock()
self.loader.load = mock.MagicMock()
def register_patch(name):
patcher = mock.patch(name)
self.addCleanup(patcher.stop)
return patcher.start()
self.popen = register_patch('subprocess.Popen')
self.popen.return_value = self.popen_result
self.BaseInventoryPlugin = register_patch('ansible.plugins.inventory.BaseInventoryPlugin')
self.BaseInventoryPlugin.get_cache_prefix.return_value = 'abc123'
def test_parse_subprocess_path_not_found_fail(self):
self.popen.side_effect = OSError("dummy text")
inventory_module = InventoryModule()
with pytest.raises(AnsibleError) as e:
inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
assert e.value.message == "problem running /foo/bar/foobar.py --list (dummy text)"
def test_parse_subprocess_err_code_fail(self):
self.popen_result.stdout = to_bytes(u"fooébar", errors='surrogate_escape')
self.popen_result.stderr = to_bytes(u"dummyédata")
self.popen_result.returncode = 1
inventory_module = InventoryModule()
with pytest.raises(AnsibleError) as e:
inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
assert e.value.message == to_native("Inventory script (/foo/bar/foobar.py) had an execution error: "
"dummyédata\n ")
def test_parse_utf8_fail(self):
self.popen_result.returncode = 0
self.popen_result.stderr = to_bytes("dummyédata")
self.loader.load.side_effect = TypeError('obj must be string')
inventory_module = InventoryModule()
with pytest.raises(AnsibleError) as e:
inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
assert e.value.message == to_native("fail
|
ed to parse executable inventory script results from "
"/foo/bar/foobar.py: obj must be string\ndummyédata\n")
def test_parse_dict_fail(self):
self.popen_result.returncode = 0
self.popen_result.stderr = to_bytes("dummyédata")
self.loader.load.return_value = 'i am not a dict'
inventory_module = InventoryModule()
with pytest.raises(AnsibleError) as e:
inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
assert e.value.message == to_native("failed to parse executable inventory script results from "
"/foo/bar/foobar.py: needs to be a json dict\ndummyédata\n")
|
paramite/blazar
|
climate/db/migration/alembic_migrations/versions/23d6240b51b2_add_status_to_leases.py
|
Python
|
apache-2.0
| 1,737
| 0.001151
|
# Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add status to leases
Revision ID: 23d6240b51b2
Revises: 2bcfe76b0474
Create Date: 2014-04-25 10:41:09.183430
"""
# revision identifiers, used by Alembic.
revision = '23d6240b51b2'
down_revision = '2bcfe76b0474'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('leases', sa.Column(
'action', sa.String(length=255), nullable=True))
op.add_column('leases', sa.Column(
'status', sa.String(length=255), nullable=True))
op.add_column('leases', sa.Column(
'status_reason', sa.String(length=255), nullable=True))
def downgrade():
engine = op.get_bind().engine
if engine.name == 'sqlite':
# Only for testing purposes with sqlite
op.execute('CREATE TABLE tmp_leases as SELECT created_at, updated_at, '
'id, name, user_id, project_id, start_date, '
'end_date, trust_id FROM leases')
op.execute('DROP TABLE leases')
op.execute('ALTER TABLE tmp_leases RENAME TO leases')
return
op.drop_column('leases', 'action')
op.drop_column('leases', 'status')
op.drop_column('leases', 'status_reason')
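# A hedged sketch of driving this revision programmatically; the
# alembic.ini path is an assumption about the deployment layout:
#
#     from alembic import command
#     from alembic.config import Config
#
#     cfg = Config('alembic.ini')
#     command.upgrade(cfg, '23d6240b51b2')    # apply this revision
#     command.downgrade(cfg, '2bcfe76b0474')  # roll back one step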
|
stuglaser/pychan
|
examples/ajmani-adv-patt.py
|
Python
|
bsd-3-clause
| 4,939
| 0.00081
|
#!/usr/bin/env python
#
# Examples from the talk:
# Sameer Ajmani - Advanced Go Concurrency Patterns - Google I/O 2013
# https://www.youtube.com/watch?v=QDDwwePbDtw
# https://code.google.com/p/go/source/browse/2013/advconc?repo=talks
import argparse
import collections
import random
import threading
import time
from chan import Chan, chanselect, quickthread
MOCK_POSTS = [
'First post from ',
'This is the second post from ',
'A third post? How interesting. Thanks ',
'Woah! A fourth post from '
]
Item = collections.namedtuple('Item', ['channel', 'title'])
class MockFetcher(object):
def __init__(self, domain):
self.domain = domain
self.posts = [p + self.domain for p in MOCK_POSTS]
def fetch(self):
if not self.posts:
return [], time.time() + 1000.0
item_list = [Item(self.domain, self.posts.pop(0))]
next_time = time.time() + random.random() * 0.2 + 0.1
return item_list, next_time
def timeout_after(delay):
def thread(ch):
time.sleep(delay)
ch.put(None)
c = Chan()
t = threading.Thread(name='timeout', target=thread, args=(c,))
t.daemon = True
t.start()
return c
class Subscription(object):
def __init__(self, fetcher):
self.fetcher = fetcher
self.updates_chan = Chan()
self.quit = Chan()
self.thread = threading.Thread(
name='Subscription',
target=self._run)
#self.thread.daemon = True
self.thread.start()
def _run(self):
next_time = time.time()
pending = [] # First is most recent. Should be a deque
err = None
while True:
start_fetch = timeout_after(max(0.0, next_time - time.time()))
# Does or doesn't wait on updates_chan depending on if we have
# items ready.
if pending:
outchans = [(self.updates_chan, pending[0])]
else:
outchans = []
ch, value = chanselect([self.quit, start_fetch], outchans)
if ch == self.quit:
errc = value
self.updates_chan.close()
errc.put(err)
return
elif ch == start_fetch:
try:
err = None
item_list, next_time = self.fetcher.fetch()
except Exception as ex:
err = ex
next_time = time.time() + 10.0
continue
pending.extend(item_list)
else: # self.updates_chan
pending.pop(0) # Pops the sent item
def updates(self):
return self.updates_chan
def close(self):
errc = Chan()
self.quit.put(errc)
result = errc.get()
self.thread.join(0.2)
assert not self.thread.is_alive()
return result
class Merged(object):
def __init__(self, subscriptions):
self.subscriptions = subscriptions
self.updates_chan = Chan()
self.quit = Chan()
self.thread = threading.Thread(
name="Merged",
target=self._run)
self.thread.start()
def _close_subs_collect_errs(self):
return [sub.close() for sub in self.subscriptions]
def _run(self):
        subchans = [sub.updates() for sub in self.subscriptions]
while True:
c, value = chanselect(subchans + [self.quit], [])
if c == self.quit:
value.put(self._close_subs_collect_errs())
self.updates_chan.close()
return
else:
item = value
c, _ = chanselect([self.quit], [(self.updates_chan, item)])
if c == self.quit:
value.put(self._close_subs_collect_errs())
self.updates_chan.close()
return
else:
pass # Send successful
def updates(self):
return self.updates_chan
def close(self):
errc = Chan()
self.quit.put(errc)
result = errc.get()
self.thread.join(timeout=0.2)
assert not self.thread.is_alive()
return result
def main():
FetcherCls = MockFetcher
merged = Merged([
Subscription(FetcherCls('blog.golang.org')),
Subscription(FetcherCls('googleblog.blogspot.com')),
Subscription(FetcherCls('googledevelopers.blogspot.com'))])
# Close after a while
def close_later():
time.sleep(3)
print("Closed: {}".format(merged.close()))
quickthread(close_later)
for it in merged.updates():
print("{} -- {}".format(it.channel, it.title))
time.sleep(0.1)
print("Still active: (should only be _MainThread and timeouts)")
for active in threading._active.itervalues():
print(" {}".format(active))
if __name__ == '__main__':
main()
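# A smaller illustrative sketch of the primitives used above (a
# hypothetical producer multiplexed with chanselect); not part of the
# original example:
def _mini_demo():
    data, done = Chan(), Chan()
    def produce():
        for i in range(3):
            data.put(i)
        done.put(None)
    quickthread(produce)
    while True:
        ch, value = chanselect([data, done], [])
        if ch == done:
            break
        print("received {}".format(value))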
|
fredo-editor/FreDo
|
setup.py
|
Python
|
bsd-3-clause
| 684
| 0.001462
|
# Parts of this file were derived from the setup.py file of scikit-image
# scikit-image license can be found at
# https://github.com/scikit-image/scikit-image/blob/master/LICENSE.txt
from setuptools import setup, find_packages
with open('requirements.txt') as f:
    INSTALL_REQUIRES = [l.strip() for l in f.readlines() if l]
setup(name='FreDo-Editor',
version='0.1.0_dev',
description='Frequency Domain Image Editor',
author='Vighnesh Birodkar',
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
author_email='vighneshbirodkar@nyu.edu',
entry_points={
'gui_scripts': ['fredo = fredo.editor.main:run']
}
)
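# The gui_scripts entry point above makes installation generate a
# `fredo` executable; a rough equivalent of what that script runs
# (assuming the declared module path exists):
#
#     from fredo.editor.main import run
#     run()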
|
10clouds/edx-platform
|
lms/djangoapps/certificates/apis/v0/tests/test_views.py
|
Python
|
agpl-3.0
| 4,367
| 0.000687
|
"""
Tests for the Certificate REST APIs.
"""
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class CertificatesRestApiTest(SharedModuleStoreTestCase, APITestCase):
"""
Test for the Certificates REST APIs
"""
@classmethod
def setUpClass(cls):
super(CertificatesRestApiTest, cls).setUpClass()
cls.course = CourseFactory.create(
org='edx',
number='verified',
display_name='Verified Course'
)
def setUp(self):
super(CertificatesRestApiTest, self).setUp()
self.student = UserFactory.create(password='test')
self.student_no_cert = UserFactory.create(password='test')
self.staff_user = UserFactory.create(password='test', is_staff=True)
GeneratedCertificateFactory.create(
user=self.student,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url='www.google.com',
grade="0.88"
)
self.namespaced_url = 'certificates_api:v0:certificates:detail'
def get_url(self, username):
"""
Helper function to create the url for certificates
"""
return reverse(
self.namespaced_url,
kwargs={
'course_id': self.course.id,
'username': username
}
)
def test_permissions(self):
"""
Test that only the owner of the certificate can access the url
"""
# anonymous user
resp = self.client.get(self.get_url(self.student.username))
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
# another student
self.client.login(username=self.student_no_cert.username, password='test')
resp = self.client.get(self.get_url(self.student.username))
# gets 404 instead of 403 for security reasons
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(resp.data, {u'detail': u'Not found.'}) # pylint: disable=no-member
self.client.logout()
# same student of the certificate
self.client.login(username=self.student.username, password='test')
resp = self.client.get(self.get_url(self.student.username))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.client.logout()
# staff user
self.client.login(username=self.staff_user.username, password='test')
resp = self.client.get(self.get_url(self.student.username))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_no_certificate_for_user(self):
"""
Test for case with no certificate available
"""
self.client.login(username=self.student_no_cert.username, password='test')
resp = self.client.get(self.get_url(self.student_no_cert.username))
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn('error_code', resp.data) # pylint: disable=no-member
self.assertEqual(
resp.data['error_code'], # pylint: disable=no-member
'no_certificate_for_user'
)
def test_certificate_for_user(self):
"""
        Tests the case where a user pulls her own certificate
"""
self.client.login(username=self.student.username, password='test')
resp = self.client.get(self.get_url(self.student.username))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(
resp.data, # pylint: disable=no-member
{
'username': self.student.username,
'status': CertificateStatuses.downloadable,
'grade': '0.88',
'download_url': 'www.google.com',
'certificate_type': CourseMode.VERIFIED,
'course_id': unicode(self.course.id),
}
)
|
Tinkerforge/brickv
|
src/brickv/plugin_system/plugins/dual_relay/dual_relay.py
|
Python
|
gpl-2.0
| 5,730
| 0.001396
|
# -*- coding: utf-8 -*-
"""
Dual Relay Plugin
Copyright (C) 2011-2012 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2014 Matthias Bolte <matthias@tinkerforge.com>
dual_relay.py: Dual Relay Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from brickv.plugin_system.plugin_base import PluginBase
from brickv.plugin_system.plugins.dual_relay.ui_dual_relay import Ui_DualRelay
from brickv.bindings.bricklet_dual_relay import BrickletDualRelay
from brickv.async_call import async_call
from brickv.load_pixmap import load_masked_pixmap
from brickv.monoflop import Monoflop
class DualRelay(PluginBase, Ui_DualRelay):
def __init__(self, *args):
PluginBase.__init__(self, BrickletDualRelay, *args)
self.setupUi(self)
self.dr = self.device
self.state1_combobox.setItemData(0, True)
self.state1_combobox.setItemData(1, False)
self.state2_combobox.setItemData(0, True)
self.state2_combobox.setItemData(1, False)
self.monoflop = Monoflop(self.dr,
[1, 2],
[self.state1_combobox, self.state2_combobox],
self.cb_state_change_by_monoflop,
[self.time1_spinbox, self.time2_spinbox],
None,
self)
self.dr1_button.clicked.connect(self.dr1_clicked)
self.dr2_button.clicked.connect(self.dr2_clicked)
self.go1_button.clicked.connect(self.go1_clicked)
self.go2_button.clicked.connect(self.go2_clicked)
self.a1_pixmap = load_masked_pixmap('plugin_system/plugins/dual_relay/relay_a1.bmp')
self.a2_pixmap = load_masked_pixmap('plugin_system/plugins/dual_relay/relay_a2.bmp')
self.b1_pixmap = load_masked_pixmap('plugin_system/plugins/dual_relay/relay_b1.bmp')
self.b2_pixmap = load_masked_pixmap('plugin_system/plugins/dual_relay/relay_b2.bmp')
def get_state_async(self, dr1, dr2):
width = self.dr1_button.width()
if self.dr1_button.minimumWidth() < width:
self.dr1_button.setMinimumWidth(width)
width = self.dr2_button.width()
if self.dr2_button.minimumWidth() < width:
self.dr2_button.setMinimumWidth(width)
if dr1:
self.dr1_button.setText('Switch Off')
self.dr1_image.setPixmap(self.a1_pixmap)
else:
self.dr1_button.setText('Switch On')
            self.dr1_image.setPixmap(self.b1_pixmap)
if dr2:
self.dr2_button.setText('Switch Off')
self.dr2_image.setPixmap(self.a2_pixmap)
else:
self.dr2_button.setText('Switch On')
self.dr2_image.setPixmap(self.b2_pixmap)
def start(self):
async_call(self.dr.get_state, None, self.get_state_async, self.increase_error_count,
expand_result_tuple_for_callback=True)
self.monoflop.start()
def stop(self):
self.monoflop.stop()
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletDualRelay.DEVICE_IDENTIFIER
def dr1_clicked(self):
width = self.dr1_button.width()
if self.dr1_button.minimumWidth() < width:
self.dr1_button.setMinimumWidth(width)
state = 'On' in self.dr1_button.text().replace('&', '')
if state:
self.dr1_button.setText('Switch Off')
self.dr1_image.setPixmap(self.a1_pixmap)
else:
self.dr1_button.setText('Switch On')
self.dr1_image.setPixmap(self.b1_pixmap)
async_call(self.dr.set_selected_state, (1, state), None, self.increase_error_count)
def dr2_clicked(self):
width = self.dr2_button.width()
if self.dr2_button.minimumWidth() < width:
self.dr2_button.setMinimumWidth(width)
state = 'On' in self.dr2_button.text().replace('&', '')
if state:
self.dr2_button.setText('Switch Off')
self.dr2_image.setPixmap(self.a2_pixmap)
else:
self.dr2_button.setText('Switch On')
self.dr2_image.setPixmap(self.b2_pixmap)
async_call(self.dr.set_selected_state, (2, state), None, self.increase_error_count)
def go1_clicked(self):
self.monoflop.trigger(1)
def go2_clicked(self):
self.monoflop.trigger(2)
def cb_state_change_by_monoflop(self, relay, state):
if relay == 1:
if state:
self.dr1_button.setText('Switch Off')
self.dr1_image.setPixmap(self.a1_pixmap)
else:
self.dr1_button.setText('Switch On')
self.dr1_image.setPixmap(self.b1_pixmap)
elif relay == 2:
if state:
self.dr2_button.setText('Switch Off')
self.dr2_image.setPixmap(self.a2_pixmap)
else:
self.dr2_button.setText('Switch On')
self.dr2_image.setPixmap(self.b2_pixmap)
|
electronicdaisy/WeissSchwarzTCGDatabase
|
card.py
|
Python
|
mit
| 3,176
| 0.004094
|
# img
# trigger = attributes[12]
# http://ws-tcg.com/en/cardlist
# edit
import os
import requests
import sqlite3
def get_card(browser):
attributes = browser.find_elements_by_xpath('//table[@class="status"]/tbody/tr/td')
image = attributes[0].find_element_by_xpath('./img').get_attribute('src')
if attributes[1].find_element_by_xpath('./span[@class="kana"]').text:
card_name = attributes[1].find_element_by_xpath('./span[@class="kana"]').text
else:
card_name = None
card_no = attributes[2].text if attributes[2].text else None
rarity = attributes[3].text if attributes[3].text else None
expansion = attributes[4].text if attributes[4].text else None
if attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/w.gif":
side = "Weiß"
elif attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/s.gif":
side = "Schwarz"
else:
side = None
card_type = attributes[6].text if attributes[6].text else None
if attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/yellow.gif":
color = "Yellow"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/green.gif":
color = "Green"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/red.gif":
color = "Red"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/blue.gif":
color = "Blue"
else:
color = None
level = attributes[8].text if attributes[8].text else None
cost = attributes[9].text if attributes[9].text else None
power = attributes[10].text if attributes[10].text else None
soul = len(attributes[11].find_elements_by_xpath('./img[contains(@src, "http://ws-tcg.com/en/cardlist/partimages/soul.gif")]'))
special_attribute = attributes[13].text if attributes[13].text else None
text = attributes[14].text if attributes[14].text else None
flavor_text = attributes[15].text if attributes[15].text else None
if not os.path.exists("images"):
os.makedirs("images")
if not os.path.exists("images/" + card_no.split("/")[0]):
os.makedirs("images/" + card_no.split("/")[0])
r = requests.get(image, stream=True)
if r.status_code == 200:
with open("images/" + card_no + ".jpg", 'wb') as f:
for chunk in r:
f.write(chunk)
card = (card_name, card_no, rarity, expansion, side, card_type, color, level, cost, power, soul,
special_attribute, text, flavor_text)
    connection = sqlite3.connect('cards.sqlite3')
    cursor = connection.cursor()
cursor.execute('INSERT INTO cards (name, no, rarity, expansion, side, type, color, level, cost, power, soul,'
'special_attribute, text, flavor_text) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)', card)
connection.commit()
connection.close()
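# The INSERT above presumes an existing `cards` table; a plausible
# schema sketch (column types are assumptions, not taken from the
# original project):
CARDS_SCHEMA_SKETCH = '''CREATE TABLE IF NOT EXISTS cards (
    name TEXT, no TEXT, rarity TEXT, expansion TEXT, side TEXT,
    type TEXT, color TEXT, level TEXT, cost TEXT, power TEXT,
    soul INTEGER, special_attribute TEXT, text TEXT, flavor_text TEXT
)'''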
|
tehpwny/insurrection_bot
|
insurrection_bot.py
|
Python
|
gpl-3.0
| 9,673
| 0.002791
|
"""
# TODO
"""
import os
import sys
from time import sleep
from subprocess import Popen
from random import choice, randrange
PRETTY = '--pretty' in sys.argv
class Tiqqun:
words = {
"things_we_like": [
"rupture", "insurrection", "crisis", "social war",
"zones of indistinction which need no justification",
"indifference", "direct action", "sabotage", "art",
"self concisousness", "permanent revolution"
],
"things_we_dont_like": [
"activism", "representation", "humanism", "landlords",
"totality", "passivity", "banality", "leftists",
"fossilization of our desires", "vote", "power",
"mobilization", "impotentiality", "individualism",
"normalization", "absence", "patriarcat", "governements",
"economy", "consumption", "marchandise", "music", "domination",
"work", "productivity", "accumulation", "privatisation", "privation",
"proletarian revolution", "leninism", "social democracy", "democracy",
"classes war", "classes", "the state", "jails", "prisons",
"state justice",
"marxism"
],
"people_we_dont_like": [
"the milieu", "liberalism",
"the bureaucrats of revolt",
"anarcho-liberalism", "politicians",
"bourgeoisie", "statists communists",
"police"
],
"things_we_do": [
"desire", "riot", "occupy everything", "become ungovernable"
],
"our_things": [
"communes", "multiplicities", "encounters", "zads", "squats",
"becomings", "zones of offensive opacity", "desiring-bodies",
],
"symbolic_things": [
"burning dumpster", "smashed window", "dead CEO",
"moment of friendship", "car set aflame", "dead cop",
"barricaded hallway", "barricades", "burnt cop", "smashed bank"
],
"things_we_do_to_things": [
"destroy", "shatter", "negate", "reject", "burn", "crush",
"void", "cancel", ""
],
"things_we_dont_do": [
"organize", "negotiate",
"make demands", "be productive"
],
"how_we_do_things": [
"in secret", "without illusions", "for once and for all",
"absolutely"
],
"describing_good_things": [
"singular", "immanent", "inoperative", "radical"
],
"describing_bad_things": [
"homogenous", "pathetic", "compulsiv
|
e", "alienated"
],
"fancy_words": [
"logic", "structure", "being", "temporality", "teleology"
],
"happiness": ["joy", "ecstasy"],
"sadness": ["misery", "catastrophe", "delusion"],
"really": [
"by
|
any means necessary", "with every weapon at our disposal",
"without looking back", "at all costs"
],
"making_things": [
"articulation", "construction", "elaboration", "setting forth",
"realization"
],
"plans": ["plan", "project", "concept"],
"antiplans": [
"a state of exception", "a line of flight",
"an event"
],
"events": ["orgies", "festivals", "conspiracies"],
"fun_stuff": ["destruction", "negation"],
"get_along": ["dialogue", "criticism", "sympathy"],
"go_away": ["scorn", "contempt", "derision"],
"dont_do": ["refuse", "neglect", "fail"],
"preposition": ["on", "towards"],
"alienation": ["alienation", "isolation", "detachment", "estrangement",
"distance", "separation", "severance", "parting",
"division", "divorce"],
"unification": ["unification", "union", "amalgamation", "junction", "conjunction"]
}
def get_word(self, key):
return self.words.get(key)
def get_words(self, keys):
for key in keys:
yield self.words.get(key)
def get_rand_word(self, key):
return choice(self.words.get(key))
def get_rand_words(self, *keys):
already_said = []
chosen = None
for key in keys:
words = self.words.get(key)
if words is not None:
while chosen in already_said or chosen is None:
chosen = choice(words)
already_said.append(chosen)
yield chosen
def rant(self):
rants = [
self.recognize,
self.do_something,
self.in_the,
self.break_things,
self.this_call,
self.whats_needed,
self.every_what,
self.joke,
self.necessary,
self.symbols,
self.spectale
]
rant = choice(rants)
return str(rant()).replace("\n", " ")
def recognize(self):
return """Confronted with those who {} to recognize themselves in our {} of {}, we offer neither {} nor {} but only our {}.""".format(
*self.get_rand_words("dont_do", "events", "fun_stuff", "get_along",
"get_along", "go_away")
)
def do_something(self):
return """Our need to {} is less the {} of a {} than the {} of {}.
""".format(*self.get_rand_words(
"things_we_do",
"making_things",
"plans",
"making_things",
"antiplans"
))
def in_the(self):
return """In the {} of {}, we {} those who would have us give up the {} {} of {} for the {} of {}.""".format(*self.get_rand_words(
"making_things", "our_things", "things_we_do_to_things",
"describing_good_things", "happiness", "things_we_like",
"sadness", "things_we_dont_like"
))
def title(self):
return """Leaving {} behind: Notes {} {}
""".format(*self.get_rand_words(
"things_we_dont_like", "preposition", "things_we_like"
))
def break_things(self):
return """We must {} all {} {}.
""".format(*self.get_rand_words(
"things_we_do_to_things",
"things_we_dont_like",
"how_we_do_things"))
def this_call(self):
return """This is a call to {}, not an insistence on {}.
""".format(*self.get_rand_words("things_we_like", "things_we_dont_like"))
def whats_needed(self):
return """What's needed is not {}, and even far less {}, but a putting-into-practice of {} {}, a rejection in all forms of the {} of {}.
""".format(*self.get_rand_words("things_we_dont_like",
"things_we_dont_like",
"describing_good_things",
"things_we_like",
"fancy_words",
"things_we_dont_like"))
def every_what(self):
return """Every {} is a refusal to {}, a blow against the {} of {}, a recognition of the {} {} inherent in the articulation of {}.
""".format(*self.get_rand_words(
"symbolic_things",
"things_we_dont_do",
"fancy_words",
"people_we_dont_like",
"describing_good_things",
"fancy_words",
"our_things"
))
def joke(self):
return """The {} {} proposed to us is like a bad joke, and instead of laughter we respond with {}.
""".format(*self.get_rand_words(
"describing_bad_things",
"things_we_dont_like",
"things_we_like"
))
def necessary(self):
return """It is necessary to commence {} to dream of new ways to {}, but to make manifest the subterranean {} in the heart of each {}.
""".format(*self.get_rand_words(
"how_we_do_things",
"things_we_dont_do",
"our_things",
"symbolic_things"
))
def symbols(self):
return """To those who deride the {} {} in a {} or a {}, we propose nothin
|
akrherz/iem
|
scripts/dl/download_hrrr.py
|
Python
|
mit
| 3,606
| 0
|
"""
Since the NOAAPort feed of HRRR data does not have radiation, we should
download this manually from NCEP
Run at 40 AFTER for the previous hour
"""
import subprocess
import sys
import datetime
import tempfile
import os
import requests
import pygrib
from pyiem.util import exponential_backoff, logger, utc
LOG = logger()
def need_to_run(valid):
"""Check to see if we already have the radiation data we need"""
gribfn = valid.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/"
"%H/hrrr.t%Hz.3kmf00.grib2"
)
if not os.path.isfile(gribfn):
return True
try:
grbs = pygrib.open(gribfn)
for name in [
"Downward short-wave radiation flux",
"Upward long-wave radiation flux",
]:
grbs.select(name=name)
# print("%s had everything we desired!" % (gribfn, ))
return False
except Exception:
return True
def fetch(valid):
"""Fetch the radiation data for this timestamp
80:54371554:d=2014101002:ULWRF:top of atmosphere:anl:
81:56146124:d=2014101002:DSWRF:surface:anl:
"""
uri = valid.strftime(
(
"https://nomads.ncep.noaa.gov/pub/data/nccf/"
"com/hrrr/prod/hrrr.%Y%m%d/conus/hrrr.t%Hz."
"wrfprsf00.grib2.idx"
)
)
req = exponential_backoff(requests.get, uri, timeout=30)
if req is None or req.status_code != 200:
LOG.info("failed to get idx %s", uri)
return
offsets = []
neednext = False
for line in req.content.decode("utf-8").split("\n"):
tokens = line.split(":")
        if len(tokens) < 4:
continue
if neednext:
offsets[-1].append(int(tokens[1]))
neednext = False
if tokens[3] in ["ULWRF", "DSWRF"]:
offsets.append([int(tokens[1])])
neednext = True
# Save soil temp and water at surface, 10cm and 40cm
if tokens[3] in ["TSOIL", "SOILW"]:
if tokens[4] in [
"0-0 m below ground",
"0.1-0.1 m below ground",
"0.3-0.3 m below ground",
"0.6-0.6 m below ground",
"1-1 m below ground",
]:
offsets.append([int(tokens[1])])
neednext = True
pqstr = valid.strftime(
"data u %Y%m%d%H00 bogus model/hrrr/%H/hrrr.t%Hz.3kmf00.grib2 grib2"
)
if len(offsets) != 13:
LOG.info("warning, found %s gribs for %s", len(offsets), valid)
for pr in offsets:
headers = {"Range": "bytes=%s-%s" % (pr[0], pr[1])}
req = exponential_backoff(
requests.get, uri[:-4], headers=headers, timeout=30
)
if req is None:
LOG.info("failure for uri: %s", uri)
continue
tmpfd = tempfile.NamedTemporaryFile(delete=False)
tmpfd.write(req.content)
tmpfd.close()
subprocess.call(
"pqinsert -p '%s' %s" % (pqstr, tmpfd.name), shell=True
)
os.unlink(tmpfd.name)
def main(argv):
"""Go Main Go"""
times = []
if len(argv) == 5:
times.append(
utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
)
else:
times.append(utc() - datetime.timedelta(hours=1))
times.append(utc() - datetime.timedelta(hours=6))
times.append(utc() - datetime.timedelta(hours=24))
for ts in times:
if not need_to_run(ts):
continue
LOG.debug("running for %s", ts)
fetch(ts)
if __name__ == "__main__":
main(sys.argv)
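# A standalone sketch of the .idx parsing in fetch() above: each line
# is colon-delimited (record, byte offset, date, variable, level, ...),
# and the next line's offset closes the byte range for an HTTP Range
# request. Demo values come from the fetch() docstring.
def _byte_ranges_sketch(idx_text, wanted=("ULWRF", "DSWRF")):
    offsets = []
    neednext = False
    for line in idx_text.split("\n"):
        tokens = line.split(":")
        if len(tokens) < 4:
            continue
        if neednext:
            offsets[-1].append(int(tokens[1]))
            neednext = False
        if tokens[3] in wanted:
            offsets.append([int(tokens[1])])
            neednext = True
    return offsets
# _byte_ranges_sketch("80:54371554:d=2014101002:ULWRF:top of atmosphere:anl:\n"
#                     "81:56146124:d=2014101002:DSWRF:surface:anl:\n"
#                     "82:58146124:d=2014101002:TMP:surface:anl:")
# -> [[54371554, 56146124], [56146124, 58146124]]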
|
CentralLabFacilities/m3meka
|
python/scripts/demo/m3_demo_behaviors.py
|
Python
|
mit
| 2,341
| 0.026057
|
#! /usr/bin/python
#Copyright 2010, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.toolbox_beh as m3b
class DemoBehaviors:
def __init__(self):
self.ts=time.time()
self.tl=time.time()
def always_print(self):
print 'Always',time.time()-self.ts
return m3b.res_continue
def every_print(self):
print 'Every',time.time()-self.ts
return m3b.res_continue #m3b.res_finished #run once
def random_print(self):
print 'Random',time.time()-self.ts
return m3b.res_continue #allow to timeout
def whenever_print(self):
print 'Whenever',time.time()-self.ts
return m3b.res_continue #allow to timeout
def whenever_cond(self): #strobe every 3 secs
ret = time.time()-self.tl>3.0
if ret:
self.tl=time.time()
return ret
beh=m3b.M3BehaviorEngine()
db=DemoBehaviors()
beh.define_resource('foo')
beh.define_resource('bar')
beh.always('foo','always_printer',priority=0,action=db.always_print)
beh.every('foo','every_printer',priority=1,action=db.every_print, period=1.0, timeout=0.5, inhibit=2.0)
#beh.whenever('foo','whenever_printer',priority=2,action=db.whenever_print,cond=db.whenever_cond,timeout=0.1)
#beh.random('foo','random_printer',priority=3,action=db.random_print,chance=0.1,timeout=0.25)
for i in range(1000):
beh.step()
|
Tinkerforge/brickv
|
src/brickv/mac_pasteboard_mime_fixed.py
|
Python
|
gpl-2.0
| 2,660
| 0.001128
|
# -*- coding: utf-8 -*-
"""
brickv (Brick Viewer)
Copyright (C) 2019 Matthias Bolte <matthias@tinkerforge.com>
mac_pasteboard_mime_fixed.py: Don't add UTF BOM when copying text to the clipboard
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
# https://bugreports.qt.io/browse/QTBUG-61562
from PyQt5.QtMacExtras import QMacPasteboardMime
class MacPasteboardMimeFixed(QMacPasteboardMime):
def __init__(self):
super().__init__(QMacPasteboardMime.MIME_CLIP)
def convertorName(self):
return 'UnicodeTextUtf8Default'
def flavorFor(self, mime):
if mime == 'text/plain':
return 'public.utf8-plain-text'
parts = mime.split('charset=', 1)
if len(parts) > 1:
            charset = parts[1].split(';', 1)[0]
if charset == 'system':
return 'public.utf8-plain-text'
            if charset in ['iso-10646-ucs-2', 'utf16']:
return 'public.utf16-plain-text'
return None
def canConvert(self, mime, flavor):
return mime.startswith('text/plain') and flavor in ['public.utf8-plain-text', 'public.utf16-plain-text']
def mimeFor(self, flavor):
if flavor == 'public.utf8-plain-text':
return 'text/plain'
if flavor == 'public.utf16-plain-text':
return 'text/plain;charset=utf16'
return None
def convertFromMime(self, mime, data, flavor):
if flavor == 'public.utf8-plain-text':
return [data.encode('utf-8')]
if flavor == 'public.utf16-plain-text':
return [data.encode('utf-16')]
return []
def convertToMime(self, mime, data, flavor):
if len(data) > 1:
raise ValueError('Cannot handle multiple data members')
data = data[0]
if flavor == 'public.utf8-plain-text':
return data.decode('utf-8')
if flavor == 'public.utf16-plain-text':
return data.decode('utf-16')
raise ValueError('Unhandled MIME type: {0}'.format(mime))
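# A hedged usage sketch: Qt routes clipboard conversions through any
# live QMacPasteboardMime instance, so constructing one (and keeping a
# reference for the application's lifetime) is sufficient; macOS only:
#
#     import sys
#     if sys.platform == 'darwin':
#         _pasteboard_mime_fixed = MacPasteboardMimeFixed()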
|
timothycrosley/WebBot
|
instant_templates/update_webbot_appengine/WebElements/StringUtils.py
|
Python
|
gpl-2.0
| 7,945
| 0.00365
|
'''
StringUtils.py
Provides methods that ease complex python string operations
Copyright (C) 2013 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import random
import re
import string
import types
from urllib import urlencode
from .MultiplePythonSupport import *
from . import ClientSide
INVALID_CONTROL_CHARACTERS = [
chr(0x00),
chr(0x01),
chr(0x02),
chr(0x03),
chr(0x04),
chr(0x05),
chr(0x06),
chr(0x07),
chr(0x08),
chr(0x0b),
chr(0x0c),
chr(0x0e),
chr(0x0f),
chr(0x10),
chr(0x11),
chr(0x12),
chr(0x13),
chr(0x14),
chr(0x15),
chr(0x16),
chr(0x17),
chr(0x18),
chr(0x19),
chr(0x1a),
chr(0x1b),
chr(0x1c),
chr(0x1d),
chr(0x1e),
chr(0x1f)
]
def patternSplit(text, pattern):
"""
Splits a string into a list of strings at each point it matches a pattern:
    text - the text to match against
pattern - a regex pattern to match against
"""
matchObj = re.compile(pattern).split(text)
tokenList = []
for element in matchObj:
if element != "":
tokenList.append(element.upper())
return tokenList
def removeAlphas(value):
"""
    Returns the string with all non-digit characters removed.
"""
newValue = ''
for part in value:
if part.isdigit():
newValue += part
return newValue
def convertIterableToString(iterable):
"""
Returns a string representation of an iterable value.
"""
return ' '.join([interpretAsString(item) or '' for item in iterable])
def convertBoolToString(boolean):
"""
Returns a string representation of a boolean value.
"""
return unicode(boolean).lower()
def convertFloatToString(value):
"""
Returns a string representation of a float value.
"""
return "%f%%" % (value * 100.0)
typeDict = {bool:convertBoolToString, float:convertFloatToString}
for pythonType in (str, unicode) + (int, long):
typeDict[pythonType] = unicode
for pythonType in (types.GeneratorType, list, tuple, set):
typeDict[pythonType] = convertIterableToString
getTypeDict = typeDict.get
def interpretAsString(value):
"""
    returns a string from lists, booleans, dictionaries, callbacks,
    or function/instance methods
"""
if value is None:
return ''
call = getTypeDict(type(value), None)
if call:
return call(value)
elif not value:
return None
elif isinstance(value, dict):
asString = ""
for dictKey, dictValue in iteritems(value):
dictValue = interpretAsString(dictValue)
if dictValue is not None:
asString += unicode(dictKey) + ':' + dictValue + ';'
return asString
elif hasattr(value, "__call__"):
return interpretAsString(value())
elif type(value) == float:
return "%f%%" % (value * 100.0)
return unicode(value)
def interpretFromString(value):
"""
    returns the python equivalent value from an xml string (such as an attribute value):
value - the html value to interpret
"""
lowerCaseValue = value.lower()
if lowerCaseValue == "true":
return True
    elif lowerCaseValue == "false":
return False
elif lowerCaseValue == "none":
return None
return value
def listReplace(inString, listOfItems, replacement):
"""
    Replaces instances of items within listOfItems with replacement:
inString - the string to do replacements on
listOfItems - a list of strings to replace
    replacement - what to replace it with (or a list of replacements the same length as the list of items)
"""
isStringReplace = type(replacement) in (str, unicode)
for item in listOfItems:
if isStringReplace:
inString = inString.replace(item, replacement)
else:
inString = inString.replace(item, replacement[listOfItems.index(item)])
return inString
def removeDelimiters(inString, replacement=""):
"""
Removes the specified delimiters from the inString.
"""
return listReplace(inString, ['.', ',', '+', '-', '/', '\\'], replacement)
def stripControlChars(text, fromFront=True, fromBack=True):
"""
Removes control characters from supplied text.
"""
if not text:
return ''
invalidChars = ''.join(INVALID_CONTROL_CHARACTERS)
if fromFront and fromBack:
return text.strip(invalidChars)
elif fromFront:
return text.lstrip(invalidChars)
elif fromBack:
return text.rstrip(invalidChars)
else:
return text
def findIndexes(text, subString):
"""
Returns a set of all indexes of subString in text.
"""
indexes = set()
lastFoundIndex = 0
while True:
foundIndex = text.find(subString, lastFoundIndex)
if foundIndex == -1:
break
indexes.add(foundIndex)
lastFoundIndex = foundIndex + 1
return indexes
def encodeAnything(anything, encoding='utf8'):
"""
Returns any data that is passed in encoded into the specified encoding or throws an exception.
"""
if type(anything) in (str, unicode):
return unicode(anything).encode(encoding)
if isinstance(anything, list):
for index, thing in enumerate(anything):
anything[index] = encodeAnything(thing, encoding)
return anything
if isinstance(anything, dict):
for key, thing in iteritems(anything):
anything[key] = encodeAnything(thing, encoding)
return anything
if type(anything) == tuple:
return tuple([encodeAnything(thing) for thing in anything])
return anything
def generateRandomKey(size=20, chars=string.ascii_uppercase + string.digits):
"""
Generates a random key of a certain length, based on a given pool of characters
    size - the length of the random key
chars - the pool of characters from which to pool each item
"""
return ''.join(random.choice(chars) for x in range(size))
def everyDirAndSub(directory):
"""
Splits a directory to get every directory and subdirectory as a list.
"""
ret = []
idx = 0
while True:
try:
idx = directory.index('/', idx + 1)
        except ValueError:
break
ret += [directory[:idx]]
ret += [directory]
return ret
def scriptURL(argumentDictionary):
"""
    Encodes a dictionary into a URL, while allowing scripts to be run to form the URL client side
"""
scriptParams = []
for argumentName, argumentValue in argumentDictionary.items():
if isinstance(argumentValue, ClientSide.Script):
argumentDictionary.pop(argumentName)
scriptParams.append('%s=" + %s' % (argumentName, argumentValue.claim()))
if not scriptParams:
return urlencode(argumentDictionary)
elif argumentDictionary:
        scriptParams.append(urlencode(argumentDictionary))
urlScript = ' + "'.join(scriptParams)
if not argumentDictionary:
urlScript = '"' + urlScript
return ClientSide.Script(urlScript)
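# Illustrative checks of a few helpers above (expected values follow
# directly from the implementations; not part of the original module):
#
#     interpretFromString("TRUE")           # -> True
#     findIndexes("abcabc", "abc")          # -> {0, 3}
#     listReplace("a-b.c", ['-', '.'], '_') # -> 'a_b_c'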
|
Critical-Impact/ffrpg-gen
|
django/settings/dev.py
|
Python
|
mit
| 650
| 0.013846
|
from settings.common import Common
class Dev(Common):
DATABASES = {
'default': {
            'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'ffrpg.sql', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
            'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
|
jrleeman/MetPy
|
metpy/io/_tools.py
|
Python
|
bsd-3-clause
| 13,290
| 0.001204
|
# Copyright (c) 2009,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""A collection of general purpose tools for reading files."""
from __future__ import print_function
import bz2
from collections import namedtuple
import gzip
import logging
from struct import Struct
import zlib
from ..units import UndefinedUnitError, units
log = logging.getLogger(__name__)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
bytearray_to_buff = buffer
except NameError:
bytearray_to_buff = memoryview
def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
if hasattr(filename, 'read'):
return filename
if filename.endswith('.bz2'):
return bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
return gzip.GzipFile(filename, 'rb')
else:
return open(filename, 'rb')
class UnitLinker(object):
r"""Wrap a :class:`metpy.io.cdm.Variable` and handle units.
Converts any attached unit attribute to a class:`pint.Unit`. It also handles converting
data returns to be instances of class:`pint.Quantity` rather than bare (unit-less) arrays.
"""
def __init__(self, var):
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
"""
self._var = var
try:
self._unit = units(self._var.units)
except (AttributeError, UndefinedUnitError):
self._unit = None
def __getitem__(self, ind):
"""Get data from the underlying variable and add units."""
ret = self._var[ind]
return ret if self._unit is None else ret * self._unit
def __getattr__(self, item):
"""Forward all attribute access onto underlying variable."""
return getattr(self._var, item)
@property
def units(self):
"""Access the units from the underlying variable as a :class:`pint.Quantity`."""
return self._unit
@units.setter
def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
self._unit = val
else:
self._unit = units(val)
class NamedStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields."""
def __init__(self, info, prefmt='', tuple_name=None):
"""Initialize the NamedStruct."""
if tuple_name is None:
tuple_name = 'NamedStruct'
names, fmts = zip(*info)
self.converters = {}
conv_off = 0
for ind, i in enumerate(info):
if len(i) > 2:
self.converters[ind - conv_off] = i[-1]
elif not i[0]: # Skip items with no name
conv_off += 1
self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
super(NamedStruct, self).__init__(prefmt + ''.join(f for f in fmts if f))
def _create(self, items):
if self.converters:
items = list(items)
for ind, conv in self.converters.items():
items[ind] = conv(items[ind])
if len(items) < len(self._tuple._fields):
items.extend([None] * (len(self._tuple._fields) - len(items)))
return self.make_tuple(*items)
def make_tuple(self, *args, **kwargs):
"""Construct the underlying tuple from values."""
return self._tuple(*args, **kwargs)
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(NamedStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Read bytes from a buffer and return as a namedtuple."""
return self._create(super(NamedStruct, self).unpack_from(buff, offset))
def unpack_file(self, fobj):
"""Unpack the next bytes from a file object."""
return self.unpack(fobj.read(self.size))
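# Worked example for NamedStruct (illustrative, not from the original
# module): two little-endian fields, the second converted via Bits.
#
#     fmt = NamedStruct([('id', 'H'), ('flags', 'B', Bits(4))], '<', 'Demo')
#     fmt.unpack(b'\x01\x00\x05')
#     -> Demo(id=1, flags=[True, False, True, False])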
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields using dictionary access."""
def __init__(self, info, prefmt=''):
"""Initialize the DictStruct."""
names, formats = zip(*info)
# Remove empty names
self._names = [n for n in names if n]
super(DictStruct, self).__init__(prefmt + ''.join(f for f in formats if f))
def _create(self, items):
return dict(zip(self._names, items))
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(DictStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Unpack the next bytes from a file object."""
return self._create(super(DictStruct, self).unpack_from(buff, offset))
class Enum(object):
"""Map values to specific strings."""
def __init__(self, *args, **kwargs):
"""Initialize the mapping."""
# Assign values for args in order starting at 0
self.val_map = {ind: a for ind, a in enumerate(args)}
# Invert the kwargs dict so that we can map from value to name
self.val_map.update(zip(kwargs.values(), kwargs.keys()))
def __call__(self, val):
"""Map an integer to the string representation."""
return self.val_map.get(val, 'Unknown ({})'.format(val))
class Bits(object):
"""Breaks an integer into a specified number of True/False bits."""
def __init__(self, num_bits):
"""Initialize the number of bits."""
self._bits = range(num_bits)
def __call__(self, val):
"""Convert the integer to the list of True/False values."""
return [bool((val >> i) & 0x1) for i in self._bits]
class BitField(object):
"""Convert an integer to a string for each bit."""
def __init__(self, *names):
"""Initialize the list of named bits."""
self._names = names
def __call__(self, val):
"""Return a list with a string for each True bit in the integer."""
if not val:
return None
bits = []
for n in self._names:
if val & 0x1:
bits.append(n)
val >>= 1
if not val:
break
# Return whole list if empty or multiple items, otherwise just single item
return bits[0] if len(bits) == 1 else bits
class Array(object):
"""Use a Struct as a callable to unpack a bunch of bytes as a list."""
def __init__(self, fmt):
"""Initialize the Struct unpacker."""
self._struct = Struct(fmt)
def __call__(self, buf):
"""Perform the actual unpacking."""
return list(self._struct.unpack(buf))
class IOBuffer(object):
"""Holds bytes from a buffer to simplify parsing and random access."""
def __init__(self, source):
"""Initialize the IOBuffer with the source data."""
self._data = bytearray(source)
self._offset = 0
self.clear_marks()
@classmethod
def fromfile(cls, fobj):
"""Initialize the IOBuffer with the contents of the file object."""
return cls(fobj.read())
def set_mark(self):
"""Mark the current location and return its id so that the buffer can return later."""
self._bookmarks.append(self._offset)
return len(self._bookmarks) - 1
def jump_to(self, mark, offset=0):
"""Jump to a previously set mark."""
self._offset = self._bookmarks[mark] + offset
def offset_from(self, mark):
"""Calculate the current offset relative to a marked location."""
return self._offset - self._bookmarks[mark]
def clear_marks(self):
"""Clear all marked locations."""
self._bookmarks = []
def splice(self, mark, newdata):
"""Replace the data afte
|
antoinecarme/pyaf
|
tests/perf/test_long_cycles_nbrows_cycle_length_11000_440.py
|
Python
|
bsd-3-clause
| 89
| 0.022472
|
import tests.perf.test_cycles_full_long_long as gen
gen.test_nbrows_cycle(11000 , 440)
|
rongoro/clusto
|
src/clusto/drivers/devices/powerstrips/servertech.py
|
Python
|
bsd-3-clause
| 4,454
| 0.021105
|
"""
Server Technology Power Strips
"""
from basicpowerstrip import BasicPowerStrip
from clusto.drivers.devices.common import IPMixin, SNMPMixin
from clusto.drivers.resourcemanagers import IPManager
from clusto.exceptions import DriverException
import re
class PowerTowerXM(BasicPowerStrip, IPMixin, SNMPMixin):
"""
Provides support for Power Tower XL/XM
Power Port designations start with 1 at the upper left (.aa1) down to 32
at the bottom right (.bb8).
"""
_driver_name = "powertowerxm"
_properties = {'withslave':0}
_portmeta = { 'pwr-nema-L5': { 'numports':2 },
'pwr-nema-5' : { 'numports':16, },
'nic-eth' : { 'numports':1, },
                  'console-serial' : { 'numports':1, },
}
_portmap = {'aa1':1,'aa2':2,'aa3':3,'aa4':4,'aa5':5,'aa6':6,'aa7':7,'aa8':8,
'ab1':9,'ab2':10,'ab3':11,'ab4':12,'ab5':13,'ab6':14,'ab7':15,
'ab8':16,'ba1':17,'ba2':18,'ba3':19,'ba4':20,'ba5':21,'ba6':22,
                'ba7':23,'ba8':24,'bb1':25,'bb2':26,'bb3':27,'bb4':28,'bb5':29,
'bb6':30,'bb7':31,'bb8':32}
_outlet_states = ['idleOff', 'idleOn', 'wakeOff', 'wakeOn', 'off', 'on', 'lockedOff', 'reboot', 'shutdown', 'pendOn', 'pendOff', 'minimumOff', 'minimumOn', 'eventOff', 'eventOn', 'eventReboot', 'eventShutdown']
def _ensure_portnum(self, porttype, portnum):
"""map powertower port names to clusto port numbers"""
        if not self._portmeta.has_key(porttype):
            msg = "No port %s:%s exists on %s." % (porttype, str(portnum), self.name)
            raise ConnectionException(msg)
if isinstance(portnum, int):
num = portnum
else:
if portnum.startswith('.'):
portnum = portnum[1:]
if self._portmap.has_key(portnum):
num = self._portmap[portnum]
else:
msg = "No port %s:%s exists on %s." % (porttype, str(num),
self.name)
raise ConnectionException(msg)
        numports = self._portmeta[porttype]['numports']
if self.withslave:
if porttype in ['mains', 'pwr']:
numports *= 2
if num < 0 or num >= numports:
msg = "No port %s:%s exists on %s." % (porttype, str(num),
self.name)
raise ConnectionException(msg)
return num
def _get_port_oid(self, outlet):
for oid, value in self._snmp_walk('1.3.6.1.4.1.1718.3.2.3.1.2'):
if value.lower() == outlet:
return oid
def get_outlet_state(self, outlet):
oid = self._get_port_oid(outlet)
oid = oid.replace('1.3.6.1.4.1.1718.3.2.3.1.2', '1.3.6.1.4.1.1718.3.2.3.1.10')
state = self._snmp_get(oid)
return self._outlet_states[int(state)]
def set_outlet_state(self, outlet, state, session=None):
oid = self._get_port_oid(outlet)
oid = oid.replace('1.3.6.1.4.1.1718.3.2.3.1.2', '1.3.6.1.4.1.1718.3.2.3.1.11')
r = self._snmp_set(oid, state)
if r.PDU.varbindlist[0].value.val != state:
raise DriverException('Unable to set SNMP state')
def set_power_off(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot turn off ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.set_outlet_state(portnum, 2)
def set_power_on(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot turn off ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.set_outlet_state(portnum, 1)
def reboot(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot reboot ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.get_outlet_state(portnum)
nextstate = None
if state == 'off':
nextstate = 1
if state in ('idleOn', 'on', 'wakeOn'):
nextstate = 3
if not nextstate:
raise DriverException('Outlet in unexpected state: %s' % state)
self.set_outlet_state(portnum, nextstate)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/astroid/tests/unittest_inference.py
|
Python
|
agpl-3.0
| 59,256
| 0.002582
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""tests for the astroid inference capabilities
"""
import sys
from functools import partial
import unittest
import six
from astroid import InferenceError, builder, nodes
from astroid.inference import infer_end as inference_infer_end
from astroid.bases import YES, Instance, BoundMethod, UnboundMethod,\
path_wrapper, BUILTINS
from astroid import test_utils
from astroid.tests import resources
def get_node_of_class(start_from, klass):
return next(start_from.nodes_of_class(klass))
builder = builder.AstroidBuilder()
if sys.version_info < (3, 0):
EXC_MODULE = 'exceptions'
else:
EXC_MODULE = BUILTINS
class InferenceUtilsTest(unittest.TestCase):
def test_path_wrapper(self):
def infer_default(self, *args):
raise InferenceError
infer_default = path_wrapper(infer_default)
infer_end = path_wrapper(inference_infer_end)
with self.assertRaises(InferenceError):
next(infer_default(1))
self.assertEqual(next(infer_end(1)), 1)
def _assertInferElts(node_type, self, node, elts):
infered = next(node.infer())
self.assertIsInstance(infered, node_type)
self.assertEqual(sorted(elt.value for elt in infered.elts),
elts)
def partialmethod(func, arg):
"""similar to functools.partial but return a lambda instead of a class so returned value may be
turned into a method.
"""
return lambda *args, **kwargs: func(arg, *args, **kwargs)
class InferenceTest(resources.SysPathSetup, unittest.TestCase):
# additional assertInfer* method for builtin types
def assertInferConst(self, node, expected):
infered = next(node.infer())
self.assertIsInstance(infered, nodes.Const)
self.assertEqual(infered.value, expected)
def assertInferDict(self, node, expected):
infered = next(node.infer())
self.assertIsInstance(infered, nodes.Dict)
elts = set([(key.value, value.value)
for (key, value) in infered.items])
self.assertEqual(sorted(elts), sorted(expected.items()))
assertInferTuple = partialmethod(_assertInferElts, nodes.Tuple)
assertInferList = partialmethod(_assertInferElts, nodes.List)
assertInferSet = partialmethod(_assertInferElts, nodes.Set)
CODE = '''
class C(object):
"new style"
attr = 4
def meth1(self, arg1, optarg=0):
var = object()
print ("yo", arg1, optarg)
self.iattr = "hop"
return var
def meth2(self):
self.meth1(*self.meth3)
def meth3(self, d=attr):
b = self.attr
c = self.iattr
return b, c
ex = Exception("msg")
v = C().meth1(1)
m_unbound = C.meth1
m_bound = C().meth1
a, b, c = ex, 1, "bonjour"
[d, e, f] = [ex, 1.0, ("bonjour", v)]
g, h = f
i, (j, k) = "glup
|
", f
a, b= b, a # Gasp !
'''
ast = test_utils.build_module(CODE, __name__)
def test_module_inference(self):
infered = self.ast.infer()
obj = next(infered)
self.assertEqual(obj.name, __name__)
self.assertEqual(obj.root().name, __name__)
self.assertRaises(StopIteration, partial(next, infered))
def test_class_inference(self):
infered = self.ast['C'].infer()
        obj = next(infered)
self.assertEqual(obj.name, 'C')
self.assertEqual(obj.root().name, __name__)
self.assertRaises(StopIteration, partial(next, infered))
def test_function_inference(self):
infered = self.ast['C']['meth1'].infer()
obj = next(infered)
self.assertEqual(obj.name, 'meth1')
self.assertEqual(obj.root().name, __name__)
self.assertRaises(StopIteration, partial(next, infered))
def test_builtin_name_inference(self):
infered = self.ast['C']['meth1']['var'].infer()
var = next(infered)
self.assertEqual(var.name, 'object')
self.assertEqual(var.root().name, BUILTINS)
self.assertRaises(StopIteration, partial(next, infered))
def test_tupleassign_name_inference(self):
infered = self.ast['a'].infer()
exc = next(infered)
self.assertIsInstance(exc, Instance)
self.assertEqual(exc.name, 'Exception')
self.assertEqual(exc.root().name, EXC_MODULE)
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['b'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, 1)
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['c'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, "bonjour")
self.assertRaises(StopIteration, partial(next, infered))
def test_listassign_name_inference(self):
infered = self.ast['d'].infer()
exc = next(infered)
self.assertIsInstance(exc, Instance)
self.assertEqual(exc.name, 'Exception')
self.assertEqual(exc.root().name, EXC_MODULE)
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['e'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, 1.0)
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['f'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Tuple)
self.assertRaises(StopIteration, partial(next, infered))
def test_advanced_tupleassign_name_inference1(self):
infered = self.ast['g'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, "bonjour")
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['h'].infer()
var = next(infered)
self.assertEqual(var.name, 'object')
self.assertEqual(var.root().name, BUILTINS)
self.assertRaises(StopIteration, partial(next, infered))
def test_advanced_tupleassign_name_inference2(self):
infered = self.ast['i'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, u"glup")
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['j'].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, "bonjour")
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast['k'].infer()
var = next(infered)
self.assertEqual(var.name, 'object')
self.assertEqual(var.root().name, BUILTINS)
self.assertRaises(StopIteration, partial(next, infered))
def test_swap_assign_inference(self):
infered = self.ast.locals['a'][1].infer()
const = next(infered)
self.assertIsInstance(const, nodes.Const)
self.assertEqual(const.value, 1)
self.assertRaises(StopIteration, partial(next, infered))
infered = self.ast.locals['b'][1].infer()
exc = next(infered)
self.assertIsInstance(exc, Instance)
|
elastic7327/django-tdd-restful-api
|
src/posts/tests/base.py
|
Python
|
mit
| 1,621
| 0
|
"""
File: base.py
Author: Me
Email: yourname@email.com
Github: https://github.com/yourname
Description:
"""
from datetime import timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from rest_framework.test import APITestCase
# from oauth2_provider.tests.test_utils import TestCaseUtils
from oauth2_provider.models import get_application_model, AccessToken
from rest_framework import status
import json
import pytest
from mixer.backend.django import mixer
Application = get_application_model()
pytestmark = pytest.mark.django_db
class PostsBaseTest(APITestCase):
def test_create_user_model(self):
User.objects.create(
username='Hello_World'
)
assert User.objects.count() == 1, "Should be equal"
def set_oauth2_app_by_admin(self, user):
app = Application.objects.create(
name='SuperAPI OAUTH2 APP',
user=user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
return app
def get_token(self, access_user, app):
random = get_random_string(length=1024)
access_token = AccessToken.objects.create(
user=access_user,
scope='read write',
expires=timezone.now() + timedelta(minutes=5),
token=f'{random}---{access_user.username}',
application=app
)
return access_token.token
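# Hedged usage sketch (the endpoint and test below are assumptions, not part
# of this base class): a subclass would typically chain the two helpers and
# hand the token to DRF's test client.
#
#   class PostListTest(PostsBaseTest):
#       def test_list_requires_token(self):
#           user = User.objects.create(username='alice')
#           app = self.set_oauth2_app_by_admin(user)
#           token = self.get_token(user, app)
#           self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)
#           response = self.client.get('/api/posts/')
#           assert response.status_code != status.HTTP_401_UNAUTHORIZED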
|
bniemczyk/symbolic
|
symath/datastructures/onetimequeue.py
|
Python
|
bsd-2-clause
| 333
| 0.021021
|
from collections import deque
class onetimequeue(object):
def __init__ (self):
self._q = deque()
self._seen = set()
def push(self, obj):
if obj in self._seen:
return
self._seen.add(obj)
self._q.append(obj)
def pop(self):
return self._q.popleft()
def __len__(self):
return len(self._q)
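# Usage sketch (not part of the original module): each object passes through
# the queue at most once, even if pushed again after being popped.
#
#   q = onetimequeue()
#   q.push('a'); q.push('b'); q.push('a')
#   len(q)    # -> 2, the second push of 'a' was ignored
#   q.pop()   # -> 'a'
#   q.push('a')
#   len(q)    # -> 1, 'a' was already seen so the re-push is ignored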
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingMedian/cycle_7/ar_12/test_artificial_1024_Logit_MovingMedian_7_12_100.py
|
Python
|
bsd-3-clause
| 266
| 0.086466
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 12);
|
crs4/seal
|
tests/tseal/seqal/test_reducer.py
|
Python
|
gpl-3.0
| 23,258
| 0.008814
|
# Copyright (C) 2011-2012 CRS4.
#
# This file is part of Seal.
#
# Seal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Seal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Seal. If not, see <http://www.gnu.org/licenses/>.
import unittest
import re
import sys
from pydoop.mapreduce.api import JobConf
from seal.lib.aligner.sam_mapping import SAMMapping
from seal.lib.mr.test_utils import reduce_context
from seal.seqal.reducer import reducer
from seal.seqal import PAIR_STRING
import seal.lib.io.protobuf_mapping as proto
import seal.lib.aligner.sam_flags as sam_flags
import test_utils # specific to seqal
class TestSeqalReducer(unittest.TestCase):
def setUp(self):
self.__jc = JobConf([])
self.__ctx = reduce_context(self.__jc, [])
self.__reducer = reducer(self.__ctx)
self.__reducer.discard_duplicates = True
self.__clean_reducer = reducer(self.__ctx) # unmodified
def test_emit_on_left_key(self):
# load pair 1
p = test_utils.pair1()
# use the first read to create the map-reduce key
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__reducer.reduce(self.__ctx)
self.__ensure_only_pair1_emitted()
def test_no_emit_on_right_key(self):
# load pair 1
p = test_utils.pair1()
# use the SECOND read to create the map-reduce key
self.__ctx.add_value(test_utils.make_key(p[1]), PAIR_STRING)
self.__reducer.reduce(self.__ctx)
# we should have no output
self.assertEqual(0, len(self.__ctx.emitted.keys()))
def test_duplicate_pairs(self):
# Two identical pairs. Ensure only one is emitted
p = test_utils.pair1()
# use the first read to create the map-reduce key
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p)) # add it twice
self.__reducer.reduce(self.__ctx)
self.assertEqual(1, len(self.__ctx.emitted.keys()))
self.assertEqual(2, len(self.__ctx.emitted.values()[0])) # two SAM records associated with the same key
self.__ensure_only_pair1_emitted()
# check counter
if self.__ctx.counters.has_key(self.__frag_counter_name()):
self.assertEqual(0, self.__ctx.counters[self.__frag_counter_name()])
self.assertTrue(self.__ctx.counters.has_key(self.__pair_counter_name()))
self.assertEqual(1, self.__ctx.counters[self.__pair_counter_name()])
def test_duplicate_pairs_no_discard(self):
# Two identical pairs. Ensure only one is emitted
p = test_utils.pair1()
# use the first read to create the map-reduce key
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p)) # add it twice
self.__reducer.discard_duplicates = False
self.__reducer.reduce(self.__ctx)
self.assertEqual(1, len(self.__ctx.emitted.keys()))
self.assertEqual(4, len(self.__ctx.emitted.values()[0])) # four SAM records associated with the same key
flags = map(lambda sam: int(*re.match("(\d+).*", sam).groups(1)), self.__ctx.emitted.values()[0])
# ensure we have two marked as duplicates
self.assertEqual(2, len(filter(lambda flag: flag & sam_flags.SAM_FDP, flags)) )
# ensure we have two NOT marked as duplicates
self.assertEqual(2, len(filter(lambda flag: flag & sam_flags.SAM_FDP == 0, flags)) )
# check counter
if self.__ctx.counters.has_key(self.__frag_counter_name()):
self.assertEqual(0, self.__ctx.counters[self.__frag_counter_name()])
self.assertTrue(self.__ctx.counters.has_key(self.__pair_counter_name()))
self.assertEqual(1, self.__ctx.counters[self.__pair_counter_name()])
def test_duplicate_pairs_right_key(self):
# Two identical pairs on the right key
# Ensure nothing is emitted
p = test_utils.pair1()
# use the first read to create the map-reduce key
self.__ctx.add_value(test_utils.make_key(p[1]), PAIR_STRING)
self.__ctx.add_value(test_utils.make_key(p[1]), PAIR_STRING) # add it twice
self.__reducer.reduce(self.__ctx)
self.assertEqual(0, len(self.__ctx.emitted.keys()))
# check counter
if self.__ctx.counters.has_key(self.__pair_counter_name()):
self.assertEqual(0, self.__ctx.counters[self.__pair_counter_name()])
if self.__ctx.counters.has_key(self.__frag_counter_name()):
self.assertEqual(0, self.__ctx.counters[self.__frag_counter_name()])
def test_duplicate_fragments_read1(self):
# load pair 1
p = list(test_utils.pair1())
p = test_utils.erase_read2(p)
p0 = p[0]
# insert the pair into the context, twice
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__reducer.reduce(self.__ctx)
self.assertEqual(1, len(self.__ctx.emitted.keys()))
self.assertEqual(1, len(self.__ctx.emitted.values()[0])) # only one SAM record associated with the key
short_name = p0.get_name()[0:-2]
self.assertEqual(short_name, self.__ctx.emitted.keys()[0])
self.assertTrue( re.match("\d+\s+%s\s+%d\s+.*" % (p0.tid, p0.pos), self.__ctx.emitted[short_name][0]) )
# check counter
self.assertFalse(self.__ctx.counters.has_key(self.__pair_counter_name()))
self.assertTrue(self.__ctx.counters.has_key(self.__frag_counter_name()))
self.assertEqual(1, self.__ctx.counters[self.__frag_counter_name()])
def test_duplicate_fragments_read1_no_discard(self):
# load pair 1 and erase its second read
p = list(test_utils.pair1())
p = test_utils.erase_read2(p)
p0 = p[0]
# insert the pair into the context, twice
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
self.__reducer.discard_duplicates = False
self.__reducer.reduce(self.__ctx)
self.assertEqual(1, len(self.__ctx.emitted.keys()))
self.assertEqual(2, len(self.__ctx.emitted.values()[0])) # Two SAM records associated with the key
short_name = p0.get_name()[0:-2]
self.assertEqual(short_name, self.__ctx.emitted.keys()[0])
flags = map(lambda sam: int(*re.match("(\d+).*", sam).groups(1)), self.__ctx.emitted.values()[0])
# ensure we have one marked as duplicate
self.assertEqual(1, len(filter(lambda flag: flag & sam_flags.SAM_FDP, flags)) )
# and ensure we have one NOT marked as duplicates
self.assertEqual(1, len(filter(lambda flag: flag & sam_flags.SAM_FDP == 0, flags)) )
# check counter
self.assertFalse(self.__ctx.counters.has_key(self.__pair_counter_name()))
self.assertTrue(self.__ctx.counters.has_key(self.__frag_counter_name()))
self.assertEqual(1, self.__ctx.counters[self.__frag_counter_name()])
def test_empty_read1(self):
# Ensure the reducer raises an exception if the pair[0] is None
p = test_utils.erase_read1(list(test_utils.pair1()))
self.__ctx.add_value(test_utils.make_key(p[1]), proto.serialize_pair(p))
self.assertRaises(ValueError, self.__reducer.reduce, self.__ctx)
def test_fragment_with_duplicate_in_pair_1(self):
# Ensure the reducer catches a fragment duplicate of pair[0]
p = list(test_utils.pair1())
self.__ctx.add_value(test_utils.make_key(p[0]), proto.serialize_pair(p))
|
jminuscula/dixit-online
|
server/src/dixit/game/test/round.py
|
Python
|
mit
| 13,429
| 0.003053
|
from django.test import TestCase
from django.contrib.auth.models import User
from dixit import settings
from dixit.game.models.game import Game
from dixit.game.models.player import Player
from dixit.game.models.round import Round, RoundStatus, Play
from dixit.game.models.card import Card
from dixit.game.exceptions import GameInvalidPlay, GameRoundIncomplete, GameDeckExhausted
class PlayTest(TestCase):
fixtures = ['game_testcards.json', ]
def setUp(self):
self.user = User.objects.create(username='test', email='test@localhost', password='test')
self.user2 = User.objects.create(username='test2', email='test2@localhost', password='test')
self.user3 = User.objects.create(username='test3', email='test3@localhost', password='test')
self.game = Game.new_game(name='test', user=self.user, player_name='storyteller')
self.current = self.game.current_round
self.player2 = self.game.add_player(self.user2, 'player2')
self.player3 = self.game.add_player(self.user3, 'player3')
def test_play_can_be_performed_for_round(self):
story_card = self.game.storyteller._pick_card()
Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
self.assertEqual(self.current.plays.count(), 1)
def test_storyteller_can_provide_card(self):
story_play = Play(game_round=self.current, player=self.game.storyteller)
story_play.provide_card(self.game.storyteller._pick_card(), 'story')
self.assertEqual(self.current.plays.count(), 1)
def test_players_cant_provide_card_before_storyteller(self):
with self.assertRaises(GameInvalidPlay):
Play.play_for_round(self.current, self.player2, self.player2._pick_card())
def test_players_can_provide_card_after_storyteller(self):
Play.play_for_round(self.current, self.game.storyteller, self.game.storyteller._pick_card(), 'story')
Play.play_for_round(self.current, self.player2, self.player2._pick_card())
self.assertEqual(self.current.plays.count(), 2)
def test_players_can_not_provide_card_after_voting(self):
# TODO
pass
def test_players_can_choose_played_card(self):
story_card = self.game.storyteller._pick_card()
story_play = Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
play2 = Play.play_for_round(self.current, self.player2, self.player2._pick_card())
play3 = Play.play_for_round(self.current, self.player3, self.player3._pick_card())
self.assertEqual(self.current.status, RoundStatus.VOTING)
play2.vote_card(story_card)
def test_players_can_not_choose_unplayed_card(self):
story_card = self.game.storyteller._pick_card()
story_play = Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
card2 = self.player2._pick_card()
play2 = Play.play_for_round(self.current, self.player2, card2)
with self.assertRaises(GameInvalidPlay):
other_card = Card.objects.available_for_game(self.game)[0]
play2.vote_card(other_card)
def test_players_can_not_choose_own_card(self):
story_card = self.game.storyteller._pick_card()
story_play = Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
card2 = self.player2._pick_card()
play2 = Play.play_for_round(self.current, self.player2, card2)
with self.assertRaises(GameInvalidPlay):
play2.vote_card(card2)
def test_storytellers_cant_vote_card(self):
story_card = self.game.storyteller._pick_card()
story_play = Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
card2 = self.player2._pick_card()
play2 = Play.play_for_round(self.current, self.player2, card2)
with self.assertRaises(GameInvalidPlay):
story_play.vote_card(card2)
class RoundTest(TestCase):
fixtures = ['game_testcards.json', ]
def setUp(self):
self.user = User.objects.create(username='test', email='test@localhost', password='test')
self.user2 = User.objects.create(username='test2', email='test2@localhost', password='test')
self.user3 = User.objects.create(username='test3', email='test3@localhost', password='test')
self.user4 = User.objects.create(username='test4', email='test4@localhost', password='test')
self.game = Game.new_game(name='test', user=self.user, player_name='storyteller')
self.current = self.game.current_round
self.player2 = self.game.add_player(self.user2, 'player2')
self.player3 = self.game.add_player(self.user3, 'player3')
def test_round_starts_new(self):
self.assertEqual(self.current.status, RoundStatus.NEW)
def test_round_is_new_when_only_storyteller_has_played(self):
story_card = self.game.storyteller._pick_card()
Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
self.assertEqual(self.current.status, RoundStatus.NEW)
def test_round_is_providing_until_all_players_have_provided(self):
story_card = self.game.storyteller._pick_card()
Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
players = self.game.players.exclude(id=self.game.storyteller.id)
for player in players[1:]:
Play.play_for_round(self.current, player, player._pick_card())
self.assertEqual(self.current.status, RoundStatus.PROVIDING)
def test_round_is_voting_when_all_players_have_provided_a_card(self):
Play.play_for_round(self.current, self.game.storyteller, self.game.storyteller._pick_card(), 'story')
players = self.game.players.all().exclude(id=self.game.storyteller.id)
for player in players:
Play.play_for_round(self.current, player, player._pick_card())
self.assertEqual(self.current.status, RoundStatus.VOTING)
def test_round_is_voting_until_all_players_have_voted(self):
story_card = self.current.turn._pick_card()
Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
players = self.game.players.all().exclude(id=self.game.storyteller.id)
for player in players:
Play.play_for_round(self.current, player, player._pick_card())
plays = self.current.plays.all().exclude(player=self.game.storyteller)
for play in plays[1:]:
play.vote_card(story_card)
self.assertEqual(self.current.status, RoundStatus.VOTING)
def test_round_is_complete_when_all_players_have_voted(self):
story_card = self.current.turn._pick_card()
Play.play_for_round(self.current, self.game.storyteller, story_card, 'story')
players = self.game.players.all().exclude(id=self.game.storyteller.id)
for player in players:
Play.play_for_round(self.current, player, player._pick_card())
plays = self.current.plays.all().exclude(player=self.game.storyteller)
for play in plays:
play.vote_card(story_card)
self.assertEqual(self.current.status, RoundStatus.COMPLETE)
def test_round_deals_hands_once_to_players(self):
game_round = Round(game=self.game, number=self.current.number + 1, turn=self.current.turn)
game_round.deal()
game_round.deal()
game_round.deal()
hand_sizes = (p.cards.count() for p in self.game.players.all())
self.assertTrue(all(s == settings.GAME_HAND_SIZE for s in hand_sizes))
def test_round_deals_system_card(self):
game_round = Round(game=self.game, number=self.current.number + 1, turn=self.current.turn)
game_round.deal()
self.assertTrue(game_round.card is not None)
def test_round_deals_system_card_once(self):
game_round = Round(game=self.game, number=self.current.number + 1, turn=self.current.turn)
game_round.deal()
system_card = game_round.card
game_round.deal()
self.assertEqual(system_card, game_round.card)
def test_deal_fails_when_not_enough_cards_available(self):
|
kenwith/cs561
|
cs561-as1-kenwith/.scratch/foo7.py
|
Python
|
gpl-3.0
| 929
| 0.018299
|
#!/usr/bin/python
"""a simple test script"""
from mininet.util import ensureRoot, dumpNodeConnections
from mininet.topo import MinimalTopo, Topo
from mininet.net import Mininet
from time import sleep
#from subprocess import Popen
import subprocess
class SingleSwitchTopo(Topo):
"Single switch connected to n hosts."
def build(self, n=2):
switch = self.addSwitch('s1')
# Python's range(N) generates 0..N-1
for h in range(n):
host = self.addHost('h%s' % (h + 1))
self.addLink(host, switch)
def main():
# Ensure this script is being run as root.
ensureRoot()
topo = SingleSwitchTopo(n=2)
net = Mininet(topo)
net.start()
h1 = net.get('h1')
h2 = net.get('h2')
print "Starting test..."
h1.sendCmd("ifconfig")
sleep(5)
output = h1.waitOutput()
print(output)
net.stop()
if __name__ == "__main__":
main()
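# Note (a sketch, not in the original script): Mininet hosts also expose a
# synchronous cmd() helper, so the sendCmd/sleep/waitOutput sequence above
# can usually be collapsed to:
#
#   output = h1.cmd("ifconfig")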
|
bmazin/ARCONS-pipeline
|
examples/palomar-2011/palomar-2011.py
|
Python
|
gpl-2.0
| 1,501
| 0.009327
|
#
# Look at a .h5 file from the Palomar 2011 run
#
# Set the environment variable MKID_RAW_PATH to point to the data location
#
# Example use:
#
# $ export MKID_RAW_PATH=/Volumes/data/Palomar2011/Pal20110728
# python palomar-2011.py obs_20110729-151443.h5
import sys, os
import tables
print sys.argv
if (len(sys.argv) < 2):
print "Usage: ",sys.argv[0]," hdf5FileName"
sys.exit(1)
# make the full file name by joining the input name to the MKID_RAW_PATH (or .)
hdf5FileName = sys.argv[1]
dataDir = os.getenv('MKID_RAW_PATH','.')
hdf5FullFileName = os.path.join(dataDir,hdf5FileName)
print "full file name is ",hdf5FullFileName
if (not os.path.exists(hdf5FullFileName)):
print "file does not exist: ",hdf5FullFileName
sys.exit(1)
# open the actual file. This might take a while
fid = tables.openFile(hdf5FullFileName, mode='r')
# get the beam image. This is a 2d array of roach board/pixel/time locations
beamImage = fid.getNode("/beammap/beamimage")
# count the total number of photons in the file
nPhoton = 0
iRow = -1
for rows in beamImage:
iRow += 1
print "Begin iRow = ",iRow
iCol = -1
for pixel in rows:
iCol += 1
print " iCol = ",iCol
print " pixel=",pixel
# so now we have a roach board/pixel/time.
for sec in fid.getNode(pixel):
for packet in sec:
# here is the 64-bit number.
packet = int(packet)
nPhoton += 1
print "nPhoton=",nPhoton
|
indonoso/small-Inventory
|
migrations/versions/f719fe7c700a_.py
|
Python
|
mit
| 1,179
| 0.005937
|
"""empty message
Revision ID: f719fe7c700a
Revises: b8fa640ec739
Create Date: 2017-02-15 19:55:55.163798
"""
# revision identifiers, used by Alembic.
revision = 'f719fe7c700a'
down_revision = 'b8fa640ec739'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('production_needs', sa.Column('product', sa.Integer(), nullable=False))
op.drop_constraint('production_needs_product_out_fkey', 'production_needs', type_='foreignkey')
op.create_foreign_key(None, 'production_needs', 'product', ['product'], ['id_'])
op.drop_column('production_needs', 'product_out')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('production_needs', sa.Column('product_out', sa.INTEGER(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'production_needs', type_='foreignkey')
op.create_foreign_key('production_needs_product_out_fkey', 'production_needs', 'product', ['product_out'], ['id_'])
op.drop_column('production_needs', 'product')
# ### end Alembic commands ###
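# Usage note (a sketch, not generated by Alembic): this revision is applied
# or rolled back from the command line in the usual way.
#
#   alembic upgrade f719fe7c700a    # runs upgrade(), swapping in the new FK column
#   alembic downgrade b8fa640ec739  # runs downgrade(), restoring product_out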
|
sjkingo/django-breadcrumbs3
|
breadcrumbs3/tests/views.py
|
Python
|
bsd-2-clause
| 660
| 0.004545
|
from django.http import HttpResponse
from django.template import Template, RequestContext
breadcrumb_template_html = Template("""
{% load breadcrumbs %}
{% breadcrumbs %}
""")
def render(request):
"""
Renders a simple template that calls the breadcrumbs templatetag.
"""
context = RequestContext(request)
return HttpResponse(breadcrumb_template_html.render(context=context))
def some_view_no_url(request):
request.breadcrumbs('Some title', None)
return render(request)
def some_view_with_url(request):
from .tests import BreadcrumbsTest as test
request.breadcrumbs('Some other title', test.s)
return render(request)
|
stormi/tsunami
|
src/primaires/interpreteur/masque/fonctions.py
|
Python
|
bsd-3-clause
| 2,032
| 0.003457
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant des fonctions utiles à la manipulation de masques."""
def chaine_vers_liste(chaine):
"""Convertit une chaîne en sa liste de caractères"""
return list(chaine)
def liste_vers_chaine(liste):
"""Convertit une lsite de caractères en une chaîne"""
return "".join(liste)
def lstrip(liste):
"""Retire les espaces à gauche de la chaîne-liste"""
while liste and liste[0] == " ":
del liste[0]
return liste
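# Usage sketch (not part of the original module): round-tripping through the
# list form to strip leading spaces.
#
#   lst = chaine_vers_liste("   hop la")
#   liste_vers_chaine(lstrip(lst))  # -> "hop la"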
|
vsoch/myconnectome
|
myconnectome/qa/run_qap_func.py
|
Python
|
mit
| 2,129
| 0.053546
|
"""
run quality assurance measures on functional data
"""
import sys,glob
sys.path.append('/corral-repl/utexas/poldracklab/software_lonestar/quality-assessment-protocol')
import os
import numpy
from run_shell_cmd import run_shell_cmd
from compute_fd import compute_fd
from qap import load_func,load_image, load_mask, summary_mask, cnr,efc,fber,fwhm,artifacts,ghost_all,calc_mean_func,calc_dvars,mean_outlier_timepoints,mean_quality_timepoints
basedir='/corral-repl/utexas/poldracklab/data/selftracking/shared_dataset'
funcfiles=glob.glob(os.path.join(basedir,'sub*/BOLD/resting_run001/bold.nii.gz'))
funcdata={'subcode':[],'func_efc':[],'func_fber':[],'func_fwhm':[],'func_gsr':[],'func_dvars':[],'func_outlier':[],'func_quality':[],'func_mean_fd':[],'func_num_fd':[],'func_perc_fd':[]}
#for funcfile in funcfiles:
func_file=funcfiles[0]
if 1:
subcode=func_file.split('/')[7]
print 'processing',subcode
funcdata['subcode'].append(subcode)
mask_file=func_file.replace('.nii.gz','_brain_mask.nii.gz')
if not os.path.exists(mask_file):
cmd='bet %s %s -m -F'%(func_file,func_file.replace('.nii.gz','_brain'))
print cmd
run_shell_cmd(cmd)
func_data = load_func(func_file,mask_file)
mean_func_data = calc_mean_func(func_file)
func_mask = load_mask(mask_file)
func_efc = efc(func_data)
#func_fber = fber(func_data, func_mask)
#func_fwhm = fwhm(func_file, mask_file, out_vox=False)
print 'running ghost_all'
_,func_gsr,_=ghost_all(mean_func_data,func_mask)
print 'running calc_dvars'
func_dvars = calc_dvars(func_data, output_all=False)
print 'running mean_outlier_timepoints'
func_outlier = mean_outlier_timepoints(func_file, mask_file, out_fraction=True)
print 'running compute_fd'
motpars=numpy.loadtxt(func_file.replace('.nii.gz','_mcf.par'))
fd=compute_fd(motpars)
funcdata['mean_gm'].append(mean_gm)
funcdata['mean_wm'].append(mean_wm)
funcdata['std_bg'].append(std_bg)
funcdata['anat_efc'].append(anat_efc)
funcdata['anat_fber'].append(anat_fber)
funcdata['anat_fwhm'].append(anat_fwhm)
funcdata['anat_qi1'].append(anat_qi1)
|
tonioo/modoboa
|
modoboa/core/migrations/0010_auto_20161026_1011.py
|
Python
|
isc
| 1,939
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-10-26 08:11
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
def set_parameter(store, parameter):
"""Add parameter to the specified store."""
app, name = parameter.name.split(".")
if app not in store:
store[app] = {}
# decode value
value = parameter.value.decode("unicode_escape")
if value == "yes":
value = True
elif value == "no":
value = False
elif value.isdigit():
value = int(value)
store[app][name.lower()] = value
def move_parameters(apps, schema_editor):
"""Move global and user parameters."""
Parameter = apps.get_model("lib", "Parameter")
LocalConfig = apps.get_model("core", "LocalConfig")
parameters = {}
for parameter in Parameter.objects.all():
set_parameter(parameters, parameter)
LocalConfig.objects.all().update(_parameters=parameters)
User = apps.get_model("core", "User")
for user in User.objects.prefetch_related("userparameter_set"):
for parameter in user.userparameter_set.all():
set_parameter(user._parameters, parameter)
user.save()
def clear_parameters(apps, schema_editor):
"""Reverse operation."""
pass
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20161026_1003'),
('lib', '0005_auto_20160416_1449'),
]
operations = [
migrations.AddField(
model_name='localconfig',
name='_parameters',
field=jsonfield.fields.JSONField(default={}),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='_parameters',
field=jsonfield.fields.JSONField(default={}),
preserve_default=False,
),
migrations.RunPython(move_parameters, clear_parameters)
]
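# Worked example of set_parameter's decoding rules (standalone sketch; the
# stand-in object below is an assumption for illustration only):
#
#   class FakeParameter(object):
#       name = "admin.ENABLE_FOO"
#       value = b"yes"
#
#   store = {}
#   set_parameter(store, FakeParameter())
#   # store == {"admin": {"enable_foo": True}} -- "yes"/"no" become booleans,
#   # digit strings become ints, everything else stays a string.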
|
spxiwh/pycbc-glue
|
glue/segmentdb/__init__.py
|
Python
|
gpl-3.0
| 1,209
| 0.000827
|
#
# Copyright (C) 2006 Larne Pekowsky
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Utilities for working with segment databases
"""
from glue import git_version
__date__ = git_version.date
__version__ = git_version.id
__author__ = "Larne Pekowsky <lppekows@physics.syr.edu>"
__all__ = ["query_engine", "segmentdb_utils.py"]
|
LettError/glyphBrowser
|
buildExtension.py
|
Python
|
bsd-3-clause
| 2,493
| 0.003209
|
# build RF extension
# run in RF
import os
from mojo.extensions import ExtensionBundle
from mojo.UI import createModifier
print("did you update the names?")
#modifier = createModifier(command=True, shift=True)
#print(f"({modifier}, ']')")
# get current folder
basePath = os.path.dirname(__file__)
# folder with python files
libPath = os.path.join(basePath, 'lib')
# folder with html files
htmlPath = os.path.join(basePath, 'html')
if not os.path.exists(htmlPath):
htmlPath = None
# folder with resources
resourcesPath = os.path.join(basePath, 'resources')
if not os.path.exists(resourcesPath):
resourcesPath = None
# load license text from file
# see http://choosealicense.com/ for more open-source licenses
licensePath = os.path.join(basePath, 'license.txt')
if not os.path.exists(licensePath):
licensePath = None
# boolean indicating if only .pyc should be included
pycOnly = False
# name of the compiled extension file
extensionFile = 'GlyphBrowser.roboFontExt'
# path of the compiled extension
buildPath = basePath
extensionPath = os.path.join(buildPath, extensionFile)
# initiate the extension builder
B = ExtensionBundle()
# name of the extension
B.name = "GlyphBrowser"
# name of the developer
B.developer = 'LettError'
# URL of the developer
B.developerURL = 'http://letterror.com/tools.html'
if resourcesPath:
# extension icon (file path or NSImage)
imagePath = os.path.join(resourcesPath, 'icon.png')
imagePath = 'GlyphBrowserMechanicIcon.png'
B.icon = imagePath
# version of the extension
B.version = '2.6.5'
# should the extension be launched at start-up?
B.launchAtStartUp = False
# script to be executed when RF starts
#B.mainScript = 'startup.py'
# does the extension contain html help files?
B.html = htmlPath is not None
# minimum RoboFont version required for this extension
B.requiresVersionMajor = '3'
B.requiresVersionMinor = '2'
# scripts which should appear in Extensions menu
B.addToMenu = [
{
'path' : 'browser.py',
'preferredName': 'GlyphBrowser',
'shortKey' : 'g',
},
]
if licensePath:
# license for the extension
with open(licensePath) as license:
B.license = license.read()
# compile and save the extension bundle
print('building extension...', end=' ')
B.save(extensionPath, libPath=libPath, htmlPath=htmlPath, resourcesPath=resourcesPath, pycOnly=pycOnly)
print('done!')
# check for problems in the compiled extension
print()
print(B.validationErrors())
|
mission-peace/interview
|
python/dynamic/knapsack_01.py
|
Python
|
apache-2.0
| 2,542
| 0.003541
|
"""
Problem Statement
=================
0/1 Knapsack Problem - Given items of certain weights/values and a maximum allowed weight, how to pick
items from this set to maximize the sum of item values such that the sum of weights is less than or
equal to the maximum allowed weight.
Runtime Analysis
----------------
Time complexity - O(W*total items)
Video
-----
* Topdown DP - https://youtu.be/149WSzQ4E1g
* Bottomup DP - https://youtu.be/8LusJS5-AGo
References
----------
* http://www.geeksforgeeks.org/dynamic-programming-set-10-0-1-knapsack-problem/
* https://en.wikipedia.org/wiki/Knapsack_problem
"""
def knapsack_01(values, weights, total):
total_items = len(weights)
rows = total_items + 1
cols = total + 1
T = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(1, rows):
for j in range(1, cols):
if j < weights[i - 1]:
T[i][j] = T[i - 1][j]
else:
T[i][j] = max(T[i - 1][j], values[i - 1] + T[i - 1][j - weights[i - 1]])
return T[rows - 1][cols -1]
def knapsack_01_recursive_util(values, weights, remaining_weight, total_items, current_item, memo):
if current_item >= total_items or remaining_weight <= 0:
return 0
key = (total_items - current_item - 1, remaining_weight)
if key in memo:
return memo[key]
if remaining_weight < weights[current_item]:
max_value = knapsack_01_recursive_util(values, weights, remaining_weight, total_items, current_item + 1, memo)
else:
max_value = max(values[current_item] + knapsack_01_recursive_util(values, weights, remaining_weight - weights[current_item], total_items, current_item + 1, memo),
knapsack_01_recursive_util(values, weights, remaining_weight, total_items, current_item + 1, memo))
memo[key] = max_value
return max_value
def knapsack_01_recursive(values, weights, total_weight):
memo = dict()
return knapsack_01_recursive_util(values, weights, total_weight, len(values), 0, memo)
if __name__ == '__main__':
total_weight = 7
weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
expected = 9
assert expected == knapsack_01(values, weights, total_weight)
assert expected == knapsack_01_recursive(values, weights, total_weight)
total_weight = 8
weights = [2, 2, 4, 5]
values = [2, 4, 6, 9]
expected = 13
assert expected == knapsack_01(values, weights, total_weight)
assert expected == knapsack_01_recursive(values, weights, total_weight)
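# Extension sketch (not in the original): the same table supports recovering
# WHICH items were chosen, by backtracking from T[rows-1][cols-1] -- whenever
# T[i][j] != T[i-1][j], item i-1 must have been taken.
#
#   def knapsack_01_items(values, weights, total):
#       rows, cols = len(weights) + 1, total + 1
#       T = [[0] * cols for _ in range(rows)]
#       for i in range(1, rows):
#           for j in range(1, cols):
#               if j < weights[i - 1]:
#                   T[i][j] = T[i - 1][j]
#               else:
#                   T[i][j] = max(T[i - 1][j],
#                                 values[i - 1] + T[i - 1][j - weights[i - 1]])
#       picked, j = [], total
#       for i in range(rows - 1, 0, -1):
#           if T[i][j] != T[i - 1][j]:
#               picked.append(i - 1)
#               j -= weights[i - 1]
#       return list(reversed(picked))
#
#   knapsack_01_items([1, 4, 5, 7], [1, 3, 4, 5], 7)  # -> [1, 2] (values 4 + 5 = 9)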
|
dreadrel/UWF_2014_spring_COP3990C-2507
|
notebooks/scripts/book_code/code/timesqrt.py
|
Python
|
apache-2.0
| 621
| 0.011272
|
# File timesqrt.py
import sys, timer2
reps = 10000
repslist = range(reps) # Pull out range list time for 2.6
from math import sqrt # Not math.sqrt: adds attr fetch time
def mathMod():
for i in repslist:
res = sqrt(i)
return res
def powCall():
for i in repslist:
res = pow(i, .5)
return res
def powExpr():
for i in repslist:
res = i ** .5
return res
print(sys.version)
for test in (mathMod, powCall, powExpr):
elapsed, result = timer2.bestoftotal(test, _reps1=3, _reps=1000)
print ('%s: %.5f => %s' % (test.__name__, elapsed, result))
|
patilsangram/erpnext
|
erpnext/selling/doctype/customer/customer_dashboard.py
|
Python
|
gpl-3.0
| 679
| 0.050074
|
from frappe import _
def get_data():
return {
'heatmap': True,
'heatmap_message': _('This is based on transactions against this Customer. See timeline below for details'),
'fieldname': 'customer',
'transactions': [
{
'label': _('Pre Sales'),
'items': ['Opportunity', 'Quotation']
},
{
'label': _('Orders'),
'items': ['Sales Order', 'Delivery Note', 'Sales Invoice']
},
{
'label': _('Support'),
'items': ['Issue']
},
{
'label': _('Projects'),
'items': ['Project']
},
{
'label': _('Pricing'),
'items': ['Pricing Rule']
},
{
'label': _('Subscriptions'),
'items': ['Subscription']
}
]
}
|
rahulunair/nova
|
nova/tests/unit/privsep/test_fs.py
|
Python
|
apache-2.0
| 15,685
| 0.000191
|
# Copyright 2013 OpenStack Foundation
# Copyright 2019 Aptira Pty Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import nova.privsep.fs
from nova import test
from nova.tests import fixtures
class PrivsepFilesystemHelpersTestCase(test.NoDBTestCase):
"""Test filesystem related utility methods."""
def setUp(self):
super(PrivsepFilesystemHelpersTestCase, self).setUp()
self.useFixture(fixtures.PrivsepFixture())
@mock.patch('oslo_concurrency.processutils.execute')
def test_mount_simple(self, mock_execute):
nova.privsep.fs.mount(None, '/dev/nosuch', '/fake/path', None)
mock_execute.assert_called_with('mount', '/dev/nosuch', '/fake/path')
@mock.patch('oslo_concurrency.processutils.execute')
def test_mount_less_simple(self, mock_execute):
nova.privsep.fs.mount('ext4', '/dev/nosuch', '/fake/path',
['-o', 'remount'])
mock_execute.assert_called_with('mount', '-t', 'ext4',
'-o', 'remount',
'/dev/nosuch', '/fake/path')
@mock.patch('oslo_concurrency.processutils.execute')
def test_umount(self, mock_execute):
nova.privsep.fs.umount('/fake/path')
mock_execute.assert_called_with('umount', '/fake/path',
attempts=3, delay_on_retry=True)
@mock.patch('oslo_concurrency.processutils.execute')
def test_lvcreate_simple(self, mock_execute):
nova.privsep.fs.lvcreate(1024, 'lv', 'vg')
mock_execute.assert_called_with('lvcreate', '-L', '1024b', '-n', 'lv',
'vg', attempts=3)
@mock.patch('oslo_concurrency.processutils.execute')
def test_lvcreate_preallocated(self, mock_execute):
nova.privsep.fs.lvcreate(1024, 'lv', 'vg', preallocated=512)
mock_execute.assert_called_with('lvcreate', '-L', '512b',
'--virtualsize', '1024b',
'-n', 'lv', 'vg', attempts=3)
@mock.patch('oslo_concurrency.processutils.execute')
def test_vginfo(self, mock_execute):
nova.privsep.fs.vginfo('vg')
mock_execute.assert_called_with('vgs', '--noheadings', '--nosuffix',
'--separator', '|', '--units', 'b',
'-o', 'vg_size,vg_free', 'vg')
@mock.patch('oslo_concurrency.processutils.execute')
def test_lvlist(self, mock_execute):
nova.privsep.fs.lvlist('vg')
mock_execute.assert_called_with('lvs', '--noheadings', '-o',
'lv_name', 'vg')
@mock.patch('oslo_concurrency.processutils.execute')
def test_lvinfo(self, mock_execute):
nova.privsep.fs.lvinfo('/path/to/lv')
mock_execute.assert_called_with('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', '/path/to/lv')
@mock.patch('oslo_concurrency.processutils.execute')
def test_lvremove(self, mock_execute):
nova.privsep.fs.lvremove('/path/to/lv')
mock_execute.assert_called_with('lvremove', '-f', '/path/to/lv',
attempts=3)
@mock.patch('oslo_concurrency.processutils.execute')
def test_blockdev_size(self, mock_execute):
nova.privsep.fs.blockdev_size('/dev/nosuch')
mock_execute.assert_called_with('blockdev', '--getsize64',
'/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_blockdev_flush(self, mock_execute):
nova.privsep.fs.blockdev_flush('/dev/nosuch')
mock_execute.assert_called_with('blockdev', '--flushbufs',
'/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_clear_simple(self, mock_execute):
nova.privsep.fs.clear('/dev/nosuch', 1024)
mock_execute.assert_called_with('shred', '-n0', '-z', '-s1024',
'/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_clear_with_shred(self, mock_execute):
nova.privsep.fs.clear('/dev/nosuch', 1024, shred=True)
mock_execute.assert_called_with('shred', '-n3', '-s1024',
'/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_loopsetup(self, mock_execute):
nova.privsep.fs.loopsetup('/dev/nosuch')
mock_execute.assert_called_with('losetup', '--find', '--show',
'/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_loopremove(self, mock_execute):
nova.privsep.fs.loopremove('/dev/nosuch')
mock_execute.assert_called_with('losetup', '--detach', '/dev/nosuch',
attempts=3)
@mock.patch('oslo_concurrency.processutils.execute')
def test_nbd_connect(self, mock_execute):
nova.privsep.fs.nbd_connect('/dev/nosuch', '/fake/path')
mock_execute.assert_called_with('qemu-nbd', '-c', '/dev/nosuch',
'/fake/path')
@mock.patch('oslo_concurrency.processutils.execute')
def test_nbd_disconnect(self, mock_execute):
nova.privsep.fs.nbd_disconnect('/dev/nosuch')
mock_execute.assert_called_with('qemu-nbd', '-d', '/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_device_maps(self, mock_execute):
nova.privsep.fs.create_device_maps('/dev/nosuch')
mock_execute.assert_called_with('kpartx', '-a', '/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_remove_device_maps(self, mock_execute):
nova.privsep.fs.remove_device_maps('/dev/nosuch')
mock_execute.assert_called_with('kpartx', '-d', '/dev/nosuch')
@mock.patch('oslo_concurrency.processutils.execute')
def test_get_filesystem_type(self, mock_execute):
nova.privsep.fs.get_filesystem_type('/dev/nosuch')
mock_execute.assert_called_with('blkid', '-o', 'value', '-s',
'TYPE', '/dev/nosuch',
check_exit_code=[0, 2])
@mock.patch('oslo_concurrency.processutils.execute')
def test_privileged_e2fsck(self, mock_execute):
nova.privsep.fs.e2fsck('/path/nosuch')
mock_execute.assert_called_with('e2fsck', '-fp', '/path/nosuch',
check_exit_code=[0, 1, 2])
@mock.patch('oslo_concurrency.processutils.execute')
def test_privileged_e2fsck_with_flags(self, mock_execute):
nova.privsep.fs.e2fsck('/path/nosuch', flags='festive')
mock_execute.assert_called_with('e2fsck', 'festive', '/path/nosuch',
check_exit_code=[0, 1, 2])
@mock.patch('oslo_concurrency.processutils.execute')
def test_unprivileged_e2fsck(self, mock_execute):
nova.privsep.fs.unprivileged_e2fsck('/path/nosuch')
mock_execute.assert_called_with('e2fsck', '-fp', '/path/nosuch',
check_exit_code=[0, 1, 2])
@mock.patch('oslo_concurrency.processutils.execute')
def test_unprivileged_e2fsck_with_flags(self, mock_execute):
nova.privsep.fs.unprivileged_e2fsck('/path/nosuch', flags='festive')
mock_execute.assert_called_with('e2fsck', 'festive', '/path/nosuch',
check_exit_code=[0, 1, 2])
|
smmribeiro/intellij-community
|
python/testData/refactoring/extractmethod/ElseBody.before.py
|
Python
|
apache-2.0
| 325
| 0.024615
|
def foo():
for arg in sys.argv[1:]:
try:
f = open(arg, 'r')
except IOError:
print('cannot open', arg)
else:
<selection>length = len(f.readlines()) #<---extract something from here
print("hi from else")</selection>
#anything else you need
|
mksachs/UberCC
|
uber_API.py
|
Python
|
mit
| 3,244
| 0.014797
|
#!/usr/bin/env python
import json
import dateutil.parser
import datetime
import numpy as np
import calendar
import itertools
from flask import Flask, request, Response, render_template, redirect, url_for
import Uber
app = Flask(__name__)
'''
The index page has links to the from_file API and the from_stream API.
'''
@app.route('/')
def index():
return render_template('index.html', links={'from_file':url_for('from_file', data_file='uber_demand_prediction_challenge.json'), 'from_stream':url_for('from_stream')})
'''
The from_file API. Accepts a get parameter 'data_file' that points at a data file
containing the login data.
'''
@app.route('/from_file', methods=['GET'])
def from_file():
if request.method == 'GET':
data_file = request.args.get('data_file', '')
dp = Uber.DemandPredictor()
f = open(data_file,'r')
logins = json.loads(f.read())
f.close()
logins_np = np.array([dateutil.parser.parse(x) for x in logins], dtype=datetime.datetime)
for login in logins_np:
dp.addLogin(login)
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
forecast = []
start_date = datetime.datetime(2012, 5, 1, hour = 0, minute = 0, second = 0)
end_date = datetime.datetime(2012, 5, 15, hour = 23, minute = 59, second = 59)
current_date = datetime.datetime(1972, 11, 16, hour = 0, minute = 0, second = 0)
day_index = -1
for single_date in Uber.daterange(start_date, end_date, increment='hours'):
if single_date.date() != current_date.date():
forecast.append(
{
'display_date': '%s, %s %i'%(days[single_date.weekday()], calendar.month_name[single_date.month], single_date.day),
'forecasts': [dp.forecast(single_date.weekday(), single_date.hour)]
}
)
current_date = single_date
day_index += 1
else:
forecast[day_index]['forecasts'].append(dp.forecast(single_date.weekday(), single_date.hour));
return render_template('from_file.html', forecast=json.dumps(forecast))
'''
The from_stream API.
'''
@app.route('/from_stream')
def from_stream():
dp = Uber.DemandPredictor()
'''
This is a fake stream of data. It loops over the provided JSON file.
'''
def login_stream(logins):
for login in itertools.cycle(logins):
parsed_login = dateutil.parser.parse(login)
dp.addLogin(parsed_login)
day = parsed_login.weekday()
hour = parsed_login.hour
forecast = dp.forecast(day, hour)
ret = {'day':day, 'hour':hour, 'forecast':forecast}
yield "data: %s\n\n" % (json.dumps(ret))
data_file = 'uber_demand_prediction_challenge.json'
f = open(data_file,'r')
logins = json.loads(f.read())
f.close()
if request.headers.get('accept') == 'text/event-stream':
return Response(login_stream(logins), content_type='text/event-stream')
return redirect(url_for('static', filename='from_stream.html'))
if __name__ == '__main__':
app.run(debug=True, threaded=True)
|
sbc100/yapf
|
yapf/yapflib/style.py
|
Python
|
apache-2.0
| 23,126
| 0.004886
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python formatting style settings."""
import os
import re
import textwrap
from yapf.yapflib import errors
from yapf.yapflib import py3compat
class StyleConfigError(errors.YapfError):
"""Raised when there's a problem reading the style configuration."""
pass
def Get(setting_name):
"""Get a style setting."""
return _style[setting_name]
def Help():
"""Return dict mapping style names to help strings."""
return _STYLE_HELP
def SetGlobalStyle(style):
"""Set a style dict."""
global _style
global _GLOBAL_STYLE_FACTORY
factory = _GetStyleFactory(style)
if factory:
_GLOBAL_STYLE_FACTORY = factory
_style = style
_STYLE_HELP = dict(
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent("""\
Align closing bracket with visual indentation."""),
ALLOW_MULTILINE_LAMBDAS=textwrap.dedent("""\
Allow lambdas to be formatted on more than one line."""),
ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent("""\
Allow dictionary keys to exist on multiple lines. For example:
x = {
('this is the first element of a tuple',
'this is the second element of a tuple'):
value,
}"""),
ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent("""\
Allow splits before the dictionary value."""),
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent("""\
Insert a blank line before a 'def' or 'class' immediately nested
within another 'def' or 'class'. For example:
class Foo:
# <------ this blank line
def method():
..."""),
BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent("""\
Insert a blank line before a class-level docstring."""),
BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent("""\
Insert a blank line before a module docstring."""),
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent("""\
Number of blank lines surrounding top-level function and class
definitions."""),
COALESCE_BRACKETS=textwrap.dedent("""\
Do not split consecutive brackets. Only relevant when
dedent_closing_brackets is set. For example:
call_func_that_takes_a_dict(
{
'key1': 'value1',
'key2': 'value2',
}
)
would reformat to:
call_func_that_takes_a_dict({
'key1': 'value1',
'key2': 'value2',
})"""),
COLUMN_LIMIT=textwrap.dedent("""\
The column limit."""),
CONTINUATION_ALIGN_STYLE=textwrap.dedent("""\
The style for continuation alignment. Possible values are:
- SPACE: Use spaces for continuation alignment. This is default behavior.
- FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns
(ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs) for continuation
alignment.
- LESS: Slightly left if cannot vertically align continuation lines with
indent characters.
- VALIGN-RIGHT: Vertically align continuation lines with indent
characters. Slightly right (one more indent character) if cannot
vertically align continuation lines with indent characters.
Options FIXED and VALIGN-RIGHT are only available when USE_TABS is
enabled."""),
CONTINUATION_INDENT_WIDTH=textwrap.dedent("""\
Indent width used for line continuations."""),
DEDENT_CLOSING_BRACKETS=textwrap.dedent("""\
Put closing brackets on a separate line, dedented, if the bracketed
expression can't fit in a single line. Applies to all kinds of brackets,
including function definitions and calls. For example:
config = {
'key1': 'value1',
'key2': 'value2',
} # <--- this bracket is dedented and on a separate line
time_series = self.remote_client.query_entity_counters(
entity='dev3246.region1',
key='dns.query_latency_tcp',
transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
start_ts=now()-timedelta(days=3),
end_ts=now(),
) # <--- this bracket is dedented and on a separate line"""),
EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent("""\
Place each dictionary entry onto its own line."""),
I18N_COMMENT=textwrap.dedent("""\
The regex for an i18n comment. The presence of this comment stops
reformatting of that line, because the comments are required to be
next to the string they translate."""),
I18N_FUNCTION_CALL=textwrap.dedent("""\
The i18n function call names. The presence of this function stops
reformatting on that line, because the string it has cannot be moved
away from the i18n comment."""),
INDENT_DICTIONARY_VALUE=textwrap.dedent("""\
Indent the dictionary value if it cannot fit on the same line as the
dictionary key. For example:
config = {
'key1':
'value1',
'key2': value1 +
value2,
}"""),
INDENT_WIDTH=textwrap.dedent("""\
The number of columns to use for indentation."""),
JOIN_MULTIPLE_LINES=textwrap.dedent("""\
Join short lines into one line. E.g., single line 'if' statements."""),
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent("""\
Do not include spaces around selected binary operators. For example:
1 + 2 * 3 - 4 / 5
will be formatted as follows when configured with a value "*,/":
1 + 2*3 - 4/5
"""),
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent("""\
Insert a space between the ending comma and closing bracket of a list,
etc."""),
SPACES_AROUND_POWER_OPERATOR=textwrap.dedent("""\
Use spaces around the power operator."""),
SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent("""\
Use spaces around default or named assigns."""),
SPACES_BEFORE_COMMENT=textwrap.dedent("""\
The number of spaces required before a trailing comment."""),
SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent("""\
Split before arguments if the argument list is terminated by a
comma."""),
SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\
Split before arguments"""),
SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent("""\
Set to True to prefer splitting before '&', '|' or '^' rather than
after."""),
SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent("""\
Split before the closing bracket if a list or dict literal doesn't fit on
a single line."""),
SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent("""\
Split before a dictionary or set generator (comp_for). For example, note
the split before the 'for':
foo = {
variable: 'Hello world, have a nice day!'
for variable in bar if variable != 42
}"""),
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent("""\
Split after the opening paren which surrounds an expression if it doesn't
fit on a single line.
"""),
SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent("""\
If an argument / parameter list is going to be split, then split before
the first argument."""),
SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent("""\
Set to True to prefer splitting before 'and' or 'or' rather than
after."""),
SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent("""\
Split named assignments onto individual lines."""),
SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent("""\
Set to Tru
|
samaitra/kafka
|
tests/kafkatest/tests/connect_test.py
|
Python
|
apache-2.0
| 4,906
| 0.005911
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.connect import ConnectStandaloneService
from kafkatest.services.console_consumer import ConsoleConsumer
from ducktape.utils.util import wait_until
from ducktape.mark import parametrize
import hashlib, subprocess, json
class ConnectStandaloneFileTest(KafkaTest):
"""
Simple test of Kafka Connect that produces data from a file in one
standalone process and consumes it on another, validating the output is
identical to the input.
"""
INPUT_FILE = "/mnt/connect.input"
OUTPUT_FILE = "/mnt/connect.output"
OFFSETS_FILE = "/mnt/connect.offsets"
TOPIC = "test"
FIRST_INPUT_LIST = ["foo", "bar", "baz"]
FIRST_INPUT = "\n".join(FIRS
|
T_INPUT_LIST) + "\
|
n"
SECOND_INPUT_LIST = ["razz", "ma", "tazz"]
SECOND_INPUT = "\n".join(SECOND_INPUT_LIST) + "\n"
SCHEMA = { "type": "string", "optional": False }
def __init__(self, test_context):
super(ConnectStandaloneFileTest, self).__init__(test_context, num_zk=1, num_brokers=1, topics={
'test' : { 'partitions': 1, 'replication-factor': 1 }
})
self.source = ConnectStandaloneService(test_context, self.kafka, [self.INPUT_FILE, self.OFFSETS_FILE])
self.sink = ConnectStandaloneService(test_context, self.kafka, [self.OUTPUT_FILE, self.OFFSETS_FILE])
self.consumer_validator = ConsoleConsumer(test_context, 1, self.kafka, self.TOPIC, consumer_timeout_ms=1000)
@parametrize(converter="org.apache.kafka.connect.json.JsonConverter", schemas=True)
@parametrize(converter="org.apache.kafka.connect.json.JsonConverter", schemas=False)
@parametrize(converter="org.apache.kafka.connect.storage.StringConverter", schemas=None)
def test_file_source_and_sink(self, converter="org.apache.kafka.connect.json.JsonConverter", schemas=True):
assert converter != None, "converter type must be set"
# Template parameters
self.key_converter = converter
self.value_converter = converter
self.schemas = schemas
self.source.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-source.properties")])
self.sink.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-sink.properties")])
self.source.start()
self.sink.start()
# Generating data on the source node should generate new records and create new output on the sink node
self.source.node.account.ssh("echo -e -n " + repr(self.FIRST_INPUT) + " >> " + self.INPUT_FILE)
wait_until(lambda: self.validate_output(self.FIRST_INPUT), timeout_sec=60, err_msg="Data added to input file was not seen in the output file in a reasonable amount of time.")
# Restarting both should result in them picking up where they left off,
# only processing new data.
self.source.restart()
self.sink.restart()
self.source.node.account.ssh("echo -e -n " + repr(self.SECOND_INPUT) + " >> " + self.INPUT_FILE)
wait_until(lambda: self.validate_output(self.FIRST_INPUT + self.SECOND_INPUT), timeout_sec=60, err_msg="Sink output file never converged to the same state as the input file")
# Validate the format of the data in the Kafka topic
self.consumer_validator.run()
expected = json.dumps([line if not self.schemas else { "schema": self.SCHEMA, "payload": line } for line in self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST])
decoder = (json.loads if converter.endswith("JsonConverter") else str)
actual = json.dumps([decoder(x) for x in self.consumer_validator.messages_consumed[1]])
assert expected == actual, "Expected %s but saw %s in Kafka" % (expected, actual)
def validate_output(self, value):
try:
output_hash = list(self.sink.node.account.ssh_capture("md5sum " + self.OUTPUT_FILE))[0].strip().split()[0]
return output_hash == hashlib.md5(value).hexdigest()
except subprocess.CalledProcessError:
return False
|
dan-blanchard/conda-build
|
conda_build/main_metapackage.py
|
Python
|
bsd-3-clause
| 4,053
| 0.001727
|
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
import argparse
from collections import defaultdict
import conda.config
from conda.cli.conda_argparse import ArgumentParser
from conda_build.main_build import args_func
from conda_build.metadata import MetaData
from conda_build.build import build, bldpkg_path
from conda_build.main_build import handle_binstar_upload
def main():
p = ArgumentParser(
description='''
Tool for building conda metapackages. A metapackage is a package with no
files, only metadata. It is typically used to collect several packages
together into a single package via dependencies.
NOTE: Metapackages can also be created by creating a recipe with the necessary
metadata in the meta.yaml, but a metapackage can be created entirely from the
command line with the conda metapackage command.
''',
)
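    # Hypothetical invocation of this command (values are illustrative only):
    #     conda metapackage mymeta 1.0 --dependencies 'numpy >=1.8' scipy \
    #         --summary 'A metapackage collecting numpy and scipy'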
p.add_argument(
"--no-anaconda-upload",
action="store_false",
help="Do not ask to upload the package to anaconda.org.",
dest='binstar_upload',
default=conda.config.binstar_upload,
)
p.add_argument(
"--no-binstar-upload",
action="store_false",
help=argparse.SUPPRESS,
dest='binstar_upload',
default=conda.config.binstar_upload,
)
p.add_argument(
"name",
action="store",
help="Name of the created package.",
)
p.add_argument(
"version",
action="store",
help="Version of the created package.",
)
p.add_argument(
"--build-number",
action="store",
type=int,
default=0,
help="Build number for the package (default is 0).",
)
p.add_argument(
"--build-string",
action="store",
default=None,
help="Build string for the package (default is automatically generated).",
)
p.add_argument(
"--dependencies", "-d",
nargs='*',
default=(),
help="""The dependencies of the package. To specify a version restriction for a
dependency, wrap the dependency in quotes, like 'package >=2.0'.""",
)
p.add_argument(
"--home",
action="store",
help="The homepage for the metapackage."
)
p.add_argument(
"--license",
action="store",
help="The license of the metapackage.",
)
p.add_argument(
"--summary",
action="store",
help="""Summary of the package. Pass this in as a string on the command
line, like --summary 'A metapackage for X'. It is recommended to use
single quotes if you are not doing variable substitution to avoid
interpretation of special characters.""",
)
p.add_argument(
"--entry-points",
nargs='*',
default=(),
help="""Python entry points to create automatically. They should use the same
syntax as in the meta.yaml of a recipe, e.g., --entry-points
bsdiff4=bsdiff4.cli:main_bsdiff4 will create an entry point called
bsdiff4 that calls bsdiff4.cli.main_bsdiff4(). """,
)
p.set_defaults(func=execute)
args = p.parse_args()
args_func(args, p)
def execute(args, parser):
d = defaultdict(dict)
d['package']['name'] = args.name
d['package']['version'] = args.version
d['build']['number'] = args.build_number
d['build']['entry_points'] = args.entry_points
# MetaData does the auto stuff if the build string is None
d['build']['string'] = args.build_string
d['requirements']['run'] = args.dependencies
d['about']['home'] = args.home
d['about']['license'] = args.license
d['about']['summary'] = args.summary
d = dict(d)
m = MetaData.fromdict(d)
build(m)
handle_binstar_upload(bldpkg_path(m), args)
if __name__ == '__main__':
main()
|
nnugumanov/yandex-tank
|
yandextank/plugins/Android/plugin.py
|
Python
|
lgpl-2.1
| 7,170
| 0.001534
|
import logging
import subprocess
import time
import urllib
import sys
import glob
import os
from multiprocessing import Process
from signal import SIGKILL
try:
from volta.analysis import grab, uploader
except Exception:
raise RuntimeError("Please install volta. https://github.com/yandex-load/volta")
from pkg_resources import resource_filename
from ...common.interfaces import AbstractPlugin, GeneratorPlugin
from .reader import AndroidReader, AndroidStatsReader
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin, GeneratorPlugin):
SECTION = "android"
SECTION_META = "meta"
def __init__(self, core):
super(Plugin, self).__init__(core)
self.apk_path = None
self.test_path = None
self.package = None
self.package_test = None
self.clazz = None
self.device = None
        self.test_runner = None
self.process_test = None
self.process_stderr = None
self.process_grabber = None
self.apk = "./app.apk"
self.test = "./app-test.apk"
self.grab_log = "./output.bin"
self.event_log = "./events.log"
@staticmethod
def get_key():
return __file__
def get_available_options(self):
opts = ["package", "test_package", "apk", "test_apk", "class", "test_runner"]
return opts
def configure(self):
# plugin part
self.apk_path = self.get_option("apk")
self.test_path = self.get_option("test_apk")
self.clazz = self.get_option("class")
self.package = self.get_option("package")
self.package_test = self.get_option("test_package")
self.test_runner = self.get_option("test_runner")
def prepare_test(self):
aggregator = self.core.job.aggregator_plugin
if aggregator:
aggregator.reader = AndroidReader()
aggregator.stats_reader = AndroidStatsReader()
ports = None
logger.info("Trying to find device")
if sys.platform.startswith('linux'):
ports = glob.glob('/dev/ttyUSB[0-9]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/cu.wchusbserial[0-9]*')
else:
logger.info('Your OS is not supported yet')
logger.info("Ports = " + ''.join(ports))
try:
self.device = [port for port in ports if 'Bluetooth' not in port][0]
logger.info("Found device = " + self.device)
except Exception:
logger.info("Device not found")
logger.info("Download apk...")
urllib.urlretrieve(self.apk_path, self.apk)
logger.info("Download test...")
urllib.urlretrieve(self.test_path, self.test)
logger.info("Uninstall the lightning...")
subprocess.check_output(["adb", "uninstall", "net.yandex.overload.lightning"])
logger.info("Uninstall the app...")
subprocess.check_output(["adb", "uninstall", self.package])
logger.info("Uninstall the test...")
subprocess.check_output(["adb", "uninstall", self.package_test])
lightning = resource_filename(__name__, 'binary/lightning.apk')
logger.info("Get from resources " + lightning)
logger.info("Install the lightning...")
subprocess.check_output(["adb", "install", lightning])
logger.info("Install the apk...")
subprocess.check_output(["adb", "install", self.apk])
logger.info("Install the test...")
subprocess.check_output(["adb", "install", self.test])
logger.info("Clear logcat...")
subprocess.check_output(["adb", "logcat", "-c"])
def start_test(self):
if self.device:
logger.info("Start grabber...")
args = {
'device': self.device,
'seconds': 10800,
'output': self.grab_log,
'debug': False,
'binary': False
}
self.process_grabber = Process(target=grab.main, args=(args,))
self.process_grabber.start()
process_stderr_file = self.core.mkstemp(".log", "android_")
self.core.add_artifact_file(process_stderr_file)
self.process_stderr = open(process_stderr_file, 'w')
logger.info("Start flashlight...")
args = ["adb", "shell", "am", "start", "-n",
"net.yandex.overload.lightning/net.yandex.overload.lightning.MainActivity"]
subprocess.Popen(args)
time.sleep(12)
args = ["adb", "shell", "am", "instrument", "-w", "-e", "class", self.clazz,
'{package}/{runner}'.format(package=self.package_test, runner=self.test_runner)]
logger.info("Starting: %s", args)
self.process_test = subprocess.Popen(
args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True
)
def is_test_finished(self):
retcode = self.process_test.poll()
if retcode is not None:
logger.info("Subprocess done its work with exit code: %s", retcode)
return abs(retcode)
else:
return -1
def end_test(self, retcode):
if self.process_grabber:
logger.info("Kill grabber...")
os.kill(self.process_grabber.pid, SIGKILL)
logger.info("Get logcat dump...")
subprocess.check_call('adb logcat -d > {file}'.format(file=self.event_log), shell=True)
if os.path.exists(self.grab_log):
logger.info("Upload logs...")
args = {
'filename': self.grab_log,
'events': self.event_log,
'samplerate': 500,
'slope': 1,
'offset': 0,
'bynary': False,
'job_config': {
'task': self.core.get_option(self.SECTION_META, 'task').decode('utf8'),
'jobname': self.core.get_option(self.SECTION_META, 'job_name').decode('utf8'),
'dsc': self.core.get_option(self.SECTION_META, 'job_dsc').decode('utf8'),
'component': self.core.get_option('meta', 'component')
}
}
process_uploader = Process(target=uploader.main, args=(args,))
process_uploader.start()
process_uploader.join()
if self.process_test and self.process_test.poll() is None:
logger.info("Terminating tests with PID %s", self.process_test.pid)
self.process_test.terminate()
if self.process_stderr:
self.process_stderr.close()
logger.info("Uninstall the app...")
subprocess.check_output(["adb", "uninstall", self.package])
logger.info("Uninstall the test...")
subprocess.check_output(["adb", "uninstall", self.package_test])
return retcode
def get_info(self):
return AndroidInfo()
class AndroidInfo(object):
def __init__(self):
self.address = ''
self.port = 80
self.ammo_file = ''
self.duration = 0
self.loop_count = 1
self.instances = 1
self.rps_schedule = ''
|
dimagi/commcare-hq
|
corehq/apps/commtrack/migrations/0006_remove_sqlcommtrackconfig_couch_id.py
|
Python
|
bsd-3-clause
| 414
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-03 02:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('commtrack', '0005_populate_config_models'),
]
operations = [
migrations.RemoveField(
model_name='sqlcommtrackconfig',
            name='couch_id',
),
]
|
Ircam-Web/mezzanine-organization
|
organization/network/migrations/0099_organization_validation_status.py
|
Python
|
agpl-3.0
| 585
| 0.001709
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-04-07 09:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization_network', '0098_producerdata'),
]
operations = [
migrations.AddField(
model_name='organization',
name='validation_status',
            field=models.IntegerField(choices=[(0, 'rejected'), (1, 'pending'), (2, 'in process'), (3, 'accepted')], default=1, verbose_name='validation status'),
),
]
|
denverfoundation/storybase
|
apps/storybase_story/migrations/0030_auto__add_storyrelation__add_field_storytranslation_connected_prompt__.py
|
Python
|
mit
| 29,792
| 0.007452
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StoryRelation'
db.create_table('storybase_story_storyrelation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('relation_id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, blank=True)),
('relation_type', self.gf('django.db.models.fields.CharField')(default='connected', max_length=25)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='target', to=orm['storybase_story.Story'])),
('target', self.gf('django.db.models.fields.related.ForeignKey')(related_name='source', to=orm['storybase_story.Story'])),
))
db.send_create_signal('storybase_story', ['StoryRelation'])
# Adding field 'StoryTranslation.connected_prompt'
db.add_column('storybase_story_storytranslation', 'connected_prompt', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
# Adding field 'Story.connected'
db.add_column('storybase_story_story', 'connected', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting model 'StoryRelation'
db.delete_table('storybase_story_storyrelation')
# Deleting field 'StoryTranslation.connected_prompt'
db.delete_column('storybase_story_storytranslation', 'connected_prompt')
# Deleting field 'Story.connected'
db.delete_column('storybase_story_story', 'connected')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerFie
|
dr4g0nsr/phoenix-kodi-addon
|
plugin.video.phstreams/default.py
|
Python
|
gpl-2.0
| 5,936
| 0.003706
|
# -*- coding: utf-8 -*-
'''
Phoenix Add-on
Copyright (C) 2015 Blazetamer
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,sys
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
try:
action = params['action']
except:
action = None
try:
name = params['name']
except:
name = '0'
try:
url = params['url']
except:
url = '0'
try:
playable = params['playable']
except:
playable = '0'
try:
content = params['content']
except:
content = '0'
try:
tvshow = params['tvshow']
except:
tvshow = '0'
try:
audio = params['audio']
except:
audio = '0'
try:
image = params['image']
except:
image = '0'
try:
fanart = params['fanart']
except:
fanart = '0'
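# Editorial note: each try/except block above is equivalent to
# params.get(<key>, '0') (or None for 'action'); the string '0' serves as
# the "unset" sentinel tested by the dispatch below.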
if action is None:
from resources.lib.indexers import phstreams
phstreams.getCategory()
elif action == 'dmode' or action == 'ndmode':
from resources.lib.indexers import phstreams
phstreams.getDirectory(name, url, audio, image, fanart, playable, content)
elif action == 'subDirectory':
from resources.lib.indexers import phstreams
phstreams.subDirectory(name, url, audio, image, fanart, playable, tvshow, content)
elif action == 'localDirectory':
from resources.lib.indexers import phstreams
phstreams.localDirectory()
elif action == 'search':
from resources.lib.indexers import phstreams
phstreams.getSearch()
elif action == 'searchDirectory':
from resources.lib.indexers import phstreams
phstreams.searchDirectory()
elif action == 'searchDirectory2':
from resources.lib.indexers import phstreams
phstreams.searchDirectory(url)
elif action == 'clearSearch':
from resources.lib.indexers import phstreams
phstreams.clearSearch()
elif action == 'resolveUrl':
from resources.lib.indexers import phstreams
phstreams.resolveUrl(name, url, audio, image, fanart, playable, content)
elif action == 'openDialog':
from resources.lib.libraries import phdialogs
phdialogs.openDialog(url,audio)
elif action == 'openSettings':
from resources.lib.libraries import control
control.openSettings()
elif action == 'addView':
from resources.lib.libraries import views
views.addView(content)
elif action == 'downloader':
    from resources.lib.libraries import downloader
downloader.downloader()
elif action == 'addDownload':
from resources.lib.libraries import downloader
downloader.addDownload(name,url,image)
elif action == 'removeDownload':
from resources.lib.libraries import downloader
    downloader.removeDownload(url)
elif action == 'startDownload':
from resources.lib.libraries import downloader
downloader.startDownload()
elif action == 'startDownloadThread':
from resources.lib.libraries import downloader
downloader.startDownloadThread()
elif action == 'stopDownload':
from resources.lib.libraries import downloader
downloader.stopDownload()
elif action == 'statusDownload':
from resources.lib.libraries import downloader
downloader.statusDownload()
elif action == 'trailer':
from resources.lib.libraries import trailer
trailer.trailer().play(name)
elif action == 'clearCache':
from resources.lib.libraries import cache
cache.clear()
elif action == 'radioDirectory':
from resources.lib.indexers import phradios
phradios.radioDirectory()
elif action == 'radioResolve':
from resources.lib.indexers import phradios
phradios.radioResolve(name, url, image)
elif action == 'radio1fm':
from resources.lib.indexers import phradios
phradios.radio1fm(image, fanart)
elif action == 'radio181fm':
from resources.lib.indexers import phradios
phradios.radio181fm(image, fanart)
elif action == 'radiotunes':
from resources.lib.indexers import phradios
phradios.radiotunes(image, fanart)
elif action == 'Kickinradio':
from resources.lib.indexers import phradios
phradios.Kickinradio(image, fanart)
elif action == 'Kickinradiocats':
from resources.lib.indexers import phradios
phradios.Kickinradiocats(url, image, fanart)
elif action == 'CartoonDirectory':
from resources.lib.indexers import phtoons
phtoons.CartoonDirectory()
elif action == 'CartoonCrazy':
from resources.lib.indexers import phtoons
phtoons.CartoonCrazy(image, fanart)
elif action == 'CCsearch':
from resources.lib.indexers import phtoons
phtoons.CCsearch(url, image, fanart)
elif action == 'CCcat':
from resources.lib.indexers import phtoons
phtoons.CCcat(url, image, fanart)
elif action == 'CCpart':
from resources.lib.indexers import phtoons
phtoons.CCpart(url, image, fanart)
elif action == 'CCstream':
from resources.lib.indexers import phtoons
phtoons.CCstream(url)
elif action == 'nhlDirectory':
from resources.lib.indexers import nhlcom
nhlcom.nhlDirectory()
elif action == 'nhlScoreboard':
from resources.lib.indexers import nhlcom
nhlcom.nhlScoreboard()
elif action == 'nhlArchives':
from resources.lib.indexers import nhlcom
nhlcom.nhlArchives()
elif action == 'nhlStreams':
from resources.lib.indexers import nhlcom
nhlcom.nhlStreams(name,url)
elif action == 'nhlResolve':
from resources.lib.indexers import nhlcom
nhlcom.nhlResolve(url)
|
mozman/ezdxf
|
integration_tests/test_geo.py
|
Python
|
mit
| 1,836
| 0
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.entities import factory
from ezdxf.render.forms import square, translate
from ezdxf.lldxf import const
from ezdxf.addons import geo
shapely_geometry = pytest.importorskip("shapely.geometry")
def test_shapely_geo_interface():
point = shapely_geometry.shape(
{
"type": "Point",
"coordinates": (0, 0),
}
)
assert (point.x, point.y) == (0, 0)
def validate(geo_proxy: geo.GeoProxy) -> bool:
return shapely_geometry.shape(geo_proxy).is_valid
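# Editorial note: validate() is used below as a predicate for GeoProxy.filter(),
# which presumably drops sub-geometries for which the predicate returns False.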
def test_resolved_hatch_with_intersecting_holes():
hatch = factory.new("HATCH")
paths = hatch.paths
paths.add_polyline_path(square(10), flags=const.BOUNDARY_PATH_EXTERNAL)
paths.add_polyline_path(
translate(square(3), (1, 1)), flags=const.BOUNDARY_PATH_DEFAULT
)
paths.add_polyline_path(
translate(square(3), (2, 2)), flags=const.BOUNDARY_PATH_DEFAULT
)
p = geo.proxy(hatch)
# Overlapping holes already resolved by fast_bbox_detection()
polygon = shapely_geometry.shape(p)
assert polygon.is_valid is True
p.filter(validate)
assert p.root["type"] == "Polygon"
assert len(p.root["coordinates"]) == 2
def test_valid_hatch():
hatch = factory.new("HATCH")
paths = hatch.paths
paths.add_polyline_path(square(10), flags=const.BOUNDARY_PATH_EXTERNAL)
    paths.add_polyline_path(
translate(square(3), (1, 1)), flags=const.BOUNDARY_PATH_DEFAULT
)
paths.add_polyline_path(
translate(square(3), (5, 1)), flags=const.BOUNDARY_PATH_DEFAULT
)
p = geo.proxy(hatch)
polygon = shapely_geometry.shape(p)
assert polygon.is_valid is True
p.filter(validate)
assert p.root != {}
if __name__ == "__main__":
pytest.main([__file__])
|
fw1121/ete
|
test/test_treeview.py
|
Python
|
gpl-3.0
| 5,124
| 0.003708
|
import unittest
import random
import sys
import os
ETEPATH = os.path.abspath(os.path.split(os.path.realpath(__file__))[0]+'/../')
sys.path.insert(0, ETEPATH)
from ete2 import Tree, TreeStyle, NodeStyle, PhyloTree, faces, random_color
from ete2.treeview.faces import *
from ete2.treeview.main import _NODE_TYPE_CHECKER, FACE_POSITIONS
sys.path.insert(0, os.path.join(ETEPATH, "examples/treeview"))
import face_grid, bubble_map, item_faces, node_style, node_background, face_positions, face_rotation, seq_motif_faces, barchart_and_piechart_faces
sys.path.insert(0, os.path.join(ETEPATH, "examples/phylogenies"))
import phylotree_visualization
CONT = 0
class Test_Coretype_Treeview(unittest.TestCase):
""" Tests tree basics. """
def test_renderer(self):
main_tree = Tree()
main_tree.dist = 0
t, ts = face_grid.get_example_tree()
t_grid = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_grid, 0, "aligned")
t, ts = bubble_map.get_example_tree()
t_bubble = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bubble, 0, "aligned")
t, ts = item_faces.get_example_tree()
t_items = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_items, 0, "aligned")
t, ts = node_style.get_example_tree()
t_nodest = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_nodest, 0, "aligned")
t, ts = node_background.get_example_tree()
t_bg = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bg, 0, "aligned")
t, ts = face_positions.get_example_tree()
t_fpos = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_fpos, 0, "aligned")
t, ts = phylotree_visualization.get_example_tree()
t_phylo = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_phylo, 0, "aligned")
t, ts = face_rotation.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = seq_motif_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = barchart_and_piechart_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
#Test orphan nodes and trees with 0 branch length
t, ts = Tree(), TreeStyle()
t.populate(5)
for n in t.traverse():
n.dist = 0
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts = TreeStyle()
t.populate(5)
ts.mode = "c"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
        # TEST TIGHT TEXT WRAPPING
        chars = [".", "p", "j", "jJ"]
def layout(node):
global CONT
if CONT >= len(chars):
CONT = 0
if node.is_leaf():
node.img_style["size"] = 0
F2= AttrFace("name", tight_text=True)
F= TextFace(chars[CONT], tight_text=True)
F.inner_border.width = 0
F2.inner_border.width = 0
#faces.add_face_to_node(F ,node, 0, position="branch-right")
faces.add_face_to_node(F2 ,node, 1, position="branch-right")
CONT += 1
t = Tree()
t.populate(20, random_branches=True)
ts = TreeStyle()
ts.layout_fn = layout
ts.mode = "c"
ts.show_leaf_name = False
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
# MAIN TREE
ms = TreeStyle()
ms.mode = "r"
ms.show_leaf_name = False
main_tree.render('test.png', tree_style=ms)
main_tree.render('test.svg', tree_style=ms)
if __name__ == '__main__':
unittest.main()
|
poobalan-arumugam/stateproto
|
src/extensions/lang/python/reader/__init__.py
|
Python
|
bsd-2-clause
| 65
| 0
|
from .parseStateProtoFile import *
from .StateTreeModel import *
|
pombredanne/parakeet
|
test/core_language/test_div_bool.py
|
Python
|
bsd-3-clause
| 679
| 0.013255
|
import numpy as np
from parakeet import jit, testing_helpers
@jit
def true_divided(x):
return True / x
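# Editorial note: the expected values below appear to mirror Python 2 division
# semantics: True / 2 is integer division yielding 0, while True / 2.0 is true
# division yielding 0.5.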
def test_true_divided_bool():
    testing_helpers.expect(true_divided, [True], True)
def test_true_divided_int():
testing_helpers.expect(true_divided, [1], 1)
testing_helpers.expect(true_divided, [2], 0)
def test_true_divided_float():
testing_helpers.expect(true_divided, [1.0], 1.0)
testing_helpers.expect(true_divided, [2.0], 0.5)
def test_true_divided_uint8():
testing_helpers.expect(true_divided, [np.uint8(1)], 1)
testing_helpers.expect(true_divided, [np.uint8(2)], 0)
if __name__ == '__main__':
testing_helpers.run_local_tests()
|
PanDAWMS/panda-bigmon-atlas
|
atlas/getdatasets/models.py
|
Python
|
apache-2.0
| 1,240
| 0.007258
|
from django.db import models
class ProductionDatasetsExec(models.Model):
name = models.CharField(max_length=200, db_column='NAME', primary_key=True)
    taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='TASK_ID', null=False, default=0)
status = models.CharField(max_length=12, db_column='STATUS', null=True)
phys_group = models.CharField(max_length=20, db_column='PHYS_GROUP', null=True)
events = models.DecimalField(decimal_places=0, max_digits=7, db_column='EVENTS', null=False, default=0)
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_PRODUCTIONDATASETS_EXEC'
class TaskProdSys1(models.Model):
    taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='REQID', primary_key=True)
total_events = models.DecimalField(decimal_places=0, max_digits=10, db_column='TOTAL_EVENTS')
task_name = models.CharField(max_length=130, db_column='TASKNAME')
status = models.CharField(max_length=12, db_column='STATUS')
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_TASK_REQUEST'
class TRequest(models.Model):
request = models.CharField(max_length=200, db_column='REQUEST', null=True)
|
dikujepsen/OpenTran
|
v3.0/test/C/run.py
|
Python
|
mit
| 4,191
| 0.019327
|
import os, os.path
import subprocess
import shutil
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--make", help="run make clean && make on all files",
action="store_true")
parser.add_argument("-c", "--check", help="run ./check.sh on all files",
action="store_true")
parser.add_argument("-p", "--printresult", help="Compiles the code with printing of the result enabled",
action="store_true")
parser.add_argument("-t", "--tag", help="tag this benchmark with a string")
parser.add_argument("-r", "--run", help="run all binary files for the given device", choices=['CPU', 'GPU'])
parser.add_argument("-i", "--input", help="input choice for the binarys", choices=['basic', 'K20Max'])
parser.add_argument("-n", "--numberofiterations", help="the number of times to run each binary when benchmarking.", type=int, default=1)
args = parser.parse_args()
benchmark = ['MatMul',
'Jacobi',
'KNearest',
'NBody',
'Laplace',
'GaussianDerivates'
]
cmdlineoptsbasic = {'MatMul' : '-n 1024' ,
'Jacobi' : '-n 1024' ,
'KNearest' : '-n 1024 -k 16' ,
'NBody' : '-n 1024' ,
'Laplace' : '-n 256 -k 3' ,
'GaussianDerivates' : '-n 256 -m 256 -k 3'}
cmdlineoptsK20Max = {'MatMul' : '-n 12544' ,
'Jacobi' : '-n 16384' ,
'KNearest' : '-n 16384 -k 16' ,
'NBody' : '-n 1081600' ,
'Laplace' : '-n 215296 -k 5' ,
'GaussianDerivates' : '-n 4608 -m 4608 -k 3'}
## benchmark = ['MatMul']
# Check all folder are actually there
for n in benchmark:
if not os.path.exists(n):
        raise Exception('Folder ' + n + ' does not exist')
if args.make or args.check:
# run the makefile in each folder
if args.make:
command = "make clean && make"
if args.printresult:
command += " DEF=PRINT"
if args.check:
command = "./check.sh"
for n in benchmark:
os.chdir(n)
p1 = subprocess.Popen(command, shell=True,\
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
erracc = ''
while True:
line = p1.stdout.readline()
if not line:
line = p1.stderr.readline()
if not line: break
erracc += line
if line[0:9] == 'make: ***':
raise Exception('Program ' + n + ' did not compile: ' + erracc)
if args.check:
print line
os.chdir('..')
if args.run is not None:
dev = args.run
# run each exe in benchmark
if args.input == 'K20Max':
cmdlineopts = cmdlineoptsK20Max
else:
cmdlineopts = cmdlineoptsbasic
tag = ''
if args.tag:
tag = args.tag + '_'
for n in benchmark:
m = n + dev
uniqueid = open('logs/.uniqueid.txt','r')
uid = uniqueid.readline()
uniqueid.close()
uniqueid = open('logs/.uniqueid.txt','w')
uniqueid.write(str(int(uid) + 1))
log = open('logs/' + uid + '_' + tag + m + cmdlineopts[n].replace(" ", "_") \
.replace("-", "_") + '.txt','w')
os.chdir(n)
for k in xrange(args.numberofiterations):
p1 = subprocess.Popen('./' + m +'.exe ' + cmdlineopts[n], shell=True,\
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
acc = '$Func ' + m + ', $Defines ' + cmdlineopts[n]
while True:
line = p1.stdout.readline()
if not line:
line = p1.stderr.readline()
if not line: break
acc += ', ' + line[:-1]
log.write(acc + '\n')
log.flush()
os.fsync(log)
#print acc + '\n'
os.chdir('..')
log.close()
uniqueid.close()
|
memee/py-tons
|
pytons/files.py
|
Python
|
mit
| 2,232
| 0.000896
|
import six
#==============================================================================
# https://docs.python.org/2/library/csv.html
#==============================================================================
if six.PY2:
import csv
import codecs
import cStringIO
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
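# Minimal usage sketch (editorial; assumes a UTF-8 encoded 'data.csv'):
#     with open('data.csv', 'rb') as f:
#         for row in UnicodeReader(f, encoding='utf-8'):
#             print row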
|
mozfreddyb/room-availability
|
ifb.py
|
Python
|
mpl-2.0
| 882
| 0.013605
|
""" implement internet free/busy vcal extension
very basic just to get something like this running:
BEGIN:VCALENDAR
PRODID:Zimbra-Calendar-Provider
VERSION:2.0
METHOD:PUBLISH
BEGIN:VFREEBUSY
ORGANIZER:mailto:ber201@mozilla.com
DTSTAMP:20140811T130952Z
DTSTART:20140811T130952Z
DTEND:20140812T130952Z
URL:http://zmmbox6.mail.corp.phx1.mozilla.com:8080/service/home/ber201@mozilla.com?view=day&date=20140811&fmt=ifb&start=0d&end=24h
FREEBUSY;FBTYPE=BUSY:20140812T100000Z/20140812T103000Z
END:VFREEBUSY
END:VCALENDAR
"""
from dateutil import parser
# all calculations in UTC.
def get_busy_times(s):
lines = s.split("\r\n")
for line in lines:
if line == "":
continue
key,value = line.split(":", 1)
if key == "FREEBUSY;FBTYPE=BUSY":
start,end = value.split("/")
start = parser.parse(start)
end = parser.parse(end)
yield (start,end)
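# Minimal usage sketch (editorial; 'ifb_text' stands for a raw VFREEBUSY
# payload such as the example in the module docstring):
#     for start, end in get_busy_times(ifb_text):
#         print start, end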
|
adaptive-learning/proso-apps
|
proso_models/migrations/0001_initial.py
|
Python
|
mit
| 11,335
| 0.003882
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-08-01 07:59
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import proso.django.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('proso_user', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('proso_common', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(default=datetime.datetime.now)),
('response_time', models.IntegerField()),
('guess', models.FloatField(default=0)),
('type', models.CharField(max_length=10)),
('lang', models.CharField(blank=True, default=None, max_length=2, null=True)),
('config', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_common.Config')),
],
),
migrations.CreateModel(
name='AnswerMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('content_hash', models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name='Audit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=50)),
('value', models.FloatField()),
('time', models.DateTimeField(default=datetime.datetime.now)),
('answer', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.Answer')),
],
),
migrations.CreateModel(
name='EnvironmentInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(choices=[(0, 'disabled'), (1, 'loading'), (2, 'enabled'), (3, 'active')], default=1)),
('revision', models.IntegerField()),
('load_progress', models.IntegerField(default=0)),
('updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('config', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proso_common.Config')),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
],
bases=(models.Model, proso.django.models.ModelDiffMixin),
),
migrations.CreateModel(
name='ItemRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('visible', models.BooleanField(default=True)),
('active', models.BooleanField(default=True)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_relations', to='proso_models.Item')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_relations', to='proso_models.Item')),
],
bases=(models.Model, proso.django.models.ModelDiffMixin),
),
migrations.CreateModel(
name='ItemType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model', models.CharField(max_length=100)),
('table', models.CharField(max_length=100)),
('foreign_key', models.CharField(max_length=100)),
('language', models.CharField(blank=True, default=None, max_length=100, null=True)),
('valid', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='PracticeContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('content_hash', models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name='PracticeSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('finished', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permanent', models.BooleanField(default=False)),
('key', models.CharField(max_length=50)),
('value', models.FloatField()),
('audit', models.BooleanField(default=True)),
('updated', models.DateTimeField(default=datetime.datetime.now)),
('answer', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.Answer')),
('info', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.EnvironmentInfo')),
('item_primary', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_primary_variables', to='proso_models.Item')),
('item_secondary', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_secondary_variables', to='proso_models.Item')),
('user', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='itemtype',
unique_together=set([('model', 'foreign_key'), ('table', 'foreign_key')]),
),
migrations.AddField(
model_name='item',
name='children',
field=models.ManyToManyField(related_name='parents', through='proso_models.ItemRelation', to='proso_models.Item'),
),
migrations.AddField(
model_name='item',
name='item_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.ItemType'),
),
migrations.AddField(
model_name='audit',
name='info',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.EnvironmentInfo'),
),
migrations.AddField(
model_name='audit',
name='item_primary',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_primary_audits', to='proso_models.Item'),
),
migrations.AddField(
model_name='audit',
name='item_secondary',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_secondary_audits', to='proso_models.Item'),
),
migrations.AddField(
model_name='audit',
name='user',
field
|
frossigneux/python-kwstandbyclient
|
kwstandbyclient/client.py
|
Python
|
apache-2.0
| 1,371
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kwstandbyclient import exception
from kwstandbyclient.openstack.common.gettextutils import _ # noqa
from kwstandbyclient.openstack.common import importutils
def Client(version=1, *args, **kwargs):
version_map = {
'1': 'kwstandbyclient.v1.client.Client',
        '1a0': 'kwstandbyclient.v1.client.Client',
}
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = _("Invalid client version '%(version)s'. "
"Must be one of: %(available_version)s") % ({
'version': version,
'available_version': ', '.join(version_map.keys())
})
raise exception.UnsupportedVersion(msg)
return importutils.import_object(client_path, *args, **kwargs)
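# Illustrative usage (editorial; the extra constructor arguments shown are an
# assumption -- they are forwarded verbatim to the version-specific client):
#     client = Client(version='1', endpoint=url, token=token)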
|
spektom/incubator-airflow
|
tests/providers/qubole/operators/test_qubole_check.py
|
Python
|
apache-2.0
| 4,944
| 0.001416
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from datetime import datetime
import mock
from qds_sdk.commands import HiveCommand
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.qubole.hooks.qubole import QuboleHook
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook
from airflow.providers.qubole.operators.qubole_check import QuboleValueCheckOperator
class TestQuboleValueCheckOperator(unittest.TestCase):
def setUp(self):
self.task_id = 'test_task'
self.conn_id = 'default_conn'
def __construct_operator(self, query, pass_value, tolerance=None,
results_parser_callable=None):
dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
return QuboleValueCheckOperator(
dag=dag,
task_id=self.task_id,
conn_id=self.conn_id,
query=query,
pass_value=pass_value,
results_parser_callable=results_parser_callable,
command_type='hivecmd',
tolerance=tolerance)
def test_pass_value_template(self):
pass_value_str = "2018-03-22"
operator = self.__construct_operator('select date from tab1;', "{{ ds }}")
result = operator.render_template(operator.pass_value, {'ds': pass_value_str})
self.assertEqual(operator.task_id, self.task_id)
self.assertEqual(result, pass_value_str)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_pass(self, mock_get_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [10]
mock_get_hook.return_value = mock_hook
query = 'select value from tab1 limit 1;'
operator = self.__construct_operator(query, 5, 1)
operator.execute(None)
mock_hook.get_first.assert_called_once_with(query)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assertion_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'done'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(
return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with self.assertRaisesRegex(AirflowException,
'Qubole Command Id: ' + str(mock_cmd.id)):
operator.execute()
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assert_query_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'error'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(
return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with self.assertRaises(AirflowException) as cm:
operator.execute()
self.assertNotIn('Qubole Command Id: ', str(cm.exception))
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleCheckHook, 'get_query_results')
@mock.patch.object(QuboleHook, 'execute')
def test_results_parser_callable(self, mock_execute, mock_get_query_results):
mock_execute.return_value = None
pass_value = 'pass_value'
mock_get_query_results.return_value = pass_value
results_parser_callable = mock.Mock()
results_parser_callable.return_value = [pass_value]
operator = self.__construct_operator('select value from tab1 limit 1;',
pass_value, None, results_parser_callable)
operator.execute()
results_parser_callable.assert_called_once_with([pass_value])
|
garrettcap/Bulletproof-Backup
|
wx/lib/agw/ribbon/gallery.py
|
Python
|
gpl-2.0
| 32,914
| 0.007717
|
"""
A ribbon gallery is like a :class:`ListBox`, but for bitmaps rather than strings.
Description
===========
It displays a collection of bitmaps arranged in a grid and allows the user to
choose one. As there are typically more bitmaps in a gallery than can be displayed
in the space used for a ribbon, a gallery always has scroll buttons to allow the
user to navigate through the entire gallery.
It also has an "extension" button, the behaviour of which is outside the scope of
the gallery control itself, though it typically displays some kind of dialog related
to the gallery.
Events Processing
=================
This class processes the following events:
=================================== ===================================
Event Name Description
=================================== ===================================
``EVT_RIBBONGALLERY_SELECTED`` Triggered when the user selects an item from the gallery. Note that the ID is that of the gallery, not of the item.
``EVT_RIBBONGALLERY_HOVER_CHANGED`` Triggered when the item being hovered over by the user changes. The item in the event will be the new item being hovered, or ``None`` if there is no longer an item being hovered. Note that the ID is that of the gallery, not of the item.
``EVT_RIBBONGALLERY_CLICKED`` Triggered when the user clicks on an item in the gallery.
``EVT_BUTTON`` Triggered when the "extension" button of the gallery is pressed.
=================================== ===================================
"""
import wx
from control import RibbonControl
from art import *
wxEVT_COMMAND_RIBBONGALLERY_HOVER_CHANGED = wx.NewEventType()
wxEVT_COMMAND_RIBBONGALLERY_SELECTED = wx.NewEventType()
wxEVT_COMMAND_RIBBONGALLERY_CLICKED = wx.NewEventType()
EVT_RIBBONGALLERY_HOVER_CHANGED = wx.PyEventBinder(wxEVT_COMMAND_RIBBONGALLERY_HOVER_CHANGED, 1)
EVT_RIBBONGALLERY_SELECTED = wx.PyEventBinder(wxEVT_COMMAND_RIBBONGALLERY_SELECTED, 1)
EVT_RIBBONGALLERY_CLICKED = wx.PyEventBinder(wxEVT_COMMAND_RIBBONGALLERY_CLICKED, 1)
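# Illustrative binding sketch (editorial; handler name is hypothetical):
#     def on_gallery_selected(event):
#         item = event.GetGalleryItem()
#     gallery.Bind(EVT_RIBBONGALLERY_SELECTED, on_gallery_selected)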
class RibbonGalleryEvent(wx.PyCommandEvent):
""" Handles events related to :class:`RibbonGallery`. """
def __init__(self, command_type=None, win_id=0, gallery=None, item=None):
"""
Default class constructor.
:param integer `command_type`: the event type;
:param integer `win_id`: the event identifier;
:param `gallery`: an instance of :class:`RibbonGallery`;
:param `item`: an instance of :class:`RibbonGalleryItem`.
"""
wx.PyCommandEvent.__init__(self, command_type, win_id)
self._gallery = gallery
self._item = item
def GetGallery(self):
""" Returns the gallery which the event relates to. """
return self._gallery
def GetGalleryItem(self):
""" Returns the gallery item which the event relates to, or ``None`` if it does not relate to an item. """
return self._item
def SetGallery(self, gallery):
"""
Sets the gallery relating to this event.
:param `gallery`: an instance of :class:`RibbonGallery`.
"""
self._gallery = gallery
def SetGalleryItem(self, item):
"""
Sets the gallery item relating to this event.
:param `item`: an instance of :class:`RibbonGalleryItem`.
"""
self._item = item
class RibbonGalleryItem(object):
def __init__(self):
self._id = 0
self._is_visible = False
self._client_data = None
self._position = wx.Rect()
def SetId(self, id):
self._id = id
def SetBitmap(self, bitmap):
self._bitmap = bitmap
def GetBitmap(self):
return self._bitmap
def SetIsVisible(self, visible):
self._is_visible = visible
def SetPosition(self, x, y, size):
self._position = wx.RectPS(wx.Point(x, y), size)
def IsVisible(self):
return self._is_visible
def GetPosition(self):
return self._position
def SetClientData(self, data):
self._client_data = data
def GetClientData(self):
return self._client_data
class RibbonGallery(RibbonControl):
"""
A ribbon gallery is like a :class:`ListBox`, but for bitmaps rather than strings.
"""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, agwStyle=0,
name="RibbonGallery"):
"""
Default class constructor.
:param `parent`: pointer to a parent window, typically a :class:`~lib.agw.ribbon.panel.RibbonPanel`;
:param `id`: window identifier. If ``wx.ID_ANY``, will automatically create an
identifier;
:param `pos`: window position. ``wx.DefaultPosition`` indicates that wxPython
should generate a default position for the window;
:param `size`: window size. ``wx.DefaultSize`` indicates that wxPython should
generate a default size for the window. If no suitable size can be found, the
window will be sized to 20x20 pixels so that the window is visible but obviously
not correctly sized;
:param `agwStyle`: the AGW-specific window style, currently unused;
:param `name`: the window name.
"""
RibbonControl.__init__(self, parent, id, pos, size, style=wx.BORDER_NONE, name=name)
self.CommonInit(agwStyle)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnMouseEnter)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnMouseDClick)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
def CommonInit(self, agwStyle):
self._selected_item = None
self._hovered_item = None
self._active_item = None
self._scroll_up_button_rect = wx.Rect(0, 0, 0, 0)
self._scroll_down_button_rect = wx.Rect(0, 0, 0, 0)
self._extension_button_rect = wx.Rect(0, 0, 0, 0)
self._mouse_active_rect = None
self._bitmap_size = wx.Size(64, 32)
self._bitmap_padded_size = self._bitmap_size
self._item_separation_x = 0
self._item_separation_y = 0
self._scroll_amount = 0
self._scroll_limit = 0
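        # The gallery starts scrolled to the top, so "scroll up" begins
        # disabled while "scroll down" and the extension button begin in the
        # normal state (the RIBBON_GALLERY_BUTTON_* constants are assumed to
        # be defined earlier in this module).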
self._up_button_state = RIBBON_GALLERY_BUTTON_DISABLED
self._down_button_state = RIBBON_GALLERY_BUTTON_NORMAL
self._extension_button_state = RIBBON_GALLERY_BUTTON_NORMAL
self._hovered = False
self._items = []
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
def OnMouseEnter(self, event):
"""
Handles the ``wx.EVT_ENTER_WINDOW`` event for :class:`RibbonGallery`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
self._hovered = True
if self._mouse_active_rect is not None and not event.LeftIsDown():
self._mouse_active_rect = None
self._active_item = None
self.Refresh(False)
def OnMouseMove(self, event):
"""
Handles the ``wx.EVT_MOTION`` event for :class:`RibbonGallery`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
refresh = False
pos = event.GetPosition()
result1, self._up_button_state = self.TestButtonHover(self._scroll_up_button_rect, pos, self._up_button_state)
result2, self._down_button_state = self.TestButtonHover(self._scroll_down_button_rect, pos, self._down_button_state)
result3, self._extension_button_state = self.TestButtonHover(self._extension_button_rect, pos, self._extension_button_state)
if result1 or result2 or result3:
refresh = True
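        # TestButtonHover (assumed to be defined elsewhere in this class) is
        # expected to return a (changed, new_state) pair; a hover change on
        # any of the three buttons schedules a repaint via the refresh flag.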
hovered_item = active_item = None
if self._client_rect.Contains(pos):
if s
|
Rediker-Software/litle-sdk-for-python
|
litleSdkPythonTest/certification/TestCert4.py
|
Python
|
mit
| 11409
| 0.007976
|
#Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class certTest4(unittest.TestCase):
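    # Hedged note: these certification tests follow the Litle sandbox
    # convention in which specific magic account and routing numbers trigger
    # canned responses (e.g. accNum "10@BC99999" yields 301 "Invalid Account
    # Number"), so each assertion below checks a predetermined code/message
    # pair rather than a live processing result.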
def test37(self):
verification = litleXmlFields.echeckVerification()
verification.orderId = "37"
verification.amount = 3001
verification.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Tom"
billtoaddress.lastName = "Black"
verification.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "10@BC99999"
echeck.accType = 'Checking'
echeck.routingNum = "053100300"
verification.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(verification)
self.assertEquals("301", response.response)
self.assertEquals("Invalid Account Number", response.message)
def test38(self):
verification = litleXmlFields.echeckVerification()
verification.orderId = "38"
verification.amount = 3002
verification.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "John"
billtoaddress.lastName = "Smith"
billtoaddress.phone = "999-999-999"
verification.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "1099999999"
echeck.accType = 'Checking'
echeck.routingNum = "053000219"
verification.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(verification)
self.assertEquals("000", response.response)
self.assertEquals("Approved", response.message)
def test39(self):
verification = litleXmlFields.echeckVerification()
verification.orderId = "39"
verification.amount = 3003
verification.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Robert"
billtoaddress.lastName = "Jones"
billtoaddress.companyName = "Good Goods Inc"
billtoaddress.phone = "9999999999"
verification.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "3099999999"
echeck.accType = 'Corporate'
echeck.routingNum = "053100300"
verification.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(verification)
self.assertEquals("950", response.response)
self.assertEquals("Declined - Negative Information on File", response.message)
def test40(self):
verification = litleXmlFields.echeckVerification()
verification.orderId = "40"
verification.amount = 3004
verification.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Peter"
billtoaddress.lastName = "Green"
billtoaddress.companyName = "Green Co"
billtoaddress.phone = "9999999999"
verification.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "8099999999"
echeck.accType = 'Corporate'
echeck.routingNum = "063102152"
verification.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(verification)
self.assertEquals("951", response.response)
self.assertEquals("Absolute Decline", response.message)
def test41(self):
sale = litleXmlFields.echeckSale()
        sale.orderId = "41"
sale.amount = 2008
sale.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Mike"
billtoaddress.middleInitial = "J"
billtoaddress.lastName = "Hammer"
sale.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "10@BC99999"
echeck.accType = 'Checking'
        echeck.routingNum = "053100300"
sale.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(sale)
self.assertEquals("301", response.response)
self.assertEquals("Invalid Account Number", response.message)
def test42(self):
sale = litleXmlFields.echeckSale()
sale.orderId = "42"
sale.amount = 2004
sale.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Tom"
billtoaddress.lastName = "Black"
sale.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "4099999992"
echeck.accType = 'Checking'
echeck.routingNum = "211370545"
sale.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(sale)
self.assertEquals("000", response.response)
self.assertEquals("Approved", response.message)
def test43(self):
sale = litleXmlFields.echeckSale()
sale.orderId = "43"
sale.amount = 2007
sale.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Peter"
billtoaddress.lastName = "Green"
billtoaddress.companyName = "Green Co"
sale.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "6099999992"
echeck.accType = 'Corporate'
echeck.routingNum = "211370545"
sale.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(sale)
self.assertEquals("000", response.response)
self.assertEquals("Approved", response.message)
def test44(self):
sale = litleXmlFields.echeckSale()
sale.orderId = "44"
sale.amount = 2009
sale.orderSource = 'telephone'
billtoaddress = litleXmlFields.contact()
billtoaddress.firstName = "Peter"
billtoaddress.lastName = "Green"
billtoaddress.companyName = "Green Co"
sale.billToAddress = billtoaddress
echeck = litleXmlFields.echeck()
echeck.accNum = "9099999992"
echeck.accType = 'Corporate'
echeck.routingNum = "053133052"
sale.echeckOrEcheckToken = echeck
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(sale)
self.assertEquals("900", response.response)
self.assertEquals("Invalid Bank Routing Number", response.message)
def test45(self):
credit = litleXmlFields.echeckCredit()
credit.orderId = "4
|
axonchisel/ax_metrics
|
py/axonchisel/metrics/foundation/query/mql.py
|
Python
|
mit
| 11429
| 0.002362
|
"""
Ax_Metrics - MQL Metrics Query Language Parser
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import copy
import yaml
from datetime import date, datetime
from axonchisel.metrics.foundation.ax.obj import AxObj
from axonchisel.metrics.foundation.chrono.ghost import Ghost
from .query import Query
from .queryset import QuerySet
from .qdata import QMetric
# ----------------------------------------------------------------------------
class MQLParseError(Exception):
"""Error parsing MQL"""
pass
# ----------------------------------------------------------------------------
class QueryParser(AxObj):
"""
Given raw MQL YAML strings, parse into Query objects.
Supports multiple parse passes on the same object, allowing defaults and
extensions of defaults.
Usage: Create, parse, parse, parse, ..., get, destroy (or reset).
Variables prefixed with "y" reference parsed YAML currently existing as
Python structures.
Variables prefixed with "ystr_" reference raw YAML strings.
Example partial YAML:
---
id: pct_new_paid_accounts_rolling_30d
data:
metrics:
- metric: num_new_paid_accounts
div: num_total_paid_accounts
goal: 10
goal_mode: CONSTANT
timeframe:
range_val: 3
range_unit: MONTH
gran_unit: DAY
mode: CURRENT
        smooth_unit: DAY
        smooth_val: 30
reframe_dt: 2014-11-01
format:
some_erout_plugin_id:
type: type1
title: "New Paid Accounts %"
subtitle: "(rolling 30d)"
ghosts:
- PREV_PERIOD1
- PREV_YEAR1
- PREV_YEAR2
"""
def __init__(self, base=None):
"""
Initialize parser and internal Query.
If base Query specified, copies as base to extend.
After parsing, either destroy or call reset to parse a new object.
"""
self.reset(base=base)
#
# Public Methods
#
def reset(self, base=None):
"""
Reset and prepare new Query object for parsing.
If base Query specified, copies as base to extend.
"""
if base is not None:
self._assert_type("base", base, Query)
base = copy.deepcopy(base)
else:
base = Query()
self._query = base
def get_query(self):
"""
Get the wrapped up Query object after parsing.
"""
return self._query
def parse_ystr_query(self, ystr_query):
"""
Given raw MQL YAML str, parse into internal Query.
Only set attributes that are specified, leaving others at default.
Can be called multiple times to build up the Query.
Returns currently wrapped Query.
"""
        # safe_load avoids constructing arbitrary Python objects from YAML tags
        yquery = yaml.safe_load(ystr_query)
return self.parse_yquery(yquery)
def parse_yquery(self, yquery):
"""
Given dict as parsed from YAML, parse into internal Query.
Only set attributes that are specified, leaving others at default.
Can be called multiple times to build up the Query.
Returns currently wrapped Query.
"""
self._parse_item(yquery, 'id')
self._parse_data(yquery)
self._parse_timeframe(yquery)
self._parse_format(yquery)
self._parse_ghosts(yquery)
return self.get_query()
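    # Usage sketch (hedged): layering defaults and overrides, assuming
    # `ystr_defaults` and `ystr_query` are caller-supplied MQL YAML strings:
    #
    #     parser = QueryParser()
    #     parser.parse_ystr_query(ystr_defaults)   # base values first
    #     parser.parse_ystr_query(ystr_query)      # then extensions
    #     query = parser.get_query()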
#
# Internal Methods
#
def _parse_item(self, yobj, yname, attr=None, obj=None):
"""
Set object attr to value of specific item in yobj,
but only if present.
If attr unspecified, uses same name as yname.
If obj unspecified, uses self._query.
"""
if obj is None:
obj = self._query
if yname not in yobj:
return
if attr is None:
attr = yname
setattr(obj, attr, yobj[yname])
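    # Example (hedged): self._parse_item(ymetric, 'div', 'div_metric_id')
    # copies ymetric['div'] onto the target object's div_metric_id attribute
    # and is a silent no-op when the 'div' key is absent.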
def _parse_data(self, yquery):
"""
Helper - Parse data section in yquery.
"""
ydata = yquery.get('data')
if ydata is None:
raise MQLParseError("Query #{query.id} missing data section"
.format(query=self._query))
ymetrics = ydata.get('metrics')
if ymetrics is None:
raise MQLParseError("Query #{query.id} missing data.metrics"
.format(query=self._query))
if not isinstance(ymetrics, list):
raise MQLParseError("Query #{query.id} data.metrics not list: {t}"
.format(query=self._query, t=type(ymetrics)))
for ymetric in ymetrics:
qmetric1 = self._parse_data_qmetric(ymetric)
self._query._qdata.add_qmetric(qmetric1)
def _parse_data_qmetric(self, ymetric):
"""
Helper - parse single data.metric definition, return QMetric.
"""
if not isinstance(ymetric, dict):
raise MQLParseError("Query #{query.id} data.metrics has non-dict: {t}"
.format(query=self._query, t=type(ymetric)))
qmetric1 = QMetric()
def _parse_ymetric_item(yname, attr=None):
self._parse_item(ymetric, yname, attr=attr, obj=qmetric1)
_parse_ymetric_item('id')
_parse_ymetric_item('metric', 'metric_id')
_parse_ymetric_item('div', 'div_metric_id')
_parse_ymetric_item('label')
_parse_ymetric_item('goal')
_parse_ymetric_item('goal_mode')
_parse_ymetric_item('rag')
_parse_ymetric_item('impact')
return qmetric1
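    # Example (hedged): the class docstring's metric entry (`metric:
    # num_new_paid_accounts`, `div: num_total_paid_accounts`, `goal: 10`,
    # `goal_mode: CONSTANT`) produces a QMetric with metric_id,
    # div_metric_id, goal, and goal_mode set and all other fields left at
    # their QMetric() defaults.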
def _parse_timeframe(self, yquery):
"""
Helper - parse timeframe section in yquery.
"""
ytimeframe = yquery.get('timeframe')
if ytimeframe is None:
raise MQLParseError("Query #{query.id} missing timeframe section"
.format(query=self._query))
if not isinstance(ytimeframe, dict):
raise MQLParseError("Query #{query.id} timeframe not dict: {t}"
.format(query=self._query, t=type(ytimeframe)))
def _parse_ytimeframe_item(yname, attr=None):
self._parse_item(ytimeframe, yname, attr=attr,
obj=self._query.qtimeframe.tmfrspec)
_parse_ytimeframe_item('id')
_parse_ytimeframe_item('range_unit')
_parse_ytimeframe_item('range_val')
_parse_ytimeframe_item('gran_unit')
_parse_ytimeframe_item('smooth_unit')
_parse_ytimeframe_item('smooth_val')
_parse_ytimeframe_item('mode')
_parse_ytimeframe_item('accumulate')
_parse_ytimeframe_item('allow_overflow_begin')
_parse_ytimeframe_item('allow_overflow_end')
if 'reframe_dt' in ytimeframe:
rdt = ytimeframe['reframe_dt']
if not isinstance(rdt, date):
raise MQLParseError(
"Query #{query.id} timeframe reframe_dt not date: {t}"
.format(query=self._query, t=type(rdt)))
if not isinstance(rdt, datetime):
rdt = datetime(rdt.year, rdt.month, rdt.day)
self._query.qtimeframe.tmfrspec.reframe_dt = rdt
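    # Note: YAML parses `reframe_dt: 2014-11-01` into a datetime.date, which
    # the branch above widens to datetime(2014, 11, 1, 0, 0) before storing
    # it on the timeframe spec.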
def _parse_format(self, yquery):
"""
Helper - parse format section in yquery.
"""
yformat = yquery.get('format')
if yformat is None:
return
if not isinstance(yformat, dict):
raise MQLParseError("Query #{query.id} format not dict: {t}"
.format(query=self._query, t=type(yformat)))
qformat1 = self._query.qformat
for domain, yoptions in yformat.iteritems():
if not isinstance(yoptions, dict):
raise MQLParseError(
"Query #{query.id} format domain #{domain} not dict: {t}"
.format(query=self._query, domain=domain, t=type(yoptions)))
dopts = qformat1.get_domain(domain)
for k, v in yoptions.iteritems():
|
T2DREAM/t2dream-portal
|
src/encoded/tests/test_auditor.py
|
Python
|
mit
| 4492
| 0.002004
|
import pytest
def raising_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
raise AuditFailure('testchecker', 'Missing checker1')
def returning_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
return AuditFailure('testchecker', 'Missing checker1')
def yielding_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
yield AuditFailure('testchecker', 'Missing checker1')
def has_condition1(value, system):
return value.get('condition1')
@pytest.fixture(params=[
raising_checker,
returning_checker,
yielding_checker,
])
def auditor(request):
from snovault.auditor import Auditor
auditor = Auditor()
auditor.add_audit_checker(request.param, 'test')
return auditor
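# The parametrized fixture above runs each test once per checker style:
# raising, returning, or yielding an AuditFailure are all normalized into
# the same flat error list by Auditor.audit().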
@pytest.fixture
def auditor_conditions():
from snovault.auditor import Auditor
    auditor = Auditor()
auditor.add_audit_checker(raising_checker, 'test', has_condition1)
return auditor
@pytest.fixture
def dummy_request(registry):
from pyramid.testing import DummyRequest
_embed = {}
request = DummyRequest(registry=registry, _embed=_embed, embed=lambda path: _embed[path])
return request
def test_audit_pass(auditor, dummy_request):
    value = {'checker1': True}
dummy_request._embed['/foo/@@embedded'] = value
errors = auditor.audit(request=dummy_request, path='/foo/', types='test')
assert errors == []
def test_audit_failure(auditor, dummy_request):
value = {}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor.audit(request=dummy_request, path='/foo/', types='test')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_audit_conditions(auditor_conditions, dummy_request):
value = {}
dummy_request._embed['/foo/@@embedded'] = value
assert auditor_conditions.audit(request=dummy_request, path='/foo/', types='test') == []
value = {'condition1': True}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor_conditions.audit(request=dummy_request, path='/foo/', types='test')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_declarative_config(dummy_request):
from snovault.interfaces import AUDITOR
from pyramid.config import Configurator
config = Configurator()
config.include('snovault.config')
config.include('snovault.auditor')
config.include('.testing_auditor')
config.commit()
auditor = config.registry[AUDITOR]
value = {'condition1': True}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor.audit(request=dummy_request, path='/foo/', types='TestingLinkSource')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_link_target_audit_fail(testapp):
target = {'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f', 'status': 'CHECK'}
testapp.post_json('/testing_link_target', target, status=201)
res = testapp.get('/%s/@@index-data' % target['uuid']).maybe_follow()
errors_dict = res.json['audit']
errors_list = []
for error_type in errors_dict:
errors_list.extend(errors_dict[error_type])
errors = [e for e in errors_list if e['name'] == 'testing_link_target_status']
error, = errors
assert error['detail'] == 'Missing reverse items'
assert error['category'] == 'status'
assert error['level'] == 0
assert error['path'] == res.json['object']['@id']
def test_link_target_audit_pass(testapp):
target = {'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f', 'status': 'CHECK'}
testapp.post_json('/testing_link_target', target, status=201)
source = {'uuid': '16157204-8c8f-4672-a1a4-14f4b8021fcd', 'target': target['uuid']}
testapp.post_json('/testing_link_source', source, status=201)
res = testapp.get('/%s/@@index-data' % target['uuid']).maybe_follow()
errors_dict = res.json['audit']
errors_list = []
for error_type in errors_dict:
errors_list.extend(errors_dict[error_type])
errors = [e for e in errors_list if e['name'] == 'testing_link_target_status']
assert errors == []
|